Dataset columns (all string-typed; values are min–max lengths from the source dump):
title: 1–185
diff: 0–32.2M
body: 0–123k
url: 57–58
created_at: 20
closed_at: 20
merged_at: 20
updated_at: 20
Backport PR #27767 on branch 0.25.x (BUG: Fix windowing over read-only arrays)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 943a6adb7944e..66b760a76dad3 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -118,6 +118,7 @@ Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`) +- Bug in windowing over read-only arrays (:issue:`27766`) - - diff --git a/pandas/core/window.py b/pandas/core/window.py index 86574208a3fc0..30aa2f2cea8dc 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -235,8 +235,10 @@ def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray: "cannot handle this type -> {0}" "".format(values.dtype) ) - # Always convert inf to nan - values[np.isinf(values)] = np.NaN + # Convert inf to nan for C funcs + inf = np.isinf(values) + if inf.any(): + values = np.where(inf, np.nan, values) return values diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index c7177e1d3914f..f0787ab3d191f 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -326,3 +326,11 @@ def test_rolling_axis_count(self, axis_frame): result = df.rolling(2, axis=axis_frame).count() tm.assert_frame_equal(result, expected) + + def test_readonly_array(self): + # GH-27766 + arr = np.array([1, 3, np.nan, 3, 5]) + arr.setflags(write=False) + result = pd.Series(arr).rolling(2).mean() + expected = pd.Series([np.nan, 2, np.nan, np.nan, 4]) + tm.assert_series_equal(result, expected)
Backport PR #27767: BUG: Fix windowing over read-only arrays
https://api.github.com/repos/pandas-dev/pandas/pulls/27782
2019-08-06T15:39:47Z
2019-08-06T20:52:55Z
2019-08-06T20:52:55Z
2019-08-06T20:52:56Z
DOC: Add CoC to the README
https://github.com/pandas-dev/pandas/pull/27780.diff
- [ ] closes # xxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27780
2019-08-06T14:26:35Z
2019-08-10T04:17:14Z
null
2019-08-10T07:52:36Z
Avoid calling S3File.s3
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 943a6adb7944e..7d78a8fe6dd84 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -103,7 +103,7 @@ MultiIndex I/O ^^^ -- +- Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`) - - diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 82c460300582b..6fc70e9f4a737 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -184,12 +184,14 @@ def write( def read(self, path, columns=None, **kwargs): if is_s3_url(path): + from pandas.io.s3 import get_file_and_filesystem + # When path is s3:// an S3File is returned. # We need to retain the original path(str) while also # pass the S3File().open function to fsatparquet impl. - s3, _, _, should_close = get_filepath_or_buffer(path) + s3, filesystem = get_file_and_filesystem(path) try: - parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open) + parquet_file = self.api.ParquetFile(path, open_with=filesystem.open) finally: s3.close() else: diff --git a/pandas/io/s3.py b/pandas/io/s3.py index 0a7c082fec51c..7e0a37e8cba20 100644 --- a/pandas/io/s3.py +++ b/pandas/io/s3.py @@ -1,8 +1,11 @@ """ s3 support for remote file interactivity """ +from typing import IO, Any, Optional, Tuple from urllib.parse import urlparse as parse_url from pandas.compat._optional import import_optional_dependency +from pandas._typing import FilePathOrBuffer + s3fs = import_optional_dependency( "s3fs", extra="The s3fs package is required to handle s3 files." ) @@ -14,9 +17,9 @@ def _strip_schema(url): return result.netloc + result.path -def get_filepath_or_buffer( - filepath_or_buffer, encoding=None, compression=None, mode=None -): +def get_file_and_filesystem( + filepath_or_buffer: FilePathOrBuffer, mode: Optional[str] = None +) -> Tuple[IO, Any]: from botocore.exceptions import NoCredentialsError if mode is None: @@ -24,7 +27,7 @@ def get_filepath_or_buffer( fs = s3fs.S3FileSystem(anon=False) try: - filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) + file = fs.open(_strip_schema(filepath_or_buffer), mode) except (FileNotFoundError, NoCredentialsError): # boto3 has troubles when trying to access a public file # when credentialed... @@ -33,5 +36,15 @@ def get_filepath_or_buffer( # A NoCredentialsError is raised if you don't have creds # for that bucket. fs = s3fs.S3FileSystem(anon=True) - filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) - return filepath_or_buffer, None, compression, True + file = fs.open(_strip_schema(filepath_or_buffer), mode) + return file, fs + + +def get_filepath_or_buffer( + filepath_or_buffer: FilePathOrBuffer, + encoding: Optional[str] = None, + compression: Optional[str] = None, + mode: Optional[str] = None, +) -> Tuple[IO, Optional[str], Optional[str], bool]: + file, _fs = get_file_and_filesystem(filepath_or_buffer, mode=mode) + return file, None, compression, True
Avoid calling `S3File.s3` when reading from S3 using fastparquet: this attribute was removed in s3fs 0.3.0. This change avoids accessing it by using a new method `get_file_and_filesystem`, which returns the filesystem in addition to the file. - [x] closes #27756 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
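The call pattern introduced here can be sketched outside pandas as well: fastparquet keeps the original string path but opens member files through an `open_with` callable, so handing it the filesystem's `open` avoids the removed `S3File.s3` attribute. A minimal illustration, assuming s3fs and fastparquet are installed; the bucket path is a hypothetical placeholder:

```python
import s3fs
from fastparquet import ParquetFile

# Hypothetical public bucket/key, for illustration only.
path = "my-bucket/data.parquet"

fs = s3fs.S3FileSystem(anon=True)

# fastparquet retains the string path for metadata but opens files via the
# supplied callable; fs.open works on any s3fs version, unlike the
# S3File.s3 attribute removed in s3fs 0.3.0.
pf = ParquetFile(path, open_with=fs.open)
df = pf.to_pandas()
```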
https://api.github.com/repos/pandas-dev/pandas/pulls/27777
2019-08-06T12:48:03Z
2019-08-12T19:10:50Z
2019-08-12T19:10:50Z
2019-08-12T19:10:55Z
DEPR: Removed the previously deprecated ExtensionArray._formatting_values
diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst index 407aab4bb1f1b..78e8734e9b5ff 100644 --- a/doc/source/reference/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -34,7 +34,6 @@ objects. api.extensions.ExtensionArray._concat_same_type api.extensions.ExtensionArray._formatter - api.extensions.ExtensionArray._formatting_values api.extensions.ExtensionArray._from_factorized api.extensions.ExtensionArray._from_sequence api.extensions.ExtensionArray._from_sequence_of_strings diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index c7f8bb70e3461..bca7bf8cbefbd 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -65,7 +65,7 @@ Removal of prior version deprecations/changes - Changed the the default value of `inplace` in :meth:`DataFrame.set_index` and :meth:`Series.set_axis`. It now defaults to False (:issue:`27600`) - :meth:`pandas.Series.str.cat` now defaults to aligning ``others``, using ``join='left'`` (:issue:`27611`) - :meth:`pandas.Series.str.cat` does not accept list-likes *within* list-likes anymore (:issue:`27611`) -- +- Removed the previously deprecated :meth:`ExtensionArray._formatting_values`. Use :attr:`ExtensionArray._formatter` instead. (:issue:`23601`) .. _whatsnew_1000.performance: diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index e517be4f03a16..00e1d092ffa22 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -66,7 +66,6 @@ class ExtensionArray: unique _concat_same_type _formatter - _formatting_values _from_factorized _from_sequence _from_sequence_of_strings @@ -908,21 +907,6 @@ def _formatter(self, boxed: bool = False) -> Callable[[Any], Optional[str]]: return str return repr - def _formatting_values(self) -> np.ndarray: - # At the moment, this has to be an array since we use result.dtype - """ - An array of values to be printed in, e.g. the Series repr - - .. deprecated:: 0.24.0 - - Use :meth:`ExtensionArray._formatter` instead. - - Returns - ------- - array : ndarray - """ - return np.array(self) - # ------------------------------------------------------------------------ # Reshaping # ------------------------------------------------------------------------ diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 9f3aa699cfaf4..12dcabdb0f680 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -68,13 +68,7 @@ ) import pandas.core.algorithms as algos -from pandas.core.arrays import ( - Categorical, - DatetimeArray, - ExtensionArray, - PandasDtype, - TimedeltaArray, -) +from pandas.core.arrays import Categorical, DatetimeArray, PandasDtype, TimedeltaArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction import extract_array @@ -209,10 +203,6 @@ def internal_values(self, dtype=None): """ return self.values - def formatting_values(self): - """Return the internal values used by the DataFrame/SeriesFormatter""" - return self.internal_values() - def get_values(self, dtype=None): """ return an internal format, currently just the ndarray @@ -1867,21 +1857,6 @@ def _slice(self, slicer): return self.values[slicer] - def formatting_values(self): - # Deprecating the ability to override _formatting_values. - # Do the warning here, it's only user in pandas, since we - # have to check if the subclass overrode it. 
- fv = getattr(type(self.values), "_formatting_values", None) - if fv and fv != ExtensionArray._formatting_values: - msg = ( - "'ExtensionArray._formatting_values' is deprecated. " - "Specify 'ExtensionArray._formatter' instead." - ) - warnings.warn(msg, FutureWarning, stacklevel=10) - return self.values._formatting_values() - - return self.values - def concat_same_type(self, to_concat, placement=None): """ Concatenate list of single blocks of the same type. diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index b30ddbc383906..1c31542daa5de 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1582,10 +1582,6 @@ def external_values(self): def internal_values(self): return self._block.internal_values() - def formatting_values(self): - """Return the internal values used by the DataFrame/SeriesFormatter""" - return self._block.formatting_values() - def get_values(self): """ return a dense type view """ return np.array(self._block.to_dense(), copy=False) diff --git a/pandas/core/series.py b/pandas/core/series.py index 9e317d365ccb8..4e64a25e430eb 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -562,13 +562,6 @@ def _values(self): """ return self._data.internal_values() - def _formatting_values(self): - """ - Return the values that can be formatted (used by SeriesFormatter - and DataFrameFormatter). - """ - return self._data.formatting_values() - def get_values(self): """ Same as values (but handles sparseness conversions); is a view. diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index d8a370d77ea31..61af935bd8227 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -336,9 +336,11 @@ def _get_formatted_index(self) -> Tuple[List[str], bool]: return fmt_index, have_header def _get_formatted_values(self) -> List[str]: - values_to_format = self.tr_series._formatting_values() return format_array( - values_to_format, None, float_format=self.float_format, na_rep=self.na_rep + self.tr_series._values, + None, + float_format=self.float_format, + na_rep=self.na_rep, ) def to_string(self) -> str: @@ -903,9 +905,8 @@ def to_latex( def _format_col(self, i: int) -> List[str]: frame = self.tr_frame formatter = self._get_formatter(i) - values_to_format = frame.iloc[:, i]._formatting_values() return format_array( - values_to_format, + frame.iloc[:, i]._values, formatter, float_format=self.float_format, na_rep=self.na_rep, diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 9dec023f4073a..3ac9d37ccf4f3 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -392,17 +392,6 @@ def test_ufunc_fallback(data): tm.assert_series_equal(result, expected) -def test_formatting_values_deprecated(): - class DecimalArray2(DecimalArray): - def _formatting_values(self): - return np.array(self) - - ser = pd.Series(DecimalArray2([decimal.Decimal("1.0")])) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - repr(ser) - - def test_array_ufunc(): a = to_decimal([1, 2, 3]) result = np.exp(a) diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py index 1a4f84e2c0fd2..6311070cfe2bb 100644 --- a/pandas/tests/extension/test_external_block.py +++ b/pandas/tests/extension/test_external_block.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -from pandas.core.internals import BlockManager, 
SingleBlockManager +from pandas.core.internals import BlockManager from pandas.core.internals.blocks import Block, NonConsolidatableMixIn @@ -10,9 +10,6 @@ class CustomBlock(NonConsolidatableMixIn, Block): _holder = np.ndarray - def formatting_values(self): - return np.array(["Val: {}".format(i) for i in self.values]) - def concat_same_type(self, to_concat, placement=None): """ Always concatenate disregarding self.ndim as the values are @@ -35,22 +32,6 @@ def df(): return pd.DataFrame(block_manager) -def test_custom_repr(): - values = np.arange(3, dtype="int64") - - # series - block = CustomBlock(values, placement=slice(0, 3)) - - s = pd.Series(SingleBlockManager(block, pd.RangeIndex(3))) - assert repr(s) == "0 Val: 0\n1 Val: 1\n2 Val: 2\ndtype: int64" - - # dataframe - block = CustomBlock(values, placement=slice(0, 1)) - blk_mgr = BlockManager([block], [["col"], range(3)]) - df = pd.DataFrame(blk_mgr) - assert repr(df) == " col\n0 Val: 0\n1 Val: 1\n2 Val: 2" - - def test_concat_series(): # GH17728 values = np.arange(3, dtype="int64")
and revert #17143, see https://github.com/pandas-dev/pandas/pull/17143#issuecomment-505080265
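For extension authors migrating off the removed hook, the supported replacement is `_formatter`, which returns a per-element rendering callable instead of an ndarray of pre-rendered values. A minimal sketch mirroring the removed `DecimalArray2` test, assuming the `DecimalArray` test helper is importable from the pandas test suite:

```python
import decimal

import pandas as pd
from pandas.tests.extension.decimal.array import DecimalArray


class DecimalArray2(DecimalArray):
    def _formatter(self, boxed=False):
        # Replacement for the removed _formatting_values: return a callable
        # that renders a single element, not a whole ndarray of values.
        return lambda x: "Val: {}".format(x)


ser = pd.Series(DecimalArray2([decimal.Decimal("1.0")]))
repr(ser)  # elements are rendered through the _formatter callable
```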
https://api.github.com/repos/pandas-dev/pandas/pulls/27774
2019-08-06T10:31:33Z
2019-08-07T06:23:26Z
2019-08-07T06:23:26Z
2019-08-08T14:20:03Z
BUG: _can_use_numexpr fails when passed large Series
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index dfa216b1db56e..21f8f33e2b439 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -54,7 +54,7 @@ Numeric ^^^^^^^ - Bug in :meth:`Series.interpolate` when using a timezone aware :class:`DatetimeIndex` (:issue:`27548`) - Bug when printing negative floating point complex numbers would raise an ``IndexError`` (:issue:`27484`) -- +- Bug where :class:`DataFrame` arithmetic operators such as :meth:`DataFrame.mul` with a :class:`Series` with axis=1 would raise an ``AttributeError`` on :class:`DataFrame` larger than the minimum threshold to invoke numexpr (:issue:`27636`) - Conversion diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index d9dc194d484ae..1959242a88897 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -76,16 +76,17 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): # required min elements (otherwise we are adding overhead) if np.prod(a.shape) > _MIN_ELEMENTS: - # check for dtype compatibility dtypes = set() for o in [a, b]: - if hasattr(o, "dtypes"): + # Series implements dtypes, check for dimension count as well + if hasattr(o, "dtypes") and o.ndim > 1: s = o.dtypes.value_counts() if len(s) > 1: return False dtypes |= set(s.index.astype(str)) - elif isinstance(o, np.ndarray): + # ndarray and Series Case + elif hasattr(o, "dtype"): dtypes |= {o.dtype.name} # allowed are a superset diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 4070624985068..ca514f62f451d 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -66,7 +66,7 @@ def run_arithmetic(self, df, other, assert_func, check_dtype=False, test_flex=Tr operator_name = "truediv" if test_flex: - op = lambda x, y: getattr(df, arith)(y) + op = lambda x, y: getattr(x, arith)(y) op.__name__ = arith else: op = getattr(operator, operator_name) @@ -318,7 +318,6 @@ def testit(): for f in [self.frame, self.frame2, self.mixed, self.mixed2]: for cond in [True, False]: - c = np.empty(f.shape, dtype=np.bool_) c.fill(cond) result = expr.where(c, f.values, f.values + 1) @@ -431,3 +430,29 @@ def test_bool_ops_column_name_dtype(self, test_input, expected): # GH 22383 - .ne fails if columns containing column name 'dtype' result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]]) assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv") + ) + @pytest.mark.parametrize("axis", (0, 1)) + def test_frame_series_axis(self, axis, arith): + # GH#26736 Dataframe.floordiv(Series, axis=1) fails + if axis == 1 and arith == "floordiv": + pytest.xfail("'floordiv' does not succeed with axis=1 #27636") + + df = self.frame + if axis == 1: + other = self.frame.iloc[0, :] + else: + other = self.frame.iloc[:, 0] + + expr._MIN_ELEMENTS = 0 + + op_func = getattr(df, arith) + + expr.set_use_numexpr(False) + expected = op_func(other, axis=axis) + expr.set_use_numexpr(True) + + result = op_func(other, axis=axis) + assert_frame_equal(expected, result)
This fixes a regression introduced in #27145 where _can_use_numexpr would fail if passed a Series rather than a DataFrame. I decided not to use run_arithmetic in the test suite because there is a separate issue when running floordiv that is out of scope for this fix. I will open a separate issue about improving the test coverage of "test_expressions.py", as it currently does not check any of the arguments you can pass to the operators, such as 'axis', 'level' and 'fill_value'. - [x] closes #27636 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
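A minimal repro sketch of the regression, assuming numexpr is installed and `_MIN_ELEMENTS` is at its default of 10,000:

```python
import numpy as np
import pandas as pd

# Large enough that np.prod(df.shape) exceeds the numexpr element threshold.
df = pd.DataFrame(np.random.randn(30_000, 4), columns=list("abcd"))
row = df.iloc[0, :]  # a Series: has .dtypes (a scalar dtype) but ndim == 1

# Before this fix, _can_use_numexpr called row.dtypes.value_counts(), which
# raised AttributeError because Series.dtypes is a numpy dtype; the patched
# check routes anything one-dimensional through its single .dtype instead.
result = df.mul(row, axis=1)
print(result.shape)
```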
https://api.github.com/repos/pandas-dev/pandas/pulls/27773
2019-08-06T10:27:57Z
2019-08-19T17:20:24Z
2019-08-19T17:20:24Z
2019-08-19T17:20:29Z
[BLD] Add script that fails build if git tags do not exist
diff --git a/.travis.yml b/.travis.yml index 9be4291d10874..79fecc41bec0d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,7 +21,7 @@ env: git: # for cloning - depth: 2000 + depth: false matrix: fast_finish: true @@ -63,7 +63,7 @@ before_install: - pwd - uname -a - git --version - - git tag + - ./ci/check_git_tags.sh # Because travis runs on Google Cloud and has a /etc/boto.cfg, # it breaks moto import, see: # https://github.com/spulec/moto/issues/1771 diff --git a/ci/check_git_tags.sh b/ci/check_git_tags.sh new file mode 100755 index 0000000000000..9dbcd4f98683e --- /dev/null +++ b/ci/check_git_tags.sh @@ -0,0 +1,28 @@ +set -e + +if [[ ! $(git tag) ]]; then + echo "No git tags in clone, please sync your git tags with upstream using:" + echo " git fetch --tags upstream" + echo " git push --tags origin" + echo "" + echo "If the issue persists, the clone depth needs to be increased in .travis.yml" + exit 1 +fi + +# This will error if there are no tags and we omit --always +DESCRIPTION=$(git describe --long --tags) +echo "$DESCRIPTION" + +if [[ "$DESCRIPTION" == *"untagged"* ]]; then + echo "Unable to determine most recent tag, aborting build" + exit 1 +else + if [[ "$DESCRIPTION" != *"g"* ]]; then + # A good description will have the hash prefixed by g, a bad one will be + # just the hash + echo "Unable to determine most recent tag, aborting build" + exit 1 + else + echo "$(git tag)" + fi +fi diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 479e55c86fcd1..65b2dab1b02a8 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,4 +1,5 @@ import collections +from distutils.version import LooseVersion from functools import partial import string @@ -117,3 +118,13 @@ def test_git_version(): git_version = pd.__git_version__ assert len(git_version) == 40 assert all(c in string.hexdigits for c in git_version) + + +def test_version_tag(): + version = pd.__version__ + try: + version > LooseVersion("0.0.1") + except TypeError: + raise ValueError( + "No git tags exist, please sync tags between upstream and your repo" + )
The Travis build process can fail in hard-to-decipher ways if git tags are not synced between `upstream` and a developer's repo. This failure mode would occur in the main repo if we go 2000 commits (current clone depth, ~9 months) without tagging a release. This PR explicitly fails the build with instructions on how to resolve it if this situation is encountered. The underlying issue is that `versioneer` and the underlying `git describe` rely on tags to exist in the local repo for the purposes of creating a version description like: ``` v0.25.0-116-gee54d95952 ``` As Travis makes a shallow clone, these tags will not exist if: - A developer has not explicitly synced tags between repos (the default git behavior) - `pandas` goes 2000 commits without tagging a commit If tags do not exist, we get something like: ``` 0+untagged.2000.g66ada8c ``` which causes tests involving downstream libraries like `pyarrow`, `statsmodels`, etc. to fail their internal version checks in sometimes indecipherable ways like this: ``` =================================== FAILURES =================================== ____________________________ TestFeather.test_error ____________________________ [gw0] linux -- Python 3.7.3 /home/travis/miniconda3/envs/pandas-dev/bin/python self = <pandas.tests.io.test_feather.TestFeather object at 0x7f353ab95c18> def test_error(self): for obj in [ pd.Series([1, 2, 3]), 1, "foo", pd.Timestamp("20130101"), np.array([1, 2, 3]), ]: > self.check_error_on_write(obj, ValueError) pandas/tests/io/test_feather.py:49: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ pandas/tests/io/test_feather.py:27: in check_error_on_write to_feather(df, path) pandas/io/feather_format.py:24: in to_feather from pyarrow import feather _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ from distutils.version import LooseVersion import os import six import pandas as pd import warnings > from pyarrow.compat import pdapi E ImportError: cannot import name 'pdapi' from 'pyarrow.compat' (/home/travis/miniconda3/envs/pandas-dev/lib/python3.7/site-packages/pyarrow/compat.py) ``` With some digging, you can eventually find that the root cause is: ``` self = LooseVersion ('0+untagged.2000.g66ada8c'), other = LooseVersion ('0.21') def _cmp (self, other): if isinstance(other, str): other = LooseVersion(other) if self.version == other.version: return 0 > if self.version < other.version: E TypeError: '<' not supported between instances of 'str' and 'int' ``` Syncing tags between repos resolves the issue. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
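The root-cause comparison from that traceback is easy to demonstrate in isolation; a sketch of why the untagged description breaks `LooseVersion` ordering on Python 3:

```python
from distutils.version import LooseVersion

# A proper git describe output compares fine: the second component
# (25 vs 21) decides the ordering before any string component is reached.
print(LooseVersion("0.25.0-116-gee54d95952") > LooseVersion("0.21"))  # True

# The untagged fallback parses to [0, '+', 'untagged', ...]; comparing
# '+' (str) against 21 (int) raises TypeError, exactly as in the log above.
try:
    LooseVersion("0+untagged.2000.g66ada8c") < LooseVersion("0.21")
except TypeError as err:
    print("version check failed:", err)
```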
https://api.github.com/repos/pandas-dev/pandas/pulls/27770
2019-08-06T03:53:13Z
2019-08-07T13:27:04Z
2019-08-07T13:27:04Z
2019-08-07T13:27:04Z
REF: Make CategoricalIndex comparison defer to Categorical comparison
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index d22b4bd4d3f2b..984f1835bd078 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -89,6 +89,9 @@ def f(self, other): return NotImplemented other = lib.item_from_zerodim(other) + if is_list_like(other) and len(other) != len(self): + # TODO: Could this fail if the categories are listlike objects? + raise ValueError("Lengths must match.") if not self.ordered: if op in ["__lt__", "__gt__", "__le__", "__ge__"]: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 356ae20b2240a..bd998656914c6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -48,6 +48,7 @@ ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( + ABCCategorical, ABCDataFrame, ABCDateOffset, ABCDatetimeArray, @@ -99,11 +100,14 @@ def _make_comparison_op(op, cls): def cmp_method(self, other): - if isinstance(other, (np.ndarray, Index, ABCSeries)): + if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)): if other.ndim > 0 and len(self) != len(other): raise ValueError("Lengths must match to compare") - if is_object_dtype(self) and not isinstance(self, ABCMultiIndex): + if is_object_dtype(self) and isinstance(other, ABCCategorical): + left = type(other)(self._values, dtype=other.dtype) + return op(left, other) + elif is_object_dtype(self) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): result = ops._comp_method_OBJECT_ARRAY(op, self.values, other) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 0f6aa711adc90..8bfa7e8d20b4f 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -899,31 +899,12 @@ def _make_compare(op): opname = "__{op}__".format(op=op.__name__) def _evaluate_compare(self, other): - - # if we have a Categorical type, then must have the same - # categories - if isinstance(other, CategoricalIndex): - other = other._values - elif isinstance(other, Index): - other = self._create_categorical(other._values, dtype=self.dtype) - - if isinstance(other, (ABCCategorical, np.ndarray, ABCSeries)): - if len(self.values) != len(other): - raise ValueError("Lengths must match to compare") - - if isinstance(other, ABCCategorical): - if not self.values.is_dtype_equal(other): - raise TypeError( - "categorical index comparisons must " - "have the same categories and ordered " - "attributes" - ) - - result = op(self.values, other) + with np.errstate(all="ignore"): + result = op(self.array, other) if isinstance(result, ABCSeries): # Dispatch to pd.Categorical returned NotImplemented # and we got a Series back; down-cast to ndarray - result = result.values + result = result._values return result return compat.set_function_name(_evaluate_compare, opname, cls) diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 4ab1941e3493f..c78d5c79453ab 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -1038,8 +1038,14 @@ def wrapper(self, other, axis=None): # Defer to DataFrame implementation; fail early return NotImplemented - elif isinstance(other, ABCSeries) and not self._indexed_same(other): + if isinstance(other, ABCSeries) and not self._indexed_same(other): raise ValueError("Can only compare identically-labeled Series objects") + elif ( + is_list_like(other) + and len(other) != len(self) + and not isinstance(other, (set, frozenset)) + ): + raise 
ValueError("Lengths must match") elif is_categorical_dtype(self): # Dispatch to Categorical implementation; CategoricalIndex diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 280b0a99c7e68..67bf9bd20e716 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -823,6 +823,11 @@ def test_equals_categorical(self): msg = ( "categorical index comparisons must have the same categories" " and ordered attributes" + "|" + "Categoricals can only be compared if 'categories' are the same. " + "Categories are different lengths" + "|" + "Categoricals can only be compared if 'ordered' is the same" ) with pytest.raises(TypeError, match=msg): ci1 == ci2
Partially addresses #19513. After this, CategoricalIndex will define comparison ops identically to DTA/TDA/PA and could share some code.
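The behavior this preserves can be sketched directly; both error messages accepted by the updated test now originate in `Categorical`'s own comparison op after the refactor:

```python
import pandas as pd

ci1 = pd.CategoricalIndex(["a", "b"], categories=["a", "b"])
ci2 = pd.CategoricalIndex(["a", "b"], categories=["a", "b", "c"])

# Different categories: TypeError, now raised by Categorical itself rather
# than by CategoricalIndex._evaluate_compare.
try:
    ci1 == ci2
except TypeError as err:
    print(err)

# The length guard added to Categorical's comparison method in this diff.
try:
    pd.Categorical(["a", "b"]) == pd.Categorical(["a", "b", "a"])
except ValueError as err:
    print(err)  # Lengths must match.
```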
https://api.github.com/repos/pandas-dev/pandas/pulls/27769
2019-08-06T02:22:00Z
2019-08-13T12:05:44Z
2019-08-13T12:05:44Z
2019-08-14T21:37:58Z
CLN: short-circuit case in Block.replace
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 9f3aa699cfaf4..8779bf8ca5b54 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -772,8 +772,11 @@ def replace( # If we cannot replace with own dtype, convert to ObjectBlock and # retry if not self._can_hold_element(to_replace): - # TODO: we should be able to infer at this point that there is - # nothing to replace + if not isinstance(to_replace, list): + if inplace: + return [self] + return [self.copy()] + # GH 22083, TypeError or ValueError occurred within error handling # causes infinite loop. Cast and retry only if not objectblock. if is_object_dtype(self): @@ -798,14 +801,27 @@ def replace( filtered_out = ~self.mgr_locs.isin(filter) mask[filtered_out.nonzero()[0]] = False + if not mask.any(): + if inplace: + return [self] + return [self.copy()] + try: blocks = self.putmask(mask, value, inplace=inplace) + # Note: it is _not_ the case that self._can_hold_element(value) + # is always true at this point. In particular, that can fail + # for: + # "2u" with bool-dtype, float-dtype + # 0.5 with int64-dtype + # np.nan with int64-dtype except (TypeError, ValueError): # GH 22083, TypeError or ValueError occurred within error handling # causes infinite loop. Cast and retry only if not objectblock. if is_object_dtype(self): raise + assert not self._can_hold_element(value), value + # try again with a compatible block block = self.astype(object) return block.replace( @@ -960,6 +976,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False) # if we are passed a scalar None, convert it here if not is_list_like(new) and isna(new) and not self.is_object: + # FIXME: make sure we have compatible NA new = self.fill_value if self._can_hold_element(new):
Running out of try/excepts to get rid of
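From the user side the short-circuit is observable only as a no-op that now skips the cast-to-object retry; a small sketch:

```python
import pandas as pd

s = pd.Series([1, 2, 3])  # backed by an int64 block

# An int64 block cannot hold "foo", so with this change replace() returns
# a copy immediately instead of casting to object dtype and retrying.
result = s.replace("foo", "bar")
print(result.equals(s))  # True: there was nothing to replace
```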
https://api.github.com/repos/pandas-dev/pandas/pulls/27768
2019-08-06T01:23:57Z
2019-08-12T18:58:43Z
2019-08-12T18:58:43Z
2019-08-12T19:04:19Z
BUG: Fix windowing over read-only arrays
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 943a6adb7944e..66b760a76dad3 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -118,6 +118,7 @@ Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`) +- Bug in windowing over read-only arrays (:issue:`27766`) - - diff --git a/pandas/core/window.py b/pandas/core/window.py index a7425bc1466c3..3e3f17369db7b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -246,8 +246,10 @@ def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray: except (ValueError, TypeError): raise TypeError("cannot handle this type -> {0}".format(values.dtype)) - # Always convert inf to nan - values[np.isinf(values)] = np.NaN + # Convert inf to nan for C funcs + inf = np.isinf(values) + if inf.any(): + values = np.where(inf, np.nan, values) return values diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index c7177e1d3914f..f0787ab3d191f 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -326,3 +326,11 @@ def test_rolling_axis_count(self, axis_frame): result = df.rolling(2, axis=axis_frame).count() tm.assert_frame_equal(result, expected) + + def test_readonly_array(self): + # GH-27766 + arr = np.array([1, 3, np.nan, 3, 5]) + arr.setflags(write=False) + result = pd.Series(arr).rolling(2).mean() + expected = pd.Series([np.nan, 2, np.nan, np.nan, 4]) + tm.assert_series_equal(result, expected)
- [x] closes #27766 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
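A repro of GH-27766, matching the added test: the old in-place `values[np.isinf(values)] = np.NaN` wrote into the caller's buffer, which fails when that buffer is read-only, while `np.where` allocates a fresh array (and only when infs are present):

```python
import numpy as np
import pandas as pd

arr = np.array([1, 3, np.nan, 3, 5])
arr.setflags(write=False)  # e.g. a memory-mapped or shared buffer

# Previously raised "ValueError: assignment destination is read-only"
# because _prep_values mutated the input array in place.
result = pd.Series(arr).rolling(2).mean()
print(result)
```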
https://api.github.com/repos/pandas-dev/pandas/pulls/27767
2019-08-06T00:42:26Z
2019-08-06T15:39:20Z
2019-08-06T15:39:20Z
2019-08-06T15:39:34Z
TYPING: more type hints for io.formats.printing
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index 4ec9094ce4abe..ead51693da791 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -3,12 +3,14 @@ """ import sys -from typing import Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union from pandas._config import get_option from pandas.core.dtypes.inference import is_sequence +EscapeChars = Union[Dict[str, str], Iterable[str]] + def adjoin(space: int, *lists: List[str], **kwargs) -> str: """ @@ -148,19 +150,16 @@ def _pprint_dict( def pprint_thing( - thing, + thing: Any, _nest_lvl: int = 0, - escape_chars: Optional[Union[Dict[str, str], Iterable[str]]] = None, + escape_chars: Optional[EscapeChars] = None, default_escapes: bool = False, quote_strings: bool = False, max_seq_items: Optional[int] = None, ) -> str: """ This function is the sanctioned way of converting objects - to a unicode representation. - - properly handles nested sequences containing unicode strings - (unicode(object) does not) + to a string representation and properly handles nested sequences. Parameters ---------- @@ -178,21 +177,13 @@ def pprint_thing( Returns ------- - result - unicode str + str """ - def as_escaped_unicode(thing, escape_chars=escape_chars): - # Unicode is fine, else we try to decode using utf-8 and 'replace' - # if that's not it either, we have no way of knowing and the user - # should deal with it himself. - - try: - result = str(thing) # we should try this first - except UnicodeDecodeError: - # either utf-8 or we replace errors - result = str(thing).decode("utf-8", "replace") - + def as_escaped_string( + thing: Any, escape_chars: Optional[EscapeChars] = escape_chars + ) -> str: translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"} if isinstance(escape_chars, dict): if default_escapes: @@ -202,10 +193,11 @@ def as_escaped_unicode(thing, escape_chars=escape_chars): escape_chars = list(escape_chars.keys()) else: escape_chars = escape_chars or tuple() + + result = str(thing) for c in escape_chars: result = result.replace(c, translate[c]) - - return str(result) + return result if hasattr(thing, "__next__"): return str(thing) @@ -224,11 +216,11 @@ def as_escaped_unicode(thing, escape_chars=escape_chars): max_seq_items=max_seq_items, ) elif isinstance(thing, str) and quote_strings: - result = "'{thing}'".format(thing=as_escaped_unicode(thing)) + result = "'{thing}'".format(thing=as_escaped_string(thing)) else: - result = as_escaped_unicode(thing) + result = as_escaped_string(thing) - return str(result) # always unicode + return result def pprint_thing_encoded(
xref #27568 ``` $ mypy pandas/io/formats/printing.py --check-untyped-defs --follow-imports skip pandas\io\formats\printing.py:194: error: "str" has no attribute "decode"; maybe "encode"? ```
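The escape behavior itself is unchanged by the typing cleanup; a small usage sketch of the internal helper showing the two shapes the new `EscapeChars` alias covers:

```python
from pandas.io.formats.printing import pprint_thing

# An iterable of characters: each is replaced via the default translate map.
print(pprint_thing("a\tb\nc", escape_chars=["\t", "\n"]))  # a\tb\nc (escaped)

# A dict supplies the replacements directly (the Dict[str, str] arm).
print(pprint_thing("a\tb", escape_chars={"\t": r"\t"}))
```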
https://api.github.com/repos/pandas-dev/pandas/pulls/27765
2019-08-05T21:37:03Z
2019-08-23T22:36:59Z
2019-08-23T22:36:59Z
2019-08-24T07:45:54Z
CLN/REF: Remove _try_cast_result, _try_coerce_and_cast_result
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 2ad85903b916b..ea2bd22cccc3d 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -21,7 +21,11 @@ from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, Substitution -from pandas.core.dtypes.cast import maybe_convert_objects, maybe_downcast_to_dtype +from pandas.core.dtypes.cast import ( + maybe_convert_objects, + maybe_downcast_numeric, + maybe_downcast_to_dtype, +) from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, @@ -180,10 +184,8 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1): continue finally: if result is not no_result: - dtype = block.values.dtype - # see if we can cast the block back to the original dtype - result = block._try_coerce_and_cast_result(result, dtype=dtype) + result = maybe_downcast_numeric(result, block.dtype) newb = block.make_block(result) new_items.append(locs) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 676f243c9c8d3..b0c629f017dd3 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -591,6 +591,8 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs): if is_datetime64tz_dtype(orig_values.dtype): result = type(orig_values)(result.astype(np.int64), dtype=orig_values.dtype) + elif is_datetimelike and kind == "aggregate": + result = result.astype(orig_values.dtype) return result, names diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 9f3aa699cfaf4..8c3cf7cc51495 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -18,6 +18,7 @@ find_common_type, infer_dtype_from, infer_dtype_from_scalar, + maybe_downcast_numeric, maybe_downcast_to_dtype, maybe_infer_dtype_type, maybe_promote, @@ -55,7 +56,6 @@ ABCDataFrame, ABCDatetimeIndex, ABCExtensionArray, - ABCIndexClass, ABCPandasArray, ABCSeries, ) @@ -685,28 +685,6 @@ def _can_hold_element(self, element): return issubclass(tipo.type, dtype) return isinstance(element, dtype) - def _try_cast_result(self, result, dtype=None): - """ try to cast the result to our original type, we may have - roundtripped thru object in the mean-time - """ - if dtype is None: - dtype = self.dtype - - if self.is_integer or self.is_bool or self.is_datetime: - pass - elif self.is_float and result.dtype == self.dtype: - # protect against a bool/object showing up here - if isinstance(dtype, str) and dtype == "infer": - return result - - # This is only reached via Block.setitem, where dtype is always - # either "infer", self.dtype, or values.dtype. 
- assert dtype == self.dtype, (dtype, self.dtype) - return result - - # may need to change the dtype here - return maybe_downcast_to_dtype(result, dtype) - def _try_coerce_args(self, other): """ provide coercion to our input arguments """ @@ -729,10 +707,6 @@ def _try_coerce_args(self, other): return other - def _try_coerce_and_cast_result(self, result, dtype=None): - result = self._try_cast_result(result, dtype=dtype) - return result - def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ @@ -925,8 +899,6 @@ def setitem(self, indexer, value): else: values[indexer] = value - # coerce and try to infer the dtypes of the result - values = self._try_coerce_and_cast_result(values, dtype) if transpose: values = values.T block = self.make_block(values) @@ -1444,10 +1416,6 @@ def func(cond, values, other): if transpose: result = result.T - # try to cast if requested - if try_cast: - result = self._try_cast_result(result) - return [self.make_block(result)] # might need to separate out blocks @@ -1459,7 +1427,7 @@ def func(cond, values, other): for m in [mask, ~mask]: if m.any(): taken = result.take(m.nonzero()[0], axis=axis) - r = self._try_cast_result(taken) + r = maybe_downcast_numeric(taken, self.dtype) nb = self.make_block(r.T, placement=self.mgr_locs[m]) result_blocks.append(nb) @@ -1692,9 +1660,6 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False) new_values[mask] = new return [self.make_block(values=new_values)] - def _try_cast_result(self, result, dtype=None): - return result - def _get_unstack_items(self, unstacker, new_columns): """ Get the placement, values, and mask for a Block unstack. @@ -1746,7 +1711,8 @@ def __init__(self, values, placement, ndim=None): super().__init__(values, placement, ndim) def _maybe_coerce_values(self, values): - """Unbox to an extension array. + """ + Unbox to an extension array. This will unbox an ExtensionArray stored in an Index or Series. ExtensionArrays pass through. No dtype coercion is done. @@ -1759,9 +1725,7 @@ def _maybe_coerce_values(self, values): ------- ExtensionArray """ - if isinstance(values, (ABCIndexClass, ABCSeries)): - values = values._values - return values + return extract_array(values) @property def _holder(self):
This finishes getting the post-call casting/coercion out of the Blocks. Pre-call coercion is still in there, exclusively for TimedeltaBlock and DatetimeBlock, i.e. it will not be necessary if/when those are backed by EA.
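The groupby path now uses the module-level helper instead of the Block method; a hedged sketch of what `maybe_downcast_numeric` does, assuming the 0.25-era behavior of casting back only when the round trip is lossless:

```python
import numpy as np
from pandas.core.dtypes.cast import maybe_downcast_numeric

# A float aggregation result cast back to the block's original int64,
# since every value survives the round trip exactly.
print(maybe_downcast_numeric(np.array([1.0, 2.0, 3.0]), np.dtype("int64")).dtype)

# A lossy cast is refused; the input comes back unchanged as float64.
print(maybe_downcast_numeric(np.array([1.5, 2.5]), np.dtype("int64")).dtype)
```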
https://api.github.com/repos/pandas-dev/pandas/pulls/27764
2019-08-05T21:37:00Z
2019-08-06T15:40:40Z
2019-08-06T15:40:40Z
2019-08-06T16:03:44Z
Backport PR #27733 on branch 0.25.x (BUG: fix to_datetime(dti, utc=True))
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 4d9ee4c676759..943a6adb7944e 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -31,7 +31,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ - +- Bug in :func:`to_datetime` where passing a timezone-naive :class:`DatetimeArray` or :class:`DatetimeIndex` and ``utc=True`` would incorrectly return a timezone-naive result (:issue:`27733`) - - - diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index e9d2c3f07bfae..0c41d8a8050e6 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -334,6 +334,9 @@ def _convert_listlike_datetimes( return DatetimeIndex(arg, tz=tz, name=name) except ValueError: pass + elif tz: + # DatetimeArray, DatetimeIndex + return arg.tz_localize(tz) return arg diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 8db15709da35d..9af0f47f6dce9 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1620,6 +1620,18 @@ def test_dayfirst(self, cache): tm.assert_index_equal(expected, idx5) tm.assert_index_equal(expected, idx6) + @pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray]) + def test_to_datetime_dta_tz(self, klass): + # GH#27733 + dti = date_range("2015-04-05", periods=3).rename("foo") + expected = dti.tz_localize("UTC") + + obj = klass(dti) + expected = klass(expected) + + result = to_datetime(obj, utc=True) + tm.assert_equal(result, expected) + class TestGuessDatetimeFormat: @td.skip_if_not_us_locale
Backport PR #27733: BUG: fix to_datetime(dti, utc=True)
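A short repro of the backported fix, taken from the added test:

```python
import pandas as pd

dti = pd.date_range("2015-04-05", periods=3)  # tz-naive DatetimeIndex

# Before the fix this returned a tz-naive result despite utc=True;
# the patched branch localizes DatetimeArray/DatetimeIndex input to UTC.
result = pd.to_datetime(dti, utc=True)
print(result.tz)  # UTC
```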
https://api.github.com/repos/pandas-dev/pandas/pulls/27763
2019-08-05T20:29:30Z
2019-08-06T15:33:38Z
2019-08-06T15:33:38Z
2019-08-06T15:33:38Z
BUG: right merge not preserve row order (#27453)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index ec6ad38bbc7cf..a4b32e453d9d2 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -300,6 +300,30 @@ New repr for :class:`~pandas.arrays.IntervalArray` pd.arrays.IntervalArray.from_tuples([(0, 1), (2, 3)]) +:meth:`DataFrame.merge` preserves right frame's row order +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:meth:`DataFrame.merge` now preserves right frame's row order when executing a right merge (:issue:`27453`) + +.. code-block:: python + + left_df = pd.DataFrame({"colors": ["blue", "red"]}, index=pd.Index([0, 1])) + right_df = pd.DataFrame({"hats": ["small", "big"]}, index=pd.Index([1, 0])) + left_df + right_df + +*pandas 0.25.x* + +.. code-block:: python + left_df.merge(right_df, left_index=True, right_index=True, how="right") + + +*pandas 1.0.0* + +.. code-block:: python + left_df.merge(right_df, left_index=True, right_index=True, how="right") + + + ``DataFrame.rename`` now only accepts one positional argument ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index ceee2f66dba42..3ed86c7dd51f4 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -568,10 +568,10 @@ def __init__( indicator: bool = False, validate=None, ): - _left = _validate_operand(left) - _right = _validate_operand(right) - self.left = self.orig_left = _left - self.right = self.orig_right = _right + left = validate_operand(left) + right = validate_operand(right) + self.left = self.orig_left = left + self.right = self.orig_right = right self.how = how self.axis = axis @@ -1295,6 +1295,9 @@ def _get_join_indexers( right_keys ), "left_key and right_keys must be the same length" + # bind `sort` arg. of _factorize_keys + fkeys = partial(_factorize_keys, sort=sort) + # get left & right join labels and num. of levels at each location mapped = ( _factorize_keys(left_keys[n], right_keys[n], sort=sort) @@ -1309,15 +1312,20 @@ def _get_join_indexers( # factorize keys to a dense i8 space # `count` is the num. 
of unique keys # set(lkey) | set(rkey) == range(count) - lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort) + # flip left and right keys if performing a right merge + # to preserve right merge row order (GH 27453) + if how == "right": + factorized_rkey, factorized_lkey, count = fkeys(rkey, lkey) + else: + factorized_lkey, factorized_rkey, count = fkeys(lkey, rkey) # preserve left frame order if how == 'left' and sort == False kwargs = copy.copy(kwargs) if how == "left": kwargs["sort"] = sort join_func = _join_functions[how] - return join_func(lkey, rkey, count, **kwargs) + return join_func(factorized_lkey, factorized_rkey, count, **kwargs) def _restore_dropped_levels_multijoin( diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 30c440035d48e..7254ca14947b3 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1288,17 +1288,17 @@ def test_merge_on_index_with_more_values(self, how, index, expected_index): # GH 24212 # pd.merge gets [0, 1, 2, -1, -1, -1] as left_indexer, ensure that # -1 is interpreted as a missing value instead of the last element - df1 = pd.DataFrame({"a": [1, 2, 3], "key": [0, 2, 2]}, index=index) - df2 = pd.DataFrame({"b": [1, 2, 3, 4, 5]}) + df1 = pd.DataFrame({"a": [0, 1, 2], "key": [0, 1, 2]}, index=index) + df2 = pd.DataFrame({"b": [0, 1, 2, 3, 4, 5]}) result = df1.merge(df2, left_on="key", right_index=True, how=how) expected = pd.DataFrame( [ - [1.0, 0, 1], - [2.0, 2, 3], - [3.0, 2, 3], - [np.nan, 1, 2], - [np.nan, 3, 4], - [np.nan, 4, 5], + [0, 0, 0], + [1, 1, 1], + [2, 2, 2], + [np.nan, 3, 3], + [np.nan, 4, 4], + [np.nan, 5, 5], ], columns=["a", "key", "b"], ) @@ -2152,3 +2152,35 @@ def test_merge_multiindex_columns(): expected["id"] = "" tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("how", ["left", "right"]) +def test_merge_preserves_row_order(how): + # GH 27453 + population = [ + ("Jenn", "Jamaica", 3), + ("Beth", "Bulgaria", 7), + ("Carl", "Canada", 30), + ] + columns = ["name", "country", "population"] + population_df = DataFrame(population, columns=columns) + + people = [("Abe", "America"), ("Beth", "Bulgaria"), ("Carl", "Canada")] + columns = ["name", "country"] + people_df = DataFrame(people, columns=columns) + + expected_data = [ + ("Abe", "America", np.nan), + ("Beth", "Bulgaria", 7), + ("Carl", "Canada", 30), + ] + expected_cols = ["name", "country", "population"] + expected = DataFrame(expected_data, columns=expected_cols) + + if how == "right": + left_df, right_df = population_df, people_df + elif how == "left": + left_df, right_df = people_df, population_df + + result = left_df.merge(right_df, on=("name", "country"), how=how) + assert_frame_equal(expected, result)
- [ ] closes #27453 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
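The whatsnew snippet in the diff omits the before/after output; the new behavior can be seen with the same frames:

```python
import pandas as pd

left_df = pd.DataFrame({"colors": ["blue", "red"]}, index=pd.Index([0, 1]))
right_df = pd.DataFrame({"hats": ["small", "big"]}, index=pd.Index([1, 0]))

# With this change the result rows follow right_df's order (index 1, then 0),
# because the factorized keys are flipped before the join for how="right".
print(left_df.merge(right_df, left_index=True, right_index=True, how="right"))
```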
https://api.github.com/repos/pandas-dev/pandas/pulls/27762
2019-08-05T19:13:16Z
2020-01-23T03:48:40Z
null
2020-01-24T17:15:10Z
TST: missed from #27720
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index ed80e249220fd..05b58b0eca9b8 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -1038,10 +1038,6 @@ def test_replace_series(self, how, to_key, from_key): "from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"] ) def test_replace_series_datetime_tz(self, how, to_key, from_key): - how = "series" - from_key = "datetime64[ns, US/Eastern]" - to_key = "timedelta64[ns]" - index = pd.Index([3, 4], name="xyz") obj = pd.Series(self.rep[from_key], index=index, name="yyy") assert obj.dtype == from_key
pointed out by @simonjayhawkins
https://api.github.com/repos/pandas-dev/pandas/pulls/27759
2019-08-05T14:20:16Z
2019-08-05T15:03:45Z
2019-08-05T15:03:45Z
2019-08-05T16:44:44Z
Added diff testing file
diff --git a/pandas/tests/series/test_diff.py b/pandas/tests/series/test_diff.py new file mode 100644 index 0000000000000..d6c443d2c5b62 --- /dev/null +++ b/pandas/tests/series/test_diff.py @@ -0,0 +1,17 @@ +from pandas import ( + Series +) + +from numpy import nan + +def test_diff(): + data = Series([0,-1,-2,-3,-4,-3,-2,-1,0,-1,-1,0,-1,-2,-3,-2,0]) + + filtered = data.between(-2,0, inclusive = True) + diff_boolean = filtered.diff() + expected_boolean = Series([nan, False, False, True, False, False, True, False, False, False, False, False, False, False, True, True, False]) + assert diff_boolean.equals(expected_boolean) + + diff_data = data.diff() + expected_data = Series([nan, -1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, -1.0, 0.0, 1.0, -1.0, -1.0, -1.0, 1.0, 2.0]) + assert diff_data.equals(expected_data)
Adds a test of the diff for a specific series. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27757
2019-08-05T13:55:19Z
2019-08-05T14:05:53Z
null
2019-08-05T14:05:53Z
BUG: Fix numpy boolean subtraction error in Series.diff
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 6912d15abf3d6..944ce9b4fb1f6 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1 +1,2 @@ custom: https://pandas.pydata.org/donate.html +tidelift: pypi/pandas diff --git a/.github/SECURITY.md b/.github/SECURITY.md new file mode 100644 index 0000000000000..f3b059a5d4f13 --- /dev/null +++ b/.github/SECURITY.md @@ -0,0 +1 @@ +To report a security vulnerability to pandas, please go to https://tidelift.com/security and see the instructions there. diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 32ffb3330564c..5cc22c638c9b1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,17 +1,21 @@ repos: - - repo: https://github.com/python/black - rev: stable - hooks: - - id: black - language_version: python3.7 - - repo: https://gitlab.com/pycqa/flake8 - rev: 3.7.7 - hooks: - - id: flake8 - language: python_venv - additional_dependencies: [flake8-comprehensions] - - repo: https://github.com/pre-commit/mirrors-isort - rev: v4.3.20 - hooks: - - id: isort - language: python_venv +- repo: https://github.com/python/black + rev: stable + hooks: + - id: black + language_version: python3.7 +- repo: https://gitlab.com/pycqa/flake8 + rev: 3.7.7 + hooks: + - id: flake8 + language: python_venv + additional_dependencies: [flake8-comprehensions] +- repo: https://github.com/pre-commit/mirrors-isort + rev: v4.3.20 + hooks: + - id: isort + language: python_venv +- repo: https://github.com/asottile/seed-isort-config + rev: v1.9.2 + hooks: + - id: seed-isort-config diff --git a/.travis.yml b/.travis.yml index 9be4291d10874..79fecc41bec0d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,7 +21,7 @@ env: git: # for cloning - depth: 2000 + depth: false matrix: fast_finish: true @@ -63,7 +63,7 @@ before_install: - pwd - uname -a - git --version - - git tag + - ./ci/check_git_tags.sh # Because travis runs on Google Cloud and has a /etc/boto.cfg, # it breaks moto import, see: # https://github.com/spulec/moto/issues/1771 diff --git a/README.md b/README.md index aeeea1464e1fd..3cde98d3145f2 100644 --- a/README.md +++ b/README.md @@ -233,3 +233,5 @@ You can also triage issues which may include reproducing bug reports, or asking Or maybe through using pandas you have an idea of your own or are looking for something in the documentation and thinking ‘this can be improved’...you can do something about it! Feel free to ask questions on the [mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata) or on [Gitter](https://gitter.im/pydata/pandas). + +As contributors and maintainers to this project, you are expected to abide by pandas' code of conduct. 
More information can be found at: [Contributor Code of Conduct](https://github.com/pandas-dev/pandas/blob/master/.github/CODE_OF_CONDUCT.md) diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py index c43e5dfd729aa..501e27b9078ec 100644 --- a/asv_bench/benchmarks/attrs_caching.py +++ b/asv_bench/benchmarks/attrs_caching.py @@ -1,4 +1,5 @@ import numpy as np + from pandas import DataFrame try: @@ -32,4 +33,4 @@ def time_cache_readonly(self): self.obj.prop -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/binary_ops.py b/asv_bench/benchmarks/binary_ops.py index fd3324b78f1c3..58e0db67d6025 100644 --- a/asv_bench/benchmarks/binary_ops.py +++ b/asv_bench/benchmarks/binary_ops.py @@ -1,4 +1,5 @@ import numpy as np + from pandas import DataFrame, Series, date_range from pandas.core.algorithms import checked_add_with_arr @@ -155,4 +156,4 @@ def time_add_overflow_both_arg_nan(self): ) -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 8097118a79d20..559aa7050a640 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -1,7 +1,9 @@ +import warnings + import numpy as np + import pandas as pd import pandas.util.testing as tm -import warnings try: from pandas.api.types import union_categoricals @@ -280,4 +282,4 @@ def time_sort_values(self): self.index.sort_values(ascending=False) -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py index 654075292cdf6..ec3dd7a48a89f 100644 --- a/asv_bench/benchmarks/ctors.py +++ b/asv_bench/benchmarks/ctors.py @@ -1,6 +1,7 @@ import numpy as np + +from pandas import DatetimeIndex, Index, MultiIndex, Series, Timestamp import pandas.util.testing as tm -from pandas import Series, Index, DatetimeIndex, Timestamp, MultiIndex def no_change(arr): @@ -113,4 +114,4 @@ def time_multiindex_from_iterables(self): MultiIndex.from_product(self.iterables) -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/dtypes.py b/asv_bench/benchmarks/dtypes.py index 60800b1f9cae7..24cc1c6f9fa70 100644 --- a/asv_bench/benchmarks/dtypes.py +++ b/asv_bench/benchmarks/dtypes.py @@ -1,14 +1,14 @@ +import numpy as np + from pandas.api.types import pandas_dtype -import numpy as np from .pandas_vb_common import ( - numeric_dtypes, datetime_dtypes, - string_dtypes, extension_dtypes, + numeric_dtypes, + string_dtypes, ) - _numpy_dtypes = [ np.dtype(dtype) for dtype in (numeric_dtypes + datetime_dtypes + string_dtypes) ] @@ -40,4 +40,4 @@ def time_pandas_dtype_invalid(self, dtype): pass -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py index 84e94315cc28b..06a181875aaa8 100644 --- a/asv_bench/benchmarks/eval.py +++ b/asv_bench/benchmarks/eval.py @@ -1,4 +1,5 @@ import numpy as np + import pandas as pd try: @@ -62,4 +63,4 @@ def time_query_with_boolean_selection(self): self.df.query("(a >= @self.min_val) & (a <= @self.max_val)") -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 
isort:skip diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py index acfb26bcf5d7c..3944e0bc523d8 100644 --- a/asv_bench/benchmarks/frame_ctor.py +++ b/asv_bench/benchmarks/frame_ctor.py @@ -1,6 +1,7 @@ import numpy as np + +from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range import pandas.util.testing as tm -from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range try: from pandas.tseries.offsets import Nano, Hour @@ -104,4 +105,4 @@ def time_frame_from_lists(self): self.df = DataFrame(self.data) -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index e2f6764c76eef..05f98c66faa2b 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -1,5 +1,5 @@ -import warnings import string +import warnings import numpy as np @@ -609,4 +609,4 @@ def time_dataframe_describe(self): self.df.describe() -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py index 0d0b75561d057..d57492dd37268 100644 --- a/asv_bench/benchmarks/gil.py +++ b/asv_bench/benchmarks/gil.py @@ -1,7 +1,8 @@ import numpy as np -import pandas.util.testing as tm -from pandas import DataFrame, Series, read_csv, factorize, date_range + +from pandas import DataFrame, Series, date_range, factorize, read_csv from pandas.core.algorithms import take_1d +import pandas.util.testing as tm try: from pandas import ( @@ -36,7 +37,7 @@ def wrapper(fname): return wrapper -from .pandas_vb_common import BaseIO +from .pandas_vb_common import BaseIO # noqa: E402 isort:skip class ParallelGroupbyMethods: @@ -301,4 +302,4 @@ def time_loop(self, threads): self.loop() -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 39b07d4734399..d51c53e2264f1 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -15,7 +15,6 @@ ) import pandas.util.testing as tm - method_blacklist = { "object": { "median", @@ -626,4 +625,4 @@ def time_first(self): self.df_nans.groupby("key").transform("first") -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py index 6541ddcb0397d..a94960d494707 100644 --- a/asv_bench/benchmarks/index_object.py +++ b/asv_bench/benchmarks/index_object.py @@ -1,14 +1,17 @@ +import gc + import numpy as np -import pandas.util.testing as tm + from pandas import ( - Series, - date_range, DatetimeIndex, - Index, - RangeIndex, Float64Index, + Index, IntervalIndex, + RangeIndex, + Series, + date_range, ) +import pandas.util.testing as tm class SetOperations: @@ -225,4 +228,21 @@ def time_intersection_both_duplicate(self, N): self.intv.intersection(self.intv2) -from .pandas_vb_common import setup # noqa: F401 +class GC: + params = [1, 2, 5] + + def create_use_drop(self): + idx = Index(list(range(1000 * 1000))) + idx._engine + + def peakmem_gc_instances(self, N): + try: + gc.disable() + + for _ in range(N): + self.create_use_drop() + finally: + gc.enable() + + +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/indexing.py 
b/asv_bench/benchmarks/indexing.py index 84604b8196536..ac35139c1954a 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -1,22 +1,23 @@ import warnings import numpy as np -import pandas.util.testing as tm + from pandas import ( - Series, + CategoricalIndex, DataFrame, - MultiIndex, - Int64Index, - UInt64Index, Float64Index, - IntervalIndex, - CategoricalIndex, IndexSlice, + Int64Index, + IntervalIndex, + MultiIndex, + Series, + UInt64Index, concat, date_range, option_context, period_range, ) +import pandas.util.testing as tm class NumericSeriesIndexing: @@ -371,4 +372,4 @@ def time_chained_indexing(self, mode): df2["C"] = 1.0 -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py index 66ef4f2aec380..e85b3bd2c7687 100644 --- a/asv_bench/benchmarks/inference.py +++ b/asv_bench/benchmarks/inference.py @@ -1,8 +1,9 @@ import numpy as np -import pandas.util.testing as tm + from pandas import DataFrame, Series, to_numeric +import pandas.util.testing as tm -from .pandas_vb_common import numeric_dtypes, lib +from .pandas_vb_common import lib, numeric_dtypes class NumericInferOps: @@ -120,4 +121,4 @@ def time_convert(self, data): lib.maybe_convert_numeric(data, set(), coerce_numeric=False) -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py index 4525e504fc4dd..9b8599b0a1b64 100644 --- a/asv_bench/benchmarks/io/csv.py +++ b/asv_bench/benchmarks/io/csv.py @@ -1,10 +1,11 @@ +from io import StringIO import random import string import numpy as np + +from pandas import Categorical, DataFrame, date_range, read_csv, to_datetime import pandas.util.testing as tm -from pandas import DataFrame, Categorical, date_range, read_csv, to_datetime -from io import StringIO from ..pandas_vb_common import BaseIO @@ -406,4 +407,4 @@ def time_to_datetime_format_DD_MM_YYYY(self, cache_dates): to_datetime(df["date"], cache=cache_dates, format="%d-%m-%Y") -from ..pandas_vb_common import setup # noqa: F401 +from ..pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py index 12e70f84e5203..9aa5cbd5b6f7c 100644 --- a/asv_bench/benchmarks/io/excel.py +++ b/asv_bench/benchmarks/io/excel.py @@ -1,6 +1,8 @@ from io import BytesIO + import numpy as np -from pandas import DataFrame, date_range, ExcelWriter, read_excel + +from pandas import DataFrame, ExcelWriter, date_range, read_excel import pandas.util.testing as tm @@ -35,4 +37,4 @@ def time_write_excel(self, engine): writer_write.save() -from ..pandas_vb_common import setup # noqa: F401 +from ..pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/io/hdf.py b/asv_bench/benchmarks/io/hdf.py index 2874a7889156b..8ec04a2087f1b 100644 --- a/asv_bench/benchmarks/io/hdf.py +++ b/asv_bench/benchmarks/io/hdf.py @@ -1,5 +1,6 @@ import numpy as np -from pandas import DataFrame, date_range, HDFStore, read_hdf + +from pandas import DataFrame, HDFStore, date_range, read_hdf import pandas.util.testing as tm from ..pandas_vb_common import BaseIO @@ -127,4 +128,4 @@ def time_write_hdf(self, format): self.df.to_hdf(self.fname, "df", format=format) -from ..pandas_vb_common import setup # noqa: F401 +from ..pandas_vb_common import setup # noqa: F401 isort:skip diff --git 
a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py index fc07f2a484102..b249c92b53e93 100644 --- a/asv_bench/benchmarks/io/json.py +++ b/asv_bench/benchmarks/io/json.py @@ -1,6 +1,7 @@ import numpy as np + +from pandas import DataFrame, concat, date_range, read_json, timedelta_range import pandas.util.testing as tm -from pandas import DataFrame, date_range, timedelta_range, concat, read_json from ..pandas_vb_common import BaseIO @@ -214,4 +215,4 @@ def peakmem_float(self, frames): df.to_json() -from ..pandas_vb_common import setup # noqa: F401 +from ..pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/io/msgpack.py b/asv_bench/benchmarks/io/msgpack.py index d97b4ae13f0bd..f5038602539ab 100644 --- a/asv_bench/benchmarks/io/msgpack.py +++ b/asv_bench/benchmarks/io/msgpack.py @@ -1,5 +1,7 @@ import warnings + import numpy as np + from pandas import DataFrame, date_range, read_msgpack import pandas.util.testing as tm @@ -27,4 +29,4 @@ def time_write_msgpack(self): self.df.to_msgpack(self.fname) -from ..pandas_vb_common import setup # noqa: F401 +from ..pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/io/pickle.py b/asv_bench/benchmarks/io/pickle.py index 286ac767c02e7..647e9d27dec9d 100644 --- a/asv_bench/benchmarks/io/pickle.py +++ b/asv_bench/benchmarks/io/pickle.py @@ -1,4 +1,5 @@ import numpy as np + from pandas import DataFrame, date_range, read_pickle import pandas.util.testing as tm @@ -25,4 +26,4 @@ def time_write_pickle(self): self.df.to_pickle(self.fname) -from ..pandas_vb_common import setup # noqa: F401 +from ..pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/io/sql.py b/asv_bench/benchmarks/io/sql.py index b80872b17a9e4..fe84c869717e3 100644 --- a/asv_bench/benchmarks/io/sql.py +++ b/asv_bench/benchmarks/io/sql.py @@ -1,10 +1,11 @@ import sqlite3 import numpy as np -import pandas.util.testing as tm -from pandas import DataFrame, date_range, read_sql_query, read_sql_table from sqlalchemy import create_engine +from pandas import DataFrame, date_range, read_sql_query, read_sql_table +import pandas.util.testing as tm + class SQL: @@ -141,4 +142,4 @@ def time_read_sql_table_column(self, dtype): read_sql_table(self.table_name, self.con, columns=[dtype]) -from ..pandas_vb_common import setup # noqa: F401 +from ..pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/io/stata.py b/asv_bench/benchmarks/io/stata.py index b3ed71af47dc8..28829785d72e9 100644 --- a/asv_bench/benchmarks/io/stata.py +++ b/asv_bench/benchmarks/io/stata.py @@ -1,4 +1,5 @@ import numpy as np + from pandas import DataFrame, date_range, read_stata import pandas.util.testing as tm @@ -50,4 +51,4 @@ def setup(self, convert_dates): self.df.to_stata(self.fname, self.convert_dates) -from ..pandas_vb_common import setup # noqa: F401 +from ..pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py index 7c899e3dc6ac8..6aa82a43a4d6a 100644 --- a/asv_bench/benchmarks/join_merge.py +++ b/asv_bench/benchmarks/join_merge.py @@ -1,8 +1,9 @@ import string import numpy as np + +from pandas import DataFrame, MultiIndex, Series, concat, date_range, merge, merge_asof import pandas.util.testing as tm -from pandas import DataFrame, Series, MultiIndex, date_range, concat, merge, merge_asof try: from pandas import merge_ordered @@ -348,4 +349,4 @@ def time_series_align_left_monotonic(self): 
self.ts1.align(self.ts2, join="left") -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/multiindex_object.py b/asv_bench/benchmarks/multiindex_object.py index eda059a68e8a5..3f4fd7ad911c1 100644 --- a/asv_bench/benchmarks/multiindex_object.py +++ b/asv_bench/benchmarks/multiindex_object.py @@ -1,8 +1,9 @@ import string import numpy as np + +from pandas import DataFrame, MultiIndex, date_range import pandas.util.testing as tm -from pandas import date_range, MultiIndex, DataFrame class GetLoc: @@ -146,4 +147,4 @@ def time_categorical_level(self): self.df.set_index(["a", "b"]) -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/offset.py b/asv_bench/benchmarks/offset.py index 31c3b6fb6cb60..d822646e712ae 100644 --- a/asv_bench/benchmarks/offset.py +++ b/asv_bench/benchmarks/offset.py @@ -1,7 +1,8 @@ -import warnings from datetime import datetime +import warnings import numpy as np + import pandas as pd try: diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py index fdc8207021c0f..1faf13329110d 100644 --- a/asv_bench/benchmarks/pandas_vb_common.py +++ b/asv_bench/benchmarks/pandas_vb_common.py @@ -1,7 +1,8 @@ -import os from importlib import import_module +import os import numpy as np + import pandas as pd # Compatibility import for lib diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py index 2f8ae0650ab75..7303240a25f29 100644 --- a/asv_bench/benchmarks/period.py +++ b/asv_bench/benchmarks/period.py @@ -1,4 +1,5 @@ from pandas import DataFrame, Period, PeriodIndex, Series, date_range, period_range + from pandas.tseries.frequencies import to_offset diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py index 4fb0876f05a0a..5c718516360ed 100644 --- a/asv_bench/benchmarks/plotting.py +++ b/asv_bench/benchmarks/plotting.py @@ -1,11 +1,12 @@ +import matplotlib import numpy as np -from pandas import DataFrame, Series, DatetimeIndex, date_range + +from pandas import DataFrame, DatetimeIndex, Series, date_range try: from pandas.plotting import andrews_curves except ImportError: from pandas.tools.plotting import andrews_curves -import matplotlib matplotlib.use("Agg") @@ -93,4 +94,4 @@ def time_plot_andrews_curves(self): andrews_curves(self.df, "Name") -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py index 8d4c9ebaf3e89..cd450f801c805 100644 --- a/asv_bench/benchmarks/reindex.py +++ b/asv_bench/benchmarks/reindex.py @@ -1,6 +1,8 @@ import numpy as np + +from pandas import DataFrame, Index, MultiIndex, Series, date_range, period_range import pandas.util.testing as tm -from pandas import DataFrame, Series, MultiIndex, Index, date_range, period_range + from .pandas_vb_common import lib @@ -159,4 +161,4 @@ def time_lib_fast_zip(self): lib.fast_zip(self.col_array_list) -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py index 6137e944e6b9e..2a115fb0b4fe3 100644 --- a/asv_bench/benchmarks/replace.py +++ b/asv_bench/benchmarks/replace.py @@ -1,4 +1,5 @@ import numpy as np + import pandas as pd @@ -36,6 +37,23 @@ def time_replace_series(self, inplace): 
self.s.replace(self.to_rep, inplace=inplace) +class ReplaceList: + # GH#28099 + + params = [(True, False)] + param_names = ["inplace"] + + def setup(self, inplace): + self.df = pd.DataFrame({"A": 0, "B": 0}, index=range(4 * 10 ** 7)) + + def time_replace_list(self, inplace): + self.df.replace([np.inf, -np.inf], np.nan, inplace=inplace) + + def time_replace_list_one_match(self, inplace): + # the 1 can be held in self._df.blocks[0], while the inf and -inf cant + self.df.replace([np.inf, -np.inf, 1], np.nan, inplace=inplace) + + class Convert: params = (["DataFrame", "Series"], ["Timestamp", "Timedelta"]) @@ -56,4 +74,4 @@ def time_replace(self, constructor, replace_data): self.data.replace(self.to_replace) -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index cc373f413fb88..441f4b380656e 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -1,9 +1,10 @@ -import string from itertools import product +import string import numpy as np -from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long + import pandas as pd +from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long class Melt: @@ -262,4 +263,4 @@ def time_explode(self, n_rows, max_list_length): self.series.explode() -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index a70977fcf539f..3640513d31be2 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -1,6 +1,7 @@ -import pandas as pd import numpy as np +import pandas as pd + class Methods: @@ -121,4 +122,4 @@ def peakmem_fixed(self): self.roll.max() -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index 6038a2ab4bd9f..a3f1d92545c3f 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -1,8 +1,9 @@ from datetime import datetime import numpy as np + +from pandas import NaT, Series, date_range import pandas.util.testing as tm -from pandas import Series, date_range, NaT class SeriesConstructor: @@ -275,4 +276,4 @@ def time_func(self, func, N, dtype): self.func() -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py index 19d08c086a508..ac78ca53679fd 100644 --- a/asv_bench/benchmarks/sparse.py +++ b/asv_bench/benchmarks/sparse.py @@ -136,4 +136,4 @@ def time_division(self, fill_value): self.arr1 / self.arr2 -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py index 620a6de0f5f34..6032bee41958e 100644 --- a/asv_bench/benchmarks/stat_ops.py +++ b/asv_bench/benchmarks/stat_ops.py @@ -1,6 +1,6 @@ import numpy as np -import pandas as pd +import pandas as pd ops = ["mean", "sum", "median", "std", "skew", "kurt", "mad", "prod", "sem", "var"] @@ -148,4 +148,4 @@ def time_cov_series(self, use_bottleneck): self.s.cov(self.s2) -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git 
a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index 6be2fa92d9eac..f30b2482615bd 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -1,7 +1,8 @@ import warnings import numpy as np -from pandas import Series, DataFrame + +from pandas import DataFrame, Series import pandas.util.testing as tm diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index 1020b773f8acb..498774034d642 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -2,7 +2,9 @@ import dateutil import numpy as np -from pandas import to_datetime, date_range, Series, DataFrame, period_range + +from pandas import DataFrame, Series, date_range, period_range, to_datetime + from pandas.tseries.frequencies import infer_freq try: @@ -426,4 +428,4 @@ def time_dt_accessor_year(self, tz): self.series.dt.year -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/azure-pipelines.yml b/azure-pipelines.yml index cfd7f6546833d..263a87176a9c9 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -22,22 +22,17 @@ jobs: timeoutInMinutes: 90 steps: - script: | - # XXX next command should avoid redefining the path in every step, but - # made the process crash as it couldn't find deactivate - #echo '##vso[task.prependpath]$HOME/miniconda3/bin' + echo '##vso[task.prependpath]$(HOME)/miniconda3/bin' echo '##vso[task.setvariable variable=ENV_FILE]environment.yml' echo '##vso[task.setvariable variable=AZURE]true' displayName: 'Setting environment variables' # Do not require a conda environment - - script: | - export PATH=$HOME/miniconda3/bin:$PATH - ci/code_checks.sh patterns + - script: ci/code_checks.sh patterns displayName: 'Looking for unwanted patterns' condition: true - script: | - export PATH=$HOME/miniconda3/bin:$PATH sudo apt-get install -y libc6-dev-i386 ci/setup_env.sh displayName: 'Setup environment and build pandas' @@ -45,14 +40,12 @@ jobs: # Do not require pandas - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev ci/code_checks.sh lint displayName: 'Linting' condition: true - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev ci/code_checks.sh dependencies displayName: 'Dependencies consistency' @@ -60,42 +53,36 @@ jobs: # Require pandas - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev ci/code_checks.sh code displayName: 'Checks on imported code' condition: true - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev ci/code_checks.sh doctests displayName: 'Running doctests' condition: true - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev ci/code_checks.sh docstrings displayName: 'Docstring validation' condition: true - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev ci/code_checks.sh typing displayName: 'Typing validation' condition: true - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev pytest --capture=no --strict scripts - displayName: 'Testing docstring validaton script' + displayName: 'Testing docstring validation script' condition: true - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev cd asv_bench asv check -E existing @@ -124,16 +111,15 @@ jobs: steps: - script: | echo '##vso[task.setvariable variable=ENV_FILE]environment.yml' + echo '##vso[task.prependpath]$(HOME)/miniconda3/bin' displayName: 
'Setting environment variables' - script: | - export PATH=$HOME/miniconda3/bin:$PATH sudo apt-get install -y libc6-dev-i386 ci/setup_env.sh displayName: 'Setup environment and build pandas' - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev # Next we should simply have `doc/make.py --warnings-are-errors`, everything else is required because the ipython directive doesn't fail the build on errors (https://github.com/ipython/ipython/issues/11547) doc/make.py --warnings-are-errors | tee sphinx.log ; SPHINX_RET=${PIPESTATUS[0]} diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml index 39f862290e720..6093df46ffb60 100644 --- a/ci/azure/posix.yml +++ b/ci/azure/posix.yml @@ -56,17 +56,15 @@ jobs: steps: - script: | if [ "$(uname)" == "Linux" ]; then sudo apt-get install -y libc6-dev-i386 $EXTRA_APT; fi + echo '##vso[task.prependpath]$(HOME)/miniconda3/bin' echo "Creating Environment" ci/setup_env.sh displayName: 'Setup environment and build pandas' - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev ci/run_tests.sh displayName: 'Test' - - script: | - export PATH=$HOME/miniconda3/bin:$PATH - source activate pandas-dev && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd + - script: source activate pandas-dev && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd - task: PublishTestResults@2 inputs: testResultsFiles: 'test-data-*.xml' @@ -97,7 +95,6 @@ jobs: } displayName: 'Check for test failures' - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev python ci/print_skipped.py displayName: 'Print skipped tests' diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml index 20cad1bb4af96..dfa82819b9826 100644 --- a/ci/azure/windows.yml +++ b/ci/azure/windows.yml @@ -17,7 +17,9 @@ jobs: CONDA_PY: "37" steps: - - powershell: Write-Host "##vso[task.prependpath]$env:CONDA\Scripts" + - powershell: | + Write-Host "##vso[task.prependpath]$env:CONDA\Scripts" + Write-Host "##vso[task.prependpath]$HOME/miniconda3/bin" displayName: 'Add conda to PATH' - script: conda update -q -n base conda displayName: Update conda @@ -52,7 +54,6 @@ jobs: } displayName: 'Check for test failures' - script: | - export PATH=$HOME/miniconda3/bin:$PATH source activate pandas-dev python ci/print_skipped.py displayName: 'Print skipped tests' diff --git a/ci/check_git_tags.sh b/ci/check_git_tags.sh new file mode 100755 index 0000000000000..9dbcd4f98683e --- /dev/null +++ b/ci/check_git_tags.sh @@ -0,0 +1,28 @@ +set -e + +if [[ ! 
$(git tag) ]]; then + echo "No git tags in clone, please sync your git tags with upstream using:" + echo " git fetch --tags upstream" + echo " git push --tags origin" + echo "" + echo "If the issue persists, the clone depth needs to be increased in .travis.yml" + exit 1 +fi + +# This will error if there are no tags and we omit --always +DESCRIPTION=$(git describe --long --tags) +echo "$DESCRIPTION" + +if [[ "$DESCRIPTION" == *"untagged"* ]]; then + echo "Unable to determine most recent tag, aborting build" + exit 1 +else + if [[ "$DESCRIPTION" != *"g"* ]]; then + # A good description will have the hash prefixed by g, a bad one will be + # just the hash + echo "Unable to determine most recent tag, aborting build" + exit 1 + else + echo "$(git tag)" + fi +fi diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 06d45e38bfcdb..333136ddfddd9 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -263,8 +263,8 @@ fi ### DOCSTRINGS ### if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then - MSG='Validate docstrings (GL03, GL04, GL05, GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA05)' ; echo $MSG - $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL04,GL05,GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA05 + MSG='Validate docstrings (GL03, GL04, GL05, GL06, GL07, GL09, GL10, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA05)' ; echo $MSG + $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL04,GL05,GL06,GL07,GL09,GL10,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA05 RET=$(($RET + $?)) ; echo $MSG "DONE" fi diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml index 8f8273f57c3fe..6a77b5dbedc61 100644 --- a/ci/deps/azure-36-locale.yaml +++ b/ci/deps/azure-36-locale.yaml @@ -20,8 +20,8 @@ dependencies: - xlsxwriter=0.9.8 - xlwt=1.2.0 # universal - - pytest>=4.0.2,<5.0.0 - - pytest-xdist + - pytest>=5.0.0 + - pytest-xdist>=1.29.0 - pytest-mock - pytest-azurepipelines - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 05adbf0c924dc..26dcd213bbfa0 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -26,8 +26,8 @@ dependencies: - xlsxwriter - xlwt # universal - - pytest>=4.0.2 - - pytest-xdist + - pytest>=5.0.1 + - pytest-xdist>=1.29.0 - pytest-mock - pytest-azurepipelines - pip diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml index 5cf897c98da10..65c92ec1dcf0d 100644 --- a/ci/deps/azure-37-numpydev.yaml +++ b/ci/deps/azure-37-numpydev.yaml @@ -6,7 +6,8 @@ dependencies: - pytz - Cython>=0.28.2 # universal - - pytest>=4.0.2 + # pytest < 5 until defaults has pytest-xdist>=1.29.0 + - pytest>=4.0.2,<5.0 - pytest-xdist - pytest-mock - hypothesis>=3.58.0 diff --git a/ci/deps/azure-macos-35.yaml b/ci/deps/azure-macos-35.yaml index 98859b596ab2a..39315b15a018b 100644 --- a/ci/deps/azure-macos-35.yaml +++ b/ci/deps/azure-macos-35.yaml @@ -22,11 +22,12 @@ dependencies: - xlrd - xlsxwriter - xlwt + - pip - pip: - pyreadstat # universal - - pytest==4.5.0 - - pytest-xdist + - pytest>=5.0.1 + - pytest-xdist>=1.29.0 - pytest-mock - hypothesis>=3.58.0 # https://github.com/pandas-dev/pandas/issues/27421 diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml index b0f3f5389ac85..ff9264a36cb12 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-36.yaml @@ -23,8 +23,8 @@ dependencies: - xlwt # universal - cython>=0.28.2 - - pytest>=4.0.2 - - 
pytest-xdist + - pytest>=5.0.1 + - pytest-xdist>=1.29.0 - pytest-mock - pytest-azurepipelines - hypothesis>=3.58.0 diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 08208d1e2d59a..075234a937035 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -26,8 +26,8 @@ dependencies: - xlwt # universal - cython>=0.28.2 - - pytest>=4.0.2 - - pytest-xdist + - pytest>=5.0.0 + - pytest-xdist>=1.29.0 - pytest-mock - pytest-azurepipelines - hypothesis>=3.58.0 diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index a3f6d5b30f3e1..19002cbb8575e 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -39,8 +39,8 @@ dependencies: - xlsxwriter - xlwt # universal - - pytest - - pytest-xdist + - pytest>=5.0.1 + - pytest-xdist>=1.29.0 - pytest-cov - pytest-mock - hypothesis>=3.58.0 diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml index 538a82f66e4c8..9564bf5bb3a9f 100644 --- a/ci/deps/travis-36-slow.yaml +++ b/ci/deps/travis-36-slow.yaml @@ -25,8 +25,8 @@ dependencies: - xlsxwriter - xlwt # universal - - pytest>=4.0.2,<5.0.0 - - pytest-xdist + - pytest>=5.0.0 + - pytest-xdist>=1.29.0 - pytest-mock - moto - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index c9a8c274fb144..9e08c41a3d9c0 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -13,8 +13,8 @@ dependencies: - pyarrow - pytz # universal - - pytest>=4.0.2 - - pytest-xdist + - pytest>=5.0.0 + - pytest-xdist>=1.29.0 - pytest-mock - hypothesis>=3.58.0 - s3fs diff --git a/ci/print_skipped.py b/ci/print_skipped.py index a44281044e11d..6bc1dcfcd320d 100755 --- a/ci/print_skipped.py +++ b/ci/print_skipped.py @@ -1,8 +1,8 @@ #!/usr/bin/env python +import math import os import sys -import math import xml.etree.ElementTree as et diff --git a/ci/run_tests.sh b/ci/run_tests.sh index ee46da9f52eab..27d3fcb4cf563 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -50,9 +50,10 @@ do # if no tests are found (the case of "single and slow"), pytest exits with code 5, and would make the script fail, if not for the below code sh -c "$PYTEST_CMD; ret=\$?; [ \$ret = 5 ] && exit 0 || exit \$ret" - if [[ "$COVERAGE" && $? == 0 ]]; then - echo "uploading coverage for $TYPE tests" - echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME" - bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME - fi + # 2019-08-21 disabling because this is hitting HTTP 400 errors GH#27602 + # if [[ "$COVERAGE" && $? 
== 0 && "$TRAVIS_BRANCH" == "master" ]]; then + # echo "uploading coverage for $TYPE tests" + # echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME" + # bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME + # fi done diff --git a/doc/logo/pandas_logo.py b/doc/logo/pandas_logo.py index 5a07b094e6ad3..89410e3847bef 100644 --- a/doc/logo/pandas_logo.py +++ b/doc/logo/pandas_logo.py @@ -1,7 +1,6 @@ # script to generate the pandas logo -from matplotlib import pyplot as plt -from matplotlib import rcParams +from matplotlib import pyplot as plt, rcParams import numpy as np rcParams["mathtext.fontset"] = "cm" diff --git a/doc/make.py b/doc/make.py index 48febef20fbe6..cbb1fa6a5324a 100755 --- a/doc/make.py +++ b/doc/make.py @@ -11,18 +11,18 @@ $ python make.py html $ python make.py latex """ +import argparse +import csv import importlib -import sys import os import shutil -import csv import subprocess -import argparse +import sys import webbrowser + import docutils import docutils.parsers.rst - DOC_PATH = os.path.dirname(os.path.abspath(__file__)) SOURCE_PATH = os.path.join(DOC_PATH, "source") BUILD_PATH = os.path.join(DOC_PATH, "build") diff --git a/doc/source/conf.py b/doc/source/conf.py index 3ebc5d8b6333b..1da1948e45268 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -10,15 +10,15 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys -import os -import inspect import importlib +import inspect import logging +import os +import sys + import jinja2 -from sphinx.ext.autosummary import _import_by_name from numpydoc.docscrape import NumpyDocString - +from sphinx.ext.autosummary import _import_by_name logger = logging.getLogger(__name__) @@ -141,7 +141,7 @@ # built documents. # # The short X.Y version. -import pandas +import pandas # noqa: E402 isort:skip # version = '%s r%s' % (pandas.__version__, svn_version()) version = str(pandas.__version__) @@ -315,7 +315,6 @@ import numpy as np import pandas as pd - randn = np.random.randn np.random.seed(123456) np.set_printoptions(precision=4, suppress=True) pd.options.display.max_rows = 15 @@ -433,10 +432,14 @@ # Add custom Documenter to handle attributes/methods of an AccessorProperty # eg pandas.Series.str and pandas.Series.dt (see GH9322) -import sphinx -from sphinx.util import rpartition -from sphinx.ext.autodoc import Documenter, MethodDocumenter, AttributeDocumenter -from sphinx.ext.autosummary import Autosummary +import sphinx # noqa: E402 isort:skip +from sphinx.util import rpartition # noqa: E402 isort:skip +from sphinx.ext.autodoc import ( # noqa: E402 isort:skip + AttributeDocumenter, + Documenter, + MethodDocumenter, +) +from sphinx.ext.autosummary import Autosummary # noqa: E402 isort:skip class AccessorDocumenter(MethodDocumenter): diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 80dc8b0d8782b..be6555b2ab936 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -133,22 +133,11 @@ Installing a C compiler Pandas uses C extensions (mostly written using Cython) to speed up certain operations. To install pandas from source, you need to compile these C extensions, which means you need a C compiler. This process depends on which -platform you're using. Follow the `CPython contributing guide -<https://devguide.python.org/setup/#compile-and-build>`_ for getting a -compiler installed. 
You don't need to do any of the ``./configure`` or ``make`` -steps; you only need to install the compiler. - -For Windows developers, when using Python 3.5 and later, it is sufficient to -install `Visual Studio 2017 <https://visualstudio.com/>`_ with the -**Python development workload** and the **Python native development tools** -option. Otherwise, the following links may be helpful. - -* https://blogs.msdn.microsoft.com/pythonengineering/2017/03/07/python-support-in-vs2017/ -* https://blogs.msdn.microsoft.com/pythonengineering/2016/04/11/unable-to-find-vcvarsall-bat/ -* https://github.com/conda/conda-recipes/wiki/Building-from-Source-on-Windows-32-bit-and-64-bit -* https://cowboyprogrammer.org/building-python-wheels-for-windows/ -* https://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/ -* https://support.enthought.com/hc/en-us/articles/204469260-Building-Python-extensions-with-Canopy +platform you're using. + +* Windows: https://devguide.python.org/setup/#windows-compiling +* Mac: https://devguide.python.org/setup/#macos +* Unix: https://devguide.python.org/setup/#unix-compiling Let us know if you have any difficulties by opening an issue or reaching out on `Gitter`_. @@ -710,6 +699,136 @@ You'll also need to See :ref:`contributing.warnings` for more. +.. _contributing.type_hints: + +Type Hints +---------- + +*pandas* strongly encourages the use of :pep:`484` style type hints. New development should contain type hints and pull requests to annotate existing code are accepted as well! + +Style Guidelines +~~~~~~~~~~~~~~~~ + +Type imports should follow the ``from typing import ...`` convention. So rather than + +.. code-block:: python + + import typing + + primes = [] # type: typing.List[int] + +You should write + +.. code-block:: python + + from typing import List, Optional, Union + + primes = [] # type: List[int] + +``Optional`` should be used where applicable, so instead of + +.. code-block:: python + + maybe_primes = [] # type: List[Union[int, None]] + +You should write + +.. code-block:: python + + maybe_primes = [] # type: List[Optional[int]] + +In some cases in the code base, classes may define class variables that shadow builtins. This causes an issue as described in `Mypy 1775 <https://github.com/python/mypy/issues/1775#issuecomment-310969854>`_. The defensive solution here is to create an unambiguous alias of the builtin and use that within your annotation. For example, if you come across a definition like + +.. code-block:: python + + class SomeClass1: + str = None + +The appropriate way to annotate this would be as follows + +.. code-block:: python + + str_type = str + + class SomeClass2: + str = None # type: str_type + +In some cases you may be tempted to use ``cast`` from the typing module when you know better than the analyzer. This occurs particularly when using custom inference functions. For example + +.. code-block:: python + + from typing import cast + + from pandas.core.dtypes.common import is_number + + def cannot_infer_bad(obj: Union[str, int, float]): + + if is_number(obj): + ... + else: # Reasonably only str objects would reach this but... + obj = cast(str, obj) # Mypy complains without this! + return obj.upper() + +The limitation here is that while a human can reasonably understand that ``is_number`` would catch the ``int`` and ``float`` types mypy cannot make that same inference just yet (see `mypy #5206 <https://github.com/python/mypy/issues/5206>`_). While the above works, the use of ``cast`` is **strongly discouraged**.
Where applicable a refactor of the code to appease static analysis is preferable + +.. code-block:: python + + def cannot_infer_good(obj: Union[str, int, float]): + + if isinstance(obj, str): + return obj.upper() + else: + ... + +With custom types and inference this is not always possible so exceptions are made, but every effort should be exhausted to avoid ``cast`` before going down such paths. + +Syntax Requirements +~~~~~~~~~~~~~~~~~~~ + +Because *pandas* still supports Python 3.5, :pep:`526` does not apply and variables **must** be annotated with type comments. Specifically, this is a valid annotation within pandas: + +.. code-block:: python + + primes = [] # type: List[int] + +Whereas this is **NOT** allowed: + +.. code-block:: python + + primes: List[int] = [] # not supported in Python 3.5! + +Note that function signatures can always be annotated per :pep:`3107`: + +.. code-block:: python + + def sum_of_primes(primes: List[int] = []) -> int: + ... + + +Pandas-specific Types +~~~~~~~~~~~~~~~~~~~~~ + +Commonly used types specific to *pandas* will appear in `pandas._typing <https://github.com/pandas-dev/pandas/blob/master/pandas/_typing.py>`_ and you should use these where applicable. This module is private for now but ultimately this should be exposed to third party libraries who want to implement type checking against pandas. + +For example, quite a few functions in *pandas* accept a ``dtype`` argument. This can be expressed as a string like ``"object"``, a ``numpy.dtype`` like ``np.int64`` or even a pandas ``ExtensionDtype`` like ``pd.CategoricalDtype``. Rather than burden the user with having to constantly annotate all of those options, this can simply be imported and reused from the pandas._typing module + +.. code-block:: python + + from pandas._typing import Dtype + + def as_type(dtype: Dtype) -> ...: + ... + +This module will ultimately house types for repeatedly used concepts like "path-like", "array-like", "numeric", etc... and can also hold aliases for commonly appearing parameters like `axis`. Development of this module is active so be sure to refer to the source for the most up to date list of available types. + +Validating Type Hints +~~~~~~~~~~~~~~~~~~~~~ + +*pandas* uses `mypy <http://mypy-lang.org>`_ to statically analyze the code base and type hints. After making any change you can ensure your type hints are correct by running + +.. code-block:: shell + + mypy pandas .. _contributing.ci: diff --git a/doc/source/development/developer.rst b/doc/source/development/developer.rst index a283920ae4377..923ef005d5926 100644 --- a/doc/source/development/developer.rst +++ b/doc/source/development/developer.rst @@ -37,12 +37,19 @@ So that a ``pandas.DataFrame`` can be faithfully reconstructed, we store a .. code-block:: text - {'index_columns': ['__index_level_0__', '__index_level_1__', ...], + {'index_columns': [<descr0>, <descr1>, ...], 'column_indexes': [<ci0>, <ci1>, ..., <ciN>], 'columns': [<c0>, <c1>, ...], - 'pandas_version': $VERSION} + 'pandas_version': $VERSION, + 'creator': { + 'library': $LIBRARY, + 'version': $LIBRARY_VERSION + }} -Here, ``<c0>``/``<ci0>`` and so forth are dictionaries containing the metadata +The "descriptor" values ``<descr0>`` in the ``'index_columns'`` field are +strings (referring to a column) or dictionaries with values as described below. + +The ``<c0>``/``<ci0>`` and so forth are dictionaries containing the metadata for each column, *including the index columns*. This has JSON form: .. 
code-block:: text @@ -53,26 +60,37 @@ for each column, *including the index columns*. This has JSON form: 'numpy_type': numpy_type, 'metadata': metadata} -.. note:: +See below for the detailed specification for these. + +Index Metadata Descriptors +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``RangeIndex`` can be stored as metadata only, not requiring serialization. The +descriptor format for these is as follows: - Every index column is stored with a name matching the pattern - ``__index_level_\d+__`` and its corresponding column information is can be - found with the following code snippet. +.. code-block:: python - Following this naming convention isn't strictly necessary, but strongly - suggested for compatibility with Arrow. + index = pd.RangeIndex(0, 10, 2) + {'kind': 'range', + 'name': index.name, + 'start': index.start, + 'stop': index.stop, + 'step': index.step} - Here's an example of how the index metadata is structured in pyarrow: +Other index types must be serialized as data columns along with the other +DataFrame columns. The metadata for these is a string indicating the name of +the field in the data columns, for example ``'__index_level_0__'``. - .. code-block:: python +If an index has a non-None ``name`` attribute, and there is no other column +with a name matching that value, then the ``index.name`` value can be used as +the descriptor. Otherwise (for unnamed indexes and ones with names colliding +with other column names) a disambiguating name with pattern matching +``__index_level_\d+__`` should be used. In cases of named indexes as data +columns, the ``name`` attribute is always stored in the column descriptors as +above. - # assuming there's at least 3 levels in the index - index_columns = metadata['index_columns'] # noqa: F821 - columns = metadata['columns'] # noqa: F821 - ith_index = 2 - assert index_columns[ith_index] == '__index_level_2__' - ith_index_info = columns[-len(index_columns):][ith_index] - ith_index_level_name = ith_index_info['name'] +Column Metadata +~~~~~~~~~~~~~~~ ``pandas_type`` is the logical type of the column, and is one of: @@ -161,4 +179,8 @@ As an example of fully-formed metadata: 'numpy_type': 'int64', 'metadata': None} ], - 'pandas_version': '0.20.0'} + 'pandas_version': '0.20.0', + 'creator': { + 'library': 'pyarrow', + 'version': '0.13.0' + }} diff --git a/doc/source/getting_started/10min.rst b/doc/source/getting_started/10min.rst index 9045e5b32c29f..41520795bde62 100644 --- a/doc/source/getting_started/10min.rst +++ b/doc/source/getting_started/10min.rst @@ -278,7 +278,7 @@ Using a single column's values to select data. .. ipython:: python - df[df.A > 0] + df[df['A'] > 0] Selecting values from a DataFrame where a boolean condition is met. diff --git a/doc/source/getting_started/basics.rst b/doc/source/getting_started/basics.rst index 3f6f56376861f..802ffadf2a81e 100644 --- a/doc/source/getting_started/basics.rst +++ b/doc/source/getting_started/basics.rst @@ -926,7 +926,7 @@ Single aggregations on a ``Series`` this will return a scalar value: .. ipython:: python - tsdf.A.agg('sum') + tsdf['A'].agg('sum') Aggregating with multiple functions @@ -950,13 +950,13 @@ On a ``Series``, multiple functions return a ``Series``, indexed by the function .. ipython:: python - tsdf.A.agg(['sum', 'mean']) + tsdf['A'].agg(['sum', 'mean']) Passing a ``lambda`` function will yield a ``<lambda>`` named row: ..
ipython:: python - tsdf.A.agg(['sum', lambda x: x.mean()]) + tsdf['A'].agg(['sum', lambda x: x.mean()]) Passing a named function will yield that name for the row: @@ -965,7 +965,7 @@ Passing a named function will yield that name for the row: def mymean(x): return x.mean() - tsdf.A.agg(['sum', mymean]) + tsdf['A'].agg(['sum', mymean]) Aggregating with a dict +++++++++++++++++++++++ @@ -1065,7 +1065,7 @@ Passing a single function to ``.transform()`` with a ``Series`` will yield a sin .. ipython:: python - tsdf.A.transform(np.abs) + tsdf['A'].transform(np.abs) Transform with multiple functions @@ -1084,7 +1084,7 @@ resulting column names will be the transforming functions. .. ipython:: python - tsdf.A.transform([np.abs, lambda x: x + 1]) + tsdf['A'].transform([np.abs, lambda x: x + 1]) Transforming with a dict diff --git a/doc/source/getting_started/comparison/comparison_with_r.rst b/doc/source/getting_started/comparison/comparison_with_r.rst index 444e886bc951d..f67f46fc2b29b 100644 --- a/doc/source/getting_started/comparison/comparison_with_r.rst +++ b/doc/source/getting_started/comparison/comparison_with_r.rst @@ -81,7 +81,7 @@ R pandas =========================================== =========================================== ``select(df, col_one = col1)`` ``df.rename(columns={'col1': 'col_one'})['col_one']`` ``rename(df, col_one = col1)`` ``df.rename(columns={'col1': 'col_one'})`` -``mutate(df, c=a-b)`` ``df.assign(c=df.a-df.b)`` +``mutate(df, c=a-b)`` ``df.assign(c=df['a']-df['b'])`` =========================================== =========================================== @@ -258,8 +258,8 @@ index/slice as well as standard boolean indexing: df = pd.DataFrame({'a': np.random.randn(10), 'b': np.random.randn(10)}) df.query('a <= b') - df[df.a <= df.b] - df.loc[df.a <= df.b] + df[df['a'] <= df['b']] + df.loc[df['a'] <= df['b']] For more details and examples see :ref:`the query documentation <indexing.query>`. @@ -284,7 +284,7 @@ In ``pandas`` the equivalent expression, using the df = pd.DataFrame({'a': np.random.randn(10), 'b': np.random.randn(10)}) df.eval('a + b') - df.a + df.b # same as the previous expression + df['a'] + df['b'] # same as the previous expression In certain cases :meth:`~pandas.DataFrame.eval` will be much faster than evaluation in pure Python. For more details and examples see :ref:`the eval diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst index 366fdd546f58b..6a03c06de3699 100644 --- a/doc/source/getting_started/comparison/comparison_with_sql.rst +++ b/doc/source/getting_started/comparison/comparison_with_sql.rst @@ -49,6 +49,20 @@ With pandas, column selection is done by passing a list of column names to your Calling the DataFrame without the list of column names would display all columns (akin to SQL's ``*``). +In SQL, you can add a calculated column: + +.. code-block:: sql + + SELECT *, tip/total_bill as tip_rate + FROM tips + LIMIT 5; + +With pandas, you can use the :meth:`DataFrame.assign` method of a DataFrame to append a new column: + +.. ipython:: python + + tips.assign(tip_rate=tips['tip'] / tips['total_bill']).head(5) + WHERE ----- Filtering in SQL is done via a WHERE clause. diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index b57ce83cfc33c..f5669626aa2b3 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -39,7 +39,7 @@ See the :ref:`overview` for more detail about what's in the library. 
:hidden: {% endif %} {% if not single_doc %} - What's New in 0.25.0 <whatsnew/v0.25.0> + What's New in 1.0.0 <whatsnew/v1.0.0> install getting_started/index user_guide/index @@ -53,7 +53,7 @@ See the :ref:`overview` for more detail about what's in the library. whatsnew/index {% endif %} -* :doc:`whatsnew/v0.25.0` +* :doc:`whatsnew/v1.0.0` * :doc:`install` * :doc:`getting_started/index` diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst index 407aab4bb1f1b..4b1a99da7cd4c 100644 --- a/doc/source/reference/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -34,7 +34,6 @@ objects. api.extensions.ExtensionArray._concat_same_type api.extensions.ExtensionArray._formatter - api.extensions.ExtensionArray._formatting_values api.extensions.ExtensionArray._from_factorized api.extensions.ExtensionArray._from_sequence api.extensions.ExtensionArray._from_sequence_of_strings @@ -45,6 +44,7 @@ objects. api.extensions.ExtensionArray.argsort api.extensions.ExtensionArray.astype api.extensions.ExtensionArray.copy + api.extensions.ExtensionArray.view api.extensions.ExtensionArray.dropna api.extensions.ExtensionArray.factorize api.extensions.ExtensionArray.fillna diff --git a/doc/source/reference/window.rst b/doc/source/reference/window.rst index 9e1374a3bd8e4..2f6addf607877 100644 --- a/doc/source/reference/window.rst +++ b/doc/source/reference/window.rst @@ -5,7 +5,6 @@ ====== Window ====== -.. currentmodule:: pandas.core.window Rolling objects are returned by ``.rolling`` calls: :func:`pandas.DataFrame.rolling`, :func:`pandas.Series.rolling`, etc. Expanding objects are returned by ``.expanding`` calls: :func:`pandas.DataFrame.expanding`, :func:`pandas.Series.expanding`, etc. @@ -13,6 +12,8 @@ EWM objects are returned by ``.ewm`` calls: :func:`pandas.DataFrame.ewm`, :func: Standard moving window functions -------------------------------- +.. currentmodule:: pandas.core.window.rolling + .. autosummary:: :toctree: api/ @@ -38,6 +39,8 @@ Standard moving window functions Standard expanding window functions ----------------------------------- +.. currentmodule:: pandas.core.window.expanding + .. autosummary:: :toctree: api/ @@ -59,6 +62,8 @@ Standard expanding window functions Exponentially-weighted moving window functions ---------------------------------------------- +.. currentmodule:: pandas.core.window.ewm + .. autosummary:: :toctree: api/ diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst index 22a9791ffde30..62a9b6396404a 100644 --- a/doc/source/user_guide/advanced.rst +++ b/doc/source/user_guide/advanced.rst @@ -738,7 +738,7 @@ and allows efficient indexing and storage of an index with a large number of dup df['B'] = df['B'].astype(CategoricalDtype(list('cab'))) df df.dtypes - df.B.cat.categories + df['B'].cat.categories Setting the index will create a ``CategoricalIndex``. diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst index 15af5208a4f1f..c9d3bc3a28c70 100644 --- a/doc/source/user_guide/cookbook.rst +++ b/doc/source/user_guide/cookbook.rst @@ -592,8 +592,8 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to .. 
ipython:: python df = pd.DataFrame([0, 1, 0, 1, 1, 1, 0, 1, 1], columns=['A']) - df.A.groupby((df.A != df.A.shift()).cumsum()).groups - df.A.groupby((df.A != df.A.shift()).cumsum()).cumsum() + df['A'].groupby((df['A'] != df['A'].shift()).cumsum()).groups + df['A'].groupby((df['A'] != df['A'].shift()).cumsum()).cumsum() Expanding data ************** @@ -719,7 +719,7 @@ Rolling Apply to multiple columns where function calculates a Series before a Sc df def gm(df, const): - v = ((((df.A + df.B) + 1).cumprod()) - 1) * const + v = ((((df['A'] + df['B']) + 1).cumprod()) - 1) * const return v.iloc[-1] s = pd.Series({df.index[i]: gm(df.iloc[i:min(i + 51, len(df) - 1)], 5) diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst index b77bfb9778837..2df5b9d82dcc3 100644 --- a/doc/source/user_guide/enhancingperf.rst +++ b/doc/source/user_guide/enhancingperf.rst @@ -243,9 +243,9 @@ We've gotten another big improvement. Let's check again where the time is spent: .. ipython:: python - %prun -l 4 apply_integrate_f(df['a'].to_numpy(), - df['b'].to_numpy(), - df['N'].to_numpy()) + %%prun -l 4 apply_integrate_f(df['a'].to_numpy(), + df['b'].to_numpy(), + df['N'].to_numpy()) As one might expect, the majority of the time is now spent in ``apply_integrate_f``, so if we wanted to make anymore efficiencies we must continue to concentrate our @@ -393,15 +393,15 @@ Consider the following toy example of doubling each observation: .. code-block:: ipython # Custom function without numba - In [5]: %timeit df['col1_doubled'] = df.a.apply(double_every_value_nonumba) # noqa E501 + In [5]: %timeit df['col1_doubled'] = df['a'].apply(double_every_value_nonumba) # noqa E501 1000 loops, best of 3: 797 us per loop # Standard implementation (faster than a custom function) - In [6]: %timeit df['col1_doubled'] = df.a * 2 + In [6]: %timeit df['col1_doubled'] = df['a'] * 2 1000 loops, best of 3: 233 us per loop # Custom function with numba - In [7]: %timeit (df['col1_doubled'] = double_every_value_withnumba(df.a.to_numpy()) + In [7]: %timeit df['col1_doubled'] = double_every_value_withnumba(df['a'].to_numpy()) 1000 loops, best of 3: 145 us per loop Caveats @@ -643,8 +643,8 @@ The equivalent in standard Python would be .. ipython:: python df = pd.DataFrame(dict(a=range(5), b=range(5, 10))) - df['c'] = df.a + df.b - df['d'] = df.a + df.b + df.c + df['c'] = df['a'] + df['b'] + df['d'] = df['a'] + df['b'] + df['c'] df['a'] = 1 df @@ -688,7 +688,7 @@ name in an expression. a = np.random.randn() df.query('@a < a') - df.loc[a < df.a] # same as the previous expression + df.loc[a < df['a']] # same as the previous expression With :func:`pandas.eval` you cannot use the ``@`` prefix *at all*, because it isn't defined in that context. ``pandas`` will let you know this if you try to diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index e3b75afcf945e..cf55ce0c9a6d4 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -210,7 +210,7 @@ as an attribute: See `here for an explanation of valid identifiers <https://docs.python.org/3/reference/lexical_analysis.html#identifiers>`__. - - The attribute will not be available if it conflicts with an existing method name, e.g. ``s.min`` is not allowed. + - The attribute will not be available if it conflicts with an existing method name, e.g. ``s.min`` is not allowed, but ``s['min']`` is possible.
- Similarly, the attribute will not be available if it conflicts with any of the following list: ``index``, ``major_axis``, ``minor_axis``, ``items``. @@ -540,7 +540,7 @@ The ``callable`` must be a function with one argument (the calling Series or Dat columns=list('ABCD')) df1 - df1.loc[lambda df: df.A > 0, :] + df1.loc[lambda df: df['A'] > 0, :] df1.loc[:, lambda df: ['A', 'B']] df1.iloc[:, lambda df: [0, 1]] @@ -552,7 +552,7 @@ You can use callable indexing in ``Series``. .. ipython:: python - df1.A.loc[lambda s: s > 0] + df1['A'].loc[lambda s: s > 0] Using these methods / indexers, you can chain data selection operations without using a temporary variable. @@ -561,7 +561,7 @@ without using a temporary variable. bb = pd.read_csv('data/baseball.csv', index_col='id') (bb.groupby(['year', 'team']).sum() - .loc[lambda df: df.r > 100]) + .loc[lambda df: df['r'] > 100]) .. _indexing.deprecate_ix: @@ -871,9 +871,9 @@ Boolean indexing Another common operation is the use of boolean vectors to filter the data. The operators are: ``|`` for ``or``, ``&`` for ``and``, and ``~`` for ``not``. These **must** be grouped by using parentheses, since by default Python will -evaluate an expression such as ``df.A > 2 & df.B < 3`` as -``df.A > (2 & df.B) < 3``, while the desired evaluation order is -``(df.A > 2) & (df.B < 3)``. +evaluate an expression such as ``df['A'] > 2 & df['B'] < 3`` as +``df['A'] > (2 & df['B']) < 3``, while the desired evaluation order is +``(df['A'] > 2) & (df['B'] < 3)``. Using a boolean vector to index a Series works exactly as in a NumPy ndarray: @@ -1134,7 +1134,7 @@ between the values of columns ``a`` and ``c``. For example: df # pure python - df[(df.a < df.b) & (df.b < df.c)] + df[(df['a'] < df['b']) & (df['b'] < df['c'])] # query df.query('(a < b) & (b < c)') @@ -1241,7 +1241,7 @@ Full numpy-like syntax: df = pd.DataFrame(np.random.randint(n, size=(n, 3)), columns=list('abc')) df df.query('(a < b) & (b < c)') - df[(df.a < df.b) & (df.b < df.c)] + df[(df['a'] < df['b']) & (df['b'] < df['c'])] Slightly nicer by removing the parentheses (by binding making comparison operators bind tighter than ``&`` and ``|``). @@ -1279,12 +1279,12 @@ The ``in`` and ``not in`` operators df.query('a in b') # How you'd do it in pure Python - df[df.a.isin(df.b)] + df[df['a'].isin(df['b'])] df.query('a not in b') # pure Python - df[~df.a.isin(df.b)] + df[~df['a'].isin(df['b'])] You can combine this with other expressions for very succinct queries: @@ -1297,7 +1297,7 @@ You can combine this with other expressions for very succinct queries: df.query('a in b and c < d') # pure Python - df[df.b.isin(df.a) & (df.c < df.d)] + df[df['b'].isin(df['a']) & (df['c'] < df['d'])] .. note:: @@ -1326,7 +1326,7 @@ to ``in``/``not in``. df.query('b == ["a", "b", "c"]') # pure Python - df[df.b.isin(["a", "b", "c"])] + df[df['b'].isin(["a", "b", "c"])] df.query('c == [1, 2]') @@ -1338,7 +1338,7 @@ to ``in``/``not in``. df.query('[1, 2] not in c') # pure Python - df[df.c.isin([1, 2])] + df[df['c'].isin([1, 2])] Boolean operators @@ -1352,7 +1352,7 @@ You can negate boolean expressions with the word ``not`` or the ``~`` operator.
df['bools'] = np.random.rand(len(df)) > 0.5 df.query('~bools') df.query('not bools') - df.query('not bools') == df[~df.bools] + df.query('not bools') == df[~df['bools']] Of course, expressions can be arbitrarily complex too: @@ -1362,7 +1362,10 @@ Of course, expressions can be arbitrarily complex too: shorter = df.query('a < b < c and (not bools) or bools > 2') # equivalent in pure Python - longer = df[(df.a < df.b) & (df.b < df.c) & (~df.bools) | (df.bools > 2)] + longer = df[(df['a'] < df['b']) + & (df['b'] < df['c']) + & (~df['bools']) + | (df['bools'] > 2)] shorter longer @@ -1835,14 +1838,14 @@ chained indexing expression, you can set the :ref:`option <options>` # This will show the SettingWithCopyWarning # but the frame values will be set - dfb['c'][dfb.a.str.startswith('o')] = 42 + dfb['c'][dfb['a'].str.startswith('o')] = 42 This however is operating on a copy and will not work. :: >>> pd.set_option('mode.chained_assignment','warn') - >>> dfb[dfb.a.str.startswith('o')]['c'] = 42 + >>> dfb[dfb['a'].str.startswith('o')]['c'] = 42 Traceback (most recent call last) ... SettingWithCopyWarning: diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 8e5352c337072..338c890ce317c 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -28,6 +28,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like :delim: ; text;`CSV <https://en.wikipedia.org/wiki/Comma-separated_values>`__;:ref:`read_csv<io.read_csv_table>`;:ref:`to_csv<io.store_in_csv>` + text;Fixed-Width Text File;:ref:`read_fwf<io.fwf_reader>` text;`JSON <https://www.json.org/>`__;:ref:`read_json<io.json_reader>`;:ref:`to_json<io.json_writer>` text;`HTML <https://en.wikipedia.org/wiki/HTML>`__;:ref:`read_html<io.read_html>`;:ref:`to_html<io.html>` text; Local clipboard;:ref:`read_clipboard<io.clipboard>`;:ref:`to_clipboard<io.clipboard>` @@ -1372,6 +1373,7 @@ should pass the ``escapechar`` option: print(data) pd.read_csv(StringIO(data), escapechar='\\') +.. _io.fwf_reader: .. _io.fwf: Files with fixed width columns @@ -3204,7 +3206,7 @@ argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are: writer = pd.ExcelWriter('path_to_file.xlsx', engine='xlsxwriter') # Or via pandas configuration. - from pandas import options # noqa: E402 + from pandas import options # noqa: E402 options.io.excel.xlsx.writer = 'xlsxwriter' df.to_excel('path_to_file.xlsx', sheet_name='Sheet1') @@ -3572,7 +3574,7 @@ Closing a Store and using a context manager: Read/write API '''''''''''''' -``HDFStore`` supports an top-level API using ``read_hdf`` for reading and ``to_hdf`` for writing, +``HDFStore`` supports a top-level API using ``read_hdf`` for reading and ``to_hdf`` for writing, similar to how ``read_csv`` and ``to_csv`` work. .. ipython:: python @@ -3687,7 +3689,7 @@ Hierarchical keys Keys to a store can be specified as a string. These can be in a hierarchical path-name like format (e.g. ``foo/bar/bah``), which will generate a hierarchy of sub-stores (or ``Groups`` in PyTables -parlance). Keys can be specified with out the leading '/' and are **always** +parlance). Keys can be specified without the leading '/' and are **always** absolute (e.g. 'foo' refers to '/foo'). Removal operations can remove everything in the sub-store and **below**, so be *careful*. @@ -3825,7 +3827,7 @@ data. A query is specified using the ``Term`` class under the hood, as a boolean expression. -* ``index`` and ``columns`` are supported indexers of a ``DataFrames``. 
+* ``index`` and ``columns`` are supported indexers of ``DataFrames``. * if ``data_columns`` are specified, these can be used as additional indexers. Valid comparison operators are: @@ -3917,7 +3919,7 @@ Use boolean expressions, with in-line function evaluation. store.select('dfq', "index>pd.Timestamp('20130104') & columns=['A', 'B']") -Use and inline column reference +Use inline column reference. .. ipython:: python @@ -4593,8 +4595,8 @@ Performance write chunksize (default is 50000). This will significantly lower your memory usage on writing. * You can pass ``expectedrows=<int>`` to the first ``append``, - to set the TOTAL number of expected rows that ``PyTables`` will - expected. This will optimize read/write performance. + to set the TOTAL number of rows that ``PyTables`` will expect. + This will optimize read/write performance. * Duplicate rows can be written to tables, but are filtered out in selection (with the last items being selected; thus a table is unique on major, minor pairs) @@ -5491,30 +5493,29 @@ The top-level function :func:`read_spss` can read (but not write) SPSS `sav` (.sav) and `zsav` (.zsav) format files. SPSS files contain column names. By default the -whole file is read, categorical columns are converted into ``pd.Categorical`` +whole file is read, categorical columns are converted into ``pd.Categorical``, and a ``DataFrame`` with all columns is returned. -Specify a ``usecols`` to obtain a subset of columns. Specify ``convert_categoricals=False`` +Specify the ``usecols`` parameter to obtain a subset of columns. Specify ``convert_categoricals=False`` to avoid converting categorical columns into ``pd.Categorical``. -Read a spss file: +Read an SPSS file: .. code-block:: python - df = pd.read_spss('spss_data.zsav') + df = pd.read_spss('spss_data.sav') -Extract a subset of columns ``usecols`` from SPSS file and +Extract a subset of columns contained in ``usecols`` from an SPSS file and avoid converting categorical columns into ``pd.Categorical``: .. code-block:: python - df = pd.read_spss('spss_data.zsav', usecols=['foo', 'bar'], + df = pd.read_spss('spss_data.sav', usecols=['foo', 'bar'], convert_categoricals=False) -More info_ about the sav and zsav file format is available from the IBM -web site. +More information about the `sav` and `zsav` file format is available here_. -.. _info: https://www.ibm.com/support/knowledgecenter/en/SSLVMB_22.0.0/com.ibm.spss.statistics.help/spss/base/savedatatypes.htm +.. _here: https://www.ibm.com/support/knowledgecenter/en/SSLVMB_22.0.0/com.ibm.spss.statistics.help/spss/base/savedatatypes.htm .. _io.other: diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst index f118fe84d523a..dd6d3062a8f0a 100644 --- a/doc/source/user_guide/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -469,7 +469,7 @@ If ``crosstab`` receives only two Series, it will provide a frequency table. 'C': [1, 1, np.nan, 1, 1]}) df - pd.crosstab(df.A, df.B) + pd.crosstab(df['A'], df['B']) Any input passed containing ``Categorical`` data will have **all** of its categories included in the cross-tabulation, even if the actual data does @@ -489,13 +489,13 @@ using the ``normalize`` argument: .. ipython:: python - pd.crosstab(df.A, df.B, normalize=True) + pd.crosstab(df['A'], df['B'], normalize=True) ``normalize`` can also normalize values within each row or within each column: .. 
ipython:: python - pd.crosstab(df.A, df.B, normalize='columns') + pd.crosstab(df['A'], df['B'], normalize='columns') ``crosstab`` can also be passed a third ``Series`` and an aggregation function (``aggfunc``) that will be applied to the values of the third ``Series`` within @@ -503,7 +503,7 @@ each group defined by the first two ``Series``: .. ipython:: python - pd.crosstab(df.A, df.B, values=df.C, aggfunc=np.sum) + pd.crosstab(df['A'], df['B'], values=df['C'], aggfunc=np.sum) Adding margins ~~~~~~~~~~~~~~ @@ -512,7 +512,7 @@ Finally, one can also add margins or normalize this output. .. ipython:: python - pd.crosstab(df.A, df.B, values=df.C, aggfunc=np.sum, normalize=True, + pd.crosstab(df['A'], df['B'], values=df['C'], aggfunc=np.sum, normalize=True, margins=True) .. _reshaping.tile: diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index fdceaa5868cec..fa16b2f216610 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -1148,10 +1148,10 @@ To plot data on a secondary y-axis, use the ``secondary_y`` keyword: .. ipython:: python - df.A.plot() + df['A'].plot() @savefig series_plot_secondary_y.png - df.B.plot(secondary_y=True, style='g') + df['B'].plot(secondary_y=True, style='g') .. ipython:: python :suppress: @@ -1205,7 +1205,7 @@ Here is the default behavior, notice how the x-axis tick labeling is performed: plt.figure() @savefig ser_plot_suppress.png - df.A.plot() + df['A'].plot() .. ipython:: python :suppress: @@ -1219,7 +1219,7 @@ Using the ``x_compat`` parameter, you can suppress this behavior: plt.figure() @savefig ser_plot_suppress_parm.png - df.A.plot(x_compat=True) + df['A'].plot(x_compat=True) .. ipython:: python :suppress: @@ -1235,9 +1235,9 @@ in ``pandas.plotting.plot_params`` can be used in a `with statement`: @savefig ser_plot_suppress_context.png with pd.plotting.plot_params.use('x_compat', True): - df.A.plot(color='r') - df.B.plot(color='g') - df.C.plot(color='b') + df['A'].plot(color='r') + df['B'].plot(color='g') + df['C'].plot(color='b') .. ipython:: python :suppress: diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index aeab2cf5809e7..fe80cc8bb959a 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -24,6 +24,7 @@ Version 0.25 .. toctree:: :maxdepth: 2 + v0.25.2 v0.25.1 v0.25.0 diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst index 59ea6b9776232..2e0442364b2f3 100644 --- a/doc/source/whatsnew/v0.10.0.rst +++ b/doc/source/whatsnew/v0.10.0.rst @@ -498,7 +498,7 @@ Here is a taste of what to expect. .. code-block:: ipython - In [58]: p4d = Panel4D(randn(2, 2, 5, 4), + In [58]: p4d = Panel4D(np.random.randn(2, 2, 5, 4), ....: labels=['Label1','Label2'], ....: items=['Item1', 'Item2'], ....: major_axis=date_range('1/1/2000', periods=5), diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst index ef6108ae3ec90..62604dd3edd2d 100644 --- a/doc/source/whatsnew/v0.20.0.rst +++ b/doc/source/whatsnew/v0.20.0.rst @@ -495,7 +495,7 @@ Other enhancements - :func:`pandas.util.hash_pandas_object` has gained the ability to hash a ``MultiIndex`` (:issue:`15224`) - ``Series/DataFrame.squeeze()`` have gained the ``axis`` parameter. (:issue:`15339`) - ``DataFrame.to_excel()`` has a new ``freeze_panes`` parameter to turn on Freeze Panes when exporting to Excel (:issue:`15160`) -- ``pd.read_html()`` will parse multiple header rows, creating a MutliIndex header. (:issue:`13434`). 
+- ``pd.read_html()`` will parse multiple header rows, creating a MultiIndex header. (:issue:`13434`). - HTML table output skips ``colspan`` or ``rowspan`` attribute if equal to 1. (:issue:`15403`) - :class:`pandas.io.formats.style.Styler` template now has blocks for easier extension, see the :ref:`example notebook </user_guide/style.ipynb#Subclassing>` (:issue:`15649`) - :meth:`Styler.render() <pandas.io.formats.style.Styler.render>` now accepts ``**kwargs`` to allow user-defined variables in the template (:issue:`15649`) diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 4d9ee4c676759..63dd56f4a3793 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -1,161 +1,115 @@ .. _whatsnew_0251: -What's new in 0.25.1 (July XX, 2019) ------------------------------------- +What's new in 0.25.1 (August 21, 2019) +-------------------------------------- -Enhancements -~~~~~~~~~~~~ - - -.. _whatsnew_0251.enhancements.other: +These are the changes in pandas 0.25.1. See :ref:`release` for a full changelog +including other versions of pandas. -Other enhancements -^^^^^^^^^^^^^^^^^^ +I/O and LZMA +~~~~~~~~~~~~ -- -- -- +Some users may unknowingly have an incomplete Python installation lacking the ``lzma`` module from the standard library. In this case, ``import pandas`` failed due to an ``ImportError`` (:issue:`27575`). +Pandas will now warn, rather than raising an ``ImportError``, if the ``lzma`` module is not present. Any subsequent attempt to use ``lzma`` methods will raise a ``RuntimeError``. +A possible fix for the lack of the ``lzma`` module is to ensure you have the necessary libraries and then re-install Python. +For example, on MacOS installing Python with ``pyenv`` may lead to an incomplete Python installation due to unmet system dependencies at compilation time (like ``xz``). Compilation will succeed, but Python might fail at run time. The issue can be solved by installing the necessary dependencies and then re-installing Python. .. 
_whatsnew_0251.bug_fixes: Bug fixes ~~~~~~~~~ - Categorical ^^^^^^^^^^^ -- -- -- +- Bug in :meth:`Categorical.fillna` that would replace all values, not just those that are ``NaN`` (:issue:`26215`) Datetimelike ^^^^^^^^^^^^ -- -- -- - -Timedelta -^^^^^^^^^ - -- -- -- +- Bug in :func:`to_datetime` where passing a timezone-naive :class:`DatetimeArray` or :class:`DatetimeIndex` and ``utc=True`` would incorrectly return a timezone-naive result (:issue:`27733`) +- Bug in :meth:`Period.to_timestamp` where a :class:`Period` outside the :class:`Timestamp` implementation bounds (roughly 1677-09-21 to 2262-04-11) would return an incorrect :class:`Timestamp` instead of raising ``OutOfBoundsDatetime`` (:issue:`19643`) +- Bug in iterating over :class:`DatetimeIndex` when the underlying data is read-only (:issue:`28055`) Timezones ^^^^^^^^^ - Bug in :class:`Index` where a numpy object array with a timezone aware :class:`Timestamp` and ``np.nan`` would not return a :class:`DatetimeIndex` (:issue:`27011`) -- -- Numeric ^^^^^^^ + - Bug in :meth:`Series.interpolate` when using a timezone aware :class:`DatetimeIndex` (:issue:`27548`) - Bug when printing negative floating point complex numbers would raise an ``IndexError`` (:issue:`27484`) -- -- +- Bug where :class:`DataFrame` arithmetic operators such as :meth:`DataFrame.mul` with a :class:`Series` with axis=1 would raise an ``AttributeError`` on :class:`DataFrame` larger than the minimum threshold to invoke numexpr (:issue:`27636`) +- Bug in :class:`DataFrame` arithmetic where missing values in results were incorrectly masked with ``NaN`` instead of ``Inf`` (:issue:`27464`) Conversion ^^^^^^^^^^ - Improved the warnings for the deprecated methods :meth:`Series.real` and :meth:`Series.imag` (:issue:`27610`) -- -- - -Strings -^^^^^^^ - -- -- -- - Interval ^^^^^^^^ + - Bug in :class:`IntervalIndex` where ``dir(obj)`` would raise ``ValueError`` (:issue:`27571`) -- -- -- Indexing ^^^^^^^^ - Bug in partial-string indexing returning a NumPy array rather than a ``Series`` when indexing with a scalar like ``.loc['2015']`` (:issue:`27516`) -- -- +- Break reference cycle involving :class:`Index` and other index classes to allow garbage collection of index objects without running the GC. (:issue:`27585`, :issue:`27840`) +- Fix regression in assigning values to a single column of a DataFrame with ``MultiIndex`` columns (:issue:`27841`). +- Fix regression in ``.ix`` fallback with an ``IntervalIndex`` (:issue:`27865`). Missing ^^^^^^^ -- -- -- - -MultiIndex -^^^^^^^^^^ - -- -- -- +- Bug in :func:`pandas.isnull` or :func:`pandas.isna` when the input is a type, e.g. ``type(pandas.Series())`` (:issue:`27482`) I/O ^^^ -- -- -- +- Avoid calling ``S3File.s3`` when reading parquet, as this was removed in s3fs version 0.3.0 (:issue:`27756`) +- Better error message when a negative header is passed in :func:`pandas.read_csv` (:issue:`27779`) +- Follow the ``min_rows`` display option (introduced in v0.25.0) correctly in the HTML repr in the notebook (:issue:`27991`). Plotting ^^^^^^^^ -- Added a pandas_plotting_backends entrypoint group for registering plot backends. See :ref:`extending.plotting-backends` for more (:issue:`26747`). -- -- +- Added a ``pandas_plotting_backends`` entrypoint group for registering plot backends. See :ref:`extending.plotting-backends` for more (:issue:`26747`). +- Fixed the reinstatement of Matplotlib datetime converters after calling + :meth:`pandas.plotting.deregister_matplotlib_converters` (:issue:`27481`).
+- Fix compatibility issue with matplotlib when passing a pandas ``Index`` to a plot call (:issue:`27775`). Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ +- Fixed regression in :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` raising when multiple quantiles are given (:issue:`27526`) - Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`) -- -- +- Bug in :meth:`pandas.core.groupby.GroupBy.nth` where ``observed=False`` was being ignored for Categorical groupers (:issue:`26385`) +- Bug in windowing over read-only arrays (:issue:`27766`) +- Fixed segfault in :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` when an invalid quantile was passed (:issue:`27470`) Reshaping ^^^^^^^^^ - A ``KeyError`` is now raised if ``.unstack()`` is called on a :class:`Series` or :class:`DataFrame` with a flat :class:`Index` passing a name which is not the correct one (:issue:`18303`) +- Bug in :meth:`merge_asof` that could not merge :class:`Timedelta` objects when passing the ``tolerance`` kwarg (:issue:`27642`) +- Bug in :meth:`DataFrame.crosstab` where an error is raised when ``margins`` is set to ``True`` and ``normalize`` is not ``False`` (:issue:`27500`) - :meth:`DataFrame.join` now suppresses the ``FutureWarning`` when the sort parameter is specified (:issue:`21952`) -- +- Bug in :meth:`DataFrame.join` raising with readonly arrays (:issue:`27943`) Sparse ^^^^^^ -- -- -- - - -Build Changes -^^^^^^^^^^^^^ - -- -- -- - -ExtensionArray -^^^^^^^^^^^^^^ - -- -- -- +- Bug in reductions for :class:`Series` with Sparse dtypes (:issue:`27080`) Other ^^^^^ + - Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when replacing timezone-aware timestamps using a dict-like replacer (:issue:`27720`) -- -- -- +- Bug in :meth:`Series.rename` when using a custom type indexer. Now any value that isn't callable or dict-like is treated as a scalar. (:issue:`27814`) .. _whatsnew_0.251.contributors: Contributors ~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst new file mode 100644 index 0000000000000..1cdf213d81a74 --- /dev/null +++ b/doc/source/whatsnew/v0.25.2.rst @@ -0,0 +1,110 @@ +.. _whatsnew_0252: + +What's new in 0.25.2 (October XX, 2019) +--------------------------------------- + +These are the changes in pandas 0.25.2. See :ref:`release` for a full changelog +including other versions of pandas. + +.. _whatsnew_0252.bug_fixes: + +Bug fixes +~~~~~~~~~ + +Categorical +^^^^^^^^^^^ + +- + +Datetimelike +^^^^^^^^^^^^ + +- +- +- + +Timezones +^^^^^^^^^ + +- + +Numeric +^^^^^^^ + +- +- +- +- + +Conversion +^^^^^^^^^^ + +- + +Interval +^^^^^^^^ + +- + +Indexing +^^^^^^^^ + +- +- +- +- + +Missing +^^^^^^^ + +- + +I/O +^^^ + +- Fix regression in notebook display where ``<th>`` tags were not used for :attr:`DataFrame.index` (:issue:`28204`). +- Regression in :meth:`~DataFrame.to_csv` where writing a :class:`Series` or :class:`DataFrame` indexed by an :class:`IntervalIndex` would incorrectly raise a ``TypeError`` (:issue:`28210`) +- +- + +Plotting +^^^^^^^^ + +- +- +- + +Groupby/resample/rolling +^^^^^^^^^^^^^^^^^^^^^^^^ + +- Bug incorrectly raising an ``IndexError`` when passing a list of quantiles to :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` (:issue:`28113`). +- +- +- + +Reshaping +^^^^^^^^^ + +- +- +- +- +- + +Sparse +^^^^^^ + +- + +Other +^^^^^ + +- Compatibility with Python 3.8 in :meth:`DataFrame.query` (:issue:`27261`) +- + +.. _whatsnew_0.252.contributors: + +Contributors +~~~~~~~~~~~~ + +.. 
contributors:: v0.25.1..HEAD diff --git a/doc/source/whatsnew/v0.7.3.rst b/doc/source/whatsnew/v0.7.3.rst index a8697f60d7467..020cf3bdc2d59 100644 --- a/doc/source/whatsnew/v0.7.3.rst +++ b/doc/source/whatsnew/v0.7.3.rst @@ -25,8 +25,6 @@ New features from pandas.tools.plotting import scatter_matrix scatter_matrix(df, alpha=0.2) # noqa F821 -.. image:: ../savefig/scatter_matrix_kde.png - :width: 5in - Add ``stacked`` argument to Series and DataFrame's ``plot`` method for :ref:`stacked bar plots <visualization.barplot>`. @@ -35,15 +33,11 @@ New features df.plot(kind='bar', stacked=True) # noqa F821 -.. image:: ../savefig/bar_plot_stacked_ex.png - :width: 4in .. code-block:: python df.plot(kind='barh', stacked=True) # noqa F821 -.. image:: ../savefig/barh_plot_stacked_ex.png - :width: 4in - Add log x and y :ref:`scaling options <visualization.basic>` to ``DataFrame.plot`` and ``Series.plot`` diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 04cd5e4c2c918..3b6288146bdf2 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -21,27 +21,27 @@ including other versions of pandas. Enhancements ~~~~~~~~~~~~ -.. _whatsnew_1000.enhancements.other: - - - +.. _whatsnew_1000.enhancements.other: + Other enhancements ^^^^^^^^^^^^^^^^^^ -.. _whatsnew_1000.api_breaking: - - - +.. _whatsnew_1000.api_breaking: + Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. _whatsnew_1000.api.other: - - :class:`pandas.core.groupby.GroupBy.transform` now raises on invalid operation names (:issue:`27489`). - +.. _whatsnew_1000.api.other: + Other API changes ^^^^^^^^^^^^^^^^^ @@ -65,7 +65,8 @@ Removal of prior version deprecations/changes - Changed the default value of `inplace` in :meth:`DataFrame.set_index` and :meth:`Series.set_axis`. It now defaults to False (:issue:`27600`) - :meth:`pandas.Series.str.cat` now defaults to aligning ``others``, using ``join='left'`` (:issue:`27611`) - :meth:`pandas.Series.str.cat` does not accept list-likes *within* list-likes anymore (:issue:`27611`) -- +- Removed the previously deprecated :meth:`ExtensionArray._formatting_values`. Use :attr:`ExtensionArray._formatter` instead. (:issue:`23601`) +- Removed the previously deprecated ``IntervalIndex.from_intervals`` in favor of the :class:`IntervalIndex` constructor (:issue:`19263`) .. _whatsnew_1000.performance: @@ -75,6 +76,7 @@ Performance improvements - Performance improvement in indexing with a non-unique :class:`IntervalIndex` (:issue:`27489`) - Performance improvement in `MultiIndex.is_monotonic` (:issue:`27495`) - Performance improvement in :func:`cut` when ``bins`` is an :class:`IntervalIndex` (:issue:`27668`) +- Performance improvement in :meth:`DataFrame.replace` when provided a list of values to replace (:issue:`28099`) .. 
_whatsnew_1000.bug_fixes: @@ -86,6 +88,7 @@ Bug fixes Categorical ^^^^^^^^^^^ +- Added test to assert that :func:`fillna` raises the correct ValueError message when the value isn't a value from categories (:issue:`13628`) - - @@ -94,6 +97,7 @@ Datetimelike ^^^^^^^^^^^^ - Bug in :meth:`Series.__setitem__` incorrectly casting ``np.timedelta64("NaT")`` to ``np.datetime64("NaT")`` when inserting into a :class:`Series` with datetime64 dtype (:issue:`27311`) - Bug in :meth:`Series.dt` property lookups when the underlying data is read-only (:issue:`27529`) +- Bug in ``HDFStore.__getitem__`` incorrectly reading tz attribute created in Python 2 (:issue:`26443`) - @@ -138,7 +142,7 @@ Interval Indexing ^^^^^^^^ -- +- Bug in assignment using a reverse slicer (:issue:`26939`) - Missing @@ -156,14 +160,18 @@ MultiIndex I/O ^^^ -- +- :meth:`read_csv` now accepts binary mode file buffers when using the Python csv engine (:issue:`23779`) +- Bug in :meth:`DataFrame.to_json` where using a Tuple as a column or index value and using ``orient="columns"`` or ``orient="index"`` would produce invalid JSON (:issue:`20500`) - Plotting ^^^^^^^^ +- Bug in :meth:`Series.plot` not able to plot boolean values (:issue:`23719`) - -- +- Bug in :meth:`DataFrame.plot` producing incorrect legend markers when plotting multiple series on the same axis (:issue:`18222`) +- Bug in :meth:`DataFrame.plot` when ``kind='box'`` and data contains datetime or timedelta data. These types are now automatically dropped (:issue:`22799`) +- Bug in :meth:`DataFrame.plot.line` and :meth:`DataFrame.plot.area` producing wrong xlim in x-axis (:issue:`27686`, :issue:`25160`, :issue:`24784`) Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -171,6 +179,7 @@ - - - Bug in :meth:`DataFrame.groupby` not offering selection by column name when ``axis=1`` (:issue:`27614`) +- Bug in :meth:`DataFrameGroupBy.agg` not able to use lambda function with named aggregation (:issue:`27519`) Reshaping ^^^^^^^^^ @@ -187,6 +196,7 @@ Sparse Build Changes ^^^^^^^^^^^^^ +- Fixed pyqt development dependency issue because of different pyqt package name in conda and PyPI (:issue:`26838`) ExtensionArray @@ -195,6 +205,14 @@ - - + +Other +^^^^^ +- Trying to set the ``display.precision``, ``display.max_rows`` or ``display.max_columns`` using :meth:`set_option` to anything but ``None`` or a positive int will raise a ``ValueError`` (:issue:`23348`) +- Using :meth:`DataFrame.replace` with overlapping keys in a nested dictionary will no longer raise, now matching the behavior of a flat dictionary (:issue:`27660`) +- :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now support dicts as ``compression`` argument with key ``'method'`` being the compression method and others as additional compression options when the compression method is ``'zip'``. (:issue:`26023`) + + .. _whatsnew_1000.contributors: Contributors diff --git a/doc/sphinxext/contributors.py b/doc/sphinxext/contributors.py index 4256e4659715d..1a064f71792e9 100644 --- a/doc/sphinxext/contributors.py +++ b/doc/sphinxext/contributors.py @@ -8,12 +8,11 @@ code contributors and commits, and then list each contributor individually. 
""" +from announce import build_components from docutils import nodes from docutils.parsers.rst import Directive import git -from announce import build_components - class ContributorsDirective(Directive): required_arguments = 1 diff --git a/environment.yml b/environment.yml index 93e8302b498a0..6d2cd701c3854 100644 --- a/environment.yml +++ b/environment.yml @@ -71,7 +71,7 @@ dependencies: - lxml # pandas.read_html - openpyxl # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile - pyarrow>=0.9.0 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather - - pyqt # pandas.read_clipbobard + - pyqt>=5.9.2 # pandas.read_clipboard - pytables>=3.4.2 # pandas.read_hdf, DataFrame.to_hdf - python-snappy # required by pyarrow - s3fs # pandas.read_csv... when using 's3://...' path diff --git a/pandas/_config/config.py b/pandas/_config/config.py index 4f0720abd1445..890db5b41907e 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -787,6 +787,7 @@ def is_instance_factory(_type): ValueError if x is not an instance of `_type` """ + if isinstance(_type, (tuple, list)): _type = tuple(_type) type_repr = "|".join(map(str, _type)) @@ -820,6 +821,32 @@ def inner(x): return inner +def is_nonnegative_int(value): + """ + Verify that value is None or a nonnegative int. + + Parameters + ---------- + value : None or int + The `value` to be checked. + + Raises + ------ + ValueError + When the value is not None or is a negative integer + """ + + if value is None: + return + + elif isinstance(value, int): + if value >= 0: + return + + msg = "Value must be a nonnegative integer or None" + raise ValueError(msg) + + # common type validators, for convenience # usage: register_option(... , validator = is_int) is_int = is_type_factory(int) diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index e3f18572abca1..3069bbbf34bb7 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -719,6 +719,11 @@ def group_quantile(ndarray[float64_t] out, ndarray[int64_t] counts, non_na_counts, sort_arr assert values.shape[0] == N + + if not (0 <= q <= 1): + raise ValueError("'q' must be between 0 and 1. 
Got" + " '{}' instead".format(q)) + inter_methods = { 'linear': INTERPOLATION_LINEAR, 'lower': INTERPOLATION_LOWER, diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 3e620f5934d5e..b8df78e600a46 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -108,7 +108,7 @@ cdef class Int64Factorizer: def get_count(self): return self.count - def factorize(self, int64_t[:] values, sort=False, + def factorize(self, const int64_t[:] values, sort=False, na_sentinel=-1, na_value=None): """ Factorize values with nans replaced by na_sentinel diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index f704ceffa662e..7424c4ddc3d92 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -47,10 +47,6 @@ cpdef get_value_at(ndarray arr, object loc, object tz=None): return util.get_value_at(arr, loc) -def get_value_box(arr: ndarray, loc: object) -> object: - return get_value_at(arr, loc, tz=None) - - # Don't populate hash tables in monotonic indexes larger than this _SIZE_CUTOFF = 1000000 diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index cafc31dad3568..6cc9dd22ce7c9 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -2,7 +2,6 @@ # See LICENSE for the license import bz2 import gzip -import lzma import os import sys import time @@ -59,9 +58,12 @@ from pandas.core.arrays import Categorical from pandas.core.dtypes.concat import union_categoricals import pandas.io.common as icom +from pandas.compat import _import_lzma, _get_lzma_file from pandas.errors import (ParserError, DtypeWarning, EmptyDataError, ParserWarning) +lzma = _import_lzma() + # Import CParserError as alias of ParserError for backwards compatibility. # Ultimately, we want to remove this import. See gh-12665 and gh-14479. 
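For reference, the bounds check added to `group_quantile` above surfaces to users through `DataFrameGroupBy.quantile`. A minimal sketch of the intended behavior (the frame below is hypothetical; the error text is the one from the diff):

```python
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, 2.0, 3.0]})

df.groupby("key")["val"].quantile(0.5)  # q within [0, 1] works as before

# With the new guard, an out-of-range q now raises cleanly:
# ValueError: 'q' must be between 0 and 1. Got '2.0' instead
df.groupby("key")["val"].quantile(2.0)
```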
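Likewise, a minimal sketch of how the lzma shim imported above is meant to be used; it mirrors the `_get_lzma_file(lzma)(source, 'rb')` call made in the xz branch just below, and the file name is hypothetical:

```python
from pandas.compat import _get_lzma_file, _import_lzma

lzma = _import_lzma()  # warns and returns None if the stdlib lzma is missing

# On a complete Python build this opens an xz-compressed file; on an
# incomplete build it raises RuntimeError only at the point of use,
# instead of making `import pandas` fail outright.
with _get_lzma_file(lzma)("data.csv.xz", "rb") as fh:  # hypothetical file
    raw = fh.read()
```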
CParserError = ParserError @@ -645,9 +647,9 @@ cdef class TextReader: 'zip file %s', str(zip_names)) elif self.compression == 'xz': if isinstance(source, str): - source = lzma.LZMAFile(source, 'rb') + source = _get_lzma_file(lzma)(source, 'rb') else: - source = lzma.LZMAFile(filename=source) + source = _get_lzma_file(lzma)(filename=source) else: raise ValueError('Unrecognized compression type: %s' % self.compression) diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index f95685c337969..c892c1cf1b8a3 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -296,8 +296,6 @@ cdef class SeriesBinGrouper: islider.advance(group_size) vslider.advance(group_size) - except: - raise finally: # so we don't free the wrong memory islider.reset() @@ -425,8 +423,6 @@ cdef class SeriesGrouper: group_size = 0 - except: - raise finally: # so we don't free the wrong memory islider.reset() diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h index 0470fef450dde..ee6e7081bf00e 100644 --- a/pandas/_libs/src/ujson/lib/ultrajson.h +++ b/pandas/_libs/src/ujson/lib/ultrajson.h @@ -307,11 +307,4 @@ EXPORTFUNCTION JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuffer); EXPORTFUNCTION void encode(JSOBJ, JSONObjectEncoder *, const char *, size_t); -#define Buffer_Reserve(__enc, __len) \ - if ((size_t)((__enc)->end - (__enc)->offset) < (size_t)(__len)) { \ - Buffer_Realloc((__enc), (__len)); \ - } - -void Buffer_Realloc(JSONObjectEncoder *enc, size_t cbNeeded); - #endif // PANDAS__LIBS_SRC_UJSON_LIB_ULTRAJSON_H_ diff --git a/pandas/_libs/src/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/ujson/lib/ultrajsonenc.c index 2d6c823a45515..d5b379bee585b 100644 --- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c +++ b/pandas/_libs/src/ujson/lib/ultrajsonenc.c @@ -714,6 +714,12 @@ int Buffer_EscapeStringValidated(JSOBJ obj, JSONObjectEncoder *enc, } } +#define Buffer_Reserve(__enc, __len) \ + if ( (size_t) ((__enc)->end - (__enc)->offset) < (size_t) (__len)) \ + { \ + Buffer_Realloc((__enc), (__len));\ + } \ + #define Buffer_AppendCharUnchecked(__enc, __chr) *((__enc)->offset++) = __chr; FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC strreverse(char *begin, diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 926440218b5d9..4b612bb033761 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -16,18 +16,19 @@ derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +DISCLAIMED. 
IN NO EVENT SHALL ESN SOCIAL SOFTWARE AB OR JONAS TARNSTROM BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Portions of code from MODP_ASCII - Ascii transformations (upper/lower, etc) https://github.com/client9/stringencoders -Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights reserved. +Copyright (c) 2007 Nick Galbreath -- nickg [at] modp [dot] com. All rights +reserved. Numeric decoder derived from from TCL library http://www.opensource.apple.com/source/tcl/tcl-14/tcl/license.terms @@ -48,13 +49,13 @@ Numeric decoder derived from from TCL library #include <../../../tslibs/src/datetime/np_datetime_strings.h> #include "datetime.h" -#define NPY_JSON_BUFSIZE 32768 - static PyTypeObject *type_decimal; static PyTypeObject *cls_dataframe; static PyTypeObject *cls_series; static PyTypeObject *cls_index; static PyTypeObject *cls_nat; +PyObject *cls_timestamp; +PyObject *cls_timedelta; npy_int64 get_nat(void) { return NPY_MIN_INT64; } @@ -64,9 +65,9 @@ typedef void *(*PFN_PyTypeToJSON)(JSOBJ obj, JSONTypeContext *ti, typedef struct __NpyArrContext { PyObject *array; char *dataptr; - int curdim; // current dimension in array's order - int stridedim; // dimension we are striding over - int inc; // stride dimension increment (+/- 1) + int curdim; // current dimension in array's order + int stridedim; // dimension we are striding over + int inc; // stride dimension increment (+/- 1) npy_intp dim; npy_intp stride; npy_intp ndim; @@ -83,8 +84,8 @@ typedef struct __PdBlockContext { int ncols; int transpose; - int *cindices; // frame column -> block column map - NpyArrContext **npyCtxts; // NpyArrContext for each column + int *cindices; // frame column -> block column map + NpyArrContext **npyCtxts; // NpyArrContext for each column } PdBlockContext; typedef struct __TypeContext { @@ -148,13 +149,12 @@ enum PANDAS_FORMAT { SPLIT, RECORDS, INDEX, COLUMNS, VALUES }; int PdBlock_iterNext(JSOBJ, JSONTypeContext *); -void *initObjToJSON(void) -{ +void *initObjToJSON(void) { PyObject *mod_pandas; PyObject *mod_nattype; PyObject *mod_decimal = PyImport_ImportModule("decimal"); type_decimal = - (PyTypeObject *)PyObject_GetAttrString(mod_decimal, "Decimal"); + (PyTypeObject *)PyObject_GetAttrString(mod_decimal, "Decimal"); Py_DECREF(mod_decimal); PyDateTime_IMPORT; @@ -166,13 +166,15 @@ void *initObjToJSON(void) cls_index = (PyTypeObject *)PyObject_GetAttrString(mod_pandas, "Index"); cls_series = (PyTypeObject *)PyObject_GetAttrString(mod_pandas, "Series"); + cls_timestamp = PyObject_GetAttrString(mod_pandas, "Timestamp"); + cls_timedelta = PyObject_GetAttrString(mod_pandas, "Timedelta"); Py_DECREF(mod_pandas); } mod_nattype = PyImport_ImportModule("pandas._libs.tslibs.nattype"); if (mod_nattype) { - cls_nat = (PyTypeObject *)PyObject_GetAttrString(mod_nattype, - "NaTType"); + cls_nat = + (PyTypeObject *)PyObject_GetAttrString(mod_nattype, "NaTType"); Py_DECREF(mod_nattype); } @@ -210,7 +212,6 @@ static TypeContext *createTypeContext(void) { return pc; } - static int is_sparse_array(PyObject *obj) { // TODO can be removed again once SparseArray.values is removed (GH26421) if 
(PyObject_HasAttrString(obj, "_subtyp")) { @@ -225,7 +226,6 @@ static int is_sparse_array(PyObject *obj) { return 0; } - static PyObject *get_values(PyObject *obj) { PyObject *values = NULL; @@ -240,7 +240,8 @@ static PyObject *get_values(PyObject *obj) { values = PyObject_CallMethod(values, "to_numpy", NULL); } - if (!is_sparse_array(values) && PyObject_HasAttrString(values, "values")) { + if (!is_sparse_array(values) && + PyObject_HasAttrString(values, "values")) { PyObject *subvals = get_values(values); PyErr_Clear(); PRINTMARK(); @@ -355,20 +356,20 @@ static Py_ssize_t get_attr_length(PyObject *obj, char *attr) { } static npy_int64 get_long_attr(PyObject *o, const char *attr) { - npy_int64 long_val; - PyObject *value = PyObject_GetAttrString(o, attr); - long_val = (PyLong_Check(value) ? - PyLong_AsLongLong(value) : PyLong_AsLong(value)); - Py_DECREF(value); - return long_val; + npy_int64 long_val; + PyObject *value = PyObject_GetAttrString(o, attr); + long_val = + (PyLong_Check(value) ? PyLong_AsLongLong(value) : PyLong_AsLong(value)); + Py_DECREF(value); + return long_val; } static npy_float64 total_seconds(PyObject *td) { - npy_float64 double_val; - PyObject *value = PyObject_CallMethod(td, "total_seconds", NULL); - double_val = PyFloat_AS_DOUBLE(value); - Py_DECREF(value); - return double_val; + npy_float64 double_val; + PyObject *value = PyObject_CallMethod(td, "total_seconds", NULL); + double_val = PyFloat_AS_DOUBLE(value); + Py_DECREF(value); + return double_val; } static PyObject *get_item(PyObject *obj, Py_ssize_t i) { @@ -448,7 +449,7 @@ static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, if (PyUnicode_IS_COMPACT_ASCII(obj)) { Py_ssize_t len; - char *data = (char*)PyUnicode_AsUTF8AndSize(obj, &len); + char *data = (char *)PyUnicode_AsUTF8AndSize(obj, &len); *_outLen = len; return data; } @@ -503,7 +504,7 @@ static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc, // TODO(anyone): Does not appear to be reached in tests. 
pandas_datetime_to_datetimestruct(obj->obval, - (NPY_DATETIMEUNIT)obj->obmeta.base, &dts); + (NPY_DATETIMEUNIT)obj->obmeta.base, &dts); return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen); } @@ -662,9 +663,9 @@ void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { GET_TC(tc)->npyarr = npyarr; if (!npyarr) { - PyErr_NoMemory(); - GET_TC(tc)->iterNext = NpyArr_iterNextNone; - return; + PyErr_NoMemory(); + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + return; } npyarr->array = (PyObject *)obj; @@ -675,17 +676,17 @@ void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { npyarr->type_num = PyArray_DESCR(obj)->type_num; if (GET_TC(tc)->transpose) { - npyarr->dim = PyArray_DIM(obj, npyarr->ndim); - npyarr->stride = PyArray_STRIDE(obj, npyarr->ndim); - npyarr->stridedim = npyarr->ndim; - npyarr->index[npyarr->ndim] = 0; - npyarr->inc = -1; + npyarr->dim = PyArray_DIM(obj, npyarr->ndim); + npyarr->stride = PyArray_STRIDE(obj, npyarr->ndim); + npyarr->stridedim = npyarr->ndim; + npyarr->index[npyarr->ndim] = 0; + npyarr->inc = -1; } else { - npyarr->dim = PyArray_DIM(obj, 0); - npyarr->stride = PyArray_STRIDE(obj, 0); - npyarr->stridedim = 0; - npyarr->index[0] = 0; - npyarr->inc = 1; + npyarr->dim = PyArray_DIM(obj, 0); + npyarr->stride = PyArray_STRIDE(obj, 0); + npyarr->stridedim = 0; + npyarr->index[0] = 0; + npyarr->inc = 1; } npyarr->columnLabels = GET_TC(tc)->columnLabels; @@ -733,8 +734,7 @@ int NpyArr_iterNextItem(JSOBJ obj, JSONTypeContext *tc) { NpyArr_freeItemValue(obj, tc); - if (PyArray_ISDATETIME(npyarr->array)) - { + if (PyArray_ISDATETIME(npyarr->array)) { PRINTMARK(); GET_TC(tc)->itemValue = obj; Py_INCREF(obj); @@ -787,30 +787,23 @@ JSOBJ NpyArr_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { return GET_TC(tc)->itemValue; } -static void NpyArr_getLabel(JSOBJ obj, JSONTypeContext *tc, size_t *outLen, - npy_intp idx, char **labels) { - JSONObjectEncoder *enc = (JSONObjectEncoder *)tc->encoder; - PRINTMARK(); - *outLen = strlen(labels[idx]); - Buffer_Reserve(enc, *outLen); - memcpy(enc->offset, labels[idx], sizeof(char) * (*outLen)); - enc->offset += *outLen; - *outLen = 0; -} - char *NpyArr_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { NpyArrContext *npyarr = GET_TC(tc)->npyarr; npy_intp idx; PRINTMARK(); + char *cStr; if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) { idx = npyarr->index[npyarr->stridedim] - 1; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels); + cStr = npyarr->columnLabels[idx]; } else { idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels); + cStr = npyarr->rowLabels[idx]; } - return NULL; + + *outLen = strlen(cStr); + + return cStr; } //============================================================================= @@ -852,19 +845,22 @@ char *PdBlock_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; NpyArrContext *npyarr = blkCtxt->npyCtxts[0]; npy_intp idx; + char *cStr; PRINTMARK(); if (GET_TC(tc)->iterNext == PdBlock_iterNextItem) { idx = blkCtxt->colIdx - 1; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels); + cStr = npyarr->columnLabels[idx]; } else { idx = GET_TC(tc)->iterNext != PdBlock_iterNext ? 
npyarr->index[npyarr->stridedim - npyarr->inc] - 1 : npyarr->index[npyarr->stridedim]; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels); + cStr = npyarr->rowLabels[idx]; } - return NULL; + + *outLen = strlen(cStr); + return cStr; } char *PdBlock_iterGetName_Transpose(JSOBJ obj, JSONTypeContext *tc, @@ -872,16 +868,19 @@ char *PdBlock_iterGetName_Transpose(JSOBJ obj, JSONTypeContext *tc, PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; NpyArrContext *npyarr = blkCtxt->npyCtxts[blkCtxt->colIdx]; npy_intp idx; + char *cStr; PRINTMARK(); if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) { idx = npyarr->index[npyarr->stridedim] - 1; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels); + cStr = npyarr->columnLabels[idx]; } else { idx = blkCtxt->colIdx; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels); + cStr = npyarr->rowLabels[idx]; } - return NULL; + + *outLen = strlen(cStr); + return cStr; } int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) { @@ -942,9 +941,9 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { dtype = PyArray_DescrFromType(NPY_INT64); obj = (PyObject *)_obj; - GET_TC(tc) - ->iterGetName = GET_TC(tc)->transpose ? PdBlock_iterGetName_Transpose - : PdBlock_iterGetName; + GET_TC(tc)->iterGetName = GET_TC(tc)->transpose + ? PdBlock_iterGetName_Transpose + : PdBlock_iterGetName; blkCtxt = PyObject_Malloc(sizeof(PdBlockContext)); if (!blkCtxt) { @@ -1395,7 +1394,7 @@ void Series_iterBegin(JSOBJ obj, JSONTypeContext *tc) { PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder; GET_TC(tc)->index = 0; GET_TC(tc)->cStr = PyObject_Malloc(20 * sizeof(char)); - enc->outputFormat = VALUES; // for contained series + enc->outputFormat = VALUES; // for contained series if (!GET_TC(tc)->cStr) { PyErr_NoMemory(); } @@ -1454,7 +1453,7 @@ void DataFrame_iterBegin(JSOBJ obj, JSONTypeContext *tc) { PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder; GET_TC(tc)->index = 0; GET_TC(tc)->cStr = PyObject_Malloc(20 * sizeof(char)); - enc->outputFormat = VALUES; // for contained series & index + enc->outputFormat = VALUES; // for contained series & index if (!GET_TC(tc)->cStr) { PyErr_NoMemory(); } @@ -1578,16 +1577,30 @@ void NpyArr_freeLabels(char **labels, npy_intp len) { } } -char **NpyArr_encodeLabels(PyArrayObject *labels, JSONObjectEncoder *enc, +/* + * Function: NpyArr_encodeLabels + * ----------------------------- + * + * Builds an array of "encoded" labels. + * + * labels: PyArrayObject pointer for labels to be "encoded" + * num : number of labels + * + * "encode" is quoted above because we aren't really doing encoding + * For historical reasons this function would actually encode the entire + * array into a separate buffer with a separate call to JSON_Encode + * and would leave it to complex pointer manipulation from there to + * unpack values as needed. To make things simpler and more idiomatic + * this has instead just stringified any input save for datetime values, + * which may need to be represented in various formats. + */ +char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc, npy_intp num) { // NOTE this function steals a reference to labels. 
- PyObjectEncoder *pyenc = (PyObjectEncoder *)enc; PyObject *item = NULL; - npy_intp i, stride, len, need_quotes; + npy_intp i, stride, len; char **ret; - char *dataptr, *cLabel, *origend, *origst, *origoffset; - char labelBuffer[NPY_JSON_BUFSIZE]; - PyArray_GetItemFunc *getitem; + char *dataptr, *cLabel; int type_num; PRINTMARK(); @@ -1614,68 +1627,137 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, JSONObjectEncoder *enc, ret[i] = NULL; } - origst = enc->start; - origend = enc->end; - origoffset = enc->offset; - stride = PyArray_STRIDE(labels, 0); dataptr = PyArray_DATA(labels); - getitem = (PyArray_GetItemFunc *)PyArray_DESCR(labels)->f->getitem; type_num = PyArray_TYPE(labels); for (i = 0; i < num; i++) { - if (PyTypeNum_ISDATETIME(type_num) || PyTypeNum_ISNUMBER(type_num)) - { - item = (PyObject *)labels; - pyenc->npyType = type_num; - pyenc->npyValue = dataptr; - } else { - item = getitem(dataptr, labels); - if (!item) { + item = PyArray_GETITEM(labels, dataptr); + if (!item) { + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + // TODO: for any matches on type_num (date and timedeltas) should use a + // vectorized solution to convert to epoch or iso formats + if (enc->datetimeIso && + (type_num == NPY_TIMEDELTA || PyDelta_Check(item))) { + PyObject *td = PyObject_CallFunction(cls_timedelta, "(O)", item); + if (td == NULL) { + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; } - } - cLabel = JSON_EncodeObject(item, enc, labelBuffer, NPY_JSON_BUFSIZE); + PyObject *iso = PyObject_CallMethod(td, "isoformat", NULL); + Py_DECREF(td); + if (iso == NULL) { + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } - if (item != (PyObject *)labels) { - Py_DECREF(item); + cLabel = (char *)PyUnicode_AsUTF8(iso); + Py_DECREF(iso); + len = strlen(cLabel); + } else if (PyTypeNum_ISDATETIME(type_num) || PyDateTime_Check(item) || + PyDate_Check(item)) { + PyObject *ts = PyObject_CallFunction(cls_timestamp, "(O)", item); + if (ts == NULL) { + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + if (enc->datetimeIso) { + PyObject *iso = PyObject_CallMethod(ts, "isoformat", NULL); + Py_DECREF(ts); + if (iso == NULL) { + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + cLabel = (char *)PyUnicode_AsUTF8(iso); + Py_DECREF(iso); + len = strlen(cLabel); + } else { + npy_int64 value; + // TODO: refactor to not duplicate what goes on in + // beginTypeContext + if (PyObject_HasAttrString(ts, "value")) { + PRINTMARK(); + value = get_long_attr(ts, "value"); + } else { + PRINTMARK(); + value = total_seconds(ts) * + 1000000000LL; // nanoseconds per second + } + Py_DECREF(ts); + + switch (enc->datetimeUnit) { + case NPY_FR_ns: + break; + case NPY_FR_us: + value /= 1000LL; + break; + case NPY_FR_ms: + value /= 1000000LL; + break; + case NPY_FR_s: + value /= 1000000000LL; + break; + default: + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + char buf[21] = {0}; // 21 chars for 2**63 as string + cLabel = buf; + sprintf(buf, "%" NPY_INT64_FMT, value); + len = strlen(cLabel); + } + } else { // Fallback to string representation + PyObject *str = PyObject_Str(item); + if (str == NULL) { + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + cLabel = (char *)PyUnicode_AsUTF8(str); + Py_DECREF(str); + len = strlen(cLabel); } - if (PyErr_Occurred() || enc->errorMsg) { + Py_DECREF(item); + // Add 1 to include NULL terminator + ret[i] = PyObject_Malloc(len + 1); + memcpy(ret[i], cLabel, 
len + 1); + + if (PyErr_Occurred()) { NpyArr_freeLabels(ret, num); ret = 0; break; } - need_quotes = ((*cLabel) != '"'); - len = enc->offset - cLabel + 1 + 2 * need_quotes; - ret[i] = PyObject_Malloc(sizeof(char) * len); - if (!ret[i]) { PyErr_NoMemory(); ret = 0; break; } - if (need_quotes) { - ret[i][0] = '"'; - memcpy(ret[i] + 1, cLabel, sizeof(char) * (len - 4)); - ret[i][len - 3] = '"'; - } else { - memcpy(ret[i], cLabel, sizeof(char) * (len - 2)); - } - ret[i][len - 2] = ':'; - ret[i][len - 1] = '\0'; dataptr += stride; } - enc->start = origst; - enc->end = origend; - enc->offset = origoffset; - Py_DECREF(labels); return ret; } @@ -1840,23 +1922,22 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { value = get_long_attr(obj, "value"); } else { PRINTMARK(); - value = - total_seconds(obj) * 1000000000LL; // nanoseconds per second + value = total_seconds(obj) * 1000000000LL; // nanoseconds per second } base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; switch (base) { - case NPY_FR_ns: - break; - case NPY_FR_us: - value /= 1000LL; - break; - case NPY_FR_ms: - value /= 1000000LL; - break; - case NPY_FR_s: - value /= 1000000000LL; - break; + case NPY_FR_ns: + break; + case NPY_FR_us: + value /= 1000LL; + break; + case NPY_FR_ms: + value /= 1000000LL; + break; + case NPY_FR_s: + value /= 1000000000LL; + break; } exc = PyErr_Occurred(); @@ -1971,8 +2052,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { goto INVALID; } pc->columnLabelsLen = PyArray_DIM(pc->newObj, 0); - pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, - (JSONObjectEncoder *)enc, + pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, enc, pc->columnLabelsLen); if (!pc->columnLabels) { goto INVALID; @@ -2074,8 +2154,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { goto INVALID; } pc->columnLabelsLen = PyObject_Size(tmpObj); - pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, - (JSONObjectEncoder *)enc, + pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, enc, pc->columnLabelsLen); Py_DECREF(tmpObj); if (!pc->columnLabels) { @@ -2096,9 +2175,8 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { goto INVALID; } pc->rowLabelsLen = PyObject_Size(tmpObj); - pc->rowLabels = - NpyArr_encodeLabels((PyArrayObject *)values, - (JSONObjectEncoder *)enc, pc->rowLabelsLen); + pc->rowLabels = NpyArr_encodeLabels((PyArrayObject *)values, enc, + pc->rowLabelsLen); Py_DECREF(tmpObj); tmpObj = (enc->outputFormat == INDEX ? 
PyObject_GetAttrString(obj, "columns") @@ -2116,8 +2194,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { goto INVALID; } pc->columnLabelsLen = PyObject_Size(tmpObj); - pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, - (JSONObjectEncoder *)enc, + pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, enc, pc->columnLabelsLen); Py_DECREF(tmpObj); if (!pc->columnLabels) { @@ -2242,7 +2319,8 @@ void Object_endTypeContext(JSOBJ obj, JSONTypeContext *tc) { PyObject_Free(GET_TC(tc)->cStr); GET_TC(tc)->cStr = NULL; - if (tc->prv != &(((PyObjectEncoder *)tc->encoder)->basicTypeContext)) { // NOLINT + if (tc->prv != + &(((PyObjectEncoder *)tc->encoder)->basicTypeContext)) { // NOLINT PyObject_Free(tc->prv); } tc->prv = NULL; @@ -2305,7 +2383,7 @@ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) { PyObject *newobj; PyObject *oinput = NULL; PyObject *oensureAscii = NULL; - int idoublePrecision = 10; // default double precision setting + int idoublePrecision = 10; // default double precision setting PyObject *oencodeHTMLChars = NULL; char *sOrient = NULL; char *sdateFormat = NULL; @@ -2328,10 +2406,10 @@ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) { PyObject_Malloc, PyObject_Realloc, PyObject_Free, - -1, // recursionMax + -1, // recursionMax idoublePrecision, - 1, // forceAscii - 0, // encodeHTMLChars + 1, // forceAscii + 0, // encodeHTMLChars }}; JSONObjectEncoder *encoder = (JSONObjectEncoder *)&pyEncoder; @@ -2429,7 +2507,6 @@ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) { PRINTMARK(); ret = JSON_EncodeObject(oinput, encoder, buffer, sizeof(buffer)); PRINTMARK(); - if (PyErr_Occurred()) { PRINTMARK(); return NULL; diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 4e49f660f5e19..01e500a80dcc4 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -71,7 +71,7 @@ cdef inline object create_time_from_ts( @cython.wraparound(False) @cython.boundscheck(False) -def ints_to_pydatetime(int64_t[:] arr, object tz=None, object freq=None, +def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None, str box="datetime"): """ Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 6fab1b5c02be1..020d1acf0b4ce 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -92,6 +92,9 @@ cdef class _NaT(datetime): # int64_t value # object freq + # higher than np.ndarray and np.matrix + __array_priority__ = 100 + def __hash__(_NaT self): # py3k needs this defined here return hash(self.value) @@ -103,61 +106,102 @@ cdef class _NaT(datetime): if ndim == -1: return _nat_scalar_rules[op] - if ndim == 0: + elif util.is_array(other): + result = np.empty(other.shape, dtype=np.bool_) + result.fill(_nat_scalar_rules[op]) + return result + + elif ndim == 0: if is_datetime64_object(other): return _nat_scalar_rules[op] else: raise TypeError('Cannot compare type %r with type %r' % (type(self).__name__, type(other).__name__)) + # Note: instead of passing "other, self, _reverse_ops[op]", we observe # that `_nat_scalar_rules` is invariant under `_reverse_ops`, # rendering it unnecessary. 
return PyObject_RichCompare(other, self, op) def __add__(self, other): + if self is not c_NaT: + # cython __radd__ semantics + self, other = other, self + if PyDateTime_Check(other): return c_NaT - + elif PyDelta_Check(other): + return c_NaT + elif is_datetime64_object(other) or is_timedelta64_object(other): + return c_NaT elif hasattr(other, 'delta'): # Timedelta, offsets.Tick, offsets.Week return c_NaT - elif getattr(other, '_typ', None) in ['dateoffset', 'series', - 'period', 'datetimeindex', - 'datetimearray', - 'timedeltaindex', - 'timedeltaarray']: - # Duplicate logic in _Timestamp.__add__ to avoid needing - # to subclass; allows us to @final(_Timestamp.__add__) - return NotImplemented - return c_NaT + + elif is_integer_object(other) or util.is_period_object(other): + # For Period compat + # TODO: the integer behavior is deprecated, remove it + return c_NaT + + elif util.is_array(other): + if other.dtype.kind in 'mM': + # If we are adding to datetime64, we treat NaT as timedelta + # Either way, result dtype is datetime64 + result = np.empty(other.shape, dtype="datetime64[ns]") + result.fill("NaT") + return result + + return NotImplemented def __sub__(self, other): # Duplicate some logic from _Timestamp.__sub__ to avoid needing # to subclass; allows us to @final(_Timestamp.__sub__) + cdef: + bint is_rsub = False + + if self is not c_NaT: + # cython __rsub__ semantics + self, other = other, self + is_rsub = True + if PyDateTime_Check(other): - return NaT + return c_NaT elif PyDelta_Check(other): - return NaT + return c_NaT + elif is_datetime64_object(other) or is_timedelta64_object(other): + return c_NaT + elif hasattr(other, 'delta'): + # offsets.Tick, offsets.Week + return c_NaT - elif getattr(other, '_typ', None) == 'datetimeindex': - # a Timestamp-DatetimeIndex -> yields a negative TimedeltaIndex - return -other.__sub__(self) + elif is_integer_object(other) or util.is_period_object(other): + # For Period compat + # TODO: the integer behavior is deprecated, remove it + return c_NaT - elif getattr(other, '_typ', None) == 'timedeltaindex': - # a Timestamp-TimedeltaIndex -> yields a negative TimedeltaIndex - return (-other).__add__(self) + elif util.is_array(other): + if other.dtype.kind == 'm': + if not is_rsub: + # NaT - timedelta64 we treat NaT as datetime64, so result + # is datetime64 + result = np.empty(other.shape, dtype="datetime64[ns]") + result.fill("NaT") + return result + + # timedelta64 - NaT we have to treat NaT as timedelta64 + # for this to be meaningful, and the result is timedelta64 + result = np.empty(other.shape, dtype="timedelta64[ns]") + result.fill("NaT") + return result + + elif other.dtype.kind == 'M': + # We treat NaT as a datetime, so regardless of whether this is + # NaT - other or other - NaT, the result is timedelta64 + result = np.empty(other.shape, dtype="timedelta64[ns]") + result.fill("NaT") + return result - elif hasattr(other, 'delta'): - # offsets.Tick, offsets.Week - neg_other = -other - return self + neg_other - - elif getattr(other, '_typ', None) in ['period', 'series', - 'periodindex', 'dateoffset', - 'datetimearray', - 'timedeltaarray']: - return NotImplemented - return NaT + return NotImplemented def __pos__(self): return NaT diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index c68d686ff2bf2..98e55f50062a2 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -21,7 +21,8 @@ PyDateTime_IMPORT from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, dtstruct_to_dt64, 
dt64_to_dtstruct, - pandas_datetime_to_datetimestruct, NPY_DATETIMEUNIT, NPY_FR_D) + pandas_datetime_to_datetimestruct, check_dts_bounds, + NPY_DATETIMEUNIT, NPY_FR_D) cdef extern from "src/datetime/np_datetime.h": int64_t npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, @@ -1011,7 +1012,7 @@ def dt64arr_to_periodarr(int64_t[:] dtarr, int freq, tz=None): @cython.wraparound(False) @cython.boundscheck(False) -def periodarr_to_dt64arr(int64_t[:] periodarr, int freq): +def periodarr_to_dt64arr(const int64_t[:] periodarr, int freq): """ Convert array to datetime64 values from a set of ordinals corresponding to periods per period convention. @@ -1024,9 +1025,8 @@ def periodarr_to_dt64arr(int64_t[:] periodarr, int freq): out = np.empty(l, dtype='i8') - with nogil: - for i in range(l): - out[i] = period_ordinal_to_dt64(periodarr[i], freq) + for i in range(l): + out[i] = period_ordinal_to_dt64(periodarr[i], freq) return out.base # .base to access underlying np.ndarray @@ -1179,7 +1179,7 @@ cpdef int64_t period_ordinal(int y, int m, int d, int h, int min, return get_period_ordinal(&dts, freq) -cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) nogil: +cdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) except? -1: cdef: npy_datetimestruct dts @@ -1187,6 +1187,7 @@ cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) nogil: return NPY_NAT get_date_info(ordinal, freq, &dts) + check_dts_bounds(&dts) return dtstruct_to_dt64(&dts) diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index d93858cff5e05..fbda5f178e164 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -341,7 +341,8 @@ def array_strptime(object[:] values, object fmt, return result, result_timezone.base -"""_getlang, LocaleTime, TimeRE, _calc_julian_from_U_or_W are vendored +""" +_getlang, LocaleTime, TimeRE, _calc_julian_from_U_or_W are vendored from the standard library, see https://github.com/python/cpython/blob/master/Lib/_strptime.py The original module-level docstring follows. @@ -363,7 +364,8 @@ def _getlang(): class LocaleTime: - """Stores and handles locale-specific information related to time. + """ + Stores and handles locale-specific information related to time. ATTRIBUTES: f_weekday -- full weekday names (7-item list) @@ -382,7 +384,8 @@ class LocaleTime: """ def __init__(self): - """Set all attributes. + """ + Set all attributes. Order of methods called matters for dependency reasons. @@ -399,7 +402,6 @@ class LocaleTime: Only other possible issue is if someone changed the timezone and did not call tz.tzset . That is an issue for the programmer, though, since changing the timezone is worthless without that call. - """ self.lang = _getlang() self.__calc_weekday() @@ -518,15 +520,16 @@ class TimeRE(dict): """ def __init__(self, locale_time=None): - """Create keys/values. + """ + Create keys/values. Order of execution is important for dependency reasons. 
- """ if locale_time: self.locale_time = locale_time else: self.locale_time = LocaleTime() + self._Z = None base = super() base.__init__({ # The " \d" part of the regex is to make %c from ANSI C work @@ -555,21 +558,29 @@ class TimeRE(dict): 'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'), 'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'), 'p': self.__seqToRE(self.locale_time.am_pm, 'p'), - 'Z': self.__seqToRE(pytz.all_timezones, 'Z'), + # 'Z' key is generated lazily via __getitem__ '%': '%'}) base.__setitem__('W', base.__getitem__('U').replace('U', 'W')) base.__setitem__('c', self.pattern(self.locale_time.LC_date_time)) base.__setitem__('x', self.pattern(self.locale_time.LC_date)) base.__setitem__('X', self.pattern(self.locale_time.LC_time)) + def __getitem__(self, key): + if key == "Z": + # lazy computation + if self._Z is None: + self._Z = self.__seqToRE(pytz.all_timezones, 'Z') + return self._Z + return super().__getitem__(key) + def __seqToRE(self, to_convert, directive): - """Convert a list to a regex string for matching a directive. + """ + Convert a list to a regex string for matching a directive. Want possible matching values to be from longest to shortest. This prevents the possibility of a match occurring for a value that also a substring of a larger value that should have matched (e.g., 'abc' matching when 'abcdef' should have been the match). - """ to_convert = sorted(to_convert, key=len, reverse=True) for value in to_convert: @@ -582,11 +593,11 @@ class TimeRE(dict): return '%s)' % regex def pattern(self, format): - """Return regex pattern for the format string. + """ + Return regex pattern for the format string. Need to make sure that any characters that might be interpreted as regex syntax are escaped. - """ processed_format = '' # The sub() call escapes all characters that might be misconstrued @@ -619,7 +630,8 @@ _regex_cache = {} cdef int _calc_julian_from_U_or_W(int year, int week_of_year, int day_of_week, int week_starts_Mon): - """Calculate the Julian day based on the year, week of the year, and day of + """ + Calculate the Julian day based on the year, week of the year, and day of the week, with week_start_day representing whether the week of the year assumes the week starts on Sunday or Monday (6 or 0). @@ -660,8 +672,10 @@ cdef int _calc_julian_from_U_or_W(int year, int week_of_year, return 1 + days_to_week + day_of_week -cdef object _calc_julian_from_V(int iso_year, int iso_week, int iso_weekday): - """Calculate the Julian day based on the ISO 8601 year, week, and weekday. +cdef (int, int) _calc_julian_from_V(int iso_year, int iso_week, int iso_weekday): + """ + Calculate the Julian day based on the ISO 8601 year, week, and weekday. + ISO weeks start on Mondays, with week 01 being the week containing 4 Jan. ISO week days range from 1 (Monday) to 7 (Sunday). 
@@ -694,7 +708,7 @@ cdef object _calc_julian_from_V(int iso_year, int iso_week, int iso_weekday): return iso_year, ordinal -cdef parse_timezone_directive(object z): +cdef parse_timezone_directive(str z): """ Parse the '%z' directive and return a pytz.FixedOffset diff --git a/pandas/_typing.py b/pandas/_typing.py index 837a7a89e0b83..37a5d7945955d 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -11,9 +11,9 @@ from pandas.core.arrays.base import ExtensionArray # noqa: F401 from pandas.core.dtypes.dtypes import ExtensionDtype # noqa: F401 from pandas.core.indexes.base import Index # noqa: F401 - from pandas.core.frame import DataFrame # noqa: F401 from pandas.core.series import Series # noqa: F401 from pandas.core.sparse.series import SparseSeries # noqa: F401 + from pandas.core.generic import NDFrame # noqa: F401 AnyArrayLike = TypeVar( @@ -24,7 +24,10 @@ Dtype = Union[str, np.dtype, "ExtensionDtype"] FilePathOrBuffer = Union[str, Path, IO[AnyStr]] -FrameOrSeries = TypeVar("FrameOrSeries", "Series", "DataFrame") +FrameOrSeries = TypeVar("FrameOrSeries", bound="NDFrame") Scalar = Union[str, int, float] Axis = Union[str, int] Ordered = Optional[bool] + +# to maintain type information across generic functions and parametrization +_T = TypeVar("_T") diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 5ecd641fc68be..9c778f68727c6 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -10,10 +10,12 @@ import platform import struct import sys +import warnings PY35 = sys.version_info[:2] == (3, 5) PY36 = sys.version_info >= (3, 6) PY37 = sys.version_info >= (3, 7) +PY38 = sys.version_info >= (3, 8) PYPY = platform.python_implementation() == "PyPy" @@ -65,3 +67,32 @@ def is_platform_mac(): def is_platform_32bit(): return struct.calcsize("P") * 8 < 64 + + +def _import_lzma(): + """Attempts to import lzma, warning the user when lzma is not available. + """ + try: + import lzma + + return lzma + except ImportError: + msg = ( + "Could not import the lzma module. " + "Your installed Python is incomplete. " + "Attempting to use lzma compression will result in a RuntimeError." + ) + warnings.warn(msg) + + +def _get_lzma_file(lzma): + """Returns the lzma method LZMAFile when the module was correctly imported. + Otherwise, raises a RuntimeError. + """ + if lzma is None: + raise RuntimeError( + "lzma module not available. " + "A Python re-install with the proper " + "dependencies might be required to solve this issue." 
+ ) + return lzma.LZMAFile diff --git a/pandas/compat/chainmap.py b/pandas/compat/chainmap.py index 83f1da597d6a6..84824207de2a9 100644 --- a/pandas/compat/chainmap.py +++ b/pandas/compat/chainmap.py @@ -15,9 +15,3 @@ def __delitem__(self, key): del mapping[key] return raise KeyError(key) - - # override because the m parameter is introduced in Python 3.4 - def new_child(self, m=None): - if m is None: - m = {} - return self.__class__(m, *self.maps) diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index bca33513b0069..87240a9f986c3 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -196,10 +196,6 @@ def load_newobj_ex(self): def load(fh, encoding=None, is_verbose=False): """load a pickle, with a provided encoding - if compat is True: - fake the old class hierarchy - if it works, then return the new type objects - Parameters ---------- fh : a filelike object diff --git a/pandas/conftest.py b/pandas/conftest.py index 2cf7bf6a6df41..b032e14d8f7e1 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -123,18 +123,22 @@ def ip(): @pytest.fixture(params=[True, False, None]) def observed(request): - """ pass in the observed keyword to groupby for [True, False] + """ + Pass in the observed keyword to groupby for [True, False] This indicates whether categoricals should return values for values which are not in the grouper [False / None], or only values which appear in the grouper [True]. [None] is supported for future compatibility if we decide to change the default (and would need to warn if this - parameter is not passed)""" + parameter is not passed). + """ return request.param @pytest.fixture(params=[True, False, None]) def ordered_fixture(request): - """Boolean 'ordered' parameter for Categorical.""" + """ + Boolean 'ordered' parameter for Categorical. + """ return request.param @@ -234,7 +238,8 @@ def cython_table_items(request): def _get_cython_table_params(ndframe, func_names_and_expected): - """combine frame, functions from SelectionMixin._cython_table + """ + Combine frame, functions from SelectionMixin._cython_table keys and expected result. Parameters @@ -242,7 +247,7 @@ def _get_cython_table_params(ndframe, func_names_and_expected): ndframe : DataFrame or Series func_names_and_expected : Sequence of two items The first item is a name of a NDFrame method ('sum', 'prod') etc. - The second item is the expected return value + The second item is the expected return value. Returns ------- @@ -341,7 +346,8 @@ def strict_data_files(pytestconfig): @pytest.fixture def datapath(strict_data_files): - """Get the path to a data file. + """ + Get the path to a data file. Parameters ---------- @@ -375,7 +381,9 @@ def deco(*args): @pytest.fixture def iris(datapath): - """The iris dataset as a DataFrame.""" + """ + The iris dataset as a DataFrame. + """ return pd.read_csv(datapath("data", "iris.csv")) @@ -504,7 +512,8 @@ def tz_aware_fixture(request): @pytest.fixture(params=STRING_DTYPES) def string_dtype(request): - """Parametrized fixture for string dtypes. + """ + Parametrized fixture for string dtypes. * str * 'str' @@ -515,7 +524,8 @@ def string_dtype(request): @pytest.fixture(params=BYTES_DTYPES) def bytes_dtype(request): - """Parametrized fixture for bytes dtypes. + """ + Parametrized fixture for bytes dtypes. * bytes * 'bytes' @@ -525,7 +535,8 @@ def bytes_dtype(request): @pytest.fixture(params=OBJECT_DTYPES) def object_dtype(request): - """Parametrized fixture for object dtypes. + """ + Parametrized fixture for object dtypes. 
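The `_import_lzma`/`_get_lzma_file` helpers that close just above split optional-dependency handling into two steps: warn once at import time if the module is missing, and raise only when it is actually used. A generic sketch of the same pattern (the helper names here are mine, not pandas API):

```python
import warnings

def import_optional(name, warn_msg):
    """Return the module, or None (with a warning) if it cannot be imported."""
    try:
        return __import__(name)
    except ImportError:
        warnings.warn(warn_msg)
        return None

def require(module, name):
    """Fail loudly only at the point of use."""
    if module is None:
        raise RuntimeError("{} is not available".format(name))
    return module

lzma = import_optional("lzma", "lzma missing; compression will fail")
LZMAFile = require(lzma, "lzma").LZMAFile  # raises here if lzma is absent
```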
* object * 'object' @@ -535,7 +546,8 @@ def object_dtype(request): @pytest.fixture(params=DATETIME64_DTYPES) def datetime64_dtype(request): - """Parametrized fixture for datetime64 dtypes. + """ + Parametrized fixture for datetime64 dtypes. * 'datetime64[ns]' * 'M8[ns]' @@ -545,7 +557,8 @@ def datetime64_dtype(request): @pytest.fixture(params=TIMEDELTA64_DTYPES) def timedelta64_dtype(request): - """Parametrized fixture for timedelta64 dtypes. + """ + Parametrized fixture for timedelta64 dtypes. * 'timedelta64[ns]' * 'm8[ns]' diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 21d12d02c9008..1132f7d6ffdfd 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -28,13 +28,11 @@ is_complex_dtype, is_datetime64_any_dtype, is_datetime64_ns_dtype, - is_datetime64tz_dtype, is_datetimelike, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, - is_interval_dtype, is_list_like, is_numeric_dtype, is_object_dtype, @@ -183,8 +181,6 @@ def _reconstruct_data(values, dtype, original): if is_extension_array_dtype(dtype): values = dtype.construct_array_type()._from_sequence(values) - elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype): - values = Index(original)._shallow_copy(values, name=None) elif is_bool_dtype(dtype): values = values.astype(dtype) @@ -1645,19 +1641,13 @@ def take_nd( May be the same type as the input, or cast to an ndarray. """ - # TODO(EA): Remove these if / elifs as datetimeTZ, interval, become EAs - # dispatch to internal type takes if is_extension_array_dtype(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) - elif is_datetime64tz_dtype(arr): - return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) - elif is_interval_dtype(arr): - return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) if is_sparse(arr): arr = arr.to_dense() elif isinstance(arr, (ABCIndexClass, ABCSeries)): - arr = arr.values + arr = arr._values arr = np.asarray(arr) @@ -1925,6 +1915,7 @@ def diff(arr, n, axis=0): dtype = arr.dtype is_timedelta = False + is_bool = False if needs_i8_conversion(arr): dtype = np.float64 arr = arr.view("i8") @@ -1933,6 +1924,7 @@ def diff(arr, n, axis=0): elif is_bool_dtype(dtype): dtype = np.object_ + is_bool = True elif is_integer_dtype(dtype): dtype = np.float64 @@ -1972,6 +1964,8 @@ def diff(arr, n, axis=0): result = res - lag result[mask] = na out_arr[res_indexer] = result + elif is_bool: + out_arr[res_indexer] = arr[res_indexer] ^ arr[lag_indexer] else: out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer] diff --git a/pandas/core/api.py b/pandas/core/api.py index 73323d93b8215..bd2a57a15bdd2 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -2,6 +2,16 @@ import numpy as np +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + IntervalDtype, + PeriodDtype, +) +from pandas.core.dtypes.missing import isna, isnull, notna, notnull + +from pandas.core.algorithms import factorize, unique, value_counts +from pandas.core.arrays import Categorical from pandas.core.arrays.integer import ( Int8Dtype, Int16Dtype, @@ -12,45 +22,38 @@ UInt32Dtype, UInt64Dtype, ) -from pandas.core.algorithms import factorize, unique, value_counts -from pandas.core.dtypes.missing import isna, isnull, notna, notnull -from pandas.core.dtypes.dtypes import ( - CategoricalDtype, - PeriodDtype, - IntervalDtype, - DatetimeTZDtype, -) -from pandas.core.arrays import Categorical from pandas.core.construction import array + from pandas.core.groupby import 
Grouper, NamedAgg -from pandas.io.formats.format import set_eng_float_format + +# DataFrame needs to be imported after NamedAgg to avoid a circular import +from pandas.core.frame import DataFrame # isort:skip from pandas.core.index import ( - Index, CategoricalIndex, - Int64Index, - UInt64Index, - RangeIndex, + DatetimeIndex, Float64Index, - MultiIndex, + Index, + Int64Index, IntervalIndex, - TimedeltaIndex, - DatetimeIndex, - PeriodIndex, + MultiIndex, NaT, + PeriodIndex, + RangeIndex, + TimedeltaIndex, + UInt64Index, ) +from pandas.core.indexes.datetimes import Timestamp, bdate_range, date_range +from pandas.core.indexes.interval import Interval, interval_range from pandas.core.indexes.period import Period, period_range from pandas.core.indexes.timedeltas import Timedelta, timedelta_range -from pandas.core.indexes.datetimes import Timestamp, date_range, bdate_range -from pandas.core.indexes.interval import Interval, interval_range - -from pandas.core.series import Series -from pandas.core.frame import DataFrame - -# TODO: Remove import when statsmodels updates #18264 -from pandas.core.reshape.reshape import get_dummies - from pandas.core.indexing import IndexSlice -from pandas.core.tools.numeric import to_numeric -from pandas.tseries.offsets import DateOffset +from pandas.core.reshape.reshape import ( + get_dummies, +) # TODO: Remove get_dummies import when statsmodels updates #18264 +from pandas.core.series import Series from pandas.core.tools.datetimes import to_datetime +from pandas.core.tools.numeric import to_numeric from pandas.core.tools.timedeltas import to_timedelta + +from pandas.io.formats.format import set_eng_float_format +from pandas.tseries.offsets import DateOffset diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 5c8599dbb054b..b96b3c7572031 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -3,7 +3,7 @@ import numpy as np -from pandas._libs import reduction +from pandas._libs import reduction as libreduction from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import ( @@ -221,7 +221,7 @@ def apply_raw(self): """ apply to the values as a numpy array """ try: - result = reduction.compute_reduction(self.values, self.f, axis=self.axis) + result = libreduction.compute_reduction(self.values, self.f, axis=self.axis) except Exception: result = np.apply_along_axis(self.f, self.axis, self.values) @@ -281,7 +281,7 @@ def apply_standard(self): dummy = Series(empty_arr, index=index, dtype=values.dtype) try: - result = reduction.compute_reduction( + result = libreduction.compute_reduction( values, self.f, axis=self.axis, dummy=dummy, labels=labels ) return self.obj._constructor_sliced(result, index=labels) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index e517be4f03a16..0778b6726d104 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -64,9 +64,9 @@ class ExtensionArray: shift take unique + view _concat_same_type _formatter - _formatting_values _from_factorized _from_sequence _from_sequence_of_strings @@ -147,7 +147,7 @@ class ExtensionArray: If implementing NumPy's ``__array_ufunc__`` interface, pandas expects that - 1. You defer by raising ``NotImplemented`` when any Series are present + 1. You defer by returning ``NotImplemented`` when any Series are present in `inputs`. Pandas will extract the arrays and call the ufunc again. 2. You define a ``_HANDLED_TYPES`` tuple as an attribute on the class. 
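In the `apply.py` hunk above, `apply_raw` first tries the Cython reducer (now imported under the clearer name `libreduction`) and falls back to `np.apply_along_axis` on any exception. The fallback on its own looks like this:

```python
import numpy as np

values = np.arange(12.0).reshape(3, 4)
result = np.apply_along_axis(np.sum, 1, values)  # apply the function row by row
print(result)  # [ 6. 22. 38.]
```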
Pandas inspect this to determine whether the ufunc is valid for the @@ -514,7 +514,7 @@ def fillna(self, value=None, method=None, limit=None): def dropna(self): """ - Return ExtensionArray without NA values + Return ExtensionArray without NA values. Returns ------- @@ -862,6 +862,27 @@ def copy(self) -> ABCExtensionArray: """ raise AbstractMethodError(self) + def view(self, dtype=None) -> Union[ABCExtensionArray, np.ndarray]: + """ + Return a view on the array. + + Parameters + ---------- + dtype : str, np.dtype, or ExtensionDtype, optional + Default None + + Returns + ------- + ExtensionArray + """ + # NB: + # - This must return a *new* object referencing the same data, not self. + # - The only case that *must* be implemented is with dtype=None, + # giving a view with the same dtype as self. + if dtype is not None: + raise NotImplementedError(dtype) + return self[:] + # ------------------------------------------------------------------------ # Printing # ------------------------------------------------------------------------ @@ -908,21 +929,6 @@ def _formatter(self, boxed: bool = False) -> Callable[[Any], Optional[str]]: return str return repr - def _formatting_values(self) -> np.ndarray: - # At the moment, this has to be an array since we use result.dtype - """ - An array of values to be printed in, e.g. the Series repr - - .. deprecated:: 0.24.0 - - Use :meth:`ExtensionArray._formatter` instead. - - Returns - ------- - array : ndarray - """ - return np.array(self) - # ------------------------------------------------------------------------ # Reshaping # ------------------------------------------------------------------------ @@ -951,7 +957,7 @@ def _concat_same_type( cls, to_concat: Sequence[ABCExtensionArray] ) -> ABCExtensionArray: """ - Concatenate multiple array + Concatenate multiple array. Parameters ---------- diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index d22b4bd4d3f2b..5929a8d51fe43 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1,3 +1,4 @@ +import operator from shutil import get_terminal_size import textwrap from typing import Type, Union, cast @@ -22,7 +23,6 @@ ensure_int64, ensure_object, ensure_platform_int, - is_categorical, is_categorical_dtype, is_datetime64_dtype, is_datetimelike, @@ -79,6 +79,8 @@ def _cat_compare_op(op): + opname = "__{op}__".format(op=op.__name__) + def f(self, other): # On python2, you can usually compare any type to any type, and # Categoricals can be seen as a custom type, but having different @@ -89,9 +91,12 @@ def f(self, other): return NotImplemented other = lib.item_from_zerodim(other) + if is_list_like(other) and len(other) != len(self): + # TODO: Could this fail if the categories are listlike objects? 
+ raise ValueError("Lengths must match.") if not self.ordered: - if op in ["__lt__", "__gt__", "__le__", "__ge__"]: + if opname in ["__lt__", "__gt__", "__le__", "__ge__"]: raise TypeError( "Unordered Categoricals can only compare equality or not" ) @@ -118,7 +123,7 @@ def f(self, other): other_codes = other._codes mask = (self._codes == -1) | (other_codes == -1) - f = getattr(self._codes, op) + f = getattr(self._codes, opname) ret = f(other_codes) if mask.any(): # In other series, the leads to False, so do that here too @@ -128,38 +133,38 @@ def f(self, other): if is_scalar(other): if other in self.categories: i = self.categories.get_loc(other) - ret = getattr(self._codes, op)(i) + ret = getattr(self._codes, opname)(i) # check for NaN in self mask = self._codes == -1 ret[mask] = False return ret else: - if op == "__eq__": + if opname == "__eq__": return np.repeat(False, len(self)) - elif op == "__ne__": + elif opname == "__ne__": return np.repeat(True, len(self)) else: msg = ( "Cannot compare a Categorical for op {op} with a " "scalar, which is not a category." ) - raise TypeError(msg.format(op=op)) + raise TypeError(msg.format(op=opname)) else: # allow categorical vs object dtype array comparisons for equality # these are only positional comparisons - if op in ["__eq__", "__ne__"]: - return getattr(np.array(self), op)(np.array(other)) + if opname in ["__eq__", "__ne__"]: + return getattr(np.array(self), opname)(np.array(other)) msg = ( "Cannot compare a Categorical for op {op} with type {typ}." "\nIf you want to compare values, use 'np.asarray(cat) " "<op> other'." ) - raise TypeError(msg.format(op=op, typ=type(other))) + raise TypeError(msg.format(op=opname, typ=type(other))) - f.__name__ = op + f.__name__ = opname return f @@ -466,7 +471,7 @@ def ordered(self) -> Ordered: @property def dtype(self) -> CategoricalDtype: """ - The :class:`~pandas.api.types.CategoricalDtype` for this instance + The :class:`~pandas.api.types.CategoricalDtype` for this instance. """ return self._dtype @@ -517,19 +522,12 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: return self._set_dtype(dtype) return np.array(self, dtype=dtype, copy=copy) - @cache_readonly - def ndim(self) -> int: - """ - Number of dimensions of the Categorical - """ - return self._codes.ndim - @cache_readonly def size(self) -> int: """ return the len of myself """ - return len(self) + return self._codes.size @cache_readonly def itemsize(self) -> int: @@ -1248,12 +1246,12 @@ def map(self, mapper): new_categories = new_categories.insert(len(new_categories), np.nan) return np.take(new_categories, self._codes) - __eq__ = _cat_compare_op("__eq__") - __ne__ = _cat_compare_op("__ne__") - __lt__ = _cat_compare_op("__lt__") - __gt__ = _cat_compare_op("__gt__") - __le__ = _cat_compare_op("__le__") - __ge__ = _cat_compare_op("__ge__") + __eq__ = _cat_compare_op(operator.eq) + __ne__ = _cat_compare_op(operator.ne) + __lt__ = _cat_compare_op(operator.lt) + __gt__ = _cat_compare_op(operator.gt) + __le__ = _cat_compare_op(operator.le) + __ge__ = _cat_compare_op(operator.ge) # for Series/ndarray like compat @property @@ -1764,18 +1762,10 @@ def ravel(self, order="C"): ) return np.array(self) - def view(self): - """ - Return a view of myself. - - For internal compatibility with numpy arrays. - - Returns - ------- - view : Categorical - Returns `self`! 
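The `_cat_compare_op` refactor above changes the factory's argument from a dunder-name string to the operator function itself (`operator.eq`, `operator.lt`, ...), deriving `opname` once outside the returned closure. A stripped-down sketch of that factory shape (`make_cmp` is illustrative, not the pandas function):

```python
import operator

def make_cmp(op):
    opname = "__{}__".format(op.__name__)  # e.g. operator.eq -> "__eq__"

    def f(self, other):
        # delegate the comparison to the underlying codes array
        return getattr(self.codes, opname)(other)

    f.__name__ = opname
    return f

print(make_cmp(operator.eq).__name__)  # __eq__
```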
- """ - return self + def view(self, dtype=None): + if dtype is not None: + raise NotImplementedError(dtype) + return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True) def to_dense(self): """ @@ -1850,8 +1840,8 @@ def fillna(self, value=None, method=None, limit=None): raise ValueError("fill value must be in categories") values_codes = _get_codes_for_values(value, self.categories) - indexer = np.where(values_codes != -1) - codes[indexer] = values_codes[values_codes != -1] + indexer = np.where(codes == -1) + codes[indexer] = values_codes[indexer] # If value is not a dict or Series it should be a scalar elif is_hashable(value): @@ -2659,18 +2649,18 @@ def _get_codes_for_values(values, categories): return coerce_indexer_dtype(t.lookup(vals), cats) -def _recode_for_categories(codes, old_categories, new_categories): +def _recode_for_categories(codes: np.ndarray, old_categories, new_categories): """ Convert a set of codes for to a new set of categories Parameters ---------- - codes : array + codes : np.ndarray old_categories, new_categories : Index Returns ------- - new_codes : array + new_codes : np.ndarray[np.int64] Examples -------- @@ -2725,17 +2715,15 @@ def _factorize_from_iterable(values): If `values` has a categorical dtype, then `categories` is a CategoricalIndex keeping the categories and order of `values`. """ - from pandas.core.indexes.category import CategoricalIndex - if not is_list_like(values): raise TypeError("Input must be list-like") - if is_categorical(values): - values = CategoricalIndex(values) - # The CategoricalIndex level we want to build has the same categories + if is_categorical_dtype(values): + values = extract_array(values) + # The Categorical we want to build has the same categories # as values but its codes are by def [0, ..., len(n_categories) - 1] cat_codes = np.arange(len(values.categories), dtype=values.codes.dtype) - categories = values._create_from_codes(cat_codes) + categories = Categorical.from_codes(cat_codes, dtype=values.dtype) codes = values.codes else: # The value of ordered is irrelevant since we don't use cat as such, diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 2747b1d7dd9f1..1988726edc79b 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -22,7 +22,6 @@ is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, - is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, @@ -44,9 +43,10 @@ from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna from pandas._typing import DatetimeLikeScalar -from pandas.core import missing, nanops, ops +from pandas.core import missing, nanops from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts import pandas.core.common as com +from pandas.core.ops.invalid import make_invalid_op from pandas.tseries import frequencies from pandas.tseries.offsets import DateOffset, Tick @@ -160,8 +160,8 @@ def strftime(self, date_format): Returns ------- - Index - Index of formatted strings. + ndarray + NumPy ndarray of formatted strings. 
See Also -------- @@ -179,9 +179,7 @@ def strftime(self, date_format): 'March 10, 2018, 09:00:02 AM'], dtype='object') """ - from pandas import Index - - return Index(self._format_native_types(date_format=date_format)) + return self._format_native_types(date_format=date_format).astype(object) class TimelikeOps: @@ -545,18 +543,8 @@ def astype(self, dtype, copy=True): return np.asarray(self, dtype=dtype) def view(self, dtype=None): - """ - New view on this array with the same data. - - Parameters - ---------- - dtype : numpy dtype, optional - - Returns - ------- - ndarray - With the specified `dtype`. - """ + if dtype is None or dtype is self.dtype: + return type(self)(self._data, dtype=self.dtype) return self._data.view(dtype=dtype) # ------------------------------------------------------------------ @@ -921,18 +909,18 @@ def _is_unique(self): # pow is invalid for all three subclasses; TimedeltaArray will override # the multiplication and division ops - __pow__ = ops.make_invalid_op("__pow__") - __rpow__ = ops.make_invalid_op("__rpow__") - __mul__ = ops.make_invalid_op("__mul__") - __rmul__ = ops.make_invalid_op("__rmul__") - __truediv__ = ops.make_invalid_op("__truediv__") - __rtruediv__ = ops.make_invalid_op("__rtruediv__") - __floordiv__ = ops.make_invalid_op("__floordiv__") - __rfloordiv__ = ops.make_invalid_op("__rfloordiv__") - __mod__ = ops.make_invalid_op("__mod__") - __rmod__ = ops.make_invalid_op("__rmod__") - __divmod__ = ops.make_invalid_op("__divmod__") - __rdivmod__ = ops.make_invalid_op("__rdivmod__") + __pow__ = make_invalid_op("__pow__") + __rpow__ = make_invalid_op("__rpow__") + __mul__ = make_invalid_op("__mul__") + __rmul__ = make_invalid_op("__rmul__") + __truediv__ = make_invalid_op("__truediv__") + __rtruediv__ = make_invalid_op("__rtruediv__") + __floordiv__ = make_invalid_op("__floordiv__") + __rfloordiv__ = make_invalid_op("__rfloordiv__") + __mod__ = make_invalid_op("__mod__") + __rmod__ = make_invalid_op("__rmod__") + __divmod__ = make_invalid_op("__divmod__") + __rdivmod__ = make_invalid_op("__rdivmod__") def _add_datetimelike_scalar(self, other): # Overriden by TimedeltaArray @@ -1017,9 +1005,9 @@ def _add_delta_tdi(self, other): if isinstance(other, np.ndarray): # ndarray[timedelta64]; wrap in TimedeltaIndex for op - from pandas import TimedeltaIndex + from pandas.core.arrays import TimedeltaArray - other = TimedeltaIndex(other) + other = TimedeltaArray._from_sequence(other) self_i8 = self.asi8 other_i8 = other.asi8 @@ -1241,29 +1229,17 @@ def __add__(self, other): if not is_period_dtype(self): maybe_integer_op_deprecated(self) result = self._addsub_int_array(other, operator.add) - elif is_float_dtype(other): - # Explicitly catch invalid dtypes - raise TypeError( - "cannot add {dtype}-dtype to {cls}".format( - dtype=other.dtype, cls=type(self).__name__ - ) - ) - elif is_period_dtype(other): - # if self is a TimedeltaArray and other is a PeriodArray with - # a timedelta-like (i.e. Tick) freq, this operation is valid. - # Defer to the PeriodArray implementation. - # In remaining cases, this will end up raising TypeError. - return NotImplemented - elif is_extension_array_dtype(other): - # Categorical op will raise; defer explicitly - return NotImplemented - else: # pragma: no cover + else: + # Includes Categorical, other ExtensionArrays + # For PeriodDtype, if self is a TimedeltaArray and other is a + # PeriodArray with a timedelta-like (i.e. Tick) freq, this + # operation is valid. Defer to the PeriodArray implementation. 
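`make_invalid_op`, now imported from `pandas.core.ops.invalid` above, manufactures a method that unconditionally raises; that is how `__pow__`, `__mul__`, and friends are blocked on datetime-like arrays. A rough sketch (not the exact pandas implementation):

```python
def make_invalid_op(name):
    def invalid_op(self, other=None):
        raise TypeError(
            "cannot perform {} with this type: {}".format(
                name, type(self).__name__
            )
        )

    invalid_op.__name__ = name
    return invalid_op

class Demo:
    __pow__ = make_invalid_op("__pow__")

try:
    Demo() ** 2
except TypeError as err:
    print(err)  # cannot perform __pow__ with this type: Demo
```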
+ # In remaining cases, this will end up raising TypeError. return NotImplemented if is_timedelta64_dtype(result) and isinstance(result, np.ndarray): from pandas.core.arrays import TimedeltaArray - # TODO: infer freq? return TimedeltaArray(result) return result @@ -1313,29 +1289,13 @@ def __sub__(self, other): if not is_period_dtype(self): maybe_integer_op_deprecated(self) result = self._addsub_int_array(other, operator.sub) - elif isinstance(other, ABCIndexClass): - raise TypeError( - "cannot subtract {cls} and {typ}".format( - cls=type(self).__name__, typ=type(other).__name__ - ) - ) - elif is_float_dtype(other): - # Explicitly catch invalid dtypes - raise TypeError( - "cannot subtract {dtype}-dtype from {cls}".format( - dtype=other.dtype, cls=type(self).__name__ - ) - ) - elif is_extension_array_dtype(other): - # Categorical op will raise; defer explicitly - return NotImplemented - else: # pragma: no cover + else: + # Includes ExtensionArrays, float_dtype return NotImplemented if is_timedelta64_dtype(result) and isinstance(result, np.ndarray): from pandas.core.arrays import TimedeltaArray - # TODO: infer freq? return TimedeltaArray(result) return result diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 061ee4b90d0e9..732f819e743a4 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -53,6 +53,7 @@ from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com +from pandas.core.ops.invalid import invalid_comparison from pandas.tseries.frequencies import get_period_alias, to_offset from pandas.tseries.offsets import Day, Tick @@ -171,13 +172,13 @@ def wrapper(self, other): other = _to_M8(other, tz=self.tz) except ValueError: # string that cannot be parsed to Timestamp - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) result = op(self.asi8, other.view("i8")) if isna(other): result.fill(nat_result) elif lib.is_scalar(other) or np.ndim(other) == 0: - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") else: @@ -191,20 +192,20 @@ def wrapper(self, other): ): # Following Timestamp convention, __eq__ is all-False # and __ne__ is all True, others raise TypeError. - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) if is_object_dtype(other): - # We have to use _comp_method_OBJECT_ARRAY instead of numpy + # We have to use comp_method_OBJECT_ARRAY instead of numpy # comparison otherwise it would fail to raise when # comparing tz-aware and tz-naive with np.errstate(all="ignore"): - result = ops._comp_method_OBJECT_ARRAY( + result = ops.comp_method_OBJECT_ARRAY( op, self.astype(object), other ) o_mask = isna(other) elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)): # e.g. is_timedelta64_dtype(other) - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) else: self._assert_tzawareness_compat(other) if isinstance(other, (ABCIndexClass, ABCSeries)): @@ -222,8 +223,6 @@ def wrapper(self, other): result = op(self.view("i8"), other.view("i8")) o_mask = other._isnan - result = com.values_from_object(result) - if o_mask.any(): result[o_mask] = nat_result @@ -1064,6 +1063,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None): Be careful with DST changes. 
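The comparison wrappers above route unsupported operands through `invalid_comparison`: equality is all-False, inequality is all-True, and ordering comparisons raise. A rough standalone sketch of that contract (a simplification of the real helper):

```python
import operator
import numpy as np

def invalid_comparison(left, right, op):
    if op is operator.eq:
        return np.zeros(np.shape(left), dtype=bool)
    if op is operator.ne:
        return np.ones(np.shape(left), dtype=bool)
    raise TypeError(
        "Invalid comparison between {} and {}".format(
            type(left).__name__, type(right).__name__
        )
    )

print(invalid_comparison(np.arange(3), "foo", operator.eq))
# [False False False]
```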
When there is sequential data, pandas can infer the DST time: + >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', @@ -1095,6 +1095,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None): If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backwards'`. + >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward') @@ -1159,7 +1160,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None): def to_pydatetime(self): """ Return Datetime Array/Index as object ndarray of datetime.datetime - objects + objects. Returns ------- @@ -1284,7 +1285,7 @@ def to_perioddelta(self, freq): """ Calculate TimedeltaArray of difference between index values and index converted to PeriodArray at specified - freq. Used for vectorized offsets + freq. Used for vectorized offsets. Parameters ---------- diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 1f14bd169a228..069d661e6af34 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -21,7 +21,7 @@ is_scalar, ) from pandas.core.dtypes.dtypes import register_extension_dtype -from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries +from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna, notna from pandas.core import nanops, ops @@ -592,25 +592,29 @@ def _values_for_argsort(self) -> np.ndarray: @classmethod def _create_comparison_method(cls, op): - def cmp_method(self, other): + op_name = op.__name__ - op_name = op.__name__ - mask = None + def cmp_method(self, other): - if isinstance(other, (ABCSeries, ABCIndexClass)): + if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): # Rely on pandas to unbox and dispatch to us. return NotImplemented + other = lib.item_from_zerodim(other) + mask = None + if isinstance(other, IntegerArray): other, mask = other._data, other._mask elif is_list_like(other): other = np.asarray(other) - if other.ndim > 0 and len(self) != len(other): + if other.ndim > 1: + raise NotImplementedError( + "can only perform ops with 1-d structures" + ) + if len(self) != len(other): raise ValueError("Lengths must match to compare") - other = lib.item_from_zerodim(other) - # numpy will show a DeprecationWarning on invalid elementwise # comparisons, this will raise in the future with warnings.catch_warnings(): @@ -683,31 +687,31 @@ def _maybe_mask_result(self, result, mask, other, op_name): @classmethod def _create_arithmetic_method(cls, op): - def integer_arithmetic_method(self, other): + op_name = op.__name__ - op_name = op.__name__ - mask = None + def integer_arithmetic_method(self, other): - if isinstance(other, (ABCSeries, ABCIndexClass)): + if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): # Rely on pandas to unbox and dispatch to us. 
return NotImplemented - if getattr(other, "ndim", 0) > 1: - raise NotImplementedError("can only perform ops with 1-d structures") + other = lib.item_from_zerodim(other) + mask = None if isinstance(other, IntegerArray): other, mask = other._data, other._mask - elif getattr(other, "ndim", None) == 0: - other = other.item() - elif is_list_like(other): other = np.asarray(other) - if not other.ndim: - other = other.item() - elif other.ndim == 1: - if not (is_float_dtype(other) or is_integer_dtype(other)): - raise TypeError("can only perform ops with numeric values") + if other.ndim > 1: + raise NotImplementedError( + "can only perform ops with 1-d structures" + ) + if len(self) != len(other): + raise ValueError("Lengths must match") + if not (is_float_dtype(other) or is_integer_dtype(other)): + raise TypeError("can only perform ops with numeric values") + else: if not (is_float(other) or is_integer(other)): raise TypeError("can only perform ops with numeric values") diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 2b3c02bd1cade..7a14d6f1b619a 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -358,54 +358,10 @@ def from_arrays(cls, left, right, closed="right", copy=False, dtype=None): left, right, closed, copy=copy, dtype=dtype, verify_integrity=True ) - _interval_shared_docs[ - "from_intervals" - ] = """ - Construct an %(klass)s from a 1d array of Interval objects - - .. deprecated:: 0.23.0 - - Parameters - ---------- - data : array-like (1-dimensional) - Array of Interval objects. All intervals must be closed on the same - sides. - copy : boolean, default False - by-default copy the data, this is compat only and ignored - dtype : dtype or None, default None - If None, dtype will be inferred - - ..versionadded:: 0.23.0 - - See Also - -------- - interval_range : Function to create a fixed frequency IntervalIndex. - %(klass)s.from_arrays : Construct an %(klass)s from a left and - right array. - %(klass)s.from_breaks : Construct an %(klass)s from an array of - splits. - %(klass)s.from_tuples : Construct an %(klass)s from an - array-like of tuples. - - Examples - -------- - >>> pd.%(qualname)s.from_intervals([pd.Interval(0, 1), - ... pd.Interval(1, 2)]) - %(klass)s([(0, 1], (1, 2]], - closed='right', dtype='interval[int64]') - - The generic Index constructor work identically when it infers an array - of all intervals: - - >>> pd.Index([pd.Interval(0, 1), pd.Interval(1, 2)]) - %(klass)s([(0, 1], (1, 2]], - closed='right', dtype='interval[int64]') - """ - _interval_shared_docs[ "from_tuples" ] = """ - Construct an %(klass)s from an array-like of tuples + Construct an %(klass)s from an array-like of tuples. Parameters ---------- @@ -739,18 +695,14 @@ def isna(self): return isna(self.left) @property - def nbytes(self): + def nbytes(self) -> int: return self.left.nbytes + self.right.nbytes @property - def size(self): + def size(self) -> int: # Avoid materializing self.values return self.left.size - @property - def shape(self): - return self.left.shape - def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs): """ Take elements from the IntervalArray. @@ -902,7 +854,7 @@ def _format_space(self): def left(self): """ Return the left endpoints of each Interval in the IntervalArray as - an Index + an Index. """ return self._left @@ -910,7 +862,7 @@ def left(self): def right(self): """ Return the right endpoints of each Interval in the IntervalArray as - an Index + an Index. 
""" return self._right @@ -918,7 +870,7 @@ def right(self): def closed(self): """ Whether the intervals are closed on the left-side, right-side, both or - neither + neither. """ return self._closed @@ -926,7 +878,7 @@ def closed(self): "set_closed" ] = """ Return an %(klass)s identical to the current one, but closed on the - specified side + specified side. .. versionadded:: 0.24.0 @@ -965,7 +917,7 @@ def set_closed(self, closed): def length(self): """ Return an Index with entries denoting the length of each Interval in - the IntervalArray + the IntervalArray. """ try: return self.right - self.left @@ -993,7 +945,7 @@ def mid(self): ] = """ Return True if the %(klass)s is non-overlapping (no Intervals share points) and is either monotonic increasing or monotonic decreasing, - else False + else False. """ # https://github.com/python/mypy/issues/1362 # Mypy does not support decorated properties @@ -1043,7 +995,7 @@ def __array__(self, dtype=None): _interval_shared_docs[ "to_tuples" ] = """ - Return an %(return_type)s of tuples of the form (left, right) + Return an %(return_type)s of tuples of the form (left, right). Parameters ---------- diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 39529177b9e35..4e2e37d88eb9a 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -125,7 +125,11 @@ def __init__(self, values, copy=False): if isinstance(values, type(self)): values = values._ndarray if not isinstance(values, np.ndarray): - raise ValueError("'values' must be a NumPy array.") + raise ValueError( + "'values' must be a NumPy array, not {typ}".format( + typ=type(values).__name__ + ) + ) if values.ndim != 1: raise ValueError("PandasArray must be 1-dimensional.") @@ -241,11 +245,11 @@ def __setitem__(self, key, value): else: self._ndarray[key] = value - def __len__(self): + def __len__(self) -> int: return len(self._ndarray) @property - def nbytes(self): + def nbytes(self) -> int: return self._ndarray.nbytes def isna(self): diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 20ce11c70c344..f2d74794eadf5 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -426,7 +426,7 @@ def __array__(self, dtype=None): @property def is_leap_year(self): """ - Logical indicating if the date belongs to a leap year + Logical indicating if the date belongs to a leap year. """ return isleapyear_arr(np.asarray(self.year)) diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py index 47c7c72051150..201174b6b1995 100644 --- a/pandas/core/arrays/sparse.py +++ b/pandas/core/arrays/sparse.py @@ -39,6 +39,7 @@ ) from pandas.core.dtypes.dtypes import register_extension_dtype from pandas.core.dtypes.generic import ( + ABCDataFrame, ABCIndexClass, ABCSeries, ABCSparseArray, @@ -839,7 +840,7 @@ def fill_value(self, value): self._dtype = SparseDtype(self.dtype.subtype, value) @property - def kind(self): + def kind(self) -> str: """ The kind of sparse index for this array. One of {'integer', 'block'}. 
""" @@ -854,7 +855,7 @@ def _valid_sp_values(self): mask = notna(sp_vals) return sp_vals[mask] - def __len__(self): + def __len__(self) -> int: return self.sp_index.length @property @@ -868,7 +869,7 @@ def _fill_value_matches(self, fill_value): return self.fill_value == fill_value @property - def nbytes(self): + def nbytes(self) -> int: return self.sp_values.nbytes + self.sp_index.nbytes @property @@ -886,7 +887,7 @@ def density(self): return r @property - def npoints(self): + def npoints(self) -> int: """ The number of non- ``fill_value`` points. @@ -1693,6 +1694,9 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): for sp_value, fv in zip(sp_values, fill_value) ) return arrays + elif is_scalar(sp_values): + # e.g. reductions + return sp_values return self._simple_new( sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value) @@ -1732,13 +1736,15 @@ def sparse_unary_method(self): @classmethod def _create_arithmetic_method(cls, op): - def sparse_arithmetic_method(self, other): - op_name = op.__name__ + op_name = op.__name__ - if isinstance(other, (ABCSeries, ABCIndexClass)): + def sparse_arithmetic_method(self, other): + if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): # Rely on pandas to dispatch to us. return NotImplemented + other = lib.item_from_zerodim(other) + if isinstance(other, SparseArray): return _sparse_array_op(self, other, op, op_name) @@ -1781,11 +1787,11 @@ def sparse_arithmetic_method(self, other): @classmethod def _create_comparison_method(cls, op): - def cmp_method(self, other): - op_name = op.__name__ + op_name = op.__name__ + if op_name in {"and_", "or_"}: + op_name = op_name[:-1] - if op_name in {"and_", "or_"}: - op_name = op_name[:-1] + def cmp_method(self, other): if isinstance(other, (ABCSeries, ABCIndexClass)): # Rely on pandas to unbox and dispatch to us. diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index afd1e8203059e..3609c68a26c0f 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -41,9 +41,9 @@ ) from pandas.core.dtypes.missing import isna -from pandas.core import ops from pandas.core.algorithms import checked_add_with_arr import pandas.core.common as com +from pandas.core.ops.invalid import invalid_comparison from pandas.tseries.frequencies import to_offset from pandas.tseries.offsets import Tick @@ -90,14 +90,14 @@ def wrapper(self, other): other = Timedelta(other) except ValueError: # failed to parse as timedelta - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) result = op(self.view("i8"), other.value) if isna(other): result.fill(nat_result) elif not is_list_like(other): - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") @@ -106,7 +106,7 @@ def wrapper(self, other): try: other = type(self)._from_sequence(other)._data except (ValueError, TypeError): - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) result = op(self.view("i8"), other.view("i8")) result = com.values_from_object(result) @@ -173,8 +173,8 @@ class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps): "ceil", ] - # Needed so that NaT.__richcmp__(DateTimeArray) operates pointwise - ndim = 1 + # Note: ndim must be defined to ensure NaT.__richcmp(TimedeltaArray) + # operates pointwise. 
@property def _box_func(self): @@ -776,12 +776,14 @@ def __rdivmod__(self, other): res2 = other - res1 * self return res1, res2 - # Note: TimedeltaIndex overrides this in call to cls._add_numeric_methods def __neg__(self): if self.freq is not None: return type(self)(-self._data, freq=-self.freq) return type(self)(-self._data) + def __pos__(self): + return type(self)(self._data, freq=self.freq) + def __abs__(self): # Note: freq is not preserved return type(self)(np.abs(self._data)) diff --git a/pandas/core/base.py b/pandas/core/base.py index 7d2a62318232c..2d5ffb5e91392 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -47,7 +47,6 @@ class PandasObject(DirNamesMixin): - """baseclass for various pandas objects""" @property @@ -1462,7 +1461,7 @@ def is_monotonic_decreasing(self): def memory_usage(self, deep=False): """ - Memory usage of the values + Memory usage of the values. Parameters ---------- diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 1046401850963..3e1e5ed89d877 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -9,6 +9,7 @@ from pandas.errors import PerformanceWarning import pandas as pd +from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.computation.common import _result_type_many @@ -34,7 +35,7 @@ def _zip_axes_from_type(typ, new_axes): def _any_pandas_objects(terms): """Check a sequence of terms for instances of PandasObject.""" - return any(isinstance(term.value, pd.core.generic.PandasObject) for term in terms) + return any(isinstance(term.value, PandasObject) for term in terms) def _filter_special_cases(f): @@ -132,7 +133,8 @@ def _align(terms): def _reconstruct_object(typ, obj, axes, dtype): - """Reconstruct an object given its type, raw value, and possibly empty + """ + Reconstruct an object given its type, raw value, and possibly empty (None) axes. Parameters @@ -157,7 +159,7 @@ def _reconstruct_object(typ, obj, axes, dtype): res_t = np.result_type(obj.dtype, dtype) - if not isinstance(typ, partial) and issubclass(typ, pd.core.generic.PandasObject): + if not isinstance(typ, partial) and issubclass(typ, PandasObject): return typ(obj, dtype=res_t, **axes) # special case for pathological things like ~True/~False diff --git a/pandas/core/computation/common.py b/pandas/core/computation/common.py index b8e212fd2a32e..bd32c8bee1cdf 100644 --- a/pandas/core/computation/common.py +++ b/pandas/core/computation/common.py @@ -2,7 +2,7 @@ import numpy as np -import pandas as pd +from pandas._config import get_option # A token value Python's tokenizer probably will never use. _BACKTICK_QUOTED_STRING = 100 @@ -11,7 +11,7 @@ def _ensure_decoded(s): """ if we have bytes, decode them to unicode """ if isinstance(s, (np.bytes_, bytes)): - s = s.decode(pd.get_option("display.encoding")) + s = s.decode(get_option("display.encoding")) return s @@ -36,8 +36,3 @@ def _remove_spaces_column_name(name): class NameResolutionError(NameError): pass - - -class StringMixin: - # TODO: delete this class. Removing this ATM caused a failure. - pass diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py index 2c94b142a45b3..3cc34ea1f4ed7 100644 --- a/pandas/core/computation/engines.py +++ b/pandas/core/computation/engines.py @@ -17,7 +17,8 @@ class NumExprClobberingError(NameError): def _check_ne_builtin_clash(expr): - """Attempt to prevent foot-shooting in a helpful way. + """ + Attempt to prevent foot-shooting in a helpful way. 
Parameters ---------- @@ -53,7 +54,8 @@ def convert(self): return printing.pprint_thing(self.expr) def evaluate(self): - """Run the engine on the expression + """ + Run the engine on the expression. This method performs alignment which is necessary no matter what engine is being used, thus its implementation is in the base class. @@ -78,7 +80,8 @@ def _is_aligned(self): @abc.abstractmethod def _evaluate(self): - """Return an evaluated expression. + """ + Return an evaluated expression. Parameters ---------- @@ -94,7 +97,6 @@ def _evaluate(self): class NumExprEngine(AbstractEngine): - """NumExpr engine class""" has_neg_frac = True @@ -127,8 +129,8 @@ def _evaluate(self): class PythonEngine(AbstractEngine): - - """Evaluate an expression in Python space. + """ + Evaluate an expression in Python space. Mostly for testing purposes. """ diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index d0d87c23e9346..45319a4d63d94 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -41,7 +41,8 @@ def tokenize_string(source): - """Tokenize a Python source code string. + """ + Tokenize a Python source code string. Parameters ---------- @@ -366,8 +367,8 @@ def f(cls): @disallow(_unsupported_nodes) @add_ops(_op_classes) class BaseExprVisitor(ast.NodeVisitor): - - """Custom ast walker. Parsers of other engines should subclass this class + """ + Custom ast walker. Parsers of other engines should subclass this class if necessary. Parameters @@ -581,6 +582,9 @@ def visit_NameConstant(self, node, **kwargs): def visit_Num(self, node, **kwargs): return self.const_type(node.n, self.env) + def visit_Constant(self, node, **kwargs): + return self.const_type(node.n, self.env) + def visit_Str(self, node, **kwargs): name = self.env.add_tmp(node.s) return self.term_type(name, self.env) @@ -799,8 +803,8 @@ def __init__(self, env, engine, parser, preparser=lambda x: x): class Expr: - - """Object encapsulating an expression. + """ + Object encapsulating an expression. 
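The `visit_Constant` hook added above is Python 3.8 compatibility (note the new `PY38` flag in `pandas/compat`): literals that used to parse as `ast.Num`/`ast.Str` arrive as `ast.Constant` on newer interpreters, whose `.n` alias keeps the shared handler working. The difference is easy to see directly:

```python
import ast

tree = ast.parse("1 + 2", mode="eval")
print(type(tree.body.left).__name__)  # 'Constant' on Python 3.8+, 'Num' before
```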
Parameters ---------- diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index d9dc194d484ae..29c8239fa518f 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -76,16 +76,17 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): # required min elements (otherwise we are adding overhead) if np.prod(a.shape) > _MIN_ELEMENTS: - # check for dtype compatibility dtypes = set() for o in [a, b]: - if hasattr(o, "dtypes"): + # Series implements dtypes, check for dimension count as well + if hasattr(o, "dtypes") and o.ndim > 1: s = o.dtypes.value_counts() if len(s) > 1: return False dtypes |= set(s.index.astype(str)) - elif isinstance(o, np.ndarray): + # ndarray and Series Case + elif hasattr(o, "dtype"): dtypes |= {o.dtype.name} # allowed are a superset @@ -99,15 +100,13 @@ def _evaluate_numexpr(op, op_str, a, b, truediv=True, reversed=False, **eval_kwa result = None if _can_use_numexpr(op, op_str, a, b, "evaluate"): - try: - - # we were originally called by a reversed op - # method - if reversed: - a, b = b, a + if reversed: + # we were originally called by a reversed op method + a, b = b, a - a_value = getattr(a, "values", a) - b_value = getattr(b, "values", b) + a_value = getattr(a, "values", a) + b_value = getattr(b, "values", b) + try: result = ne.evaluate( "a_value {op} b_value".format(op=op_str), local_dict={"a_value": a_value, "b_value": b_value}, @@ -138,11 +137,11 @@ def _where_numexpr(cond, a, b): result = None if _can_use_numexpr(None, "where", a, b, "where"): + cond_value = getattr(cond, "values", cond) + a_value = getattr(a, "values", a) + b_value = getattr(b, "values", b) try: - cond_value = getattr(cond, "values", cond) - a_value = getattr(a, "values", a) - b_value = getattr(b, "values", b) result = ne.evaluate( "where(cond_value, a_value, b_value)", local_dict={ @@ -203,17 +202,19 @@ def _bool_arith_check( def evaluate(op, op_str, a, b, use_numexpr=True, **eval_kwargs): - """ evaluate and return the expression of the op on a and b - - Parameters - ---------- - - op : the actual operand - op_str: the string version of the op - a : left operand - b : right operand - use_numexpr : whether to try to use numexpr (default True) - """ + """ + Evaluate and return the expression of the op on a and b. + + Parameters + ---------- + op : the actual operand + op_str : str + The string version of the op. + a : left operand + b : right operand + use_numexpr : bool, default True + Whether to try to use numexpr. + """ use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b) if use_numexpr: @@ -222,16 +223,17 @@ def evaluate(op, op_str, a, b, use_numexpr=True, **eval_kwargs): def where(cond, a, b, use_numexpr=True): - """ evaluate the where condition cond on a and b - - Parameters - ---------- - - cond : a boolean array - a : return if cond is True - b : return if cond is False - use_numexpr : whether to try to use numexpr (default True) - """ + """ + Evaluate the where condition cond on a and b. + + Parameters + ---------- + cond : np.ndarray[bool] + a : return if cond is True + b : return if cond is False + use_numexpr : bool, default True + Whether to try to use numexpr. 
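`_can_use_numexpr` above gates the numexpr path on operand size (`_MIN_ELEMENTS`) and dtype compatibility; the fix is that a Series exposes `dtypes` but is one-dimensional, so it must be inspected through its single `dtype` rather than the frame-style `dtypes.value_counts()` path. A standalone sketch of the size gate alone (the threshold value here is illustrative, not pandas' actual constant):

```python
import numpy as np

MIN_ELEMENTS = 100000  # illustrative threshold

def evaluate_add(a, b):
    if np.prod(a.shape) > MIN_ELEMENTS:
        try:
            import numexpr as ne
            return ne.evaluate("a + b")  # numexpr resolves a and b from this frame
        except ImportError:
            pass  # numexpr unavailable: fall back to plain NumPy
    return a + b

print(evaluate_add(np.arange(3), np.arange(3)))  # [0 2 4]
```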
+ """ if use_numexpr: return _where(cond, a, b) diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 2bf09a553ce18..28b6aef693bfe 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -51,8 +51,9 @@ class UndefinedVariableError(NameError): - - """NameError subclass for local variables.""" + """ + NameError subclass for local variables. + """ def __init__(self, name, is_local): if is_local: @@ -191,8 +192,8 @@ def __repr__(self): class Op: - - """Hold an operator of arbitrary arity + """ + Hold an operator of arbitrary arity. """ def __init__(self, op, operands, *args, **kwargs): @@ -204,8 +205,9 @@ def __iter__(self): return iter(self.operands) def __repr__(self): - """Print a generic n-ary operator and its operands using infix - notation""" + """ + Print a generic n-ary operator and its operands using infix notation. + """ # recurse over the operands parened = ("({0})".format(pprint_thing(opr)) for opr in self.operands) return pprint_thing(" {0} ".format(self.op).join(parened)) @@ -296,7 +298,8 @@ def _not_in(x, y): def _cast_inplace(terms, acceptable_dtypes, dtype): - """Cast an expression inplace. + """ + Cast an expression inplace. Parameters ---------- @@ -304,7 +307,6 @@ def _cast_inplace(terms, acceptable_dtypes, dtype): The expression that should cast. acceptable_dtypes : list of acceptable numpy.dtype Will not cast if term's dtype in this list. - dtype : str or numpy.dtype The dtype to cast to. """ @@ -325,8 +327,8 @@ def is_term(obj): class BinOp(Op): - - """Hold a binary operator and its operands + """ + Hold a binary operator and its operands. Parameters ---------- @@ -355,7 +357,8 @@ def __init__(self, op, lhs, rhs, **kwargs): ) def __call__(self, env): - """Recursively evaluate an expression in Python space. + """ + Recursively evaluate an expression in Python space. Parameters ---------- @@ -377,7 +380,8 @@ def __call__(self, env): return self.func(left, right) def evaluate(self, env, engine, parser, term_type, eval_in_python): - """Evaluate a binary operation *before* being passed to the engine. + """ + Evaluate a binary operation *before* being passed to the engine. Parameters ---------- @@ -472,8 +476,8 @@ def isnumeric(dtype): class Div(BinOp): - - """Div operator to special case casting. + """ + Div operator to special case casting. Parameters ---------- @@ -504,8 +508,8 @@ def __init__(self, lhs, rhs, truediv, *args, **kwargs): class UnaryOp(Op): - - """Hold a unary operator and its operands + """ + Hold a unary operator and its operands. 
Parameters ---------- diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 1523eb05ac41d..81658ab23ba46 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -478,7 +478,6 @@ def _validate_where(w): class Expr(expr.Expr): - """ hold a pytables like expression, comprised of possibly multiple 'terms' Parameters @@ -573,7 +572,6 @@ def evaluate(self): class TermValue: - """ hold a term value the we use to construct a condition/filter """ def __init__(self, value, converted, kind): diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py index 8ddd0dd7622e7..b11411eb2dc66 100644 --- a/pandas/core/computation/scope.py +++ b/pandas/core/computation/scope.py @@ -15,9 +15,6 @@ from pandas._libs.tslibs import Timestamp from pandas.compat.chainmap import DeepChainMap -import pandas.core.computation as compu -from pandas.core.computation.common import StringMixin - def _ensure_scope( level, global_dict=None, local_dict=None, resolvers=(), target=None, **kwargs @@ -67,7 +64,8 @@ def _raw_hex_id(obj): def _get_pretty_string(obj): - """Return a prettier version of obj + """ + Return a prettier version of obj. Parameters ---------- @@ -84,9 +82,9 @@ def _get_pretty_string(obj): return sio.getvalue() -class Scope(StringMixin): - - """Object to hold scope, with a few bells to deal with some custom syntax +class Scope: + """ + Object to hold scope, with a few bells to deal with some custom syntax and contexts added by pandas. Parameters @@ -105,7 +103,7 @@ class Scope(StringMixin): temps : dict """ - __slots__ = "level", "scope", "target", "temps" + __slots__ = ["level", "scope", "target", "resolvers", "temps"] def __init__( self, level, global_dict=None, local_dict=None, resolvers=(), target=None @@ -163,7 +161,8 @@ def has_resolvers(self): return bool(len(self.resolvers)) def resolve(self, key, is_local): - """Resolve a variable name in a possibly local context + """ + Resolve a variable name in a possibly local context. Parameters ---------- @@ -198,10 +197,14 @@ def resolve(self, key, is_local): # e.g., df[df > 0] return self.temps[key] except KeyError: - raise compu.ops.UndefinedVariableError(key, is_local) + # runtime import because ops imports from scope + from pandas.core.computation.ops import UndefinedVariableError + + raise UndefinedVariableError(key, is_local) def swapkey(self, old_key, new_key, new_value=None): - """Replace a variable name, with a potentially new value. + """ + Replace a variable name, with a potentially new value. Parameters ---------- @@ -225,7 +228,8 @@ def swapkey(self, old_key, new_key, new_value=None): return def _get_vars(self, stack, scopes): - """Get specifically scoped variables from a list of stack frames. + """ + Get specifically scoped variables from a list of stack frames. Parameters ---------- @@ -247,7 +251,8 @@ def _get_vars(self, stack, scopes): del frame def update(self, level): - """Update the current scope by going back `level` levels. + """ + Update the current scope by going back `level` levels. Parameters ---------- @@ -266,7 +271,8 @@ def update(self, level): del stack[:], stack def add_tmp(self, value): - """Add a temporary variable to the scope. + """ + Add a temporary variable to the scope. Parameters ---------- @@ -297,7 +303,8 @@ def ntemps(self): @property def full_scope(self): - """Return the full scope for use with passing to engines transparently + """ + Return the full scope for use with passing to engines transparently as a mapping. 
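`Scope.resolve` above switches to a runtime import of `UndefinedVariableError` because, as the new comment notes, `ops` imports from `scope`; importing inside the function body breaks the cycle at module-load time. The pattern in isolation (a sketch of the shape, not the full method):

```python
def resolve(temps, key, is_local):
    try:
        return temps[key]
    except KeyError:
        # deferred import: a module-level import here would be circular
        from pandas.core.computation.ops import UndefinedVariableError

        raise UndefinedVariableError(key, is_local)
```

The import only executes on the failure path, so module loading stays acyclic while the error type remains the public one from `ops`.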
Returns diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index be6086dd360f2..08dce6aca6e6d 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -17,6 +17,7 @@ is_callable, is_instance_factory, is_int, + is_nonnegative_int, is_one_of_factory, is_text, ) @@ -319,7 +320,7 @@ def is_terminal(): with cf.config_prefix("display"): - cf.register_option("precision", 6, pc_precision_doc, validator=is_int) + cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int) cf.register_option( "float_format", None, @@ -333,12 +334,7 @@ def is_terminal(): pc_max_info_rows_doc, validator=is_instance_factory((int, type(None))), ) - cf.register_option( - "max_rows", - 60, - pc_max_rows_doc, - validator=is_instance_factory([type(None), int]), - ) + cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int) cf.register_option( "min_rows", 10, @@ -352,10 +348,7 @@ def is_terminal(): else: max_cols = 20 # cannot determine optimal number of columns cf.register_option( - "max_columns", - max_cols, - pc_max_cols_doc, - validator=is_instance_factory([type(None), int]), + "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int ) cf.register_option( "large_repr", diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 6f599a6be6021..056cd2222af3c 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -133,6 +133,8 @@ def _isna_new(obj): # hack (for now) because MI registers as ndarray elif isinstance(obj, ABCMultiIndex): raise NotImplementedError("isna is not defined for MultiIndex") + elif isinstance(obj, type): + return False elif isinstance( obj, ( @@ -171,6 +173,8 @@ def _isna_old(obj): # hack (for now) because MI registers as ndarray elif isinstance(obj, ABCMultiIndex): raise NotImplementedError("isna is not defined for MultiIndex") + elif isinstance(obj, type): + return False elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)): return _isna_ndarraylike_old(obj) elif isinstance(obj, ABCGeneric): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 02241eeaae7b2..16fece1c7eb8b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -86,12 +86,7 @@ from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.generic import NDFrame, _shared_docs -from pandas.core.index import ( - Index, - MultiIndex, - ensure_index, - ensure_index_from_sequences, -) +from pandas.core.index import Index, ensure_index, ensure_index_from_sequences from pandas.core.indexes import base as ibase from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.multi import maybe_droplevels @@ -108,6 +103,7 @@ sanitize_index, to_arrays, ) +from pandas.core.ops.missing import dispatch_fill_zeros from pandas.core.series import Series from pandas.io.formats import console, format as fmt @@ -669,15 +665,33 @@ def _repr_html_(self): if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") + min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") - return self.to_html( + formatter = fmt.DataFrameFormatter( + self, + columns=None, + col_space=None, + na_rep="NaN", + formatters=None, + float_format=None, + sparsify=None, + justify=None, + index_names=True, + header=True, + index=True, + bold_rows=True, + escape=True, max_rows=max_rows, + 
min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, - notebook=True, + decimal=".", + table_id=None, + render_links=False, ) + return formatter.to_html(notebook=True) else: return None @@ -770,12 +784,13 @@ def style(self): _shared_docs[ "items" ] = r""" - Iterator over (column name, Series) pairs. + Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. - %s + Yields + ------ label : object The column names for the DataFrame being iterated over. content : Series @@ -816,7 +831,7 @@ def style(self): Name: population, dtype: int64 """ - @Appender(_shared_docs["items"] % "Yields\n ------") + @Appender(_shared_docs["items"]) def items(self): if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: @@ -825,9 +840,9 @@ def items(self): for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) - @Appender(_shared_docs["items"] % "Returns\n -------") + @Appender(_shared_docs["items"]) def iteritems(self): - return self.items() + yield from self.items() def iterrows(self): """ @@ -845,8 +860,8 @@ def iterrows(self): See Also -------- - itertuples : Iterate over DataFrame rows as namedtuples of the values. - items : Iterate over (column name, Series) pairs. + DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. + DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- @@ -1189,7 +1204,7 @@ def to_numpy(self, dtype=None, copy=False): Parameters ---------- dtype : str or numpy.dtype, optional - The dtype to pass to :meth:`numpy.asarray` + The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that @@ -1729,7 +1744,7 @@ def to_records( if is_datetime64_any_dtype(self.index) and convert_datetime64: ix_vals = [self.index.to_pydatetime()] else: - if isinstance(self.index, MultiIndex): + if isinstance(self.index, ABCMultiIndex): # array of tuples to numpy cols. copy copy copy ix_vals = list(map(np.array, zip(*self.index.values))) else: @@ -1740,7 +1755,7 @@ def to_records( count = 0 index_names = list(self.index.names) - if isinstance(self.index, MultiIndex): + if isinstance(self.index, ABCMultiIndex): for i, n in enumerate(index_names): if n is None: index_names[i] = "level_%d" % count @@ -2863,7 +2878,7 @@ def __getitem__(self, key): # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) - if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): + if data.shape[1] == 1 and not isinstance(self.columns, ABCMultiIndex): data = data[key] return data @@ -3093,7 +3108,7 @@ def _ensure_valid_index(self, value): passed value. 
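For context on the ``min_rows`` wiring above, a minimal sketch of the user-facing option (assuming the pandas 0.25+ option names):

>>> import pandas as pd
>>> pd.set_option("display.max_rows", 10)
>>> pd.set_option("display.min_rows", 4)
>>> # once a frame exceeds max_rows, the truncated repr shows only min_rows (here 4) rows
>>> pd.DataFrame({"x": range(100)})  # doctest: +SKIP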
""" # GH5632, make sure that we are a Series convertible - if not len(self.index) and is_list_like(value): + if not len(self.index) and is_list_like(value) and len(value): try: value = Series(value) except (ValueError, NotImplementedError, TypeError): @@ -3447,15 +3462,14 @@ def _get_info_slice(obj, indexer): if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () - selection = tuple(map(frozenset, (include, exclude))) + selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation - include, exclude = map( - lambda x: frozenset(map(infer_dtype_from_object, x)), selection - ) + include = frozenset(infer_dtype_from_object(x) for x in include) + exclude = frozenset(infer_dtype_from_object(x) for x in exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) @@ -3653,7 +3667,7 @@ def reindexer(value): elif isinstance(value, DataFrame): # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame - if isinstance(self.columns, MultiIndex) and key in self.columns: + if isinstance(self.columns, ABCMultiIndex) and key in self.columns: loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): cols = maybe_droplevels(self.columns[loc], key) @@ -3702,7 +3716,7 @@ def reindexer(value): # broadcast across multiple columns if necessary if broadcast and key in self.columns and value.ndim == 1: - if not self.columns.is_unique or isinstance(self.columns, MultiIndex): + if not self.columns.is_unique or isinstance(self.columns, ABCMultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)) @@ -4597,7 +4611,7 @@ def _maybe_casted_values(index, labels=None): new_index = self.index.droplevel(level) if not drop: - if isinstance(self.index, MultiIndex): + if isinstance(self.index, ABCMultiIndex): names = [ n if n is not None else ("level_%d" % i) for (i, n) in enumerate(self.index.names) @@ -4608,7 +4622,7 @@ def _maybe_casted_values(index, labels=None): names = [default] if self.index.name is None else [self.index.name] to_insert = ((self.index, None),) - multi_col = isinstance(self.columns, MultiIndex) + multi_col = isinstance(self.columns, ABCMultiIndex) for i, (lev, lab) in reversed(list(enumerate(to_insert))): if not (level is None or i in level): continue @@ -4990,7 +5004,7 @@ def sort_index( level, ascending=ascending, sort_remaining=sort_remaining ) - elif isinstance(labels, MultiIndex): + elif isinstance(labels, ABCMultiIndex): from pandas.core.sorting import lexsort_indexer indexer = lexsort_indexer( @@ -5276,7 +5290,7 @@ def reorder_levels(self, order, axis=0): type of caller (new object) """ axis = self._get_axis_number(axis) - if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover + if not isinstance(self._get_axis(axis), ABCMultiIndex): # pragma: no cover raise TypeError("Can only reorder levels on a hierarchical axis.") result = self.copy() @@ -5294,25 +5308,34 @@ def _combine_frame(self, other, func, fill_value=None, level=None): this, other = self.align(other, join="outer", level=level, copy=False) new_index, new_columns = this.index, this.columns - def _arith_op(left, right): - # for the mixed_type case where we iterate over columns, - # _arith_op(left, right) is equivalent to - # left._binop(right, func, fill_value=fill_value) - left, right = 
ops.fill_binop(left, right, fill_value) - return func(left, right) + if fill_value is None: + # since _arith_op may be called in a loop, avoid function call + # overhead if possible by doing this check once + _arith_op = func + + else: + + def _arith_op(left, right): + # for the mixed_type case where we iterate over columns, + # _arith_op(left, right) is equivalent to + # left._binop(right, func, fill_value=fill_value) + left, right = ops.fill_binop(left, right, fill_value) + return func(left, right) if ops.should_series_dispatch(this, other, func): # iterate over columns return ops.dispatch_to_series(this, other, _arith_op) else: - result = _arith_op(this.values, other.values) + with np.errstate(all="ignore"): + result = _arith_op(this.values, other.values) + result = dispatch_fill_zeros(func, this.values, other.values, result) return self._constructor( result, index=new_index, columns=new_columns, copy=False ) def _combine_match_index(self, other, func, level=None): left, right = self.align(other, join="outer", axis=0, level=level, copy=False) - assert left.index.equals(right.index) + # at this point we have `left.index.equals(right.index)` if left._is_mixed_type or right._is_mixed_type: # operate column-wise; avoid costly object-casting in `.values` @@ -5325,14 +5348,13 @@ def _combine_match_index(self, other, func, level=None): new_data, index=left.index, columns=self.columns, copy=False ) - def _combine_match_columns(self, other, func, level=None): - assert isinstance(other, Series) + def _combine_match_columns(self, other: Series, func, level=None): left, right = self.align(other, join="outer", axis=1, level=level, copy=False) - assert left.columns.equals(right.index) + # at this point we have `left.columns.equals(right.index)` return ops.dispatch_to_series(left, right, func, axis="columns") def _combine_const(self, other, func): - assert lib.is_scalar(other) or np.ndim(other) == 0 + # scalar other or np.ndim(other) == 0 return ops.dispatch_to_series(self, other, func) def combine(self, other, func, fill_value=None, overwrite=True): @@ -6177,14 +6199,14 @@ def stack(self, level=-1, dropna=True): def explode(self, column: Union[str, Tuple]) -> "DataFrame": """ - Transform each element of a list-like to a row, replicating the - index values. + Transform each element of a list-like to a row, replicating index values. .. versionadded:: 0.25.0 Parameters ---------- column : str or tuple + Column to explode. Returns ------- @@ -6200,8 +6222,8 @@ def explode(self, column: Union[str, Tuple]) -> "DataFrame": See Also -------- DataFrame.unstack : Pivot a level of the (necessarily hierarchical) - index labels - DataFrame.melt : Unpivot a DataFrame from wide format to long format + index labels. + DataFrame.melt : Unpivot a DataFrame from wide format to long format. Series.explode : Explode a DataFrame from list-like columns to long format. 
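The ``fill_value`` branch above is exercised by flex arithmetic such as ``DataFrame.add``; a small sketch:

>>> import pandas as pd
>>> df1 = pd.DataFrame({"a": [1, 2]})
>>> df2 = pd.DataFrame({"a": [10, 20], "b": [1, 2]})
>>> df1.add(df2, fill_value=0)  # one-sided NaNs are filled before the op
      a    b
0  11.0  1.0
1  22.0  2.0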
Notes @@ -7772,7 +7794,7 @@ def _count_level(self, level, axis=0, numeric_only=False): count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) - if not isinstance(count_axis, MultiIndex): + if not isinstance(count_axis, ABCMultiIndex): raise TypeError( "Can only count levels on hierarchical " "{ax}.".format(ax=self._get_axis_name(axis)) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1b39f9225a0ed..1a5b36b07e93c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7,7 +7,17 @@ import pickle import re from textwrap import dedent -from typing import Callable, Dict, FrozenSet, List, Optional, Set +from typing import ( + Callable, + Dict, + FrozenSet, + Hashable, + List, + Optional, + Sequence, + Set, + Union, +) import warnings import weakref @@ -50,7 +60,7 @@ from pandas.core.dtypes.missing import isna, notna import pandas as pd -from pandas._typing import Dtype +from pandas._typing import Dtype, FilePathOrBuffer from pandas.core import missing, nanops import pandas.core.algorithms as algos from pandas.core.base import PandasObject, SelectionMixin @@ -122,6 +132,9 @@ def _single_replace(self, to_replace, method, inplace, limit): return result +bool_t = bool # Need alias because NDFrame has def bool: + + class NDFrame(PandasObject, SelectionMixin): """ N-dimensional analogue of DataFrame. Store multi-dimensional in a @@ -131,7 +144,7 @@ class NDFrame(PandasObject, SelectionMixin): ---------- data : BlockManager axes : list - copy : boolean, default False + copy : bool, default False """ _internal_names = [ @@ -280,7 +293,8 @@ def _setup_axes( ns=None, docs=None, ): - """Provide axes setup for the major PandasObjects. + """ + Provide axes setup for the major PandasObjects. Parameters ---------- @@ -288,8 +302,8 @@ def _setup_axes( info_axis_num : the axis of the selector dimension (int) stat_axis_num : the number of axis for the default stats (int) aliases : other names for a single axis (dict) - axes_are_reversed : boolean whether to treat passed axes as - reversed (DataFrame) + axes_are_reversed : bool + Whether to treat passed axes as reversed (DataFrame). build_axes : setup the axis properties (default True) """ @@ -676,7 +690,7 @@ def transpose(self, *args, **kwargs): Parameters ---------- args : %(args_transpose)s - copy : boolean, default False + copy : bool, default False Make a copy of the underlying data. Mixed-dtype data will always result in a copy **kwargs @@ -1874,7 +1888,7 @@ def __iter__(self): # can we get a better explanation of this? def keys(self): """ - Get the 'info axis' (see Indexing for more) + Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. @@ -2179,6 +2193,12 @@ def _repr_data_resource_(self): ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') + ExcelWriter can also be used to append to an existing Excel file: + + >>> with pd.ExcelWriter('output.xlsx', + ... mode='a') as writer: # doctest: +SKIP + ... df.to_excel(writer, sheet_name='Sheet_name_3') + To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): @@ -2251,10 +2271,10 @@ def to_json( Parameters ---------- - path_or_buf : string or file handle, optional + path_or_buf : str or file handle, optional File path or object. If not specified, the result is returned as a string. - orient : string + orient : str Indication of expected JSON string format. 
* Series @@ -2533,7 +2553,7 @@ def to_msgpack(self, path_or_buf=None, encoding="utf-8", **kwargs): def to_sql( self, - name, + name: str, con, schema=None, if_exists="fail", @@ -2551,12 +2571,12 @@ def to_sql( Parameters ---------- - name : string + name : str Name of SQL table. con : sqlalchemy.engine.Engine or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that library. Legacy support is provided for sqlite3.Connection objects. - schema : string, optional + schema : str, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' @@ -2569,18 +2589,19 @@ def to_sql( index : bool, default True Write DataFrame index as a column. Uses `index_label` as the column name in the table. - index_label : string or sequence, default None + index_label : str or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional - Rows will be written in batches of this size at a time. By default, - all rows will be written at once. - dtype : dict, optional - Specifying the datatype for columns. The keys should be the column - names and the values should be the SQLAlchemy types or strings for - the sqlite3 legacy mode. - method : {None, 'multi', callable}, default None + Specify the number of rows in each batch to be written at a time. + By default, all rows will be written at once. + dtype : dict or scalar, optional + Specifying the datatype for columns. If a dictionary is used, the + keys should be the column names and the values should be the + SQLAlchemy types or strings for the sqlite3 legacy mode. If a + scalar is provided, it will be applied to all columns. + method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). @@ -2993,10 +3014,15 @@ def to_latex( >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 
'weapon': ['sai', 'bo staff']}) - >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE - '\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon - \\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello & - purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n' + >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE + \begin{tabular}{lll} + \toprule + name & mask & weapon \\ + \midrule + Raphael & red & sai \\ + Donatello & purple & bo staff \\ + \bottomrule + \end{tabular} """ # Get defaults from the pandas config if self.ndim == 1: @@ -3039,26 +3065,26 @@ def to_latex( def to_csv( self, - path_or_buf=None, - sep=",", - na_rep="", - float_format=None, - columns=None, - header=True, - index=True, - index_label=None, - mode="w", - encoding=None, - compression="infer", - quoting=None, - quotechar='"', - line_terminator=None, - chunksize=None, - date_format=None, - doublequote=True, - escapechar=None, - decimal=".", - ): + path_or_buf: Optional[FilePathOrBuffer] = None, + sep: str = ",", + na_rep: str = "", + float_format: Optional[str] = None, + columns: Optional[Sequence[Hashable]] = None, + header: Union[bool_t, List[str]] = True, + index: bool_t = True, + index_label: Optional[Union[bool_t, str, Sequence[Hashable]]] = None, + mode: str = "w", + encoding: Optional[str] = None, + compression: Optional[Union[str, Dict[str, str]]] = "infer", + quoting: Optional[int] = None, + quotechar: str = '"', + line_terminator: Optional[str] = None, + chunksize: Optional[int] = None, + date_format: Optional[str] = None, + doublequote: bool_t = True, + escapechar: Optional[str] = None, + decimal: Optional[str] = ".", + ) -> Optional[str]: r""" Write object to a comma-separated values (csv) file. @@ -3105,16 +3131,21 @@ def to_csv( encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. - compression : str, default 'infer' - Compression mode among the following possible values: {'infer', - 'gzip', 'bz2', 'zip', 'xz', None}. If 'infer' and `path_or_buf` - is path-like, then detect compression from the following - extensions: '.gz', '.bz2', '.zip' or '.xz'. (otherwise no - compression). - - .. versionchanged:: 0.24.0 - - 'infer' option added and set to default. + compression : str or dict, default 'infer' + If str, represents compression mode. If dict, value at 'method' is + the compression mode. Compression mode may be any of the following + possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If + compression mode is 'infer' and `path_or_buf` is path-like, then + detect compression mode from the following extensions: '.gz', + '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given + and mode is 'zip' or inferred as 'zip', other entries passed as + additional compression options. + + .. versionchanged:: 0.25.0 + + May now be a dict with key 'method' as compression mode + and other entries as additional compression options if + compression mode is 'zip'. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` @@ -3159,6 +3190,13 @@ def to_csv( ... 'weapon': ['sai', 'bo staff']}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' + + # create 'out.zip' containing 'out.csv' + >>> compression_opts = dict(method='zip', + ... archive_name='out.csv') # doctest: +SKIP + + >>> df.to_csv('out.zip', index=False, + ... 
compression=compression_opts) # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() @@ -3192,6 +3230,8 @@ def to_csv( if path_or_buf is None: return formatter.path_or_buf.getvalue() + return None + # ---------------------------------------------------------------------- # Fancy Indexing @@ -3240,11 +3280,10 @@ def _maybe_update_cacher(self, clear=False, verify_is_copy=True): Parameters ---------- - clear : boolean, default False - clear the item cache - verify_is_copy : boolean, default True - provide is_copy checks - + clear : bool, default False + Clear the item cache. + verify_is_copy : bool, default True + Provide is_copy checks. """ cacher = getattr(self, "_cacher", None) @@ -3610,11 +3649,11 @@ def _check_setitem_copy(self, stacklevel=4, t="setting", force=False): Parameters ---------- - stacklevel : integer, default 4 + stacklevel : int, default 4 the level to show of the stack when the error is output - t : string, the type of setting error - force : boolean, default False - if True, then force showing an error + t : str, the type of setting error + force : bool, default False + If True, then force showing an error. validate if we are doing a setitem on a chained copy. @@ -3943,9 +3982,8 @@ def _update_inplace(self, result, verify_is_copy=True): Parameters ---------- - verify_is_copy : boolean, default True - provide is_copy checks - + verify_is_copy : bool, default True + Provide is_copy checks. """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. @@ -4560,9 +4598,9 @@ def filter(self, items=None, like=None, regex=None, axis=None): ---------- items : list-like Keep labels from axis which are in items. - like : string + like : str Keep labels from axis for which "like in label == True". - regex : string (regular expression) + regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, @@ -4781,7 +4819,7 @@ def sample( frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False - Sample with or without replacement. + Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index @@ -5222,8 +5260,8 @@ def _consolidate(self, inplace=False): Parameters ---------- - inplace : boolean, default False - If False return new object, otherwise modify existing object + inplace : bool, default False + If False return new object, otherwise modify existing object. Returns ------- @@ -5669,11 +5707,12 @@ def as_blocks(self, copy=True): Parameters ---------- - copy : boolean, default True + copy : bool, default True Returns ------- - values : a dict of dtype -> Constructor Types + dict + Mapping dtype -> Constructor Types. """ warnings.warn( "as_blocks is deprecated and will be removed in a future version", @@ -5982,17 +6021,17 @@ def _convert( Parameters ---------- - datetime : boolean, default False + datetime : bool, default False If True, convert to date where possible. - numeric : boolean, default False + numeric : bool, default False If True, attempt to convert to numbers (including strings), with unconvertible values becoming NaN. - timedelta : boolean, default False + timedelta : bool, default False If True, convert to timedelta where possible. 
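The new annotations above encode the long-standing ``to_csv`` contract: the CSV text is returned only when no target is given; a quick sketch:

>>> import pandas as pd
>>> df = pd.DataFrame({"a": [1, 2]})
>>> df.to_csv()  # no path_or_buf -> CSV returned as a string
',a\n0,1\n1,2\n'
>>> df.to_csv("out.csv") is None  # doctest: +SKIP
True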
- coerce : boolean, default False + coerce : bool, default False If True, force conversion with unconvertible values converted to - nulls (NaN or NaT) - copy : boolean, default True + nulls (NaN or NaT). + copy : bool, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. @@ -6631,11 +6670,7 @@ def replace( for k, v in items: keys, values = list(zip(*v.items())) or ([], []) - if set(keys) & set(values): - raise ValueError( - "Replacement not allowed with " - "overlapping keys and values" - ) + to_rep_dict[k] = list(keys) value_dict[k] = list(values) @@ -7859,7 +7894,7 @@ def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None): Parameters ---------- - freq : DateOffset object, or string + freq : DateOffset or str method : {'backfill'/'bfill', 'pad'/'ffill'}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): @@ -8660,7 +8695,7 @@ def ranker(data): level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level - copy : boolean, default True + copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN @@ -9452,7 +9487,7 @@ def truncate(self, before=None, after=None, axis=None, copy=True): Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. - copy : boolean, default is True, + copy : bool, default is True, Return a copy of the truncated section. Returns @@ -9596,13 +9631,13 @@ def tz_convert(self, tz, axis=0, level=None, copy=True): Parameters ---------- - tz : string or pytz.timezone object + tz : str or tzinfo object axis : the axis to convert level : int, str, default None - If axis ia a MultiIndex, convert a specific level. Otherwise - must be None - copy : boolean, default True - Also make a copy of the underlying data + If axis is a MultiIndex, convert a specific level. Otherwise + must be None. + copy : bool, default True + Also make a copy of the underlying data. Returns ------- @@ -9656,12 +9691,12 @@ def tz_localize( Parameters ---------- - tz : string or pytz.timezone object + tz : str or tzinfo axis : the axis to localize level : int, str, default None If axis is a MultiIndex, localize a specific level. Otherwise must be None - copy : boolean, default True + copy : bool, default True Also make a copy of the underlying data ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. @@ -10683,9 +10718,9 @@ def _add_series_or_dataframe_operations(cls): the doc strings again.
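A short sketch of the ``tz_localize``/``tz_convert`` pair whose docstrings are cleaned up above:

>>> import pandas as pd
>>> idx = pd.date_range("2019-01-01", periods=2, freq="H")
>>> idx.tz_localize("UTC").tz_convert("US/Eastern")  # doctest: +SKIP
DatetimeIndex(['2018-12-31 19:00:00-05:00', '2018-12-31 20:00:00-05:00'],
              dtype='datetime64[ns, US/Eastern]', freq='H')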
""" - from pandas.core import window as rwindow + from pandas.core.window import EWM, Expanding, Rolling, Window - @Appender(rwindow.rolling.__doc__) + @Appender(Rolling.__doc__) def rolling( self, window, @@ -10697,7 +10732,20 @@ def rolling( closed=None, ): axis = self._get_axis_number(axis) - return rwindow.rolling( + + if win_type is not None: + return Window( + self, + window=window, + min_periods=min_periods, + center=center, + win_type=win_type, + on=on, + axis=axis, + closed=closed, + ) + + return Rolling( self, window=window, min_periods=min_periods, @@ -10710,16 +10758,14 @@ def rolling( cls.rolling = rolling - @Appender(rwindow.expanding.__doc__) + @Appender(Expanding.__doc__) def expanding(self, min_periods=1, center=False, axis=0): axis = self._get_axis_number(axis) - return rwindow.expanding( - self, min_periods=min_periods, center=center, axis=axis - ) + return Expanding(self, min_periods=min_periods, center=center, axis=axis) cls.expanding = expanding - @Appender(rwindow.ewm.__doc__) + @Appender(EWM.__doc__) def ewm( self, com=None, @@ -10732,7 +10778,7 @@ def ewm( axis=0, ): axis = self._get_axis_number(axis) - return rwindow.ewm( + return EWM( self, com=com, span=span, @@ -10987,7 +11033,7 @@ def _doc_parms(cls): ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The index or the name of the axis. 0 is equivalent to None or 'index'. -skipna : boolean, default True +skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. *args, **kwargs : diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 2ad85903b916b..5e463d50d43d6 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -21,7 +21,11 @@ from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, Substitution -from pandas.core.dtypes.cast import maybe_convert_objects, maybe_downcast_to_dtype +from pandas.core.dtypes.cast import ( + maybe_convert_objects, + maybe_downcast_numeric, + maybe_downcast_to_dtype, +) from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, @@ -180,10 +184,8 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1): continue finally: if result is not no_result: - dtype = block.values.dtype - # see if we can cast the block back to the original dtype - result = block._try_coerce_and_cast_result(result, dtype=dtype) + result = maybe_downcast_numeric(result, block.dtype) newb = block.make_block(result) new_items.append(locs) @@ -240,15 +242,18 @@ def aggregate(self, func, *args, **kwargs): # grouper specific aggregations if self.grouper.nkeys > 1: return self._python_agg_general(func, *args, **kwargs) + elif args or kwargs: + result = self._aggregate_generic(func, *args, **kwargs) else: # try to treat as if we are passing a list try: - assert not args and not kwargs result = self._aggregate_multiple_funcs( [func], _level=_level, _axis=self.axis ) - + except Exception: + result = self._aggregate_generic(func) + else: result.columns = Index( result.columns.levels[0], name=self._selected_obj.columns.name ) @@ -258,15 +263,15 @@ def aggregate(self, func, *args, **kwargs): # values. concat no longer converts DataFrame[Sparse] # to SparseDataFrame, so we do it here. 
result = SparseDataFrame(result._data) - except Exception: - result = self._aggregate_generic(func, *args, **kwargs) if not self.as_index: self._insert_inaxis_grouper_inplace(result) result.index = np.arange(len(result)) if relabeling: - result = result[order] + + # used reordered index of columns + result = result.iloc[:, order] result.columns = columns return result._convert(datetime=True) @@ -309,10 +314,10 @@ def _aggregate_item_by_item(self, func, *args, **kwargs): cannot_agg = [] errors = None for item in obj: - try: - data = obj[item] - colg = SeriesGroupBy(data, selection=item, grouper=self.grouper) + data = obj[item] + colg = SeriesGroupBy(data, selection=item, grouper=self.grouper) + try: cast = self._transform_should_cast(func) result[item] = colg.aggregate(func, *args, **kwargs) @@ -680,7 +685,7 @@ def _transform_item_by_item(self, obj, wrapper): return DataFrame(output, index=obj.index, columns=columns) - def filter(self, func, dropna=True, *args, **kwargs): # noqa + def filter(self, func, dropna=True, *args, **kwargs): """ Return a copy of a DataFrame excluding elements from groups that do not satisfy the boolean criterion specified by func. @@ -831,45 +836,45 @@ def apply(self, func, *args, **kwargs): axis="", ) @Appender(_shared_docs["aggregate"]) - def aggregate(self, func_or_funcs=None, *args, **kwargs): + def aggregate(self, func=None, *args, **kwargs): _level = kwargs.pop("_level", None) - relabeling = func_or_funcs is None + relabeling = func is None columns = None - no_arg_message = "Must provide 'func_or_funcs' or named aggregation **kwargs." + no_arg_message = "Must provide 'func' or named aggregation **kwargs." if relabeling: columns = list(kwargs) if not PY36: # sort for 3.5 and earlier columns = list(sorted(columns)) - func_or_funcs = [kwargs[col] for col in columns] + func = [kwargs[col] for col in columns] kwargs = {} if not columns: raise TypeError(no_arg_message) - if isinstance(func_or_funcs, str): - return getattr(self, func_or_funcs)(*args, **kwargs) + if isinstance(func, str): + return getattr(self, func)(*args, **kwargs) - if isinstance(func_or_funcs, abc.Iterable): + if isinstance(func, abc.Iterable): # Catch instances of lists / tuples # but not the class list / tuple itself. 
- func_or_funcs = _maybe_mangle_lambdas(func_or_funcs) - ret = self._aggregate_multiple_funcs(func_or_funcs, (_level or 0) + 1) + func = _maybe_mangle_lambdas(func) + ret = self._aggregate_multiple_funcs(func, (_level or 0) + 1) if relabeling: ret.columns = columns else: - cyfunc = self._get_cython_func(func_or_funcs) + cyfunc = self._get_cython_func(func) if cyfunc and not args and not kwargs: return getattr(self, cyfunc)() if self.grouper.nkeys > 1: - return self._python_agg_general(func_or_funcs, *args, **kwargs) + return self._python_agg_general(func, *args, **kwargs) try: - return self._python_agg_general(func_or_funcs, *args, **kwargs) + return self._python_agg_general(func, *args, **kwargs) except Exception: - result = self._aggregate_named(func_or_funcs, *args, **kwargs) + result = self._aggregate_named(func, *args, **kwargs) index = Index(sorted(result), name=self.grouper.names[0]) ret = Series(result, index=index) @@ -1462,8 +1467,8 @@ class DataFrameGroupBy(NDFrameGroupBy): axis="", ) @Appender(_shared_docs["aggregate"]) - def aggregate(self, arg=None, *args, **kwargs): - return super().aggregate(arg, *args, **kwargs) + def aggregate(self, func=None, *args, **kwargs): + return super().aggregate(func, *args, **kwargs) agg = aggregate @@ -1729,8 +1734,8 @@ def _normalize_keyword_aggregation(kwargs): The transformed kwargs. columns : List[str] The user-provided keys. - order : List[Tuple[str, str]] - Pairs of the input and output column names. + col_idx_order : List[int] + List of columns indices. Examples -------- @@ -1757,7 +1762,39 @@ def _normalize_keyword_aggregation(kwargs): else: aggspec[column] = [aggfunc] order.append((column, com.get_callable_name(aggfunc) or aggfunc)) - return aggspec, columns, order + + # uniquify aggfunc name if duplicated in order list + uniquified_order = _make_unique(order) + + # GH 25719, due to aggspec will change the order of assigned columns in aggregation + # uniquified_aggspec will store uniquified order list and will compare it with order + # based on index + aggspec_order = [ + (column, com.get_callable_name(aggfunc) or aggfunc) + for column, aggfuncs in aggspec.items() + for aggfunc in aggfuncs + ] + uniquified_aggspec = _make_unique(aggspec_order) + + # get the new indice of columns by comparison + col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order) + return aggspec, columns, col_idx_order + + +def _make_unique(seq): + """Uniquify aggfunc name of the pairs in the order list + + Examples: + -------- + >>> _make_unique([('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]) + [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')] + """ + return [ + (pair[0], "_".join([pair[1], str(seq[:i].count(pair))])) + if seq.count(pair) > 1 + else pair + for i, pair in enumerate(seq) + ] # TODO: Can't use, because mypy doesn't like us setting __name__ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index c5e81e21e9fd5..6deef16bdec13 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -653,7 +653,8 @@ def curried(x): # mark this column as an error try: return self._aggregate_item_by_item(name, *args, **kwargs) - except (AttributeError): + except AttributeError: + # e.g. SparseArray has no flags attr raise ValueError return wrapper @@ -1011,7 +1012,6 @@ def _apply_filter(self, indices, dropna): class GroupBy(_GroupBy): - """ Class for grouping and aggregating relational data. 
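The column reordering and ``_make_unique`` steps above matter when named aggregation reuses one column with several lambdas; a hedged sketch of the intended result:

>>> import pandas as pd
>>> df = pd.DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]})
>>> df.groupby("A").agg(b_min=("B", lambda x: x.min()),
...                     b_max=("B", lambda x: x.max()))  # doctest: +SKIP
   b_min  b_max
A
0      1      2
1      3      3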
@@ -1773,7 +1773,11 @@ def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFra if not self.as_index: return out - out.index = self.grouper.result_index[ids[mask]] + result_index = self.grouper.result_index + out.index = result_index[ids[mask]] + + if not self.observed and isinstance(result_index, CategoricalIndex): + out = out.reindex(result_index) return out.sort_index() if self.sort else out @@ -1870,6 +1874,7 @@ def quantile(self, q=0.5, interpolation="linear"): a 2.0 b 3.0 """ + from pandas import concat def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): @@ -1897,18 +1902,57 @@ def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray: return vals - return self._get_cythonized_result( - "group_quantile", - self.grouper, - aggregate=True, - needs_values=True, - needs_mask=True, - cython_dtype=np.float64, - pre_processing=pre_processor, - post_processing=post_processor, - q=q, - interpolation=interpolation, - ) + if is_scalar(q): + return self._get_cythonized_result( + "group_quantile", + self.grouper, + aggregate=True, + needs_values=True, + needs_mask=True, + cython_dtype=np.float64, + pre_processing=pre_processor, + post_processing=post_processor, + q=q, + interpolation=interpolation, + ) + else: + results = [ + self._get_cythonized_result( + "group_quantile", + self.grouper, + aggregate=True, + needs_values=True, + needs_mask=True, + cython_dtype=np.float64, + pre_processing=pre_processor, + post_processing=post_processor, + q=qi, + interpolation=interpolation, + ) + for qi in q + ] + result = concat(results, axis=0, keys=q) + # fix levels to place quantiles on the inside + # TODO(GH-10710): Ideally, we could write this as + # >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :] + # but this hits https://github.com/pandas-dev/pandas/issues/10710 + # which doesn't reorder the list-like `q` on the inner level. + order = np.roll(list(range(result.index.nlevels)), -1) + result = result.reorder_levels(order) + result = result.reindex(q, level=-1) + + # fix order. + hi = len(q) * self.ngroups + arr = np.arange(0, hi, self.ngroups) + arrays = [] + + for i in range(self.ngroups): + arr2 = arr + i + arrays.append(arr2) + + indices = np.concatenate(arrays) + assert len(indices) == len(result) + return result.take(indices) @Substitution(name="groupby") def ngroup(self, ascending=True): @@ -2326,8 +2370,9 @@ def head(self, n=5): """ Return first n rows of each group. - Essentially equivalent to ``.apply(lambda x: x.head(n))``, - except ignores as_index flag. + Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows + from the original DataFrame with original index and order preserved + (``as_index`` flag is ignored). Returns ------- @@ -2338,10 +2383,6 @@ def head(self, n=5): >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], ... columns=['A', 'B']) - >>> df.groupby('A', as_index=False).head(1) - A B - 0 1 2 - 2 5 6 >>> df.groupby('A').head(1) A B 0 1 2 @@ -2357,8 +2398,9 @@ def tail(self, n=5): """ Return last n rows of each group. - Essentially equivalent to ``.apply(lambda x: x.tail(n))``, - except ignores as_index flag. + Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows + from the original DataFrame with original index and order preserved + (``as_index`` flag is ignored). 
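After the hunk above, ``GroupBy.quantile`` also accepts a list of quantiles, placing them on the innermost index level; a sketch of the intended output:

>>> import pandas as pd
>>> df = pd.DataFrame({"a": [1, 1, 2], "b": [1.0, 3.0, 4.0]})
>>> df.groupby("a")["b"].quantile([0.25, 0.75])  # doctest: +SKIP
a
1  0.25    1.5
   0.75    2.5
2  0.25    4.0
   0.75    4.0
Name: b, dtype: float64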
Returns ------- @@ -2373,10 +2415,6 @@ def tail(self, n=5): A B 1 a 2 3 b 2 - >>> df.groupby('A').head(1) - A B - 0 a 1 - 2 b 1 """ self._reset_group_selection() mask = self._cumcount_array(ascending=False) < n diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 143755a47b97b..31623171e9e63 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -37,7 +37,7 @@ class Grouper: """ A Grouper allows the user to specify a groupby instruction for a target - object + object. This specification will select a column via the key parameter, or if the level and/or axis parameters are given, a level of the index of the target @@ -217,7 +217,6 @@ def __repr__(self): class Grouping: - """ Holds the grouping information for a single key diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 676f243c9c8d3..7afb0a28f943e 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -12,7 +12,7 @@ from pandas._libs import NaT, iNaT, lib import pandas._libs.groupby as libgroupby -import pandas._libs.reduction as reduction +import pandas._libs.reduction as libreduction from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly @@ -207,7 +207,7 @@ def apply(self, f, data, axis=0): if len(result_values) == len(group_keys): return group_keys, result_values, mutated - except reduction.InvalidApply: + except libreduction.InvalidApply: # Cannot fast apply on MultiIndex (_has_complex_internals). # This Exception is also raised if `f` triggers an exception # but it is preferable to raise the exception in Python. @@ -591,6 +591,8 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs): if is_datetime64tz_dtype(orig_values.dtype): result = type(orig_values)(result.astype(np.int64), dtype=orig_values.dtype) + elif is_datetimelike and kind == "aggregate": + result = result.astype(orig_values.dtype) return result, names @@ -676,7 +678,7 @@ def _aggregate_series_fast(self, obj, func): indexer = get_group_index_sorter(group_index, ngroups) obj = obj.take(indexer) group_index = algorithms.take_nd(group_index, indexer, allow_fill=False) - grouper = reduction.SeriesGrouper(obj, func, group_index, ngroups, dummy) + grouper = libreduction.SeriesGrouper(obj, func, group_index, ngroups, dummy) result, counts = grouper.get_result() return result, counts @@ -704,7 +706,6 @@ def _aggregate_series_pure_python(self, obj, func): class BinGrouper(BaseGrouper): - """ This is an internal Grouper class @@ -850,7 +851,7 @@ def groupings(self): def agg_series(self, obj, func): dummy = obj[:0] - grouper = reduction.SeriesBinGrouper(obj, func, self.bins, dummy) + grouper = libreduction.SeriesBinGrouper(obj, func, self.bins, dummy) return grouper.get_result() @@ -938,7 +939,7 @@ def fast_apply(self, f, names): return [], True sdata = self._get_sorted_data() - return reduction.apply_frame_axis0(sdata, f, names, starts, ends) + return libreduction.apply_frame_axis0(sdata, f, names, starts, ends) def _chop(self, sdata, slice_obj): if self.axis == 0: diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py index 70c48e969172f..433bca940c028 100644 --- a/pandas/core/indexers.py +++ b/pandas/core/indexers.py @@ -226,6 +226,7 @@ def length_of_indexer(indexer, target=None) -> int: if step is None: step = 1 elif step < 0: + start, stop = stop + 1, start + 1 step = -step return (stop - start + step - 1) // step elif isinstance(indexer, (ABCSeries, ABCIndexClass, np.ndarray, list)): diff --git 
a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2271ff643bc15..2dbd592fc6787 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1,4 +1,4 @@ -from datetime import datetime, timedelta +from datetime import datetime import operator from textwrap import dedent from typing import Union @@ -10,6 +10,7 @@ import pandas._libs.join as libjoin from pandas._libs.lib import is_datetime_array from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp +from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import tz_compare from pandas.compat import set_function_name from pandas.compat.numpy import function as nv @@ -48,8 +49,8 @@ ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( + ABCCategorical, ABCDataFrame, - ABCDateOffset, ABCDatetimeArray, ABCDatetimeIndex, ABCIndexClass, @@ -70,7 +71,8 @@ from pandas.core.indexers import maybe_convert_indices from pandas.core.indexes.frozen import FrozenList import pandas.core.missing as missing -from pandas.core.ops import get_op_result_name, make_invalid_op +from pandas.core.ops import get_op_result_name +from pandas.core.ops.invalid import make_invalid_op import pandas.core.sorting as sorting from pandas.core.strings import StringMethods @@ -98,30 +100,27 @@ def _make_comparison_op(op, cls): def cmp_method(self, other): - if isinstance(other, (np.ndarray, Index, ABCSeries)): + if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)): if other.ndim > 0 and len(self) != len(other): raise ValueError("Lengths must match to compare") - if is_object_dtype(self) and not isinstance(self, ABCMultiIndex): + if is_object_dtype(self) and isinstance(other, ABCCategorical): + left = type(other)(self._values, dtype=other.dtype) + return op(left, other) + elif is_object_dtype(self) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): - result = ops._comp_method_OBJECT_ARRAY(op, self.values, other) + result = ops.comp_method_OBJECT_ARRAY(op, self.values, other) else: with np.errstate(all="ignore"): result = op(self.values, np.asarray(other)) - # technically we could support bool dtyped Index - # for now just return the indexing array directly if is_bool_dtype(result): return result - try: - return Index(result) - except TypeError: - return result + return ops.invalid_comparison(self, other, op) name = "__{name}__".format(name=op.__name__) - # TODO: docstring? return set_function_name(cmp_method, name, cls) @@ -264,7 +263,13 @@ def __new__( fastpath=None, tupleize_cols=True, **kwargs - ): + ) -> "Index": + + from .range import RangeIndex + from pandas import PeriodIndex, DatetimeIndex, TimedeltaIndex + from .numeric import Float64Index, Int64Index, UInt64Index + from .interval import IntervalIndex + from .category import CategoricalIndex if name is None and hasattr(data, "name"): name = data.name @@ -279,8 +284,6 @@ def __new__( if fastpath: return cls._simple_new(data, name) - from .range import RangeIndex - if isinstance(data, ABCPandasArray): # ensure users don't accidentally put a PandasArray in an index. 
data = data.to_numpy() @@ -293,72 +296,53 @@ def __new__( # categorical elif is_categorical_dtype(data) or is_categorical_dtype(dtype): - from .category import CategoricalIndex - return CategoricalIndex(data, dtype=dtype, copy=copy, name=name, **kwargs) # interval elif ( is_interval_dtype(data) or is_interval_dtype(dtype) ) and not is_object_dtype(dtype): - from .interval import IntervalIndex - closed = kwargs.get("closed", None) return IntervalIndex(data, dtype=dtype, name=name, copy=copy, closed=closed) elif ( is_datetime64_any_dtype(data) - or (dtype is not None and is_datetime64_any_dtype(dtype)) + or is_datetime64_any_dtype(dtype) or "tz" in kwargs ): - from pandas import DatetimeIndex - - if dtype is not None and is_dtype_equal(_o_dtype, dtype): + if is_dtype_equal(_o_dtype, dtype): # GH#23524 passing `dtype=object` to DatetimeIndex is invalid, # will raise in the where `data` is already tz-aware. So # we leave it out of this step and cast to object-dtype after # the DatetimeIndex construction. # Note we can pass copy=False because the .astype below # will always make a copy - result = DatetimeIndex(data, copy=False, name=name, **kwargs) + result = DatetimeIndex( + data, copy=False, name=name, **kwargs + ) # type: "Index" return result.astype(object) else: - result = DatetimeIndex( - data, copy=copy, name=name, dtype=dtype, **kwargs - ) - return result + return DatetimeIndex(data, copy=copy, name=name, dtype=dtype, **kwargs) - elif is_timedelta64_dtype(data) or ( - dtype is not None and is_timedelta64_dtype(dtype) - ): - from pandas import TimedeltaIndex - - if dtype is not None and is_dtype_equal(_o_dtype, dtype): + elif is_timedelta64_dtype(data) or is_timedelta64_dtype(dtype): + if is_dtype_equal(_o_dtype, dtype): # Note we can pass copy=False because the .astype below # will always make a copy result = TimedeltaIndex(data, copy=False, name=name, **kwargs) return result.astype(object) else: - result = TimedeltaIndex( - data, copy=copy, name=name, dtype=dtype, **kwargs - ) - return result + return TimedeltaIndex(data, copy=copy, name=name, dtype=dtype, **kwargs) elif is_period_dtype(data) and not is_object_dtype(dtype): - from pandas import PeriodIndex - - result = PeriodIndex(data, copy=copy, name=name, **kwargs) - return result + return PeriodIndex(data, copy=copy, name=name, **kwargs) # extension dtype elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype): data = np.asarray(data) if not (dtype is None or is_object_dtype(dtype)): - # coerce to the provided dtype - data = dtype.construct_array_type()._from_sequence( - data, dtype=dtype, copy=False - ) + ea_cls = dtype.construct_array_type() + data = ea_cls._from_sequence(data, dtype=dtype, copy=False) # coerce to the object dtype data = data.astype(object) @@ -367,73 +351,53 @@ def __new__( # index-like elif isinstance(data, (np.ndarray, Index, ABCSeries)): if dtype is not None: - try: - - # we need to avoid having numpy coerce - # things that look like ints/floats to ints unless - # they are actually ints, e.g. 
'0' and 0.0 - # should not be coerced - # GH 11836 - if is_integer_dtype(dtype): - inferred = lib.infer_dtype(data, skipna=False) - if inferred == "integer": - data = maybe_cast_to_integer_array(data, dtype, copy=copy) - elif inferred in ["floating", "mixed-integer-float"]: - if isna(data).any(): - raise ValueError("cannot convert float NaN to integer") - - if inferred == "mixed-integer-float": - data = maybe_cast_to_integer_array(data, dtype) - - # If we are actually all equal to integers, - # then coerce to integer. - try: - return cls._try_convert_to_int_index( - data, copy, name, dtype - ) - except ValueError: - pass - - # Return an actual float index. - from .numeric import Float64Index - - return Float64Index(data, copy=copy, dtype=dtype, name=name) - - elif inferred == "string": - pass - else: - data = data.astype(dtype) - elif is_float_dtype(dtype): - inferred = lib.infer_dtype(data, skipna=False) - if inferred == "string": + # we need to avoid having numpy coerce + # things that look like ints/floats to ints unless + # they are actually ints, e.g. '0' and 0.0 + # should not be coerced + # GH 11836 + if is_integer_dtype(dtype): + inferred = lib.infer_dtype(data, skipna=False) + if inferred == "integer": + data = maybe_cast_to_integer_array(data, dtype, copy=copy) + elif inferred in ["floating", "mixed-integer-float"]: + if isna(data).any(): + raise ValueError("cannot convert float NaN to integer") + + if inferred == "mixed-integer-float": + data = maybe_cast_to_integer_array(data, dtype) + + # If we are actually all equal to integers, + # then coerce to integer. + try: + return cls._try_convert_to_int_index( + data, copy, name, dtype + ) + except ValueError: pass - else: - data = data.astype(dtype) + + # Return an actual float index. + return Float64Index(data, copy=copy, dtype=dtype, name=name) + + elif inferred == "string": + pass else: - data = np.array(data, dtype=dtype, copy=copy) - - except (TypeError, ValueError) as e: - msg = str(e) - if ( - "cannot convert float" in msg - or "Trying to coerce float values to integer" in msg - ): - raise + data = data.astype(dtype) + elif is_float_dtype(dtype): + inferred = lib.infer_dtype(data, skipna=False) + if inferred == "string": + pass + else: + data = data.astype(dtype) + else: + data = np.array(data, dtype=dtype, copy=copy) # maybe coerce to a sub-class - from pandas.core.indexes.period import PeriodIndex, IncompatibleFrequency - if is_signed_integer_dtype(data.dtype): - from .numeric import Int64Index - return Int64Index(data, copy=copy, dtype=dtype, name=name) elif is_unsigned_integer_dtype(data.dtype): - from .numeric import UInt64Index - return UInt64Index(data, copy=copy, dtype=dtype, name=name) elif is_float_dtype(data.dtype): - from .numeric import Float64Index - return Float64Index(data, copy=copy, dtype=dtype, name=name) elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data): subarr = data.astype("object") @@ -456,12 +420,8 @@ def __new__( return Index(subarr, copy=copy, dtype=object, name=name) elif inferred in ["floating", "mixed-integer-float", "integer-na"]: # TODO: Returns IntegerArray for integer-na case in the future - from .numeric import Float64Index - return Float64Index(subarr, copy=copy, name=name) elif inferred == "interval": - from .interval import IntervalIndex - try: return IntervalIndex(subarr, name=name, copy=copy) except ValueError: @@ -472,8 +432,6 @@ def __new__( pass elif inferred != "string": if inferred.startswith("datetime"): - from pandas import DatetimeIndex - try: return 
DatetimeIndex(subarr, copy=copy, name=name, **kwargs) except (ValueError, OutOfBoundsDatetime): @@ -483,8 +441,6 @@ def __new__( pass elif inferred.startswith("timedelta"): - from pandas import TimedeltaIndex - return TimedeltaIndex(subarr, copy=copy, name=name, **kwargs) elif inferred == "period": try: @@ -554,16 +510,6 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs): Must be careful not to recurse. """ - if not hasattr(values, "dtype"): - if (values is None or not len(values)) and dtype is not None: - values = np.empty(0, dtype=dtype) - else: - values = np.array(values, copy=False) - if is_object_dtype(values): - values = cls( - values, name=name, dtype=dtype, **kwargs - )._ndarray_values - if isinstance(values, (ABCSeries, ABCIndexClass)): # Index._data must always be an ndarray. # This is no-copy for when _values is an ndarray, @@ -690,7 +636,11 @@ def _cleanup(self): @cache_readonly def _engine(self): # property, for now, slow to look up - return self._engine_type(lambda: self._ndarray_values, len(self)) + + # to avoid a reference cycle, bind `_ndarray_values` to a local variable, so + # `self` is not passed into the lambda. + _ndarray_values = self._ndarray_values + return self._engine_type(lambda: _ndarray_values, len(self)) # -------------------------------------------------------------------- # Array-Like Methods @@ -717,7 +667,6 @@ def __array_wrap__(self, result, context=None): return result attrs = self._get_attributes_dict() - attrs = self._maybe_update_attributes(attrs) return Index(result, **attrs) @cache_readonly @@ -1857,8 +1806,6 @@ def inferred_type(self): @cache_readonly def is_all_dates(self): - if self._data is None: - return False return is_datetime_array(ensure_object(self.values)) # -------------------------------------------------------------------- @@ -2045,7 +1992,7 @@ def notna(self): _index_shared_docs[ "fillna" ] = """ - Fill NA/NaN values with the specified value + Fill NA/NaN values with the specified value. Parameters ---------- @@ -2076,7 +2023,7 @@ def fillna(self, value=None, downcast=None): _index_shared_docs[ "dropna" ] = """ - Return Index without NA/NaN values + Return Index without NA/NaN values. Parameters ---------- @@ -2350,7 +2297,10 @@ def __sub__(self, other): return Index(np.array(self) - other) def __rsub__(self, other): - return Index(other - np.array(self)) + # wrap Series to ensure we pin name correctly + from pandas import Series + + return Index(other - Series(self)) def __and__(self, other): return self.intersection(other) @@ -3129,13 +3079,9 @@ def _convert_scalar_indexer(self, key, kind=None): """ @Appender(_index_shared_docs["_convert_slice_indexer"]) - def _convert_slice_indexer(self, key, kind=None): + def _convert_slice_indexer(self, key: slice, kind=None): assert kind in ["ix", "loc", "getitem", "iloc", None] - # if we are not a slice, then we are done - if not isinstance(key, slice): - return key - # validate iloc if kind == "iloc": return slice( @@ -4713,7 +4659,7 @@ def get_value(self, series, key): raise try: - return libindex.get_value_box(s, key) + return libindex.get_value_at(s, key) except IndexError: raise except TypeError: @@ -5363,67 +5309,6 @@ def _add_numeric_methods_disabled(cls): cls.__abs__ = make_invalid_op("__abs__") cls.__inv__ = make_invalid_op("__inv__") - def _maybe_update_attributes(self, attrs): - """ - Update Index attributes (e.g. freq) depending on op. 
- """ - return attrs - - def _validate_for_numeric_unaryop(self, op, opstr): - """ - Validate if we can perform a numeric unary operation. - """ - if not self._is_numeric_dtype: - raise TypeError( - "cannot evaluate a numeric op " - "{opstr} for type: {typ}".format(opstr=opstr, typ=type(self).__name__) - ) - - def _validate_for_numeric_binop(self, other, op): - """ - Return valid other; evaluate or raise TypeError if we are not of - the appropriate type. - - Notes - ----- - This is an internal method called by ops. - """ - opstr = "__{opname}__".format(opname=op.__name__) - # if we are an inheritor of numeric, - # but not actually numeric (e.g. DatetimeIndex/PeriodIndex) - if not self._is_numeric_dtype: - raise TypeError( - "cannot evaluate a numeric op {opstr} " - "for type: {typ}".format(opstr=opstr, typ=type(self).__name__) - ) - - if isinstance(other, Index): - if not other._is_numeric_dtype: - raise TypeError( - "cannot evaluate a numeric op " - "{opstr} with type: {typ}".format(opstr=opstr, typ=type(other)) - ) - elif isinstance(other, np.ndarray) and not other.ndim: - other = other.item() - - if isinstance(other, (Index, ABCSeries, np.ndarray)): - if len(self) != len(other): - raise ValueError("cannot evaluate a numeric op with unequal lengths") - other = com.values_from_object(other) - if other.dtype.kind not in ["f", "i", "u"]: - raise TypeError("cannot evaluate a numeric op with a non-numeric dtype") - elif isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)): - # higher up to handle - pass - elif isinstance(other, (datetime, np.datetime64)): - # higher up to handle - pass - else: - if not (is_float(other) or is_integer(other)): - raise TypeError("can only perform ops with scalar values") - - return other - @classmethod def _add_numeric_methods_binary(cls): """ @@ -5456,9 +5341,7 @@ def _add_numeric_methods_unary(cls): def _make_evaluate_unary(op, opstr): def _evaluate_numeric_unary(self): - self._validate_for_numeric_unaryop(op, opstr) attrs = self._get_attributes_dict() - attrs = self._maybe_update_attributes(attrs) return Index(op(self.values), **attrs) _evaluate_numeric_unary.__name__ = opstr @@ -5599,7 +5482,10 @@ def shape(self): """ Return a tuple of the shape of the underlying data. """ - return (len(self),) + # not using "(len(self), )" to return "correct" shape if the values + # consists of a >1 D array (see GH-27775) + # overridden in MultiIndex.shape to avoid materializing the values + return self._values.shape Index._add_numeric_methods_disabled() diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 0f6aa711adc90..82806c7351db6 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -446,9 +446,11 @@ def argsort(self, *args, **kwargs): @cache_readonly def _engine(self): - - # we are going to look things up with the codes themselves - return self._engine_type(lambda: self.codes, len(self)) + # we are going to look things up with the codes themselves. + # To avoid a reference cycle, bind `codes` to a local variable, so + # `self` is not passed into the lambda. 
+ codes = self.codes + return self._engine_type(lambda: codes, len(self)) # introspection @cache_readonly @@ -899,31 +901,12 @@ def _make_compare(op): opname = "__{op}__".format(op=op.__name__) def _evaluate_compare(self, other): - - # if we have a Categorical type, then must have the same - # categories - if isinstance(other, CategoricalIndex): - other = other._values - elif isinstance(other, Index): - other = self._create_categorical(other._values, dtype=self.dtype) - - if isinstance(other, (ABCCategorical, np.ndarray, ABCSeries)): - if len(self.values) != len(other): - raise ValueError("Lengths must match to compare") - - if isinstance(other, ABCCategorical): - if not self.values.is_dtype_equal(other): - raise TypeError( - "categorical index comparisons must " - "have the same categories and ordered " - "attributes" - ) - - result = op(self.values, other) + with np.errstate(all="ignore"): + result = op(self.array, other) if isinstance(result, ABCSeries): # Dispatch to pd.Categorical returned NotImplemented # and we got a Series back; down-cast to ndarray - result = result.values + result = result._values return result return compat.set_function_name(_evaluate_compare, opname, cls) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index af99c7a2754e5..c7664d9777c71 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -15,6 +15,7 @@ from pandas.core.dtypes.common import ( ensure_int64, + is_bool_dtype, is_dtype_equal, is_float, is_integer, @@ -163,6 +164,20 @@ def values(self): def asi8(self): return self._data.asi8 + def __array_wrap__(self, result, context=None): + """ + Gets called after a ufunc. + """ + result = lib.item_from_zerodim(result) + if is_bool_dtype(result) or lib.is_scalar(result): + return result + + attrs = self._get_attributes_dict() + if not is_period_dtype(self) and attrs["freq"]: + # no need to infer if freq is None + attrs["freq"] = "infer" + return Index(result, **attrs) + # ------------------------------------------------------------------------ def equals(self, other): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 67de7b0196b8e..cce390d98c037 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -4,7 +4,7 @@ import numpy as np -from pandas._libs import Timestamp, index as libindex, lib, tslib as libts +from pandas._libs import NaT, Timestamp, index as libindex, lib, tslib as libts import pandas._libs.join as libjoin from pandas._libs.tslibs import ccalendar, fields, parsing, timezones from pandas.util._decorators import Appender, Substitution, cache_readonly @@ -69,7 +69,7 @@ class DatetimeDelegateMixin(DatetimelikeDelegateMixin): # Some are "raw" methods, the result is not not re-boxed in an Index # We also have a few "extra" attrs, which may or may not be raw, # which we we dont' want to expose in the .dt accessor. - _extra_methods = ["to_period", "to_perioddelta", "to_julian_date"] + _extra_methods = ["to_period", "to_perioddelta", "to_julian_date", "strftime"] _extra_raw_methods = ["to_pydatetime", "_local_timestamps", "_has_same_tz"] _extra_raw_properties = ["_box_func", "tz", "tzinfo"] _delegated_properties = DatetimeArray._datetimelike_ops + _extra_raw_properties @@ -465,14 +465,6 @@ def _convert_for_op(self, value): return _to_M8(value) raise ValueError("Passed item and index have different timezone") - def _maybe_update_attributes(self, attrs): - """ Update Index attributes (e.g. 
freq) depending on op """ - freq = attrs.get("freq", None) - if freq is not None: - # no need to infer if freq is None - attrs["freq"] = "infer" - return attrs - # -------------------------------------------------------------------- # Rendering Methods @@ -669,7 +661,7 @@ def _get_time_micros(self): def to_series(self, keep_tz=None, index=None, name=None): """ Create a Series with both index and values equal to the index keys - useful with map for returning an indexer based on an index + useful with map for returning an indexer based on an index. Parameters ---------- @@ -695,10 +687,10 @@ def to_series(self, keep_tz=None, index=None, name=None): behaviour and silence the warning. index : Index, optional - index of resulting Series. If None, defaults to original index - name : string, optional - name of resulting Series. If None, defaults to name of original - index + Index of resulting Series. If None, defaults to original index. + name : str, optional + Name of resulting Series. If None, defaults to name of original + index. Returns ------- @@ -743,7 +735,7 @@ def to_series(self, keep_tz=None, index=None, name=None): def snap(self, freq="S"): """ - Snap time stamps to nearest occurring frequency + Snap time stamps to nearest occurring frequency. Returns ------- @@ -1184,7 +1176,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): is_normalized = cache_readonly(DatetimeArray.is_normalized.fget) # type: ignore _resolution = cache_readonly(DatetimeArray._resolution.fget) # type: ignore - strftime = ea_passthrough(DatetimeArray.strftime) _has_same_tz = ea_passthrough(DatetimeArray._has_same_tz) @property @@ -1282,7 +1273,9 @@ def insert(self, loc, item): raise ValueError("Passed item and index have different timezone") # check freq can be preserved on edge cases if self.size and self.freq is not None: - if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]: + if item is NaT: + pass + elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]: freq = self.freq elif (loc == len(self)) and item - self.freq == self[-1]: freq = self.freq @@ -1601,7 +1594,7 @@ def bdate_range( ): """ Return a fixed frequency DatetimeIndex, with business day as the default - frequency + frequency. Parameters ---------- diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py index 2e5b3ff8ef502..329456e25bded 100644 --- a/pandas/core/indexes/frozen.py +++ b/pandas/core/indexes/frozen.py @@ -22,7 +22,6 @@ class FrozenList(PandasObject, list): - """ Container that doesn't allow setting item *but* because it's technically non-hashable, will be used diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 7a444683ffcb2..7c581a12764b1 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -269,22 +269,6 @@ def from_arrays( ) return cls._simple_new(array, name=name) - @classmethod - @Appender(_interval_shared_docs["from_intervals"] % _index_doc_kwargs) - def from_intervals(cls, data, closed=None, name=None, copy=False, dtype=None): - msg = ( - "IntervalIndex.from_intervals is deprecated and will be " - "removed in a future version; Use IntervalIndex(...) 
instead" - ) - warnings.warn(msg, FutureWarning, stacklevel=2) - with rewrite_exception("IntervalArray", cls.__name__): - array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype) - - if name is None and isinstance(data, cls): - name = data.name - - return cls._simple_new(array, name=name) - @classmethod @Appender(_interval_shared_docs["from_tuples"] % _index_doc_kwargs) def from_tuples(cls, data, closed="right", name=None, copy=False, dtype=None): @@ -347,7 +331,8 @@ def __contains__(self, key): >>> idx.to_tuples() Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object') >>> idx.to_tuples(na_tuple=False) - Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')""", + Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object') + """, ) ) def to_tuples(self, na_tuple=True): @@ -804,7 +789,7 @@ def _find_non_overlapping_monotonic_bounds(self, key): return start, stop def get_loc( - self, key: Any, method: Optional[str] = None + self, key: Any, method: Optional[str] = None, tolerance=None ) -> Union[int, slice, np.ndarray]: """ Get integer location, slice or boolean mask for requested label. @@ -998,7 +983,7 @@ def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray: List of indices. """ if self.is_overlapping: - return self.get_indexer_non_unique(target, **kwargs)[0] + return self.get_indexer_non_unique(target)[0] return self.get_indexer(target, **kwargs) @Appender(_index_shared_docs["get_value"] % _index_doc_kwargs) @@ -1111,12 +1096,8 @@ def _format_with_header(self, header, **kwargs): return header + list(self._format_native_types(**kwargs)) def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs): - """ actually format my specific types """ - from pandas.io.formats.format import ExtensionArrayFormatter - - return ExtensionArrayFormatter( - values=self, na_rep=na_rep, justify="all", leading_space=False - ).get_result() + # GH 28210: use base method but with different default na_rep + return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs) def _format_data(self, name=None): @@ -1326,7 +1307,7 @@ def interval_range( start=None, end=None, periods=None, freq=None, name=None, closed="right" ): """ - Return a fixed frequency IntervalIndex + Return a fixed frequency IntervalIndex. Parameters ---------- diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 488107690fbd6..761862b9f30e9 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -622,6 +622,15 @@ def _values(self): # We override here, since our parent uses _data, which we dont' use. return self.values + @property + def shape(self): + """ + Return a tuple of the shape of the underlying data. + """ + # overriding the base Index.shape definition to avoid materializing + # the values (GH-27384, GH-27775) + return (len(self),) + @property def array(self): """ @@ -1241,7 +1250,7 @@ def _set_names(self, names, level=None, validate=True): self.levels[l].rename(name, inplace=True) names = property( - fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex\n""" + fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n""" ) @Appender(_index_shared_docs["_get_grouper_for_level"]) @@ -1753,7 +1762,7 @@ def is_all_dates(self): def is_lexsorted(self): """ - Return True if the codes are lexicographically sorted + Return True if the codes are lexicographically sorted. Returns ------- @@ -2237,7 +2246,7 @@ def swaplevel(self, i=-2, j=-1): def reorder_levels(self, order): """ - Rearrange levels using input order. 
May not drop or duplicate levels
+        Rearrange levels using input order. May not drop or duplicate levels.

         Parameters
         ----------
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index f6b3d1076043e..f7bf77928bdc7 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -1,5 +1,6 @@
 from datetime import datetime, timedelta
 import warnings
+import weakref

 import numpy as np

@@ -63,7 +64,10 @@ class PeriodDelegateMixin(DatetimelikeDelegateMixin):

     _delegate_class = PeriodArray
     _delegated_properties = PeriodArray._datetimelike_ops
-    _delegated_methods = set(PeriodArray._datetimelike_methods) | {"_addsub_int_array"}
+    _delegated_methods = set(PeriodArray._datetimelike_methods) | {
+        "_addsub_int_array",
+        "strftime",
+    }

     _raw_properties = {"is_leap_year"}

@@ -438,7 +442,9 @@ def _formatter_func(self):

     @cache_readonly
     def _engine(self):
-        return self._engine_type(lambda: self, len(self))
+        # To avoid a reference cycle, pass a weakref of self to _engine_type.
+        period = weakref.ref(self)
+        return self._engine_type(period, len(self))

     @Appender(_index_shared_docs["contains"])
     def __contains__(self, key):
@@ -988,7 +994,7 @@ def memory_usage(self, deep=False):
 def period_range(start=None, end=None, periods=None, freq=None, name=None):
     """
     Return a fixed frequency PeriodIndex, with day (calendar) as the default
-    frequency
+    frequency.

     Parameters
     ----------
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index d2bea5f68b92d..8783351cc74d1 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -25,6 +25,7 @@
 from pandas.core import ops
 import pandas.core.common as com
+from pandas.core.construction import extract_array
 import pandas.core.indexes.base as ibase
 from pandas.core.indexes.base import Index, _index_shared_docs
 from pandas.core.indexes.numeric import Int64Index
@@ -74,7 +75,7 @@ class RangeIndex(Int64Index):

     _engine_type = libindex.Int64Engine
     _range = None  # type: range

-    # check whether self._data has benn called
+    # check whether self._data has been called
     _cached_data = None  # type: np.ndarray
     # --------------------------------------------------------------------
     # Constructors
@@ -235,7 +236,7 @@ def _format_with_header(self, header, na_rep="NaN", **kwargs):
     @cache_readonly
     def start(self):
         """
-        The value of the `start` parameter (``0`` if this was not supplied)
+        The value of the `start` parameter (``0`` if this was not supplied).
         """
         # GH 25710
         return self._range.start
@@ -243,7 +244,7 @@ def start(self):
     @property
     def _start(self):
         """
-        The value of the `start` parameter (``0`` if this was not supplied)
+        The value of the `start` parameter (``0`` if this was not supplied).

         .. deprecated:: 0.25.0
            Use ``start`` instead.
@@ -258,14 +259,14 @@ def _start(self):
     @cache_readonly
     def stop(self):
         """
-        The value of the `stop` parameter
+        The value of the `stop` parameter.
         """
         return self._range.stop

     @property
     def _stop(self):
         """
-        The value of the `stop` parameter
+        The value of the `stop` parameter.

         .. deprecated:: 0.25.0
            Use ``stop`` instead.
@@ -281,7 +282,7 @@ def _stop(self):
     @cache_readonly
     def step(self):
         """
-        The value of the `step` parameter (``1`` if this was not supplied)
+        The value of the `step` parameter (``1`` if this was not supplied).
         """
         # GH 25710
         return self._range.step
@@ -289,7 +290,7 @@
     @property
     def _step(self):
         """
-        The value of the `step` parameter (``1`` if this was not supplied)
+        The value of the `step` parameter (``1`` if this was not supplied).
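# --- illustrative aside, not part of the diff above ----------------------
# The period.py hunk above passes `weakref.ref(self)` to the engine instead
# of `self`, another way to break the same engine<->index reference cycle.
# A sketch of an engine consuming such a weak reference (hypothetical names):
import weakref

class EngineSketch:
    def __init__(self, get_owner, length):
        self._get_owner = get_owner   # a weakref.ref, not a strong reference
        self._length = length

    def values(self):
        owner = self._get_owner()     # dereference; None once owner is freed
        if owner is None:
            raise RuntimeError("owning index was garbage-collected")
        return owner.data

class OwnerSketch:
    def __init__(self, data):
        self.data = data
        self._engine = EngineSketch(weakref.ref(self), len(data))

idx = OwnerSketch([1, 2, 3])
assert idx._engine.values() == [1, 2, 3]
# --------------------------------------------------------------------------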
.. deprecated:: 0.25.0 Use ``step`` instead. @@ -782,9 +783,8 @@ def _evaluate_numeric_binop(self, other): # Must be an np.ndarray; GH#22390 return op(self._int64index, other) - other = self._validate_for_numeric_binop(other, op) + other = extract_array(other, extract_numpy=True) attrs = self._get_attributes_dict() - attrs = self._maybe_update_attributes(attrs) left, right = self, other diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index f2ce562536b95..b03d60c7b5b37 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -44,7 +44,12 @@ class TimedeltaDelegateMixin(DatetimelikeDelegateMixin): # which we we dont' want to expose in the .dt accessor. _delegate_class = TimedeltaArray _delegated_properties = TimedeltaArray._datetimelike_ops + ["components"] - _delegated_methods = TimedeltaArray._datetimelike_methods + ["_box_values"] + _delegated_methods = TimedeltaArray._datetimelike_methods + [ + "_box_values", + "__neg__", + "__pos__", + "__abs__", + ] _raw_properties = {"components"} _raw_methods = {"to_pytimedelta"} @@ -56,27 +61,27 @@ class TimedeltaDelegateMixin(DatetimelikeDelegateMixin): TimedeltaArray, TimedeltaDelegateMixin._delegated_methods, typ="method", - overwrite=False, + overwrite=True, ) class TimedeltaIndex( DatetimeIndexOpsMixin, dtl.TimelikeOps, Int64Index, TimedeltaDelegateMixin ): """ Immutable ndarray of timedelta64 data, represented internally as int64, and - which can be boxed to timedelta objects + which can be boxed to timedelta objects. Parameters ---------- data : array-like (1-dimensional), optional - Optional timedelta-like data to construct index with + Optional timedelta-like data to construct index with. unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional - which is an integer/float number - freq : string or pandas offset object, optional + Which is an integer/float number. + freq : str or pandas offset object, optional One of pandas date offset strings or corresponding objects. The string 'infer' can be passed in order to set the frequency of the index as the - inferred frequency upon creation + inferred frequency upon creation. copy : bool - Make a copy of input ndarray + Make a copy of input ndarray. start : starting value, timedelta-like, optional If data is None, start is used as the start point in generating regular timedelta data. @@ -85,24 +90,24 @@ class TimedeltaIndex( periods : int, optional, > 0 Number of periods to generate, if generating index. Takes precedence - over end argument + over end argument. .. deprecated:: 0.24.0 end : end time, timedelta-like, optional If periods is none, generated index will extend to first conforming - time on or just past end argument + time on or just past end argument. .. deprecated:: 0.24. 0 - closed : string or None, default None + closed : str or None, default None Make the interval closed with respect to the given frequency to - the 'left', 'right', or both sides (None) + the 'left', 'right', or both sides (None). .. deprecated:: 0.24. 0 name : object - Name to be stored in the index + Name to be stored in the index. Attributes ---------- @@ -279,14 +284,6 @@ def __setstate__(self, state): _unpickle_compat = __setstate__ - def _maybe_update_attributes(self, attrs): - """ Update Index attributes (e.g. 
freq) depending on op """ - freq = attrs.get("freq", None) - if freq is not None: - # no need to infer if freq is None - attrs["freq"] = "infer" - return attrs - # ------------------------------------------------------------------- # Rendering Methods @@ -689,7 +686,6 @@ def delete(self, loc): TimedeltaIndex._add_comparison_ops() -TimedeltaIndex._add_numeric_methods_unary() TimedeltaIndex._add_logical_methods_disabled() TimedeltaIndex._add_datetimelike_methods() @@ -717,7 +713,7 @@ def timedelta_range( ): """ Return a fixed frequency TimedeltaIndex, with day as the default - frequency + frequency. Parameters ---------- diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e308ae03730b3..3d495eeb8c885 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -22,11 +22,11 @@ is_sparse, ) from pandas.core.dtypes.concat import concat_compat -from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries +from pandas.core.dtypes.generic import ABCDataFrame, ABCMultiIndex, ABCSeries from pandas.core.dtypes.missing import _infer_fill_value, isna import pandas.core.common as com -from pandas.core.index import Index, InvalidIndexError, MultiIndex +from pandas.core.index import Index, InvalidIndexError from pandas.core.indexers import is_list_like_indexer, length_of_indexer @@ -49,7 +49,7 @@ def get_indexers_list(): # the public IndexSlicerMaker class _IndexSlice: """ - Create an object to more easily perform multi-index slicing + Create an object to more easily perform multi-index slicing. See Also -------- @@ -124,7 +124,7 @@ def __getitem__(self, key): key = tuple(com.apply_if_callable(x, self.obj) for x in key) try: values = self.obj._get_value(*key) - except (KeyError, TypeError, InvalidIndexError): + except (KeyError, TypeError, InvalidIndexError, AttributeError): # TypeError occurs here if the key has non-hashable entries, # generally slice or list. 
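# --- illustrative aside, not part of the diff above ----------------------
# The indexing.py hunks around here replace concrete `MultiIndex` isinstance
# checks with `ABCMultiIndex`. pandas' ABC types match on a class tag via
# __instancecheck__, so low-level modules can type-check without importing
# the concrete class (avoiding import cycles). A rough sketch of the
# mechanism, with hypothetical names rather than pandas' exact internals:
class _ABCMeta(type):
    def __instancecheck__(cls, obj):
        return getattr(obj, "_typ", None) in cls._typ_values

class ABCMultiIndexSketch(metaclass=_ABCMeta):
    _typ_values = {"multiindex"}

class FakeMultiIndex:            # stands in for pandas.MultiIndex
    _typ = "multiindex"

assert isinstance(FakeMultiIndex(), ABCMultiIndexSketch)
assert not isinstance(object(), ABCMultiIndexSketch)
# --------------------------------------------------------------------------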
# TODO(ix): most/all of the TypeError cases here are for ix, @@ -132,6 +132,9 @@ def __getitem__(self, key): # The InvalidIndexError is only catched for compatibility # with geopandas, see # https://github.com/pandas-dev/pandas/issues/27258 + # TODO: The AttributeError is for IntervalIndex which + # incorrectly implements get_value, see + # https://github.com/pandas-dev/pandas/issues/27865 pass else: if is_scalar(values): @@ -169,7 +172,7 @@ def _get_setitem_indexer(self, key): ax = self.obj._get_axis(0) - if isinstance(ax, MultiIndex) and self.name != "iloc": + if isinstance(ax, ABCMultiIndex) and self.name != "iloc": try: return ax.get_loc(key) except Exception: @@ -238,7 +241,7 @@ def _has_valid_tuple(self, key: Tuple): ) def _is_nested_tuple_indexer(self, tup: Tuple): - if any(isinstance(ax, MultiIndex) for ax in self.obj.axes): + if any(isinstance(ax, ABCMultiIndex) for ax in self.obj.axes): return any(is_nested_tuple(tup, ax) for ax in self.obj.axes) return False @@ -321,6 +324,17 @@ def _setitem_with_indexer(self, indexer, value): val = list(value.values()) if isinstance(value, dict) else value take_split_path = not blk._can_hold_element(val) + # if we have any multi-indexes that have non-trivial slices + # (not null slices) then we must take the split path, xref + # GH 10360, GH 27841 + if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes): + for i, ax in zip(indexer, self.obj.axes): + if isinstance(ax, ABCMultiIndex) and not ( + is_integer(i) or com.is_null_slice(i) + ): + take_split_path = True + break + if isinstance(indexer, tuple): nindexer = [] for i, idx in enumerate(indexer): @@ -408,7 +422,9 @@ def _setitem_with_indexer(self, indexer, value): # if we have a partial multiindex, then need to adjust the plane # indexer here - if len(labels) == 1 and isinstance(self.obj[labels[0]].axes[0], MultiIndex): + if len(labels) == 1 and isinstance( + self.obj[labels[0]].axes[0], ABCMultiIndex + ): item = labels[0] obj = self.obj[item] index = obj.index @@ -481,7 +497,7 @@ def setter(item, v): # we have an equal len Frame if isinstance(value, ABCDataFrame): sub_indexer = list(indexer) - multiindex_indexer = isinstance(labels, MultiIndex) + multiindex_indexer = isinstance(labels, ABCMultiIndex) for item in labels: if item in value: @@ -763,8 +779,8 @@ def _align_frame(self, indexer, df: ABCDataFrame): # we have a multi-index and are trying to align # with a particular, level GH3738 if ( - isinstance(ax, MultiIndex) - and isinstance(df.index, MultiIndex) + isinstance(ax, ABCMultiIndex) + and isinstance(df.index, ABCMultiIndex) and ax.nlevels != df.index.nlevels ): raise TypeError( @@ -890,7 +906,7 @@ def _getitem_lowerdim(self, tup: Tuple): ax0 = self.obj._get_axis(0) # ...but iloc should handle the tuple as simple integer-location # instead of checking it as multiindex representation (GH 13797) - if isinstance(ax0, MultiIndex) and self.name != "iloc": + if isinstance(ax0, ABCMultiIndex) and self.name != "iloc": result = self._handle_lowerdim_multi_index_axis0(tup) if result is not None: return result @@ -990,7 +1006,7 @@ def _getitem_axis(self, key, axis: int): if isinstance(key, slice): return self._get_slice_axis(key, axis=axis) elif is_list_like_indexer(key) and not ( - isinstance(key, tuple) and isinstance(labels, MultiIndex) + isinstance(key, tuple) and isinstance(labels, ABCMultiIndex) ): if hasattr(key, "ndim") and key.ndim > 1: @@ -1003,7 +1019,7 @@ def _getitem_axis(self, key, axis: int): key = labels._maybe_cast_indexer(key) if is_integer(key): - if axis == 0 and 
isinstance(labels, MultiIndex): + if axis == 0 and isinstance(labels, ABCMultiIndex): try: return self._get_label(key, axis=axis) except (KeyError, TypeError): @@ -1214,7 +1230,7 @@ def _convert_to_indexer(self, obj, axis: int, raise_missing: bool = False): try: return labels.get_loc(obj) except LookupError: - if isinstance(obj, tuple) and isinstance(labels, MultiIndex): + if isinstance(obj, tuple) and isinstance(labels, ABCMultiIndex): if len(obj) == labels.nlevels: return {"key": obj} raise @@ -1234,7 +1250,7 @@ def _convert_to_indexer(self, obj, axis: int, raise_missing: bool = False): # always valid return {"key": obj} - if obj >= self.obj.shape[axis] and not isinstance(labels, MultiIndex): + if obj >= self.obj.shape[axis] and not isinstance(labels, ABCMultiIndex): # a positional raise ValueError("cannot set by positional indexing with enlargement") @@ -1701,7 +1717,7 @@ def _is_scalar_access(self, key: Tuple): return False ax = self.obj.axes[i] - if isinstance(ax, MultiIndex): + if isinstance(ax, ABCMultiIndex): return False if isinstance(k, str) and ax._supports_partial_string_indexing: @@ -1723,7 +1739,7 @@ def _getitem_scalar(self, key): def _get_partial_string_timestamp_match_key(self, key, labels): """Translate any partial string timestamp matches in key, returning the new key (GH 10331)""" - if isinstance(labels, MultiIndex): + if isinstance(labels, ABCMultiIndex): if ( isinstance(key, str) and labels.levels[0]._supports_partial_string_indexing @@ -1767,7 +1783,7 @@ def _getitem_axis(self, key, axis: int): # to a list of keys # we will use the *values* of the object # and NOT the index if its a PandasObject - if isinstance(labels, MultiIndex): + if isinstance(labels, ABCMultiIndex): if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1: # Series, or 0,1 ndim ndarray @@ -1795,7 +1811,7 @@ def _getitem_axis(self, key, axis: int): key = tuple([key]) # an iterable multi-selection - if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)): + if not (isinstance(key, tuple) and isinstance(labels, ABCMultiIndex)): if hasattr(key, "ndim") and key.ndim > 1: raise ValueError("Cannot index with multidimensional key") @@ -2460,7 +2476,7 @@ def is_nested_tuple(tup, labels): for i, k in enumerate(tup): if is_list_like(k) or isinstance(k, slice): - return isinstance(labels, MultiIndex) + return isinstance(labels, ABCMultiIndex) return False diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 6a2aebe5db246..33698d245e9ff 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -7,7 +7,7 @@ import numpy as np -from pandas._libs import NaT, Timestamp, lib, tslib, tslibs +from pandas._libs import NaT, Timestamp, lib, tslib import pandas._libs.internals as libinternals from pandas._libs.tslibs import Timedelta, conversion from pandas._libs.tslibs.timezones import tz_compare @@ -18,6 +18,7 @@ find_common_type, infer_dtype_from, infer_dtype_from_scalar, + maybe_downcast_numeric, maybe_downcast_to_dtype, maybe_infer_dtype_type, maybe_promote, @@ -55,7 +56,6 @@ ABCDataFrame, ABCDatetimeIndex, ABCExtensionArray, - ABCIndexClass, ABCPandasArray, ABCSeries, ) @@ -68,13 +68,7 @@ ) import pandas.core.algorithms as algos -from pandas.core.arrays import ( - Categorical, - DatetimeArray, - ExtensionArray, - PandasDtype, - TimedeltaArray, -) +from pandas.core.arrays import Categorical, DatetimeArray, PandasDtype, TimedeltaArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction 
import extract_array @@ -209,10 +203,6 @@ def internal_values(self, dtype=None): """ return self.values - def formatting_values(self): - """Return the internal values used by the DataFrame/SeriesFormatter""" - return self.internal_values() - def get_values(self, dtype=None): """ return an internal format, currently just the ndarray @@ -273,6 +263,8 @@ def make_block_same_class(self, values, placement=None, ndim=None, dtype=None): ) if placement is None: placement = self.mgr_locs + if ndim is None: + ndim = self.ndim return make_block( values, placement=placement, ndim=ndim, klass=self.__class__, dtype=dtype ) @@ -415,7 +407,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None): return self.copy() if self._can_hold_element(value): - # equivalent: self._try_coerce_args(value) would not raise + # equivalent: _try_coerce_args(value) would not raise blocks = self.putmask(mask, value, inplace=inplace) return self._maybe_downcast(blocks, downcast) @@ -434,7 +426,7 @@ def f(m, v, i): return self.split_and_operate(mask, f, inplace) - def split_and_operate(self, mask, f, inplace): + def split_and_operate(self, mask, f, inplace: bool): """ split the block per-column, and apply the callable f per-column, return a new block for each. Handle @@ -493,17 +485,15 @@ def make_a_block(nv, ref_loc): return new_blocks - def _maybe_downcast(self, blocks, downcast=None): + def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]: # no need to downcast our float # unless indicated - if downcast is None and self.is_float: - return blocks - elif downcast is None and (self.is_timedelta or self.is_datetime): + if downcast is None and ( + self.is_float or self.is_timedelta or self.is_datetime + ): return blocks - if not isinstance(blocks, list): - blocks = [blocks] return _extend_blocks([b.downcast(downcast) for b in blocks]) def downcast(self, dtypes=None): @@ -679,7 +669,7 @@ def convert( return self.copy() if copy else self - def _can_hold_element(self, element): + def _can_hold_element(self, element: Any) -> bool: """ require the same dtype as ourselves """ dtype = self.values.dtype.type tipo = maybe_infer_dtype_type(element) @@ -687,28 +677,6 @@ def _can_hold_element(self, element): return issubclass(tipo.type, dtype) return isinstance(element, dtype) - def _try_cast_result(self, result, dtype=None): - """ try to cast the result to our original type, we may have - roundtripped thru object in the mean-time - """ - if dtype is None: - dtype = self.dtype - - if self.is_integer or self.is_bool or self.is_datetime: - pass - elif self.is_float and result.dtype == self.dtype: - # protect against a bool/object showing up here - if isinstance(dtype, str) and dtype == "infer": - return result - - # This is only reached via Block.setitem, where dtype is always - # either "infer", self.dtype, or values.dtype. 
-            assert dtype == self.dtype, (dtype, self.dtype)
-            return result
-
-        # may need to change the dtype here
-        return maybe_downcast_to_dtype(result, dtype)
-
     def _try_coerce_args(self, other):
         """ provide coercion to our input arguments """

@@ -731,10 +699,6 @@ def _try_coerce_args(self, other):

         return other

-    def _try_coerce_and_cast_result(self, result, dtype=None):
-        result = self._try_cast_result(result, dtype=dtype)
-        return result
-
     def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs):
         """ convert to our native types format, slicing if desired """

@@ -774,8 +738,31 @@ def replace(
         # If we cannot replace with own dtype, convert to ObjectBlock and
         # retry
         if not self._can_hold_element(to_replace):
-            # TODO: we should be able to infer at this point that there is
-            # nothing to replace
+            if not isinstance(to_replace, list):
+                if inplace:
+                    return [self]
+                return [self.copy()]
+
+            to_replace = [x for x in to_replace if self._can_hold_element(x)]
+            if not len(to_replace):
+                # GH#28084 avoid costly checks since we can infer
+                # that there is nothing to replace in this block
+                if inplace:
+                    return [self]
+                return [self.copy()]
+
+            if len(to_replace) == 1:
+                # _can_hold_element checks have reduced this back to the
+                # scalar case and we can avoid a costly object cast
+                return self.replace(
+                    to_replace[0],
+                    value,
+                    inplace=inplace,
+                    filter=filter,
+                    regex=regex,
+                    convert=convert,
+                )
+
             # GH 22083, TypeError or ValueError occurred within error handling
             # causes infinite loop. Cast and retry only if not objectblock.
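# --- illustrative aside, not part of the diff above ----------------------
# The Block.replace hunk above pre-filters `to_replace` down to values the
# block's dtype could actually hold and short-circuits when nothing is left
# (GH 28084), instead of casting to object and scanning. The short-circuit
# idea on a bare numpy array; `can_hold`/`replace_list` are hypothetical:
import numpy as np

def can_hold(arr, value):
    # crude stand-in for Block._can_hold_element
    if np.issubdtype(arr.dtype, np.integer):
        return isinstance(value, (int, np.integer)) and not isinstance(value, bool)
    return True

def replace_list(arr, to_replace, value):
    candidates = [x for x in to_replace if can_hold(arr, x)]
    if not candidates:             # nothing can possibly match: cheap no-op copy
        return arr.copy()
    out = arr.copy()
    for x in candidates:
        out[out == x] = value
    return out

print(replace_list(np.array([1, 2, 3]), ["a", 2.5], 0))  # [1 2 3], untouched
print(replace_list(np.array([1, 2, 3]), [2], 0))         # [1 0 3]
# --------------------------------------------------------------------------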
if is_object_dtype(self): raise + assert not self._can_hold_element(value), value + # try again with a compatible block block = self.astype(object) return block.replace( @@ -857,12 +857,6 @@ def setitem(self, indexer, value): if self._can_hold_element(value): value = self._try_coerce_args(value) - # can keep its own dtype - if hasattr(value, "dtype") and is_dtype_equal(values.dtype, value.dtype): - dtype = self.dtype - else: - dtype = "infer" - else: # current dtype cannot store value, coerce to common dtype find_dtype = False @@ -871,15 +865,9 @@ def setitem(self, indexer, value): dtype = value.dtype find_dtype = True - elif lib.is_scalar(value): - if isna(value): - # NaN promotion is handled in latter path - dtype = False - else: - dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True) - find_dtype = True - else: - dtype = "infer" + elif lib.is_scalar(value) and not isna(value): + dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True) + find_dtype = True if find_dtype: dtype = find_common_type([values.dtype, dtype]) @@ -927,8 +915,6 @@ def setitem(self, indexer, value): else: values[indexer] = value - # coerce and try to infer the dtypes of the result - values = self._try_coerce_and_cast_result(values, dtype) if transpose: values = values.T block = self.make_block(values) @@ -962,6 +948,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False) # if we are passed a scalar None, convert it here if not is_list_like(new) and isna(new) and not self.is_object: + # FIXME: make sure we have compatible NA new = self.fill_value if self._can_hold_element(new): @@ -1089,7 +1076,7 @@ def coerce_to_target_dtype(self, other): mytz = getattr(self.dtype, "tz", None) othertz = getattr(dtype, "tz", None) - if str(mytz) != str(othertz): + if not tz_compare(mytz, othertz): return self.astype(object) raise AssertionError( @@ -1309,7 +1296,7 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): else: return self.make_block_same_class(new_values, new_mgr_locs) - def diff(self, n, axis=1): + def diff(self, n: int, axis: int = 1) -> List["Block"]: """ return block for the diff of the values """ new_values = algos.diff(self.values, n, axis=axis) return [self.make_block(values=new_values)] @@ -1343,7 +1330,15 @@ def shift(self, periods, axis=0, fill_value=None): return [self.make_block(new_values)] - def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0): + def where( + self, + other, + cond, + align=True, + errors="raise", + try_cast: bool = False, + axis: int = 0, + ) -> List["Block"]: """ evaluate the block; return result block(s) from the result @@ -1390,26 +1385,14 @@ def func(cond, values, other): if not ( (self.is_integer or self.is_bool) - and lib.is_scalar(other) + and lib.is_float(other) and np.isnan(other) ): # np.where will cast integer array to floats in this case other = self._try_coerce_args(other) - try: - fastres = expressions.where(cond, values, other) - return fastres - except Exception as detail: - if errors == "raise": - raise TypeError( - "Could not operate [{other!r}] with block values " - "[{detail!s}]".format(other=other, detail=detail) - ) - else: - # return the values - result = np.empty(values.shape, dtype="float64") - result.fill(np.nan) - return result + fastres = expressions.where(cond, values, other) + return fastres if cond.ravel().all(): result = values @@ -1438,11 +1421,7 @@ def func(cond, values, other): if transpose: result = result.T - # try to cast if requested - if try_cast: - result = 
self._try_cast_result(result) - - return self.make_block(result) + return [self.make_block(result)] # might need to separate out blocks axis = cond.ndim - 1 @@ -1453,13 +1432,13 @@ def func(cond, values, other): for m in [mask, ~mask]: if m.any(): taken = result.take(m.nonzero()[0], axis=axis) - r = self._try_cast_result(taken) + r = maybe_downcast_numeric(taken, self.dtype) nb = self.make_block(r.T, placement=self.mgr_locs[m]) result_blocks.append(nb) return result_blocks - def equals(self, other): + def equals(self, other) -> bool: if self.dtype != other.dtype or self.shape != other.shape: return False return array_equivalent(self.values, other.values) @@ -1474,9 +1453,9 @@ def _unstack(self, unstacker_func, new_columns, n_rows, fill_value): new_columns : Index All columns of the unstacked BlockManager. n_rows : int - Only used in ExtensionBlock.unstack + Only used in ExtensionBlock._unstack fill_value : int - Only used in ExtensionBlock.unstack + Only used in ExtensionBlock._unstack Returns ------- @@ -1550,7 +1529,7 @@ def quantile(self, qs, interpolation="linear", axis=0): result = result[..., 0] result = lib.item_from_zerodim(result) - ndim = getattr(result, "ndim", None) or 0 + ndim = np.ndim(result) return make_block(result, placement=np.arange(len(result)), ndim=ndim) def _replace_coerce( @@ -1686,9 +1665,6 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False) new_values[mask] = new return [self.make_block(values=new_values)] - def _try_cast_result(self, result, dtype=None): - return result - def _get_unstack_items(self, unstacker, new_columns): """ Get the placement, values, and mask for a Block unstack. @@ -1740,7 +1716,8 @@ def __init__(self, values, placement, ndim=None): super().__init__(values, placement, ndim) def _maybe_coerce_values(self, values): - """Unbox to an extension array. + """ + Unbox to an extension array. This will unbox an ExtensionArray stored in an Index or Series. ExtensionArrays pass through. No dtype coercion is done. @@ -1753,9 +1730,7 @@ def _maybe_coerce_values(self, values): ------- ExtensionArray """ - if isinstance(values, (ABCIndexClass, ABCSeries)): - values = values._values - return values + return extract_array(values) @property def _holder(self): @@ -1843,7 +1818,7 @@ def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): return self.make_block_same_class(new_values, new_mgr_locs) - def _can_hold_element(self, element): + def _can_hold_element(self, element: Any) -> bool: # XXX: We may need to think about pushing this onto the array. # We're doing the same as CategoricalBlock here. return True @@ -1861,21 +1836,6 @@ def _slice(self, slicer): return self.values[slicer] - def formatting_values(self): - # Deprecating the ability to override _formatting_values. - # Do the warning here, it's only user in pandas, since we - # have to check if the subclass overrode it. - fv = getattr(type(self.values), "_formatting_values", None) - if fv and fv != ExtensionArray._formatting_values: - msg = ( - "'ExtensionArray._formatting_values' is deprecated. " - "Specify 'ExtensionArray._formatter' instead." - ) - warnings.warn(msg, FutureWarning, stacklevel=10) - return self.values._formatting_values() - - return self.values - def concat_same_type(self, to_concat, placement=None): """ Concatenate list of single blocks of the same type. 
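# --- illustrative aside, not part of the diff above ----------------------
# Several hunks above change Block.where (and related methods) to always
# return a *list* of blocks rather than sometimes a bare block, so callers
# never need isinstance checks. The normalization in miniature (hypothetical
# names):
from typing import List

import numpy as np

class BlockSketch:
    def __init__(self, values):
        self.values = values

    def where(self, mask, other) -> List["BlockSketch"]:
        # always a list, even for the single-block result
        return [BlockSketch(np.where(mask, self.values, other))]

def apply_where(blocks, mask, other):
    out = []
    for b in blocks:
        out.extend(b.where(mask, other))    # uniform: no special-casing
    return out

res = apply_where([BlockSketch(np.array([1, 2]))], np.array([True, False]), 0)
print(res[0].values)                        # [1 0]
# --------------------------------------------------------------------------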
@@ -1923,7 +1883,15 @@ def shift( ) ] - def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0): + def where( + self, + other, + cond, + align=True, + errors="raise", + try_cast: bool = False, + axis: int = 0, + ) -> List["Block"]: if isinstance(other, ABCDataFrame): # ExtensionArrays are 1-D, so if we get here then # `other` should be a DataFrame with a single column. @@ -1968,7 +1936,7 @@ def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0) np.where(cond, self.values, other), dtype=dtype ) - return self.make_block_same_class(result, placement=self.mgr_locs) + return [self.make_block_same_class(result, placement=self.mgr_locs)] @property def _ftype(self): @@ -2020,7 +1988,7 @@ class NumericBlock(Block): class FloatOrComplexBlock(NumericBlock): __slots__ = () - def equals(self, other): + def equals(self, other) -> bool: if self.dtype != other.dtype or self.shape != other.shape: return False left, right = self.values, other.values @@ -2031,7 +1999,7 @@ class FloatBlock(FloatOrComplexBlock): __slots__ = () is_float = True - def _can_hold_element(self, element): + def _can_hold_element(self, element: Any) -> bool: tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, (np.floating, np.integer)) and not issubclass( @@ -2095,7 +2063,7 @@ class ComplexBlock(FloatOrComplexBlock): __slots__ = () is_complex = True - def _can_hold_element(self, element): + def _can_hold_element(self, element: Any) -> bool: tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating)) @@ -2112,7 +2080,7 @@ class IntBlock(NumericBlock): is_integer = True _can_hold_na = False - def _can_hold_element(self, element): + def _can_hold_element(self, element: Any) -> bool: tipo = maybe_infer_dtype_type(element) if tipo is not None: return ( @@ -2202,7 +2170,7 @@ def _astype(self, dtype, **kwargs): # delegate return super()._astype(dtype=dtype, **kwargs) - def _can_hold_element(self, element): + def _can_hold_element(self, element: Any) -> bool: tipo = maybe_infer_dtype_type(element) if tipo is not None: if self.is_datetimetz: @@ -2392,41 +2360,19 @@ def _slice(self, slicer): return self.values[slicer] def _try_coerce_args(self, other): - """ - localize and return i8 for the values - - Parameters - ---------- - other : ndarray-like or scalar - - Returns - ------- - base-type other - """ - if is_valid_nat_for_dtype(other, self.dtype): - other = np.datetime64("NaT", "ns") - elif isinstance(other, self._holder): - if not tz_compare(other.tz, self.values.tz): - raise ValueError("incompatible or non tz-aware value") - - elif isinstance(other, (np.datetime64, datetime, date)): - other = tslibs.Timestamp(other) - - # test we can have an equal time zone - if not tz_compare(other.tz, self.values.tz): - raise ValueError("incompatible or non tz-aware value") - else: - raise TypeError(other) - + # DatetimeArray handles this for us return other - def diff(self, n, axis=0): - """1st discrete difference + def diff(self, n: int, axis: int = 0) -> List["Block"]: + """ + 1st discrete difference. Parameters ---------- - n : int, number of periods to diff - axis : int, axis to diff upon. default 0 + n : int + Number of periods to diff. + axis : int, default 0 + Axis to diff upon. 
Returns ------- @@ -2488,7 +2434,7 @@ def setitem(self, indexer, value): ) return newb.setitem(indexer, value) - def equals(self, other): + def equals(self, other) -> bool: # override for significant performance improvement if self.dtype != other.dtype or self.shape != other.shape: return False @@ -2527,7 +2473,7 @@ def __init__(self, values, placement, ndim=None): def _holder(self): return TimedeltaArray - def _can_hold_element(self, element): + def _can_hold_element(self, element: Any) -> bool: tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, np.timedelta64) @@ -2620,7 +2566,7 @@ class BoolBlock(NumericBlock): is_bool = True _can_hold_na = False - def _can_hold_element(self, element): + def _can_hold_element(self, element: Any) -> bool: tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, np.bool_) @@ -2706,7 +2652,7 @@ def f(m, v, i): return blocks - def _maybe_downcast(self, blocks, downcast=None): + def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]: if downcast is not None: return blocks @@ -2714,7 +2660,7 @@ def _maybe_downcast(self, blocks, downcast=None): # split and convert the blocks return _extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks]) - def _can_hold_element(self, element): + def _can_hold_element(self, element: Any) -> bool: return True def _try_coerce_args(self, other): @@ -2870,9 +2816,9 @@ def _replace_single( regex = regex_re or to_rep_re # try to get the pattern attribute (compiled re) or it's a string - try: + if is_re(to_replace): pattern = to_replace.pattern - except AttributeError: + else: pattern = to_replace # if the pattern is not empty and to_replace is either a string or a @@ -2893,18 +2839,18 @@ def _replace_single( if isna(value) or not isinstance(value, str): def re_replacer(s): - try: + if is_re(rx) and isinstance(s, str): return value if rx.search(s) is not None else s - except TypeError: + else: return s else: # value is guaranteed to be a string here, s can be either a string # or null if it's null it gets returned def re_replacer(s): - try: + if is_re(rx) and isinstance(s, str): return rx.sub(value, s) - except TypeError: + else: return s f = np.vectorize(re_replacer, otypes=[self.dtype]) @@ -3031,7 +2977,15 @@ def concat_same_type(self, to_concat, placement=None): values, placement=placement or slice(0, len(values), 1), ndim=self.ndim ) - def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0): + def where( + self, + other, + cond, + align=True, + errors="raise", + try_cast: bool = False, + axis: int = 0, + ) -> List["Block"]: # TODO(CategoricalBlock.where): # This can all be deleted in favor of ExtensionBlock.where once # we enforce the deprecation. diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index e5acd23b77d5d..1c31542daa5de 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1582,10 +1582,6 @@ def external_values(self): def internal_values(self): return self._block.internal_values() - def formatting_values(self): - """Return the internal values used by the DataFrame/SeriesFormatter""" - return self._block.formatting_values() - def get_values(self): """ return a dense type view """ return np.array(self._block.to_dense(), copy=False) @@ -1823,7 +1819,7 @@ def _simple_blockify(tuples, dtype): """ values, placement = _stack_arrays(tuples, dtype) - # CHECK DTYPE? + # TODO: CHECK DTYPE? 
if dtype is not None and values.dtype != dtype: # pragma: no cover values = values.astype(dtype) diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 48b3d74e8d02c..86cd6e878cde6 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -13,51 +13,46 @@ from pandas.errors import NullFrequencyError from pandas.util._decorators import Appender -from pandas.core.dtypes.cast import ( - construct_1d_object_array_from_listlike, - find_common_type, - maybe_upcast_putmask, -) +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, - is_categorical_dtype, is_datetime64_dtype, - is_datetime64tz_dtype, is_datetimelike_v_numeric, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, - is_period_dtype, is_scalar, is_timedelta64_dtype, - needs_i8_conversion, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeArray, ABCDatetimeIndex, - ABCIndex, + ABCExtensionArray, ABCIndexClass, ABCSeries, - ABCSparseArray, ABCSparseSeries, ) from pandas.core.dtypes.missing import isna, notna import pandas as pd from pandas._typing import ArrayLike -from pandas.core.construction import extract_array - -from . import missing -from .docstrings import ( +from pandas.core.construction import array, extract_array +from pandas.core.ops.array_ops import comp_method_OBJECT_ARRAY, define_na_arithmetic_op +from pandas.core.ops.docstrings import ( _arith_doc_FRAME, _flex_comp_doc_FRAME, _make_flex_doc, _op_descriptions, ) -from .roperator import ( # noqa:F401 +from pandas.core.ops.invalid import invalid_comparison +from pandas.core.ops.methods import ( # noqa:F401 + add_flex_arithmetic_methods, + add_special_arithmetic_methods, +) +from pandas.core.ops.roperator import ( # noqa:F401 radd, rand_, rdiv, @@ -174,7 +169,7 @@ def maybe_upcast_for_op(obj, shape: Tuple[int, ...]): # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') return Timedelta(obj) - elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj): + elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj.dtype): # GH#22390 Unfortunately we need to special-case right-hand # timedelta64 dtypes because numpy casts integer dtypes to # timedelta64 when operating with timedelta64 @@ -185,29 +180,6 @@ def maybe_upcast_for_op(obj, shape: Tuple[int, ...]): # ----------------------------------------------------------------------------- -def make_invalid_op(name): - """ - Return a binary method that always raises a TypeError. - - Parameters - ---------- - name : str - - Returns - ------- - invalid_op : function - """ - - def invalid_op(self, other=None): - raise TypeError( - "cannot perform {name} with this index type: " - "{typ}".format(name=name, typ=type(self).__name__) - ) - - invalid_op.__name__ = name - return invalid_op - - def _gen_eval_kwargs(name): """ Find the keyword arguments to pass to numexpr for the given operation. @@ -419,97 +391,35 @@ def mask_cmp_op(x, y, op): return result -def masked_arith_op(x, y, op): - """ - If the given arithmetic operation fails, attempt it again on - only the non-null elements of the input array(s). - - Parameters - ---------- - x : np.ndarray - y : np.ndarray, Series, Index - op : binary operator - """ - # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes - # the logic valid for both Series and DataFrame ops. 
- xrav = x.ravel() - assert isinstance(x, np.ndarray), type(x) - if isinstance(y, np.ndarray): - dtype = find_common_type([x.dtype, y.dtype]) - result = np.empty(x.size, dtype=dtype) - - # PeriodIndex.ravel() returns int64 dtype, so we have - # to work around that case. See GH#19956 - yrav = y if is_period_dtype(y) else y.ravel() - mask = notna(xrav) & notna(yrav) - - if yrav.shape != mask.shape: - # FIXME: GH#5284, GH#5035, GH#19448 - # Without specifically raising here we get mismatched - # errors in Py3 (TypeError) vs Py2 (ValueError) - # Note: Only = an issue in DataFrame case - raise ValueError("Cannot broadcast operands together.") - - if mask.any(): - with np.errstate(all="ignore"): - result[mask] = op(xrav[mask], yrav[mask]) - - else: - assert is_scalar(y), type(y) - assert isinstance(x, np.ndarray), type(x) - # mask is only meaningful for x - result = np.empty(x.size, dtype=x.dtype) - mask = notna(xrav) - - # 1 ** np.nan is 1. So we have to unmask those. - if op == pow: - mask = np.where(x == 1, False, mask) - elif op == rpow: - mask = np.where(y == 1, False, mask) - - if mask.any(): - with np.errstate(all="ignore"): - result[mask] = op(xrav[mask], y) - - result, changed = maybe_upcast_putmask(result, ~mask, np.nan) - result = result.reshape(x.shape) # 2D compat - return result +# ----------------------------------------------------------------------------- +# Dispatch logic -def invalid_comparison(left, right, op): +def should_extension_dispatch(left: ABCSeries, right: Any) -> bool: """ - If a comparison has mismatched types and is not necessarily meaningful, - follow python3 conventions by: - - - returning all-False for equality - - returning all-True for inequality - - raising TypeError otherwise + Identify cases where Series operation should use dispatch_to_extension_op. Parameters ---------- - left : array-like - right : scalar, array-like - op : operator.{eq, ne, lt, le, gt} + left : Series + right : object - Raises - ------ - TypeError : on inequality comparisons + Returns + ------- + bool """ - if op is operator.eq: - res_values = np.zeros(left.shape, dtype=bool) - elif op is operator.ne: - res_values = np.ones(left.shape, dtype=bool) - else: - raise TypeError( - "Invalid comparison between dtype={dtype} and {typ}".format( - dtype=left.dtype, typ=type(right).__name__ - ) - ) - return res_values + if ( + is_extension_array_dtype(left.dtype) + or is_datetime64_dtype(left.dtype) + or is_timedelta64_dtype(left.dtype) + ): + return True + if not is_scalar(right) and is_extension_array_dtype(right): + # GH#22378 disallow scalar to exclude e.g. "category", "Int64" + return True -# ----------------------------------------------------------------------------- -# Dispatch logic + return False def should_series_dispatch(left, right, op): @@ -616,19 +526,18 @@ def dispatch_to_extension_op(op, left, right): apply the operator defined by op. """ + if left.dtype.kind in "mM": + # We need to cast datetime64 and timedelta64 ndarrays to + # DatetimeArray/TimedeltaArray. But we avoid wrapping others in + # PandasArray as that behaves poorly with e.g. IntegerArray. 
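# --- illustrative aside, not part of the diff above ----------------------
# `masked_arith_op` (removed above as the arithmetic helpers move into the
# new pandas.core.ops submodules; note the array_ops import earlier in this
# hunk) evaluates the op only where both inputs are non-null and leaves NaN
# elsewhere. The core masking idea in a standalone form:
import numpy as np

def masked_op(x, y, op):
    mask = ~np.isnan(x) & ~np.isnan(y)
    result = np.full(x.shape, np.nan)
    result[mask] = op(x[mask], y[mask])
    return result

print(masked_op(np.array([1.0, np.nan]), np.array([2.0, 2.0]), np.add))
# -> [3. nan]
# --------------------------------------------------------------------------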
+ left = array(left) + # The op calls will raise TypeError if the op is not defined # on the ExtensionArray # unbox Series and Index to arrays - if isinstance(left, (ABCSeries, ABCIndexClass)): - new_left = left._values - else: - new_left = left - - if isinstance(right, (ABCSeries, ABCIndexClass)): - new_right = right._values - else: - new_right = right + new_left = extract_array(left, extract_numpy=True) + new_right = extract_array(right, extract_numpy=True) try: res_values = op(new_left, new_right) @@ -642,224 +551,6 @@ def dispatch_to_extension_op(op, left, right): return res_values -# ----------------------------------------------------------------------------- -# Functions that add arithmetic methods to objects, given arithmetic factory -# methods - - -def _get_method_wrappers(cls): - """ - Find the appropriate operation-wrappers to use when defining flex/special - arithmetic, boolean, and comparison operations with the given class. - - Parameters - ---------- - cls : class - - Returns - ------- - arith_flex : function or None - comp_flex : function or None - arith_special : function - comp_special : function - bool_special : function - - Notes - ----- - None is only returned for SparseArray - """ - if issubclass(cls, ABCSparseSeries): - # Be sure to catch this before ABCSeries and ABCSparseArray, - # as they will both come see SparseSeries as a subclass - arith_flex = _flex_method_SERIES - comp_flex = _flex_method_SERIES - arith_special = _arith_method_SPARSE_SERIES - comp_special = _arith_method_SPARSE_SERIES - bool_special = _bool_method_SERIES - # TODO: I don't think the functions defined by bool_method are tested - elif issubclass(cls, ABCSeries): - # Just Series; SparseSeries is caught above - arith_flex = _flex_method_SERIES - comp_flex = _flex_method_SERIES - arith_special = _arith_method_SERIES - comp_special = _comp_method_SERIES - bool_special = _bool_method_SERIES - elif issubclass(cls, ABCDataFrame): - # Same for DataFrame and SparseDataFrame - arith_flex = _arith_method_FRAME - comp_flex = _flex_comp_method_FRAME - arith_special = _arith_method_FRAME - comp_special = _comp_method_FRAME - bool_special = _arith_method_FRAME - return arith_flex, comp_flex, arith_special, comp_special, bool_special - - -def _create_methods(cls, arith_method, comp_method, bool_method, special): - # creates actual methods based upon arithmetic, comp and bool method - # constructors. 
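# --- illustrative aside, not part of the diff above ----------------------
# `should_extension_dispatch` above routes a Series op down the
# ExtensionArray path when either operand is extension-backed (or when the
# left side is datetime64/timedelta64). A condensed version of that
# predicate using public pandas APIs:
import pandas as pd
from pandas.api.types import is_extension_array_dtype, is_scalar

def should_dispatch_sketch(left: pd.Series, right) -> bool:
    if is_extension_array_dtype(left.dtype) or left.dtype.kind in "mM":
        return True
    if not is_scalar(right) and is_extension_array_dtype(right):
        return True
    return False

s = pd.Series(pd.array([1, 2, None], dtype="Int64"))
assert should_dispatch_sketch(s, 1)
assert not should_dispatch_sketch(pd.Series([1.0, 2.0]), 1)
# --------------------------------------------------------------------------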
- - have_divmod = issubclass(cls, ABCSeries) - # divmod is available for Series and SparseSeries - - # yapf: disable - new_methods = dict( - add=arith_method(cls, operator.add, special), - radd=arith_method(cls, radd, special), - sub=arith_method(cls, operator.sub, special), - mul=arith_method(cls, operator.mul, special), - truediv=arith_method(cls, operator.truediv, special), - floordiv=arith_method(cls, operator.floordiv, special), - # Causes a floating point exception in the tests when numexpr enabled, - # so for now no speedup - mod=arith_method(cls, operator.mod, special), - pow=arith_method(cls, operator.pow, special), - # not entirely sure why this is necessary, but previously was included - # so it's here to maintain compatibility - rmul=arith_method(cls, rmul, special), - rsub=arith_method(cls, rsub, special), - rtruediv=arith_method(cls, rtruediv, special), - rfloordiv=arith_method(cls, rfloordiv, special), - rpow=arith_method(cls, rpow, special), - rmod=arith_method(cls, rmod, special)) - # yapf: enable - new_methods["div"] = new_methods["truediv"] - new_methods["rdiv"] = new_methods["rtruediv"] - if have_divmod: - # divmod doesn't have an op that is supported by numexpr - new_methods["divmod"] = arith_method(cls, divmod, special) - new_methods["rdivmod"] = arith_method(cls, rdivmod, special) - - new_methods.update( - dict( - eq=comp_method(cls, operator.eq, special), - ne=comp_method(cls, operator.ne, special), - lt=comp_method(cls, operator.lt, special), - gt=comp_method(cls, operator.gt, special), - le=comp_method(cls, operator.le, special), - ge=comp_method(cls, operator.ge, special), - ) - ) - - if bool_method: - new_methods.update( - dict( - and_=bool_method(cls, operator.and_, special), - or_=bool_method(cls, operator.or_, special), - # For some reason ``^`` wasn't used in original. - xor=bool_method(cls, operator.xor, special), - rand_=bool_method(cls, rand_, special), - ror_=bool_method(cls, ror_, special), - rxor=bool_method(cls, rxor, special), - ) - ) - - if special: - dunderize = lambda x: "__{name}__".format(name=x.strip("_")) - else: - dunderize = lambda x: x - new_methods = {dunderize(k): v for k, v in new_methods.items()} - return new_methods - - -def add_methods(cls, new_methods): - for name, method in new_methods.items(): - # For most methods, if we find that the class already has a method - # of the same name, it is OK to over-write it. The exception is - # inplace methods (__iadd__, __isub__, ...) for SparseArray, which - # retain the np.ndarray versions. - force = not (issubclass(cls, ABCSparseArray) and name.startswith("__i")) - if force or name not in cls.__dict__: - setattr(cls, name, method) - - -# ---------------------------------------------------------------------- -# Arithmetic -def add_special_arithmetic_methods(cls): - """ - Adds the full suite of special arithmetic methods (``__add__``, - ``__sub__``, etc.) to the class. 
- - Parameters - ---------- - cls : class - special methods will be defined and pinned to this class - """ - _, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls) - new_methods = _create_methods( - cls, arith_method, comp_method, bool_method, special=True - ) - # inplace operators (I feel like these should get passed an `inplace=True` - # or just be removed - - def _wrap_inplace_method(method): - """ - return an inplace wrapper for this method - """ - - def f(self, other): - result = method(self, other) - - # this makes sure that we are aligned like the input - # we are updating inplace so we want to ignore is_copy - self._update_inplace( - result.reindex_like(self, copy=False)._data, verify_is_copy=False - ) - - return self - - f.__name__ = "__i{name}__".format(name=method.__name__.strip("__")) - return f - - new_methods.update( - dict( - __iadd__=_wrap_inplace_method(new_methods["__add__"]), - __isub__=_wrap_inplace_method(new_methods["__sub__"]), - __imul__=_wrap_inplace_method(new_methods["__mul__"]), - __itruediv__=_wrap_inplace_method(new_methods["__truediv__"]), - __ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]), - __imod__=_wrap_inplace_method(new_methods["__mod__"]), - __ipow__=_wrap_inplace_method(new_methods["__pow__"]), - ) - ) - - new_methods.update( - dict( - __iand__=_wrap_inplace_method(new_methods["__and__"]), - __ior__=_wrap_inplace_method(new_methods["__or__"]), - __ixor__=_wrap_inplace_method(new_methods["__xor__"]), - ) - ) - - add_methods(cls, new_methods=new_methods) - - -def add_flex_arithmetic_methods(cls): - """ - Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) - to the class. - - Parameters - ---------- - cls : class - flex methods will be defined and pinned to this class - """ - flex_arith_method, flex_comp_method, _, _, _ = _get_method_wrappers(cls) - new_methods = _create_methods( - cls, flex_arith_method, flex_comp_method, bool_method=None, special=False - ) - new_methods.update( - dict( - multiply=new_methods["mul"], - subtract=new_methods["sub"], - divide=new_methods["div"], - ) - ) - # opt out of bool flex methods for now - assert not any(kname in new_methods for kname in ("ror_", "rxor", "rand_")) - - add_methods(cls, new_methods=new_methods) - - # ----------------------------------------------------------------------------- # Series @@ -918,33 +609,7 @@ def _arith_method_SERIES(cls, op, special): _construct_divmod_result if op in [divmod, rdivmod] else _construct_result ) - def na_op(x, y): - """ - Return the result of evaluating op on the passed in values. - - If native types are not compatible, try coersion to object dtype. 
- - Parameters - ---------- - x : array-like - y : array-like or scalar - - Returns - ------- - array-like - - Raises - ------ - TypeError : invalid operation - """ - import pandas.core.computation.expressions as expressions - - try: - result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) - except TypeError: - result = masked_arith_op(x, y, op) - - return missing.dispatch_fill_zeros(op, x, y, result) + na_op = define_na_arithmetic_op(op, str_rep, eval_kwargs) def wrapper(left, right): if isinstance(right, ABCDataFrame): @@ -954,127 +619,56 @@ def wrapper(left, right): res_name = get_op_result_name(left, right) right = maybe_upcast_for_op(right, left.shape) - if is_categorical_dtype(left): - raise TypeError( - "{typ} cannot perform the operation " - "{op}".format(typ=type(left).__name__, op=str_rep) - ) - - elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left): - from pandas.core.arrays import DatetimeArray - - result = dispatch_to_extension_op(op, DatetimeArray(left), right) - return construct_result(left, result, index=left.index, name=res_name) - - elif is_extension_array_dtype(left) or ( - is_extension_array_dtype(right) and not is_scalar(right) - ): - # GH#22378 disallow scalar to exclude e.g. "category", "Int64" + if should_extension_dispatch(left, right): result = dispatch_to_extension_op(op, left, right) - return construct_result(left, result, index=left.index, name=res_name) - elif is_timedelta64_dtype(left): - from pandas.core.arrays import TimedeltaArray - - result = dispatch_to_extension_op(op, TimedeltaArray(left), right) - return construct_result(left, result, index=left.index, name=res_name) - - elif is_timedelta64_dtype(right): - # We should only get here with non-scalar values for right - # upcast by maybe_upcast_for_op + elif is_timedelta64_dtype(right) or isinstance( + right, (ABCDatetimeArray, ABCDatetimeIndex) + ): + # We should only get here with td64 right with non-scalar values + # for right upcast by maybe_upcast_for_op assert not isinstance(right, (np.timedelta64, np.ndarray)) - result = op(left._values, right) - # We do not pass dtype to ensure that the Series constructor - # does inference in the case where `result` has object-dtype. - return construct_result(left, result, index=left.index, name=res_name) - - elif isinstance(right, (ABCDatetimeArray, ABCDatetimeIndex)): - result = op(left._values, right) - return construct_result(left, result, index=left.index, name=res_name) + else: + lvalues = extract_array(left, extract_numpy=True) + rvalues = extract_array(right, extract_numpy=True) - lvalues = left.values - rvalues = right - if isinstance(rvalues, (ABCSeries, ABCIndexClass)): - rvalues = rvalues._values + with np.errstate(all="ignore"): + result = na_op(lvalues, rvalues) - with np.errstate(all="ignore"): - result = na_op(lvalues, rvalues) - return construct_result( - left, result, index=left.index, name=res_name, dtype=None - ) + # We do not pass dtype to ensure that the Series constructor + # does inference in the case where `result` has object-dtype. 
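        # (illustrative: the constructor's inference turns e.g. an
        #  object-dtype array of Timestamps back into datetime64 --
        #      >>> import pandas as pd
        #      >>> pd.Series([pd.Timestamp("2019-01-01")]).dtype
        #      dtype('<M8[ns]')
        # )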
+ return construct_result(left, result, index=left.index, name=res_name) wrapper.__name__ = op_name return wrapper -def _comp_method_OBJECT_ARRAY(op, x, y): - if isinstance(y, list): - y = construct_1d_object_array_from_listlike(y) - if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)): - if not is_object_dtype(y.dtype): - y = y.astype(np.object_) - - if isinstance(y, (ABCSeries, ABCIndex)): - y = y.values - - result = libops.vec_compare(x, y, op) - else: - result = libops.scalar_compare(x, y, op) - return result - - def _comp_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) - masker = _gen_eval_kwargs(op_name).get("masker", False) def na_op(x, y): # TODO: - # should have guarantess on what x, y can be type-wise + # should have guarantees on what x, y can be type-wise # Extension Dtypes are not called here - # Checking that cases that were once handled here are no longer - # reachable. - assert not (is_categorical_dtype(y) and not is_scalar(y)) - if is_object_dtype(x.dtype): - result = _comp_method_OBJECT_ARRAY(op, x, y) + result = comp_method_OBJECT_ARRAY(op, x, y) elif is_datetimelike_v_numeric(x, y): return invalid_comparison(x, y, op) else: - - # we want to compare like types - # we only want to convert to integer like if - # we are not NotImplemented, otherwise - # we would allow datetime64 (but viewed as i8) against - # integer comparisons - - # we have a datetime/timedelta and may need to convert - assert not needs_i8_conversion(x) - mask = None - if not is_scalar(y) and needs_i8_conversion(y): - mask = isna(x) | isna(y) - y = y.view("i8") - x = x.view("i8") - - method = getattr(x, op_name, None) - if method is not None: - with np.errstate(all="ignore"): - result = method(y) - if result is NotImplemented: - return invalid_comparison(x, y, op) - else: - result = op(x, y) - - if mask is not None and mask.any(): - result[mask] = masker + method = getattr(x, op_name) + with np.errstate(all="ignore"): + result = method(y) + if result is NotImplemented: + return invalid_comparison(x, y, op) return result @@ -1084,6 +678,15 @@ def wrapper(self, other, axis=None): self._get_axis_number(axis) res_name = get_op_result_name(self, other) + other = lib.item_from_zerodim(other) + + # TODO: shouldn't we be applying finalize whenever + # not isinstance(other, ABCSeries)? + finalizer = ( + lambda x: x.__finalize__(self) + if isinstance(other, (np.ndarray, ABCIndexClass)) + else x + ) if isinstance(other, list): # TODO: same for tuples? 
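As a rough sketch of the comparison semantics enforced by ``na_op`` above
(assuming pandas 0.25 behavior): datetime-vs-numeric mismatches follow the
python3 convention of ``invalid_comparison`` -- all-False for ``==``,
all-True for ``!=``, and a TypeError for ordering operators.

    import pandas as pd

    s = pd.Series(pd.date_range("2019-01-01", periods=2))
    print((s == 1).tolist())  # [False, False]
    print((s != 1).tolist())  # [True, True]
    # s < 1 raises TypeError ("Invalid comparison between ...")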
@@ -1093,57 +696,20 @@ def wrapper(self, other, axis=None): # Defer to DataFrame implementation; fail early return NotImplemented - elif isinstance(other, ABCSeries) and not self._indexed_same(other): + if isinstance(other, ABCSeries) and not self._indexed_same(other): raise ValueError("Can only compare identically-labeled Series objects") - elif is_categorical_dtype(self): - # Dispatch to Categorical implementation; CategoricalIndex - # behavior is non-canonical GH#19513 - res_values = dispatch_to_extension_op(op, self, other) - return self._constructor(res_values, index=self.index, name=res_name) - - elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self): - # Dispatch to DatetimeIndex to ensure identical - # Series/Index behavior - from pandas.core.arrays import DatetimeArray - - res_values = dispatch_to_extension_op(op, DatetimeArray(self), other) - return self._constructor(res_values, index=self.index, name=res_name) - - elif is_timedelta64_dtype(self): - from pandas.core.arrays import TimedeltaArray - - res_values = dispatch_to_extension_op(op, TimedeltaArray(self), other) - return self._constructor(res_values, index=self.index, name=res_name) - - elif is_extension_array_dtype(self) or ( - is_extension_array_dtype(other) and not is_scalar(other) + elif isinstance( + other, (np.ndarray, ABCExtensionArray, ABCIndexClass, ABCSeries) ): - # Note: the `not is_scalar(other)` condition rules out - # e.g. other == "category" - res_values = dispatch_to_extension_op(op, self, other) - return self._constructor(res_values, index=self.index).rename(res_name) - - elif isinstance(other, ABCSeries): - # By this point we have checked that self._indexed_same(other) - res_values = na_op(self.values, other.values) - # rename is needed in case res_name is None and res_values.name - # is not. - return self._constructor( - res_values, index=self.index, name=res_name - ).rename(res_name) - - elif isinstance(other, (np.ndarray, ABCIndexClass)): - # do not check length of zerodim array - # as it will broadcast - if other.ndim != 0 and len(self) != len(other): + # TODO: make this treatment consistent across ops and classes. + # We are not catching all listlikes here (e.g. frozenset, tuple) + # The ambiguous case is object-dtype. See GH#27803 + if len(self) != len(other): raise ValueError("Lengths must match to compare") - res_values = na_op(self.values, np.asarray(other)) - result = self._constructor(res_values, index=self.index) - # rename is needed in case res_name is None and self.name - # is not. 
- return result.__finalize__(self).rename(res_name) + if should_extension_dispatch(self, other): + res_values = dispatch_to_extension_op(op, self, other) elif is_scalar(other) and isna(other): # numpy does not like comparisons vs None @@ -1151,25 +717,25 @@ def wrapper(self, other, axis=None): res_values = np.ones(len(self), dtype=bool) else: res_values = np.zeros(len(self), dtype=bool) - return self._constructor( - res_values, index=self.index, name=res_name, dtype="bool" - ) else: - values = self.to_numpy() + lvalues = extract_array(self, extract_numpy=True) + rvalues = extract_array(other, extract_numpy=True) with np.errstate(all="ignore"): - res = na_op(values, other) - if is_scalar(res): + res_values = na_op(lvalues, rvalues) + if is_scalar(res_values): raise TypeError( "Could not compare {typ} type with Series".format(typ=type(other)) ) - # always return a full value series here - res_values = extract_array(res, extract_numpy=True) - return self._constructor( - res_values, index=self.index, name=res_name, dtype="bool" - ) + result = self._constructor(res_values, index=self.index) + result = finalizer(result) + + # Set the result's name after finalizer is called because finalizer + # would set it back to self.name + result.name = res_name + return result wrapper.__name__ = op_name return wrapper @@ -1189,7 +755,7 @@ def na_op(x, y): assert not isinstance(y, (list, ABCSeries, ABCIndexClass)) if isinstance(y, np.ndarray): # bool-bool dtype operations should be OK, should not get here - assert not (is_bool_dtype(x) and is_bool_dtype(y)) + assert not (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)) x = ensure_object(x) y = ensure_object(y) result = libops.vec_binop(x, y, op) @@ -1238,7 +804,7 @@ def wrapper(self, other): else: # scalars, list, tuple, np.array - is_other_int_dtype = is_integer_dtype(np.asarray(other)) + is_other_int_dtype = is_integer_dtype(np.asarray(other).dtype) if is_list_like(other) and not isinstance(other, np.ndarray): # TODO: Can we do this before the is_integer_dtype check? # could the is_integer_dtype check be checking the wrong @@ -1397,15 +963,7 @@ def _arith_method_FRAME(cls, op, special): eval_kwargs = _gen_eval_kwargs(op_name) default_axis = _get_frame_op_default_axis(op_name) - def na_op(x, y): - import pandas.core.computation.expressions as expressions - - try: - result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) - except TypeError: - result = masked_arith_op(x, y, op) - - return missing.dispatch_fill_zeros(op, x, y, result) + na_op = define_na_arithmetic_op(op, str_rep, eval_kwargs) if op_name in _op_descriptions: # i.e. 
include "add" but not "__add__" @@ -1430,10 +988,10 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): self, other, pass_op, fill_value=fill_value, axis=axis, level=level ) else: + # in this case we always have `np.ndim(other) == 0` if fill_value is not None: self = self.fillna(fill_value) - assert np.ndim(other) == 0 return self._combine_const(other, op) f.__name__ = op_name @@ -1474,7 +1032,7 @@ def f(self, other, axis=default_axis, level=None): self, other, na_op, fill_value=None, axis=axis, level=level ) else: - assert np.ndim(other) == 0, other + # in this case we always have `np.ndim(other) == 0` return self._combine_const(other, na_op) f.__name__ = op_name diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py new file mode 100644 index 0000000000000..f5f6d77676f1f --- /dev/null +++ b/pandas/core/ops/array_ops.py @@ -0,0 +1,128 @@ +""" +Functions for arithmetic and comparison operations on NumPy arrays and +ExtensionArrays. +""" +import numpy as np + +from pandas._libs import ops as libops + +from pandas.core.dtypes.cast import ( + construct_1d_object_array_from_listlike, + find_common_type, + maybe_upcast_putmask, +) +from pandas.core.dtypes.common import is_object_dtype, is_scalar +from pandas.core.dtypes.generic import ABCIndex, ABCSeries +from pandas.core.dtypes.missing import notna + +from pandas.core.ops import missing +from pandas.core.ops.roperator import rpow + + +def comp_method_OBJECT_ARRAY(op, x, y): + if isinstance(y, list): + y = construct_1d_object_array_from_listlike(y) + + # TODO: Should the checks below be ABCIndexClass? + if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)): + # TODO: should this be ABCIndexClass?? + if not is_object_dtype(y.dtype): + y = y.astype(np.object_) + + if isinstance(y, (ABCSeries, ABCIndex)): + y = y.values + + result = libops.vec_compare(x, y, op) + else: + result = libops.scalar_compare(x, y, op) + return result + + +def masked_arith_op(x, y, op): + """ + If the given arithmetic operation fails, attempt it again on + only the non-null elements of the input array(s). + + Parameters + ---------- + x : np.ndarray + y : np.ndarray, Series, Index + op : binary operator + """ + # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes + # the logic valid for both Series and DataFrame ops. + xrav = x.ravel() + assert isinstance(x, np.ndarray), type(x) + if isinstance(y, np.ndarray): + dtype = find_common_type([x.dtype, y.dtype]) + result = np.empty(x.size, dtype=dtype) + + # NB: ravel() is only safe since y is ndarray; for e.g. PeriodIndex + # we would get int64 dtype, see GH#19956 + yrav = y.ravel() + mask = notna(xrav) & notna(yrav) + + if yrav.shape != mask.shape: + # FIXME: GH#5284, GH#5035, GH#19448 + # Without specifically raising here we get mismatched + # errors in Py3 (TypeError) vs Py2 (ValueError) + # Note: Only = an issue in DataFrame case + raise ValueError("Cannot broadcast operands together.") + + if mask.any(): + with np.errstate(all="ignore"): + result[mask] = op(xrav[mask], yrav[mask]) + + else: + if not is_scalar(y): + raise TypeError(type(y)) + + # mask is only meaningful for x + result = np.empty(x.size, dtype=x.dtype) + mask = notna(xrav) + + # 1 ** np.nan is 1. So we have to unmask those. 
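        # (a concrete check of that identity, for illustration: in both
        #  Python and numpy, ``1 ** float("nan")`` evaluates to 1.0; for
        #  rpow -- i.e. y ** x -- the same holds wherever the base y is 1)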
+        if op is pow:
+            mask = np.where(x == 1, False, mask)
+        elif op is rpow:
+            mask = np.where(y == 1, False, mask)
+
+        if mask.any():
+            with np.errstate(all="ignore"):
+                result[mask] = op(xrav[mask], y)
+
+    result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
+    result = result.reshape(x.shape)  # 2D compat
+    return result
+
+
+def define_na_arithmetic_op(op, str_rep, eval_kwargs):
+    def na_op(x, y):
+        """
+        Return the result of evaluating op on the passed in values.
+
+        If native types are not compatible, try coercion to object dtype.
+
+        Parameters
+        ----------
+        x : array-like
+        y : array-like or scalar
+
+        Returns
+        -------
+        array-like
+
+        Raises
+        ------
+        TypeError : invalid operation
+        """
+        import pandas.core.computation.expressions as expressions
+
+        try:
+            result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
+        except TypeError:
+            result = masked_arith_op(x, y, op)
+
+        return missing.dispatch_fill_zeros(op, x, y, result)
+
+    return na_op
diff --git a/pandas/core/ops/invalid.py b/pandas/core/ops/invalid.py
new file mode 100644
index 0000000000000..013ff7689b221
--- /dev/null
+++ b/pandas/core/ops/invalid.py
@@ -0,0 +1,61 @@
+"""
+Templates for invalid operations.
+"""
+import operator
+
+import numpy as np
+
+
+def invalid_comparison(left, right, op):
+    """
+    If a comparison has mismatched types and is not necessarily meaningful,
+    follow python3 conventions by:
+
+    - returning all-False for equality
+    - returning all-True for inequality
+    - raising TypeError otherwise
+
+    Parameters
+    ----------
+    left : array-like
+    right : scalar, array-like
+    op : operator.{eq, ne, lt, le, gt}
+
+    Raises
+    ------
+    TypeError : on inequality comparisons
+    """
+    if op is operator.eq:
+        res_values = np.zeros(left.shape, dtype=bool)
+    elif op is operator.ne:
+        res_values = np.ones(left.shape, dtype=bool)
+    else:
+        raise TypeError(
+            "Invalid comparison between dtype={dtype} and {typ}".format(
+                dtype=left.dtype, typ=type(right).__name__
+            )
+        )
+    return res_values
+
+
+def make_invalid_op(name: str):
+    """
+    Return a binary method that always raises a TypeError.
+
+    Parameters
+    ----------
+    name : str
+
+    Returns
+    -------
+    invalid_op : function
+    """
+
+    def invalid_op(self, other=None):
+        raise TypeError(
+            "cannot perform {name} with this index type: "
+            "{typ}".format(name=name, typ=type(self).__name__)
+        )
+
+    invalid_op.__name__ = name
+    return invalid_op
diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py
new file mode 100644
index 0000000000000..eba0a797a791f
--- /dev/null
+++ b/pandas/core/ops/methods.py
@@ -0,0 +1,249 @@
+"""
+Functions to generate methods and pin them to the appropriate classes.
+"""
+import operator
+
+from pandas.core.dtypes.generic import (
+    ABCDataFrame,
+    ABCSeries,
+    ABCSparseArray,
+    ABCSparseSeries,
+)
+
+from pandas.core.ops.roperator import (
+    radd,
+    rand_,
+    rdivmod,
+    rfloordiv,
+    rmod,
+    rmul,
+    ror_,
+    rpow,
+    rsub,
+    rtruediv,
+    rxor,
+)
+
+
+def _get_method_wrappers(cls):
+    """
+    Find the appropriate operation-wrappers to use when defining flex/special
+    arithmetic, boolean, and comparison operations with the given class.
+ + Parameters + ---------- + cls : class + + Returns + ------- + arith_flex : function or None + comp_flex : function or None + arith_special : function + comp_special : function + bool_special : function + + Notes + ----- + None is only returned for SparseArray + """ + # TODO: make these non-runtime imports once the relevant functions + # are no longer in __init__ + from pandas.core.ops import ( + _arith_method_FRAME, + _arith_method_SERIES, + _arith_method_SPARSE_SERIES, + _bool_method_SERIES, + _comp_method_FRAME, + _comp_method_SERIES, + _flex_comp_method_FRAME, + _flex_method_SERIES, + ) + + if issubclass(cls, ABCSparseSeries): + # Be sure to catch this before ABCSeries and ABCSparseArray, + # as they will both come see SparseSeries as a subclass + arith_flex = _flex_method_SERIES + comp_flex = _flex_method_SERIES + arith_special = _arith_method_SPARSE_SERIES + comp_special = _arith_method_SPARSE_SERIES + bool_special = _bool_method_SERIES + # TODO: I don't think the functions defined by bool_method are tested + elif issubclass(cls, ABCSeries): + # Just Series; SparseSeries is caught above + arith_flex = _flex_method_SERIES + comp_flex = _flex_method_SERIES + arith_special = _arith_method_SERIES + comp_special = _comp_method_SERIES + bool_special = _bool_method_SERIES + elif issubclass(cls, ABCDataFrame): + # Same for DataFrame and SparseDataFrame + arith_flex = _arith_method_FRAME + comp_flex = _flex_comp_method_FRAME + arith_special = _arith_method_FRAME + comp_special = _comp_method_FRAME + bool_special = _arith_method_FRAME + return arith_flex, comp_flex, arith_special, comp_special, bool_special + + +def add_special_arithmetic_methods(cls): + """ + Adds the full suite of special arithmetic methods (``__add__``, + ``__sub__``, etc.) to the class. + + Parameters + ---------- + cls : class + special methods will be defined and pinned to this class + """ + _, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls) + new_methods = _create_methods( + cls, arith_method, comp_method, bool_method, special=True + ) + # inplace operators (I feel like these should get passed an `inplace=True` + # or just be removed + + def _wrap_inplace_method(method): + """ + return an inplace wrapper for this method + """ + + def f(self, other): + result = method(self, other) + + # this makes sure that we are aligned like the input + # we are updating inplace so we want to ignore is_copy + self._update_inplace( + result.reindex_like(self, copy=False)._data, verify_is_copy=False + ) + + return self + + f.__name__ = "__i{name}__".format(name=method.__name__.strip("__")) + return f + + new_methods.update( + dict( + __iadd__=_wrap_inplace_method(new_methods["__add__"]), + __isub__=_wrap_inplace_method(new_methods["__sub__"]), + __imul__=_wrap_inplace_method(new_methods["__mul__"]), + __itruediv__=_wrap_inplace_method(new_methods["__truediv__"]), + __ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]), + __imod__=_wrap_inplace_method(new_methods["__mod__"]), + __ipow__=_wrap_inplace_method(new_methods["__pow__"]), + ) + ) + + new_methods.update( + dict( + __iand__=_wrap_inplace_method(new_methods["__and__"]), + __ior__=_wrap_inplace_method(new_methods["__or__"]), + __ixor__=_wrap_inplace_method(new_methods["__xor__"]), + ) + ) + + _add_methods(cls, new_methods=new_methods) + + +def add_flex_arithmetic_methods(cls): + """ + Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) + to the class. 
+ + Parameters + ---------- + cls : class + flex methods will be defined and pinned to this class + """ + flex_arith_method, flex_comp_method, _, _, _ = _get_method_wrappers(cls) + new_methods = _create_methods( + cls, flex_arith_method, flex_comp_method, bool_method=None, special=False + ) + new_methods.update( + dict( + multiply=new_methods["mul"], + subtract=new_methods["sub"], + divide=new_methods["div"], + ) + ) + # opt out of bool flex methods for now + assert not any(kname in new_methods for kname in ("ror_", "rxor", "rand_")) + + _add_methods(cls, new_methods=new_methods) + + +def _create_methods(cls, arith_method, comp_method, bool_method, special): + # creates actual methods based upon arithmetic, comp and bool method + # constructors. + + have_divmod = issubclass(cls, ABCSeries) + # divmod is available for Series and SparseSeries + + # yapf: disable + new_methods = dict( + add=arith_method(cls, operator.add, special), + radd=arith_method(cls, radd, special), + sub=arith_method(cls, operator.sub, special), + mul=arith_method(cls, operator.mul, special), + truediv=arith_method(cls, operator.truediv, special), + floordiv=arith_method(cls, operator.floordiv, special), + # Causes a floating point exception in the tests when numexpr enabled, + # so for now no speedup + mod=arith_method(cls, operator.mod, special), + pow=arith_method(cls, operator.pow, special), + # not entirely sure why this is necessary, but previously was included + # so it's here to maintain compatibility + rmul=arith_method(cls, rmul, special), + rsub=arith_method(cls, rsub, special), + rtruediv=arith_method(cls, rtruediv, special), + rfloordiv=arith_method(cls, rfloordiv, special), + rpow=arith_method(cls, rpow, special), + rmod=arith_method(cls, rmod, special)) + # yapf: enable + new_methods["div"] = new_methods["truediv"] + new_methods["rdiv"] = new_methods["rtruediv"] + if have_divmod: + # divmod doesn't have an op that is supported by numexpr + new_methods["divmod"] = arith_method(cls, divmod, special) + new_methods["rdivmod"] = arith_method(cls, rdivmod, special) + + new_methods.update( + dict( + eq=comp_method(cls, operator.eq, special), + ne=comp_method(cls, operator.ne, special), + lt=comp_method(cls, operator.lt, special), + gt=comp_method(cls, operator.gt, special), + le=comp_method(cls, operator.le, special), + ge=comp_method(cls, operator.ge, special), + ) + ) + + if bool_method: + new_methods.update( + dict( + and_=bool_method(cls, operator.and_, special), + or_=bool_method(cls, operator.or_, special), + # For some reason ``^`` wasn't used in original. + xor=bool_method(cls, operator.xor, special), + rand_=bool_method(cls, rand_, special), + ror_=bool_method(cls, ror_, special), + rxor=bool_method(cls, rxor, special), + ) + ) + + if special: + dunderize = lambda x: "__{name}__".format(name=x.strip("_")) + else: + dunderize = lambda x: x + new_methods = {dunderize(k): v for k, v in new_methods.items()} + return new_methods + + +def _add_methods(cls, new_methods): + for name, method in new_methods.items(): + # For most methods, if we find that the class already has a method + # of the same name, it is OK to over-write it. The exception is + # inplace methods (__iadd__, __isub__, ...) for SparseArray, which + # retain the np.ndarray versions. 
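        # (illustrative: only names like "__iadd__" on a SparseArray
        #  subclass yield force=False below, so the inherited ndarray
        #  in-place method is kept; every other method is overwritten)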
+ force = not (issubclass(cls, ABCSparseArray) and name.startswith("__i")) + if force or name not in cls.__dict__: + setattr(cls, name, method) diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py index 01bc345a40b83..45fa6a2830af6 100644 --- a/pandas/core/ops/missing.py +++ b/pandas/core/ops/missing.py @@ -40,7 +40,7 @@ def fill_zeros(result, x, y, name, fill): Mask the nan's from x. """ - if fill is None or is_float_dtype(result): + if fill is None or is_float_dtype(result.dtype): return result if name.startswith(("r", "__r")): @@ -55,7 +55,7 @@ def fill_zeros(result, x, y, name, fill): if is_scalar_type: y = np.array(y) - if is_integer_dtype(y): + if is_integer_dtype(y.dtype): if (y == 0).any(): diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 66878c3b1026c..a5d0e2cb3b58f 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1630,15 +1630,14 @@ def _get_period_bins(self, ax): def _take_new_index(obj, indexer, new_index, axis=0): - from pandas.core.api import Series, DataFrame - if isinstance(obj, Series): + if isinstance(obj, ABCSeries): new_values = algos.take_1d(obj.values, indexer) - return Series(new_values, index=new_index, name=obj.name) - elif isinstance(obj, DataFrame): + return obj._constructor(new_values, index=new_index, name=obj.name) + elif isinstance(obj, ABCDataFrame): if axis == 1: raise NotImplementedError("axis 1 is not supported") - return DataFrame( + return obj._constructor( obj._data.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1) ) else: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index f45c7693bf6ed..d7fbe464cb1e5 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -22,7 +22,6 @@ is_bool, is_bool_dtype, is_categorical_dtype, - is_datetime64_dtype, is_datetime64tz_dtype, is_datetimelike, is_dtype_equal, @@ -179,7 +178,7 @@ def merge_ordered( """ Perform merge with optional filling/interpolation designed for ordered data like time series data. Optionally perform group-wise merge (see - examples) + examples). 
Parameters ---------- @@ -1635,7 +1634,7 @@ def _get_merge_keys(self): ) ) - if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt): + if is_datetimelike(lt): if not isinstance(self.tolerance, Timedelta): raise MergeError(msg) if self.tolerance < Timedelta(0): diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 79716520f6654..d653dd87308cf 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -611,13 +611,21 @@ def _normalize(table, normalize, margins, margins_name="All"): table = table.fillna(0) elif margins is True: - - column_margin = table.loc[:, margins_name].drop(margins_name) - index_margin = table.loc[margins_name, :].drop(margins_name) - table = table.drop(margins_name, axis=1).drop(margins_name) - # to keep index and columns names - table_index_names = table.index.names - table_columns_names = table.columns.names + # keep index and column of pivoted table + table_index = table.index + table_columns = table.columns + + # check if margin name is in (for MI cases) or equal to last + # index/column and save the column and index margin + if (margins_name not in table.iloc[-1, :].name) | ( + margins_name != table.iloc[:, -1].name + ): + raise ValueError("{} not in pivoted DataFrame".format(margins_name)) + column_margin = table.iloc[:-1, -1] + index_margin = table.iloc[-1, :-1] + + # keep the core table + table = table.iloc[:-1, :-1] # Normalize core table = _normalize(table, normalize=normalize, margins=False) @@ -627,11 +635,13 @@ def _normalize(table, normalize, margins, margins_name="All"): column_margin = column_margin / column_margin.sum() table = concat([table, column_margin], axis=1) table = table.fillna(0) + table.columns = table_columns elif normalize == "index": index_margin = index_margin / index_margin.sum() table = table.append(index_margin) table = table.fillna(0) + table.index = table_index elif normalize == "all" or normalize is True: column_margin = column_margin / column_margin.sum() @@ -641,13 +651,12 @@ def _normalize(table, normalize, margins, margins_name="All"): table = table.append(index_margin) table = table.fillna(0) + table.index = table_index + table.columns = table_columns else: raise ValueError("Not a valid normalize argument") - table.index.names = table_index_names - table.columns.names = table_columns_names - else: raise ValueError("Not a valid margins argument") diff --git a/pandas/core/series.py b/pandas/core/series.py index 9e317d365ccb8..6fb39c422de93 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -562,13 +562,6 @@ def _values(self): """ return self._data.internal_values() - def _formatting_values(self): - """ - Return the values that can be formatted (used by SeriesFormatter - and DataFrameFormatter). - """ - return self._data.formatting_values() - def get_values(self): """ Same as values (but handles sparseness conversions); is a view. @@ -682,8 +675,8 @@ def nonzero(self): 3 4 dtype: int64 - >>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd']) # same return although index of s is different + >>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd']) >>> s.nonzero() (array([1, 3]),) >>> s.iloc[s.nonzero()[0]] @@ -1688,7 +1681,8 @@ def items(self): See Also -------- - DataFrame.items : Equivalent to Series.items for DataFrame. + DataFrame.items : Iterate over (column name, Series) pairs. + DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. 
Examples -------- @@ -3626,7 +3620,7 @@ def explode(self) -> "Series": Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. - DataFrame.melt : Unpivot a DataFrame from wide format to long format + DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. @@ -4171,12 +4165,10 @@ def rename(self, index=None, **kwargs): """ kwargs["inplace"] = validate_bool_kwarg(kwargs.get("inplace", False), "inplace") - non_mapping = is_scalar(index) or ( - is_list_like(index) and not is_dict_like(index) - ) - if non_mapping: + if callable(index) or is_dict_like(index): + return super().rename(index=index, **kwargs) + else: return self._set_name(index, inplace=kwargs.get("inplace")) - return super().rename(index=index, **kwargs) @Substitution(**_shared_doc_kwargs) @Appender(generic.NDFrame.reindex.__doc__) diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 5db31fe6664ea..e6edad656d430 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -271,7 +271,6 @@ def nargsort(items, kind="quicksort", ascending=True, na_position="last"): class _KeyMapper: - """ Ease my suffering. Map compressed group id -> key tuple """ diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index f5add426297a7..8fe6850c84b8b 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -569,13 +569,13 @@ def _combine_frame(self, other, func, fill_value=None, level=None): ).__finalize__(self) def _combine_match_index(self, other, func, level=None): - new_data = {} if level is not None: raise NotImplementedError("'level' argument is not supported") this, other = self.align(other, join="outer", axis=0, level=level, copy=False) + new_data = {} for col, series in this.items(): new_data[col] = func(series.values, other.values) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 169a3a24c254d..25350119f9df5 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1442,6 +1442,12 @@ def str_slice(arr, start=None, stop=None, step=None): 2 hameleon dtype: object + >>> s.str.slice(start=-1) + 0 a + 1 x + 2 n + dtype: object + >>> s.str.slice(stop=2) 0 ko 1 fo diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 172084e97a959..b07647cf5b5fb 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -334,6 +334,9 @@ def _convert_listlike_datetimes( return DatetimeIndex(arg, tz=tz, name=name) except ValueError: pass + elif tz: + # DatetimeArray, DatetimeIndex + return arg.tz_localize(tz) return arg diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index 73e126cf230a5..bcdbf0855cbb4 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -58,7 +58,7 @@ def hash_pandas_object( obj, index=True, encoding="utf8", hash_key=None, categorize=True ): """ - Return a data hash of the Index/Series/DataFrame + Return a data hash of the Index/Series/DataFrame. 
Parameters ---------- diff --git a/pandas/core/window/__init__.py b/pandas/core/window/__init__.py new file mode 100644 index 0000000000000..dcf58a4c0dd5b --- /dev/null +++ b/pandas/core/window/__init__.py @@ -0,0 +1,3 @@ +from pandas.core.window.ewm import EWM # noqa:F401 +from pandas.core.window.expanding import Expanding, ExpandingGroupby # noqa:F401 +from pandas.core.window.rolling import Rolling, RollingGroupby, Window # noqa:F401 diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py new file mode 100644 index 0000000000000..0f2920b3558c9 --- /dev/null +++ b/pandas/core/window/common.py @@ -0,0 +1,276 @@ +"""Common utility functions for rolling operations""" +from collections import defaultdict +import warnings + +import numpy as np + +from pandas.core.dtypes.common import is_integer +from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries + +import pandas.core.common as com +from pandas.core.generic import _shared_docs +from pandas.core.groupby.base import GroupByMixin +from pandas.core.index import MultiIndex + +_shared_docs = dict(**_shared_docs) +_doc_template = """ + Returns + ------- + Series or DataFrame + Return type is determined by the caller. + + See Also + -------- + Series.%(name)s : Series %(name)s. + DataFrame.%(name)s : DataFrame %(name)s. +""" + + +class _GroupByMixin(GroupByMixin): + """ + Provide the groupby facilities. + """ + + def __init__(self, obj, *args, **kwargs): + parent = kwargs.pop("parent", None) # noqa + groupby = kwargs.pop("groupby", None) + if groupby is None: + groupby, obj = obj, obj.obj + self._groupby = groupby + self._groupby.mutated = True + self._groupby.grouper.mutated = True + super().__init__(obj, *args, **kwargs) + + count = GroupByMixin._dispatch("count") + corr = GroupByMixin._dispatch("corr", other=None, pairwise=None) + cov = GroupByMixin._dispatch("cov", other=None, pairwise=None) + + def _apply( + self, func, name=None, window=None, center=None, check_minp=None, **kwargs + ): + """ + Dispatch to apply; we are stripping all of the _apply kwargs and + performing the original function call on the grouped object. 
+ """ + + def f(x, name=name, *args): + x = self._shallow_copy(x) + + if isinstance(name, str): + return getattr(x, name)(*args, **kwargs) + + return x.apply(name, *args, **kwargs) + + return self._groupby.apply(f) + + +def _flex_binary_moment(arg1, arg2, f, pairwise=False): + + if not ( + isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) + and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame)) + ): + raise TypeError( + "arguments to moment function must be of type " + "np.ndarray/Series/DataFrame" + ) + + if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance( + arg2, (np.ndarray, ABCSeries) + ): + X, Y = _prep_binary(arg1, arg2) + return f(X, Y) + + elif isinstance(arg1, ABCDataFrame): + from pandas import DataFrame + + def dataframe_from_int_dict(data, frame_template): + result = DataFrame(data, index=frame_template.index) + if len(result.columns) > 0: + result.columns = frame_template.columns[result.columns] + return result + + results = {} + if isinstance(arg2, ABCDataFrame): + if pairwise is False: + if arg1 is arg2: + # special case in order to handle duplicate column names + for i, col in enumerate(arg1.columns): + results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) + return dataframe_from_int_dict(results, arg1) + else: + if not arg1.columns.is_unique: + raise ValueError("'arg1' columns are not unique") + if not arg2.columns.is_unique: + raise ValueError("'arg2' columns are not unique") + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore", RuntimeWarning) + X, Y = arg1.align(arg2, join="outer") + X = X + 0 * Y + Y = Y + 0 * X + + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore", RuntimeWarning) + res_columns = arg1.columns.union(arg2.columns) + for col in res_columns: + if col in X and col in Y: + results[col] = f(X[col], Y[col]) + return DataFrame(results, index=X.index, columns=res_columns) + elif pairwise is True: + results = defaultdict(dict) + for i, k1 in enumerate(arg1.columns): + for j, k2 in enumerate(arg2.columns): + if j < i and arg2 is arg1: + # Symmetric case + results[i][j] = results[j][i] + else: + results[i][j] = f( + *_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]) + ) + + from pandas import concat + + result_index = arg1.index.union(arg2.index) + if len(result_index): + + # construct result frame + result = concat( + [ + concat( + [results[i][j] for j, c in enumerate(arg2.columns)], + ignore_index=True, + ) + for i, c in enumerate(arg1.columns) + ], + ignore_index=True, + axis=1, + ) + result.columns = arg1.columns + + # set the index and reorder + if arg2.columns.nlevels > 1: + result.index = MultiIndex.from_product( + arg2.columns.levels + [result_index] + ) + result = result.reorder_levels([2, 0, 1]).sort_index() + else: + result.index = MultiIndex.from_product( + [range(len(arg2.columns)), range(len(result_index))] + ) + result = result.swaplevel(1, 0).sort_index() + result.index = MultiIndex.from_product( + [result_index] + [arg2.columns] + ) + else: + + # empty result + result = DataFrame( + index=MultiIndex( + levels=[arg1.index, arg2.columns], codes=[[], []] + ), + columns=arg2.columns, + dtype="float64", + ) + + # reset our index names to arg1 names + # reset our column names to arg2 names + # careful not to mutate the original names + result.columns = result.columns.set_names(arg1.columns.names) + result.index = result.index.set_names( + result_index.names + arg2.columns.names + ) + + return result + + else: + raise ValueError("'pairwise' is not True/False") + else: + results = { + i: 
f(*_prep_binary(arg1.iloc[:, i], arg2)) + for i, col in enumerate(arg1.columns) + } + return dataframe_from_int_dict(results, arg1) + + else: + return _flex_binary_moment(arg2, arg1, f) + + +def _get_center_of_mass(comass, span, halflife, alpha): + valid_count = com.count_not_none(comass, span, halflife, alpha) + if valid_count > 1: + raise ValueError("comass, span, halflife, and alpha are mutually exclusive") + + # Convert to center of mass; domain checks ensure 0 < alpha <= 1 + if comass is not None: + if comass < 0: + raise ValueError("comass must satisfy: comass >= 0") + elif span is not None: + if span < 1: + raise ValueError("span must satisfy: span >= 1") + comass = (span - 1) / 2.0 + elif halflife is not None: + if halflife <= 0: + raise ValueError("halflife must satisfy: halflife > 0") + decay = 1 - np.exp(np.log(0.5) / halflife) + comass = 1 / decay - 1 + elif alpha is not None: + if alpha <= 0 or alpha > 1: + raise ValueError("alpha must satisfy: 0 < alpha <= 1") + comass = (1.0 - alpha) / alpha + else: + raise ValueError("Must pass one of comass, span, halflife, or alpha") + + return float(comass) + + +def _offset(window, center): + if not is_integer(window): + window = len(window) + offset = (window - 1) / 2.0 if center else 0 + try: + return int(offset) + except TypeError: + return offset.astype(int) + + +def _require_min_periods(p): + def _check_func(minp, window): + if minp is None: + return window + else: + return max(p, minp) + + return _check_func + + +def _use_window(minp, window): + if minp is None: + return window + else: + return minp + + +def _zsqrt(x): + with np.errstate(all="ignore"): + result = np.sqrt(x) + mask = x < 0 + + if isinstance(x, ABCDataFrame): + if mask.values.any(): + result[mask] = 0 + else: + if mask.any(): + result[mask] = 0 + + return result + + +def _prep_binary(arg1, arg2): + if not isinstance(arg2, type(arg1)): + raise Exception("Input arrays must be of the same type!") + + # mask out values, this also makes a common index... + X = arg1 + 0 * arg2 + Y = arg2 + 0 * arg1 + + return X, Y diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py new file mode 100644 index 0000000000000..40e6c679ba72d --- /dev/null +++ b/pandas/core/window/ewm.py @@ -0,0 +1,388 @@ +from textwrap import dedent + +import numpy as np + +import pandas._libs.window as libwindow +from pandas.compat.numpy import function as nv +from pandas.util._decorators import Appender, Substitution + +from pandas.core.dtypes.generic import ABCDataFrame + +from pandas.core.base import DataError +from pandas.core.window.common import _doc_template, _get_center_of_mass, _shared_docs +from pandas.core.window.rolling import _flex_binary_moment, _Rolling, _zsqrt + +_bias_template = """ + Parameters + ---------- + bias : bool, default False + Use a standard estimation bias correction. + *args, **kwargs + Arguments and keyword arguments to be passed into func. +""" + +_pairwise_template = """ + Parameters + ---------- + other : Series, DataFrame, or ndarray, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndex DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + bias : bool, default False + Use a standard estimation bias correction. 
+    **kwargs
+        Keyword arguments to be passed into func.
+"""
+
+
+class EWM(_Rolling):
+    r"""
+    Provide exponential weighted functions.
+
+    Parameters
+    ----------
+    com : float, optional
+        Specify decay in terms of center of mass,
+        :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`.
+    span : float, optional
+        Specify decay in terms of span,
+        :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`.
+    halflife : float, optional
+        Specify decay in terms of half-life,
+        :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`.
+    alpha : float, optional
+        Specify smoothing factor :math:`\alpha` directly,
+        :math:`0 < \alpha \leq 1`.
+    min_periods : int, default 0
+        Minimum number of observations in window required to have a value
+        (otherwise result is NA).
+    adjust : bool, default True
+        Divide by decaying adjustment factor in beginning periods to account
+        for imbalance in relative weightings
+        (viewing EWMA as a moving average).
+    ignore_na : bool, default False
+        Ignore missing values when calculating weights;
+        specify True to reproduce pre-0.15.0 behavior.
+    axis : {0 or 'index', 1 or 'columns'}, default 0
+        The axis to use. The value 0 identifies the rows, and 1
+        identifies the columns.
+
+    Returns
+    -------
+    DataFrame
+        A Window sub-classed for the particular operation.
+
+    See Also
+    --------
+    rolling : Provides rolling window calculations.
+    expanding : Provides expanding transformations.
+
+    Notes
+    -----
+    Exactly one of center of mass, span, half-life, and alpha must be provided.
+    Allowed values and relationship between the parameters are specified in the
+    parameter descriptions above; see the link at the end of this section for
+    a detailed explanation.
+
+    When adjust is True (default), weighted averages are calculated using
+    weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
+
+    When adjust is False, weighted averages are calculated recursively as:
+       weighted_average[0] = arg[0];
+       weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
+
+    When ignore_na is False (default), weights are based on absolute positions.
+    For example, the weights of x and y used in calculating the final weighted
+    average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
+    (1-alpha)**2 and alpha (if adjust is False).
+
+    When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
+    on relative positions. For example, the weights of x and y used in
+    calculating the final weighted average of [x, None, y] are 1-alpha and 1
+    (if adjust is True), and 1-alpha and alpha (if adjust is False).
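    A worked check of the two recurrences (an illustrative sketch; the values
    below are for alpha=0.5 on [1.0, 2.0] and were verified by hand: the
    adjusted value is (0.5*1 + 1*2)/(0.5 + 1), the unadjusted one is
    0.5*1 + 0.5*2, and the last line uses com = (span - 1) / 2):

        >>> import pandas as pd
        >>> s = pd.Series([1.0, 2.0])
        >>> s.ewm(alpha=0.5, adjust=True).mean().round(6).tolist()
        [1.0, 1.666667]
        >>> s.ewm(alpha=0.5, adjust=False).mean().tolist()
        [1.0, 1.5]
        >>> s.ewm(span=3).mean().equals(s.ewm(com=1.0).mean())
        True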
+ + More details can be found at + http://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows + + Examples + -------- + + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + >>> df.ewm(com=0.5).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + """ + _attributes = ["com", "min_periods", "adjust", "ignore_na", "axis"] + + def __init__( + self, + obj, + com=None, + span=None, + halflife=None, + alpha=None, + min_periods=0, + adjust=True, + ignore_na=False, + axis=0, + ): + self.obj = obj + self.com = _get_center_of_mass(com, span, halflife, alpha) + self.min_periods = min_periods + self.adjust = adjust + self.ignore_na = ignore_na + self.axis = axis + self.on = None + + @property + def _constructor(self): + return EWM + + _agg_see_also_doc = dedent( + """ + See Also + -------- + pandas.DataFrame.rolling.aggregate + """ + ) + + _agg_examples_doc = dedent( + """ + Examples + -------- + + >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) + >>> df + A B C + 0 -2.385977 -0.102758 0.438822 + 1 -1.004295 0.905829 -0.954544 + 2 0.735167 -0.165272 -1.619346 + 3 -0.702657 -1.340923 -0.706334 + 4 -0.246845 0.211596 -0.901819 + 5 2.463718 3.157577 -1.380906 + 6 -1.142255 2.340594 -0.039875 + 7 1.396598 -1.647453 1.677227 + 8 -0.543425 1.761277 -0.220481 + 9 -0.640505 0.289374 -1.550670 + + >>> df.ewm(alpha=0.5).mean() + A B C + 0 -2.385977 -0.102758 0.438822 + 1 -1.464856 0.569633 -0.490089 + 2 -0.207700 0.149687 -1.135379 + 3 -0.471677 -0.645305 -0.906555 + 4 -0.355635 -0.203033 -0.904111 + 5 1.076417 1.503943 -1.146293 + 6 -0.041654 1.925562 -0.588728 + 7 0.680292 0.132049 0.548693 + 8 0.067236 0.948257 0.163353 + 9 -0.286980 0.618493 -0.694496 + """ + ) + + @Substitution( + see_also=_agg_see_also_doc, + examples=_agg_examples_doc, + versionadded="", + klass="Series/Dataframe", + axis="", + ) + @Appender(_shared_docs["aggregate"]) + def aggregate(self, func, *args, **kwargs): + return super().aggregate(func, *args, **kwargs) + + agg = aggregate + + def _apply(self, func, **kwargs): + """ + Rolling statistical measure using supplied function. Designed to be + used with passed-in Cython array-based functions. + + Parameters + ---------- + func : str/callable to apply + + Returns + ------- + y : same type as input argument + """ + blocks, obj = self._create_blocks() + block_list = list(blocks) + + results = [] + exclude = [] + for i, b in enumerate(blocks): + try: + values = self._prep_values(b.values) + + except (TypeError, NotImplementedError): + if isinstance(obj, ABCDataFrame): + exclude.extend(b.columns) + del block_list[i] + continue + else: + raise DataError("No numeric types to aggregate") + + if values.size == 0: + results.append(values.copy()) + continue + + # if we have a string function name, wrap it + if isinstance(func, str): + cfunc = getattr(libwindow, func, None) + if cfunc is None: + raise ValueError( + "we do not support this function " + "in libwindow.{func}".format(func=func) + ) + + def func(arg): + return cfunc( + arg, + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + ) + + results.append(np.apply_along_axis(func, self.axis, values)) + + return self._wrap_results(results, block_list, obj, exclude) + + @Substitution(name="ewm") + @Appender(_doc_template) + def mean(self, *args, **kwargs): + """ + Exponential weighted moving average. 
+ + Parameters + ---------- + *args, **kwargs + Arguments and keyword arguments to be passed into func. + """ + nv.validate_window_func("mean", args, kwargs) + return self._apply("ewma", **kwargs) + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_bias_template) + def std(self, bias=False, *args, **kwargs): + """ + Exponential weighted moving stddev. + """ + nv.validate_window_func("std", args, kwargs) + return _zsqrt(self.var(bias=bias, **kwargs)) + + vol = std + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_bias_template) + def var(self, bias=False, *args, **kwargs): + """ + Exponential weighted moving variance. + """ + nv.validate_window_func("var", args, kwargs) + + def f(arg): + return libwindow.ewmcov( + arg, + arg, + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + int(bias), + ) + + return self._apply(f, **kwargs) + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_pairwise_template) + def cov(self, other=None, pairwise=None, bias=False, **kwargs): + """ + Exponential weighted sample covariance. + """ + if other is None: + other = self._selected_obj + # only default unset + pairwise = True if pairwise is None else pairwise + other = self._shallow_copy(other) + + def _get_cov(X, Y): + X = self._shallow_copy(X) + Y = self._shallow_copy(Y) + cov = libwindow.ewmcov( + X._prep_values(), + Y._prep_values(), + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + int(bias), + ) + return X._wrap_result(cov) + + return _flex_binary_moment( + self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise) + ) + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_pairwise_template) + def corr(self, other=None, pairwise=None, **kwargs): + """ + Exponential weighted sample correlation. + """ + if other is None: + other = self._selected_obj + # only default unset + pairwise = True if pairwise is None else pairwise + other = self._shallow_copy(other) + + def _get_corr(X, Y): + X = self._shallow_copy(X) + Y = self._shallow_copy(Y) + + def _cov(x, y): + return libwindow.ewmcov( + x, + y, + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + 1, + ) + + x_values = X._prep_values() + y_values = Y._prep_values() + with np.errstate(all="ignore"): + cov = _cov(x_values, y_values) + x_var = _cov(x_values, x_values) + y_var = _cov(y_values, y_values) + corr = cov / _zsqrt(x_var * y_var) + return X._wrap_result(corr) + + return _flex_binary_moment( + self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise) + ) diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py new file mode 100644 index 0000000000000..47bd8f2ec593b --- /dev/null +++ b/pandas/core/window/expanding.py @@ -0,0 +1,260 @@ +from textwrap import dedent + +from pandas.compat.numpy import function as nv +from pandas.util._decorators import Appender, Substitution + +from pandas.core.window.common import _doc_template, _GroupByMixin, _shared_docs +from pandas.core.window.rolling import _Rolling_and_Expanding + + +class Expanding(_Rolling_and_Expanding): + """ + Provide expanding transformations. + + Parameters + ---------- + min_periods : int, default 1 + Minimum number of observations in window required to have a value + (otherwise result is NA). + center : bool, default False + Set the labels at the center of the window. 
+    axis : int or str, default 0
+
+    Returns
+    -------
+    a Window sub-classed for the particular operation
+
+    See Also
+    --------
+    rolling : Provides rolling window calculations.
+    ewm : Provides exponential weighted functions.
+
+    Notes
+    -----
+    By default, the result is set to the right edge of the window. This can be
+    changed to the center of the window by setting ``center=True``.
+
+    Examples
+    --------
+
+    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+    >>> df
+       B
+    0  0.0
+    1  1.0
+    2  2.0
+    3  NaN
+    4  4.0
+
+    >>> df.expanding(2).sum()
+       B
+    0  NaN
+    1  1.0
+    2  3.0
+    3  3.0
+    4  7.0
+    """
+
+    _attributes = ["min_periods", "center", "axis"]
+
+    def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):
+        super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
+
+    @property
+    def _constructor(self):
+        return Expanding
+
+    def _get_window(self, other=None, **kwargs):
+        """
+        Get the window length over which to perform some operation.
+
+        Parameters
+        ----------
+        other : object, default None
+            The other object that is involved in the operation.
+            Such an object is involved for operations like covariance.
+
+        Returns
+        -------
+        window : int
+            The window length.
+        """
+        axis = self.obj._get_axis(self.axis)
+        length = len(axis) + (other is not None) * len(axis)
+
+        other = self.min_periods or -1
+        return max(length, other)
+
+    _agg_see_also_doc = dedent(
+        """
+    See Also
+    --------
+    DataFrame.expanding.aggregate
+    DataFrame.rolling.aggregate
+    DataFrame.aggregate
+    """
+    )
+
+    _agg_examples_doc = dedent(
+        """
+    Examples
+    --------
+
+    >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
+    >>> df
+              A         B         C
+    0 -2.385977 -0.102758  0.438822
+    1 -1.004295  0.905829 -0.954544
+    2  0.735167 -0.165272 -1.619346
+    3 -0.702657 -1.340923 -0.706334
+    4 -0.246845  0.211596 -0.901819
+    5  2.463718  3.157577 -1.380906
+    6 -1.142255  2.340594 -0.039875
+    7  1.396598 -1.647453  1.677227
+    8 -0.543425  1.761277 -0.220481
+    9 -0.640505  0.289374 -1.550670
+
+    >>> df.ewm(alpha=0.5).mean()
+              A         B         C
+    0 -2.385977 -0.102758  0.438822
+    1 -1.464856  0.569633 -0.490089
+    2 -0.207700  0.149687 -1.135379
+    3 -0.471677 -0.645305 -0.906555
+    4 -0.355635 -0.203033 -0.904111
+    5  1.076417  1.503943 -1.146293
+    6 -0.041654  1.925562 -0.588728
+    7  0.680292  0.132049  0.548693
+    8  0.067236  0.948257  0.163353
+    9 -0.286980  0.618493 -0.694496
+    """
+    )
+
+    @Substitution(
+        see_also=_agg_see_also_doc,
+        examples=_agg_examples_doc,
+        versionadded="",
+        klass="Series/Dataframe",
+        axis="",
+    )
+    @Appender(_shared_docs["aggregate"])
+    def aggregate(self, func, *args, **kwargs):
+        return super().aggregate(func, *args, **kwargs)
+
+    agg = aggregate
+
+    @Substitution(name="expanding")
+    @Appender(_shared_docs["count"])
+    def count(self, **kwargs):
+        return super().count(**kwargs)
+
+    @Substitution(name="expanding")
+    @Appender(_shared_docs["apply"])
+    def apply(self, func, raw=None, args=(), kwargs={}):
+        return super().apply(func, raw=raw, args=args, kwargs=kwargs)
+
+    @Substitution(name="expanding")
+    @Appender(_shared_docs["sum"])
+    def sum(self, *args, **kwargs):
+        nv.validate_expanding_func("sum", args, kwargs)
+        return super().sum(*args, **kwargs)
+
+    @Substitution(name="expanding")
+    @Appender(_doc_template)
+    @Appender(_shared_docs["max"])
+    def max(self, *args, **kwargs):
+        nv.validate_expanding_func("max", args, kwargs)
+        return super().max(*args, **kwargs)
+
+    @Substitution(name="expanding")
+    @Appender(_shared_docs["min"])
+    def min(self, *args, **kwargs):
+        nv.validate_expanding_func("min", args, kwargs)
+        return super().min(*args, **kwargs)
+
+    @Substitution(name="expanding")
+    @Appender(_shared_docs["mean"])
+    def mean(self, *args, **kwargs):
+        nv.validate_expanding_func("mean", args, kwargs)
+        return super().mean(*args, **kwargs)
+
+    @Substitution(name="expanding")
+    @Appender(_shared_docs["median"])
+    def median(self, **kwargs):
+        return super().median(**kwargs)
+
+    @Substitution(name="expanding")
+    @Appender(_shared_docs["std"])
+    def std(self, ddof=1, *args, **kwargs):
+        nv.validate_expanding_func("std", args, kwargs)
+        return super().std(ddof=ddof, **kwargs)
+
+    @Substitution(name="expanding")
+    @Appender(_shared_docs["var"])
+    def var(self, ddof=1, *args, **kwargs):
+        nv.validate_expanding_func("var", args, kwargs)
+        return super().var(ddof=ddof, **kwargs)
+
+    @Substitution(name="expanding")
+    @Appender(_doc_template)
+    @Appender(_shared_docs["skew"])
+    def skew(self, **kwargs):
+        return super().skew(**kwargs)
+
+    _agg_doc = dedent(
+        """
+    Examples
+    --------
+
+    The example below will show an expanding calculation with a window size of
+    four matching the equivalent function call using `scipy.stats`.
+
+    >>> arr = [1, 2, 3, 4, 999]
+    >>> import scipy.stats
+    >>> fmt = "{0:.6f}"  # limit the printed precision to 6 digits
+    >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
+    -1.200000
+    >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False)))
+    4.999874
+    >>> s = pd.Series(arr)
+    >>> s.expanding(4).kurt()
+    0         NaN
+    1         NaN
+    2         NaN
+    3   -1.200000
+    4    4.999874
+    dtype: float64
+    """
+    )
+
+    @Appender(_agg_doc)
+    @Substitution(name="expanding")
+    @Appender(_shared_docs["kurt"])
+    def kurt(self, **kwargs):
+        return super().kurt(**kwargs)
+
+    @Substitution(name="expanding")
+    @Appender(_shared_docs["quantile"])
+    def quantile(self, quantile, interpolation="linear", **kwargs):
+        return super().quantile(
+            quantile=quantile, interpolation=interpolation, **kwargs
+        )
+
+    @Substitution(name="expanding")
+    @Appender(_doc_template)
+    @Appender(_shared_docs["cov"])
+    def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
+        return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
+
+    @Substitution(name="expanding")
+    @Appender(_shared_docs["corr"])
+    def corr(self, other=None, pairwise=None, **kwargs):
+        return super().corr(other=other, pairwise=pairwise, **kwargs)
+
+
+class ExpandingGroupby(_GroupByMixin, Expanding):
+    """
+    Provide an expanding groupby implementation.
+    """
+
+    @property
+    def _constructor(self):
+        return Expanding
diff --git a/pandas/core/window.py b/pandas/core/window/rolling.py
similarity index 65%
rename from pandas/core/window.py
rename to pandas/core/window/rolling.py
index 4b6a1cf2e9a04..a7e122fa3528f 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window/rolling.py
@@ -2,7 +2,6 @@
 Provide a generic structure to support window functions,
 similar to how we have a Groupby object.
 """
-from collections import defaultdict
 from datetime import timedelta
 from textwrap import dedent
 from typing import Callable, List, Optional, Set, Union
@@ -38,22 +37,17 @@
 from pandas._typing import Axis, FrameOrSeries, Scalar
 from pandas.core.base import DataError, PandasObject, SelectionMixin
 import pandas.core.common as com
-from pandas.core.generic import _shared_docs
-from pandas.core.groupby.base import GroupByMixin
-from pandas.core.index import Index, MultiIndex, ensure_index
-
-_shared_docs = dict(**_shared_docs)
-_doc_template = """
-    Returns
-    -------
-    Series or DataFrame
-        Return type is determined by the caller.
- - See Also - -------- - Series.%(name)s : Series %(name)s. - DataFrame.%(name)s : DataFrame %(name)s. -""" +from pandas.core.index import Index, ensure_index +from pandas.core.window.common import ( + _doc_template, + _flex_binary_moment, + _GroupByMixin, + _offset, + _require_min_periods, + _shared_docs, + _use_window, + _zsqrt, +) class _Window(PandasObject, SelectionMixin): @@ -121,6 +115,8 @@ def validate(self): "neither", ]: raise ValueError("closed must be 'right', 'left', 'both' or 'neither'") + if not isinstance(self.obj, (ABCSeries, ABCDataFrame)): + raise TypeError("invalid type: {}".format(type(self))) def _create_blocks(self): """ @@ -246,8 +242,10 @@ def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray: except (ValueError, TypeError): raise TypeError("cannot handle this type -> {0}".format(values.dtype)) - # Always convert inf to nan - values[np.isinf(values)] = np.NaN + # Convert inf to nan for C funcs + inf = np.isinf(values) + if inf.any(): + values = np.where(inf, np.nan, values) return values @@ -265,6 +263,8 @@ def _wrap_result(self, result, block=None, obj=None): # coerce if necessary if block is not None: if is_timedelta64_dtype(block.values.dtype): + # TODO: do we know what result.dtype is at this point? + # i.e. can we just do an astype? from pandas import to_timedelta result = to_timedelta(result.ravel(), unit="ns").values.reshape( @@ -901,12 +901,12 @@ def func(arg, window, min_periods=None, closed=None): axis="", ) @Appender(_shared_docs["aggregate"]) - def aggregate(self, arg, *args, **kwargs): - result, how = self._aggregate(arg, *args, **kwargs) + def aggregate(self, func, *args, **kwargs): + result, how = self._aggregate(func, *args, **kwargs) if result is None: # these must apply directly - result = arg(self) + result = func(self) return result @@ -925,44 +925,6 @@ def mean(self, *args, **kwargs): return self._apply("roll_weighted_mean", **kwargs) -class _GroupByMixin(GroupByMixin): - """ - Provide the groupby facilities. - """ - - def __init__(self, obj, *args, **kwargs): - parent = kwargs.pop("parent", None) # noqa - groupby = kwargs.pop("groupby", None) - if groupby is None: - groupby, obj = obj, obj.obj - self._groupby = groupby - self._groupby.mutated = True - self._groupby.grouper.mutated = True - super().__init__(obj, *args, **kwargs) - - count = GroupByMixin._dispatch("count") - corr = GroupByMixin._dispatch("corr", other=None, pairwise=None) - cov = GroupByMixin._dispatch("cov", other=None, pairwise=None) - - def _apply( - self, func, name=None, window=None, center=None, check_minp=None, **kwargs - ): - """ - Dispatch to apply; we are stripping all of the _apply kwargs and - performing the original function call on the grouped object. 
- """ - - def f(x, name=name, *args): - x = self._shallow_copy(x) - - if isinstance(name, str): - return getattr(x, name)(*args, **kwargs) - - return x.apply(name, *args, **kwargs) - - return self._groupby.apply(f) - - class _Rolling(_Window): @property def _constructor(self): @@ -1826,8 +1788,8 @@ def _validate_freq(self): axis="", ) @Appender(_shared_docs["aggregate"]) - def aggregate(self, arg, *args, **kwargs): - return super().aggregate(arg, *args, **kwargs) + def aggregate(self, func, *args, **kwargs): + return super().aggregate(func, *args, **kwargs) agg = aggregate @@ -1945,6 +1907,9 @@ def corr(self, other=None, pairwise=None, **kwargs): return super().corr(other=other, pairwise=pairwise, **kwargs) +Rolling.__doc__ = Window.__doc__ + + class RollingGroupby(_GroupByMixin, Rolling): """ Provide a rolling groupby implementation. @@ -1972,883 +1937,3 @@ def _validate_monotonic(self): level. """ pass - - -class Expanding(_Rolling_and_Expanding): - """ - Provide expanding transformations. - - Parameters - ---------- - min_periods : int, default 1 - Minimum number of observations in window required to have a value - (otherwise result is NA). - center : bool, default False - Set the labels at the center of the window. - axis : int or str, default 0 - - Returns - ------- - a Window sub-classed for the particular operation - - See Also - -------- - rolling : Provides rolling window calculations. - ewm : Provides exponential weighted functions. - - Notes - ----- - By default, the result is set to the right edge of the window. This can be - changed to the center of the window by setting ``center=True``. - - Examples - -------- - - >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) - B - 0 0.0 - 1 1.0 - 2 2.0 - 3 NaN - 4 4.0 - - >>> df.expanding(2).sum() - B - 0 NaN - 1 1.0 - 2 3.0 - 3 3.0 - 4 7.0 - """ - - _attributes = ["min_periods", "center", "axis"] - - def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs): - super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis) - - @property - def _constructor(self): - return Expanding - - def _get_window(self, other=None, **kwargs): - """ - Get the window length over which to perform some operation. - - Parameters - ---------- - other : object, default None - The other object that is involved in the operation. - Such an object is involved for operations like covariance. - - Returns - ------- - window : int - The window length. 
- """ - axis = self.obj._get_axis(self.axis) - length = len(axis) + (other is not None) * len(axis) - - other = self.min_periods or -1 - return max(length, other) - - _agg_see_also_doc = dedent( - """ - See Also - -------- - DataFrame.expanding.aggregate - DataFrame.rolling.aggregate - DataFrame.aggregate - """ - ) - - _agg_examples_doc = dedent( - """ - Examples - -------- - - >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) - >>> df - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.004295 0.905829 -0.954544 - 2 0.735167 -0.165272 -1.619346 - 3 -0.702657 -1.340923 -0.706334 - 4 -0.246845 0.211596 -0.901819 - 5 2.463718 3.157577 -1.380906 - 6 -1.142255 2.340594 -0.039875 - 7 1.396598 -1.647453 1.677227 - 8 -0.543425 1.761277 -0.220481 - 9 -0.640505 0.289374 -1.550670 - - >>> df.ewm(alpha=0.5).mean() - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.464856 0.569633 -0.490089 - 2 -0.207700 0.149687 -1.135379 - 3 -0.471677 -0.645305 -0.906555 - 4 -0.355635 -0.203033 -0.904111 - 5 1.076417 1.503943 -1.146293 - 6 -0.041654 1.925562 -0.588728 - 7 0.680292 0.132049 0.548693 - 8 0.067236 0.948257 0.163353 - 9 -0.286980 0.618493 -0.694496 - """ - ) - - @Substitution( - see_also=_agg_see_also_doc, - examples=_agg_examples_doc, - versionadded="", - klass="Series/Dataframe", - axis="", - ) - @Appender(_shared_docs["aggregate"]) - def aggregate(self, arg, *args, **kwargs): - return super().aggregate(arg, *args, **kwargs) - - agg = aggregate - - @Substitution(name="expanding") - @Appender(_shared_docs["count"]) - def count(self, **kwargs): - return super().count(**kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["apply"]) - def apply(self, func, raw=None, args=(), kwargs={}): - return super().apply(func, raw=raw, args=args, kwargs=kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["sum"]) - def sum(self, *args, **kwargs): - nv.validate_expanding_func("sum", args, kwargs) - return super().sum(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_doc_template) - @Appender(_shared_docs["max"]) - def max(self, *args, **kwargs): - nv.validate_expanding_func("max", args, kwargs) - return super().max(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["min"]) - def min(self, *args, **kwargs): - nv.validate_expanding_func("min", args, kwargs) - return super().min(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["mean"]) - def mean(self, *args, **kwargs): - nv.validate_expanding_func("mean", args, kwargs) - return super().mean(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["median"]) - def median(self, **kwargs): - return super().median(**kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["std"]) - def std(self, ddof=1, *args, **kwargs): - nv.validate_expanding_func("std", args, kwargs) - return super().std(ddof=ddof, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["var"]) - def var(self, ddof=1, *args, **kwargs): - nv.validate_expanding_func("var", args, kwargs) - return super().var(ddof=ddof, **kwargs) - - @Substitution(name="expanding") - @Appender(_doc_template) - @Appender(_shared_docs["skew"]) - def skew(self, **kwargs): - return super().skew(**kwargs) - - _agg_doc = dedent( - """ - Examples - -------- - - The example below will show an expanding calculation with a window size of - four matching the equivalent function call using `scipy.stats`. 
- - >>> arr = [1, 2, 3, 4, 999] - >>> import scipy.stats - >>> fmt = "{0:.6f}" # limit the printed precision to 6 digits - >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False))) - -1.200000 - >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False))) - 4.999874 - >>> s = pd.Series(arr) - >>> s.expanding(4).kurt() - 0 NaN - 1 NaN - 2 NaN - 3 -1.200000 - 4 4.999874 - dtype: float64 - """ - ) - - @Appender(_agg_doc) - @Substitution(name="expanding") - @Appender(_shared_docs["kurt"]) - def kurt(self, **kwargs): - return super().kurt(**kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["quantile"]) - def quantile(self, quantile, interpolation="linear", **kwargs): - return super().quantile( - quantile=quantile, interpolation=interpolation, **kwargs - ) - - @Substitution(name="expanding") - @Appender(_doc_template) - @Appender(_shared_docs["cov"]) - def cov(self, other=None, pairwise=None, ddof=1, **kwargs): - return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["corr"]) - def corr(self, other=None, pairwise=None, **kwargs): - return super().corr(other=other, pairwise=pairwise, **kwargs) - - -class ExpandingGroupby(_GroupByMixin, Expanding): - """ - Provide a expanding groupby implementation. - """ - - @property - def _constructor(self): - return Expanding - - -_bias_template = """ - Parameters - ---------- - bias : bool, default False - Use a standard estimation bias correction. - *args, **kwargs - Arguments and keyword arguments to be passed into func. -""" - -_pairwise_template = """ - Parameters - ---------- - other : Series, DataFrame, or ndarray, optional - If not supplied then will default to self and produce pairwise - output. - pairwise : bool, default None - If False then only matching columns between self and other will be - used and the output will be a DataFrame. - If True then all pairwise combinations will be calculated and the - output will be a MultiIndex DataFrame in the case of DataFrame - inputs. In the case of missing elements, only complete pairwise - observations will be used. - bias : bool, default False - Use a standard estimation bias correction. - **kwargs - Keyword arguments to be passed into func. -""" - - -class EWM(_Rolling): - r""" - Provide exponential weighted functions. - - Parameters - ---------- - com : float, optional - Specify decay in terms of center of mass, - :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`. - span : float, optional - Specify decay in terms of span, - :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`. - halflife : float, optional - Specify decay in terms of half-life, - :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{for} halflife > 0`. - alpha : float, optional - Specify smoothing factor :math:`\alpha` directly, - :math:`0 < \alpha \leq 1`. - min_periods : int, default 0 - Minimum number of observations in window required to have a value - (otherwise result is NA). - adjust : bool, default True - Divide by decaying adjustment factor in beginning periods to account - for imbalance in relative weightings - (viewing EWMA as a moving average). - ignore_na : bool, default False - Ignore missing values when calculating weights; - specify True to reproduce pre-0.15.0 behavior. - axis : {0 or 'index', 1 or 'columns'}, default 0 - The axis to use. The value 0 identifies the rows, and 1 - identifies the columns. - - Returns - ------- - DataFrame - A Window sub-classed for the particular operation. 
- - See Also - -------- - rolling : Provides rolling window calculations. - expanding : Provides expanding transformations. - - Notes - ----- - Exactly one of center of mass, span, half-life, and alpha must be provided. - Allowed values and relationship between the parameters are specified in the - parameter descriptions above; see the link at the end of this section for - a detailed explanation. - - When adjust is True (default), weighted averages are calculated using - weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1. - - When adjust is False, weighted averages are calculated recursively as: - weighted_average[0] = arg[0]; - weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i]. - - When ignore_na is False (default), weights are based on absolute positions. - For example, the weights of x and y used in calculating the final weighted - average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and - (1-alpha)**2 and alpha (if adjust is False). - - When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based - on relative positions. For example, the weights of x and y used in - calculating the final weighted average of [x, None, y] are 1-alpha and 1 - (if adjust is True), and 1-alpha and alpha (if adjust is False). - - More details can be found at - http://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows - - Examples - -------- - - >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) - >>> df - B - 0 0.0 - 1 1.0 - 2 2.0 - 3 NaN - 4 4.0 - - >>> df.ewm(com=0.5).mean() - B - 0 0.000000 - 1 0.750000 - 2 1.615385 - 3 1.615385 - 4 3.670213 - """ - _attributes = ["com", "min_periods", "adjust", "ignore_na", "axis"] - - def __init__( - self, - obj, - com=None, - span=None, - halflife=None, - alpha=None, - min_periods=0, - adjust=True, - ignore_na=False, - axis=0, - ): - self.obj = obj - self.com = _get_center_of_mass(com, span, halflife, alpha) - self.min_periods = min_periods - self.adjust = adjust - self.ignore_na = ignore_na - self.axis = axis - self.on = None - - @property - def _constructor(self): - return EWM - - _agg_see_also_doc = dedent( - """ - See Also - -------- - pandas.DataFrame.rolling.aggregate - """ - ) - - _agg_examples_doc = dedent( - """ - Examples - -------- - - >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) - >>> df - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.004295 0.905829 -0.954544 - 2 0.735167 -0.165272 -1.619346 - 3 -0.702657 -1.340923 -0.706334 - 4 -0.246845 0.211596 -0.901819 - 5 2.463718 3.157577 -1.380906 - 6 -1.142255 2.340594 -0.039875 - 7 1.396598 -1.647453 1.677227 - 8 -0.543425 1.761277 -0.220481 - 9 -0.640505 0.289374 -1.550670 - - >>> df.ewm(alpha=0.5).mean() - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.464856 0.569633 -0.490089 - 2 -0.207700 0.149687 -1.135379 - 3 -0.471677 -0.645305 -0.906555 - 4 -0.355635 -0.203033 -0.904111 - 5 1.076417 1.503943 -1.146293 - 6 -0.041654 1.925562 -0.588728 - 7 0.680292 0.132049 0.548693 - 8 0.067236 0.948257 0.163353 - 9 -0.286980 0.618493 -0.694496 - """ - ) - - @Substitution( - see_also=_agg_see_also_doc, - examples=_agg_examples_doc, - versionadded="", - klass="Series/Dataframe", - axis="", - ) - @Appender(_shared_docs["aggregate"]) - def aggregate(self, arg, *args, **kwargs): - return super().aggregate(arg, *args, **kwargs) - - agg = aggregate - - def _apply(self, func, **kwargs): - """ - Rolling statistical measure using supplied function. 
Designed to be - used with passed-in Cython array-based functions. - - Parameters - ---------- - func : str/callable to apply - - Returns - ------- - y : same type as input argument - """ - blocks, obj = self._create_blocks() - block_list = list(blocks) - - results = [] - exclude = [] - for i, b in enumerate(blocks): - try: - values = self._prep_values(b.values) - - except (TypeError, NotImplementedError): - if isinstance(obj, ABCDataFrame): - exclude.extend(b.columns) - del block_list[i] - continue - else: - raise DataError("No numeric types to aggregate") - - if values.size == 0: - results.append(values.copy()) - continue - - # if we have a string function name, wrap it - if isinstance(func, str): - cfunc = getattr(libwindow, func, None) - if cfunc is None: - raise ValueError( - "we do not support this function " - "in libwindow.{func}".format(func=func) - ) - - def func(arg): - return cfunc( - arg, - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - ) - - results.append(np.apply_along_axis(func, self.axis, values)) - - return self._wrap_results(results, block_list, obj, exclude) - - @Substitution(name="ewm") - @Appender(_doc_template) - def mean(self, *args, **kwargs): - """ - Exponential weighted moving average. - - Parameters - ---------- - *args, **kwargs - Arguments and keyword arguments to be passed into func. - """ - nv.validate_window_func("mean", args, kwargs) - return self._apply("ewma", **kwargs) - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_bias_template) - def std(self, bias=False, *args, **kwargs): - """ - Exponential weighted moving stddev. - """ - nv.validate_window_func("std", args, kwargs) - return _zsqrt(self.var(bias=bias, **kwargs)) - - vol = std - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_bias_template) - def var(self, bias=False, *args, **kwargs): - """ - Exponential weighted moving variance. - """ - nv.validate_window_func("var", args, kwargs) - - def f(arg): - return libwindow.ewmcov( - arg, - arg, - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - int(bias), - ) - - return self._apply(f, **kwargs) - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_pairwise_template) - def cov(self, other=None, pairwise=None, bias=False, **kwargs): - """ - Exponential weighted sample covariance. - """ - if other is None: - other = self._selected_obj - # only default unset - pairwise = True if pairwise is None else pairwise - other = self._shallow_copy(other) - - def _get_cov(X, Y): - X = self._shallow_copy(X) - Y = self._shallow_copy(Y) - cov = libwindow.ewmcov( - X._prep_values(), - Y._prep_values(), - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - int(bias), - ) - return X._wrap_result(cov) - - return _flex_binary_moment( - self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise) - ) - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_pairwise_template) - def corr(self, other=None, pairwise=None, **kwargs): - """ - Exponential weighted sample correlation. 
- """ - if other is None: - other = self._selected_obj - # only default unset - pairwise = True if pairwise is None else pairwise - other = self._shallow_copy(other) - - def _get_corr(X, Y): - X = self._shallow_copy(X) - Y = self._shallow_copy(Y) - - def _cov(x, y): - return libwindow.ewmcov( - x, - y, - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - 1, - ) - - x_values = X._prep_values() - y_values = Y._prep_values() - with np.errstate(all="ignore"): - cov = _cov(x_values, y_values) - x_var = _cov(x_values, x_values) - y_var = _cov(y_values, y_values) - corr = cov / _zsqrt(x_var * y_var) - return X._wrap_result(corr) - - return _flex_binary_moment( - self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise) - ) - - -# Helper Funcs - - -def _flex_binary_moment(arg1, arg2, f, pairwise=False): - - if not ( - isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) - and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame)) - ): - raise TypeError( - "arguments to moment function must be of type " - "np.ndarray/Series/DataFrame" - ) - - if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance( - arg2, (np.ndarray, ABCSeries) - ): - X, Y = _prep_binary(arg1, arg2) - return f(X, Y) - - elif isinstance(arg1, ABCDataFrame): - from pandas import DataFrame - - def dataframe_from_int_dict(data, frame_template): - result = DataFrame(data, index=frame_template.index) - if len(result.columns) > 0: - result.columns = frame_template.columns[result.columns] - return result - - results = {} - if isinstance(arg2, ABCDataFrame): - if pairwise is False: - if arg1 is arg2: - # special case in order to handle duplicate column names - for i, col in enumerate(arg1.columns): - results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) - return dataframe_from_int_dict(results, arg1) - else: - if not arg1.columns.is_unique: - raise ValueError("'arg1' columns are not unique") - if not arg2.columns.is_unique: - raise ValueError("'arg2' columns are not unique") - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - X, Y = arg1.align(arg2, join="outer") - X = X + 0 * Y - Y = Y + 0 * X - - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - res_columns = arg1.columns.union(arg2.columns) - for col in res_columns: - if col in X and col in Y: - results[col] = f(X[col], Y[col]) - return DataFrame(results, index=X.index, columns=res_columns) - elif pairwise is True: - results = defaultdict(dict) - for i, k1 in enumerate(arg1.columns): - for j, k2 in enumerate(arg2.columns): - if j < i and arg2 is arg1: - # Symmetric case - results[i][j] = results[j][i] - else: - results[i][j] = f( - *_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]) - ) - - from pandas import concat - - result_index = arg1.index.union(arg2.index) - if len(result_index): - - # construct result frame - result = concat( - [ - concat( - [results[i][j] for j, c in enumerate(arg2.columns)], - ignore_index=True, - ) - for i, c in enumerate(arg1.columns) - ], - ignore_index=True, - axis=1, - ) - result.columns = arg1.columns - - # set the index and reorder - if arg2.columns.nlevels > 1: - result.index = MultiIndex.from_product( - arg2.columns.levels + [result_index] - ) - result = result.reorder_levels([2, 0, 1]).sort_index() - else: - result.index = MultiIndex.from_product( - [range(len(arg2.columns)), range(len(result_index))] - ) - result = result.swaplevel(1, 0).sort_index() - result.index = MultiIndex.from_product( - [result_index] + 
[arg2.columns] - ) - else: - - # empty result - result = DataFrame( - index=MultiIndex( - levels=[arg1.index, arg2.columns], codes=[[], []] - ), - columns=arg2.columns, - dtype="float64", - ) - - # reset our index names to arg1 names - # reset our column names to arg2 names - # careful not to mutate the original names - result.columns = result.columns.set_names(arg1.columns.names) - result.index = result.index.set_names( - result_index.names + arg2.columns.names - ) - - return result - - else: - raise ValueError("'pairwise' is not True/False") - else: - results = { - i: f(*_prep_binary(arg1.iloc[:, i], arg2)) - for i, col in enumerate(arg1.columns) - } - return dataframe_from_int_dict(results, arg1) - - else: - return _flex_binary_moment(arg2, arg1, f) - - -def _get_center_of_mass(comass, span, halflife, alpha): - valid_count = com.count_not_none(comass, span, halflife, alpha) - if valid_count > 1: - raise ValueError("comass, span, halflife, and alpha are mutually exclusive") - - # Convert to center of mass; domain checks ensure 0 < alpha <= 1 - if comass is not None: - if comass < 0: - raise ValueError("comass must satisfy: comass >= 0") - elif span is not None: - if span < 1: - raise ValueError("span must satisfy: span >= 1") - comass = (span - 1) / 2.0 - elif halflife is not None: - if halflife <= 0: - raise ValueError("halflife must satisfy: halflife > 0") - decay = 1 - np.exp(np.log(0.5) / halflife) - comass = 1 / decay - 1 - elif alpha is not None: - if alpha <= 0 or alpha > 1: - raise ValueError("alpha must satisfy: 0 < alpha <= 1") - comass = (1.0 - alpha) / alpha - else: - raise ValueError("Must pass one of comass, span, halflife, or alpha") - - return float(comass) - - -def _offset(window, center): - if not is_integer(window): - window = len(window) - offset = (window - 1) / 2.0 if center else 0 - try: - return int(offset) - except TypeError: - return offset.astype(int) - - -def _require_min_periods(p): - def _check_func(minp, window): - if minp is None: - return window - else: - return max(p, minp) - - return _check_func - - -def _use_window(minp, window): - if minp is None: - return window - else: - return minp - - -def _zsqrt(x): - with np.errstate(all="ignore"): - result = np.sqrt(x) - mask = x < 0 - - if isinstance(x, ABCDataFrame): - if mask.values.any(): - result[mask] = 0 - else: - if mask.any(): - result[mask] = 0 - - return result - - -def _prep_binary(arg1, arg2): - if not isinstance(arg2, type(arg1)): - raise Exception("Input arrays must be of the same type!") - - # mask out values, this also makes a common index... 
- X = arg1 + 0 * arg2 - Y = arg2 + 0 * arg1 - - return X, Y - - -# Top-level exports - - -def rolling(obj, win_type=None, **kwds): - if not isinstance(obj, (ABCSeries, ABCDataFrame)): - raise TypeError("invalid type: %s" % type(obj)) - - if win_type is not None: - return Window(obj, win_type=win_type, **kwds) - - return Rolling(obj, **kwds) - - -rolling.__doc__ = Window.__doc__ - - -def expanding(obj, **kwds): - if not isinstance(obj, (ABCSeries, ABCDataFrame)): - raise TypeError("invalid type: %s" % type(obj)) - - return Expanding(obj, **kwds) - - -expanding.__doc__ = Expanding.__doc__ - - -def ewm(obj, **kwds): - if not isinstance(obj, (ABCSeries, ABCDataFrame)): - raise TypeError("invalid type: %s" % type(obj)) - - return EWM(obj, **kwds) - - -ewm.__doc__ = EWM.__doc__ diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index d38221d784273..76c01535a26e7 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -9,8 +9,7 @@ def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover r""" - Read text from clipboard and pass to read_csv. See read_csv for the - full argument list + Read text from clipboard and pass to read_csv. Parameters ---------- @@ -18,9 +17,13 @@ def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover A string or regex delimiter. The default of '\s+' denotes one or more whitespace characters. + **kwargs + See read_csv for the full argument list. + Returns ------- - parsed : DataFrame + DataFrame + A parsed DataFrame object. """ encoding = kwargs.pop("encoding", "utf-8") diff --git a/pandas/io/common.py b/pandas/io/common.py index e01e473047b88..30228d660e816 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -5,12 +5,23 @@ import csv import gzip from http.client import HTTPException # noqa -from io import BytesIO -import lzma +from io import BufferedIOBase, BytesIO import mmap import os import pathlib -from typing import IO, AnyStr, BinaryIO, Optional, TextIO, Type +from typing import ( + IO, + Any, + AnyStr, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + Union, +) from urllib.error import URLError # noqa from urllib.parse import ( # noqa urlencode, @@ -23,6 +34,7 @@ from urllib.request import pathname2url, urlopen import zipfile +from pandas.compat import _get_lzma_file, _import_lzma from pandas.errors import ( # noqa AbstractMethodError, DtypeWarning, @@ -35,6 +47,8 @@ from pandas._typing import FilePathOrBuffer +lzma = _import_lzma() + # gh-12665: Alias for now and remove later. CParserError = ParserError @@ -253,6 +267,40 @@ def file_path_to_url(path: str) -> str: _compression_to_extension = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"} +def _get_compression_method( + compression: Optional[Union[str, Dict[str, str]]] +) -> Tuple[Optional[str], Dict[str, str]]: + """ + Simplifies a compression argument to a compression method string and + a dict containing additional arguments. + + Parameters + ---------- + compression : str or dict + If string, specifies the compression method. If dict, value at key + 'method' specifies compression method. 
+ + Returns + ------- + tuple of ({compression method}, Optional[str] + {compression arguments}, Dict[str, str]) + + Raises + ------ + ValueError on dict missing 'method' key + """ + # Handle dict + if isinstance(compression, dict): + compression_args = compression.copy() + try: + compression = compression_args.pop("method") + except KeyError: + raise ValueError("If dict, compression must have key 'method'") + else: + compression_args = {} + return compression, compression_args + + def _infer_compression( filepath_or_buffer: FilePathOrBuffer, compression: Optional[str] ) -> Optional[str]: @@ -264,8 +312,8 @@ def _infer_compression( Parameters ---------- - filepath_or_buffer : - a path (str) or buffer + filepath_or_buffer : str or file handle + File path or object. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None} If 'infer' and `filepath_or_buffer` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', @@ -273,12 +321,11 @@ def _infer_compression( Returns ------- - string or None : - compression method + string or None Raises ------ - ValueError on invalid compression specified + ValueError on invalid compression specified. """ # No compression has been explicitly specified @@ -310,49 +357,67 @@ def _infer_compression( def _get_handle( - path_or_buf, mode, encoding=None, compression=None, memory_map=False, is_text=True + path_or_buf, + mode: str, + encoding=None, + compression: Optional[Union[str, Dict[str, Any]]] = None, + memory_map: bool = False, + is_text: bool = True, ): """ Get file handle for given path/buffer and mode. Parameters ---------- - path_or_buf : - a path (str) or buffer + path_or_buf : str or file handle + File path or object. mode : str - mode to open path_or_buf with + Mode to open path_or_buf with. encoding : str or None - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default None - If 'infer' and `filepath_or_buffer` is path-like, then detect - compression from the following extensions: '.gz', '.bz2', '.zip', - or '.xz' (otherwise no compression). + Encoding to use. + compression : str or dict, default None + If string, specifies compression mode. If dict, value at key 'method' + specifies compression mode. Compression mode must be one of {'infer', + 'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer' + and `filepath_or_buffer` is path-like, then detect compression from + the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise + no compression). If dict and compression mode is 'zip' or inferred as + 'zip', other entries passed as additional compression options. + + .. versionchanged:: 1.0.0 + + May now be a dict with key 'method' as compression mode + and other keys as compression options if compression + mode is 'zip'. + memory_map : boolean, default False See parsers._parser_params for more information. is_text : boolean, default True whether file/buffer is in text format (csv, json, etc.), or in binary - mode (pickle, etc.) + mode (pickle, etc.). Returns ------- f : file-like - A file-like object + A file-like object. handles : list of file-like objects A list of file-like object that were opened in this function. 
""" try: from s3fs import S3File - need_text_wrapping = (BytesIO, S3File) + need_text_wrapping = (BufferedIOBase, S3File) except ImportError: - need_text_wrapping = (BytesIO,) + need_text_wrapping = BufferedIOBase # type: ignore - handles = list() + handles = list() # type: List[IO] f = path_or_buf # Convert pathlib.Path/py.path.local or string path_or_buf = _stringify_path(path_or_buf) is_path = isinstance(path_or_buf, str) + compression, compression_args = _get_compression_method(compression) if is_path: compression = _infer_compression(path_or_buf, compression) @@ -374,7 +439,7 @@ def _get_handle( # ZIP Compression elif compression == "zip": - zf = BytesZipFile(path_or_buf, mode) + zf = BytesZipFile(path_or_buf, mode, **compression_args) # Ensure the container is closed as well. handles.append(zf) if zf.mode == "w": @@ -395,7 +460,7 @@ def _get_handle( # XZ Compression elif compression == "xz": - f = lzma.LZMAFile(path_or_buf, mode) + f = _get_lzma_file(lzma)(path_or_buf, mode) # Unrecognized Compression else: @@ -420,14 +485,16 @@ def _get_handle( if is_text and (compression or isinstance(f, need_text_wrapping)): from io import TextIOWrapper - f = TextIOWrapper(f, encoding=encoding, newline="") - handles.append(f) + g = TextIOWrapper(f, encoding=encoding, newline="") + if not isinstance(f, BufferedIOBase): + handles.append(g) + f = g if memory_map and hasattr(f, "fileno"): try: - g = MMapWrapper(f) + wrapped = MMapWrapper(f) f.close() - f = g + f = wrapped except Exception: # we catch any errors that may have occurred # because that is consistent with the lower-level @@ -452,15 +519,19 @@ def __init__( self, file: FilePathOrBuffer, mode: str, - compression: int = zipfile.ZIP_DEFLATED, + archive_name: Optional[str] = None, **kwargs ): if mode in ["wb", "rb"]: mode = mode.replace("b", "") - super().__init__(file, mode, compression, **kwargs) + self.archive_name = archive_name + super().__init__(file, mode, zipfile.ZIP_DEFLATED, **kwargs) def write(self, data): - super().writestr(self.filename, data) + archive_name = self.filename + if self.archive_name is not None: + archive_name = self.archive_name + super().writestr(archive_name, data) @property def closed(self): @@ -505,7 +576,6 @@ def __next__(self) -> str: class UTF8Recoder(BaseIterator): - """ Iterator that reads an encoded stream and re-encodes the input to UTF-8 """ diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 154656fbb250b..997edf49d9e8f 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -837,10 +837,10 @@ def parse( **kwds ): """ - Parse specified sheet(s) into a DataFrame + Parse specified sheet(s) into a DataFrame. Equivalent to read_excel(ExcelFile, ...) See the read_excel - docstring for more info on accepted parameters + docstring for more info on accepted parameters. 
Returns ------- diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 6fe22f14c2c5b..25a6db675265d 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -39,7 +39,7 @@ def to_feather(df, path): if not isinstance(df.index, Int64Index): raise ValueError( "feather does not support serializing {} " - "for the index; you can .reset_index()" + "for the index; you can .reset_index() " "to make the index into column(s)".format(type(df.index)) ) diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 60daf311397e8..e25862537cbfc 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -22,6 +22,7 @@ from pandas.io.common import ( UnicodeWriter, + _get_compression_method, _get_handle, _infer_compression, get_filepath_or_buffer, @@ -58,6 +59,9 @@ def __init__( if path_or_buf is None: path_or_buf = StringIO() + # Extract compression mode as given, if dict + compression, self.compression_args = _get_compression_method(compression) + self.path_or_buf, _, _, _ = get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, mode=mode ) @@ -178,7 +182,7 @@ def save(self): self.path_or_buf, self.mode, encoding=self.encoding, - compression=self.compression, + compression=dict(self.compression_args, method=self.compression), ) close = True @@ -206,11 +210,13 @@ def save(self): if hasattr(self.path_or_buf, "write"): self.path_or_buf.write(buf) else: + compression = dict(self.compression_args, method=self.compression) + f, handles = _get_handle( self.path_or_buf, self.mode, encoding=self.encoding, - compression=self.compression, + compression=compression, ) f.write(buf) close = True diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index d8a370d77ea31..8ff4b9bda0430 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -336,9 +336,11 @@ def _get_formatted_index(self) -> Tuple[List[str], bool]: return fmt_index, have_header def _get_formatted_values(self) -> List[str]: - values_to_format = self.tr_series._formatting_values() return format_array( - values_to_format, None, float_format=self.float_format, na_rep=self.na_rep + self.tr_series._values, + None, + float_format=self.float_format, + na_rep=self.na_rep, ) def to_string(self) -> str: @@ -547,7 +549,8 @@ def __init__( decimal: str = ".", table_id: Optional[str] = None, render_links: bool = False, - **kwds + bold_rows: bool = False, + escape: bool = True, ): self.frame = frame self.show_index_names = index_names @@ -578,7 +581,8 @@ def __init__( else: self.justify = justify - self.kwds = kwds + self.bold_rows = bold_rows + self.escape = escape if columns is not None: self.columns = ensure_index(columns) @@ -903,9 +907,8 @@ def to_latex( def _format_col(self, i: int) -> List[str]: frame = self.tr_frame formatter = self._get_formatter(i) - values_to_format = frame.iloc[:, i]._formatting_values() return format_array( - values_to_format, + frame.iloc[:, i]._values, formatter, float_format=self.float_format, na_rep=self.na_rep, diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 4b44893df70ed..8c4a7f4a1213d 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -37,7 +37,7 @@ class HTMLFormatter(TableFormatter): def __init__( self, formatter: DataFrameFormatter, - classes: Optional[Union[str, List, Tuple]] = None, + classes: Optional[Union[str, List[str], Tuple[str, ...]]] = None, border: Optional[int] = None, ) -> None: self.fmt = formatter @@ -46,11 +46,11 @@ def __init__( 
self.frame = self.fmt.frame self.columns = self.fmt.tr_frame.columns self.elements = [] # type: List[str] - self.bold_rows = self.fmt.kwds.get("bold_rows", False) - self.escape = self.fmt.kwds.get("escape", True) + self.bold_rows = self.fmt.bold_rows + self.escape = self.fmt.escape self.show_dimensions = self.fmt.show_dimensions if border is None: - border = get_option("display.html.border") + border = cast(int, get_option("display.html.border")) self.border = border self.table_id = self.fmt.table_id self.render_links = self.fmt.render_links diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index c60e15b733f0a..4c4d5ec73269a 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -39,12 +39,13 @@ def __init__( ): self.fmt = formatter self.frame = self.fmt.frame - self.bold_rows = self.fmt.kwds.get("bold_rows", False) + self.bold_rows = self.fmt.bold_rows self.column_format = column_format self.longtable = longtable self.multicolumn = multicolumn self.multicolumn_format = multicolumn_format self.multirow = multirow + self.escape = self.fmt.escape def write_result(self, buf: IO[str]) -> None: """ @@ -142,7 +143,7 @@ def pad_empties(x): buf.write("\\endfoot\n\n") buf.write("\\bottomrule\n") buf.write("\\endlastfoot\n") - if self.fmt.kwds.get("escape", True): + if self.escape: # escape backslashes first crow = [ ( diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index 4ec9094ce4abe..ead51693da791 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -3,12 +3,14 @@ """ import sys -from typing import Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union from pandas._config import get_option from pandas.core.dtypes.inference import is_sequence +EscapeChars = Union[Dict[str, str], Iterable[str]] + def adjoin(space: int, *lists: List[str], **kwargs) -> str: """ @@ -148,19 +150,16 @@ def _pprint_dict( def pprint_thing( - thing, + thing: Any, _nest_lvl: int = 0, - escape_chars: Optional[Union[Dict[str, str], Iterable[str]]] = None, + escape_chars: Optional[EscapeChars] = None, default_escapes: bool = False, quote_strings: bool = False, max_seq_items: Optional[int] = None, ) -> str: """ This function is the sanctioned way of converting objects - to a unicode representation. - - properly handles nested sequences containing unicode strings - (unicode(object) does not) + to a string representation and properly handles nested sequences. Parameters ---------- @@ -178,21 +177,13 @@ def pprint_thing( Returns ------- - result - unicode str + str """ - def as_escaped_unicode(thing, escape_chars=escape_chars): - # Unicode is fine, else we try to decode using utf-8 and 'replace' - # if that's not it either, we have no way of knowing and the user - # should deal with it himself. 
- - try: - result = str(thing) # we should try this first - except UnicodeDecodeError: - # either utf-8 or we replace errors - result = str(thing).decode("utf-8", "replace") - + def as_escaped_string( + thing: Any, escape_chars: Optional[EscapeChars] = escape_chars + ) -> str: translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"} if isinstance(escape_chars, dict): if default_escapes: @@ -202,10 +193,11 @@ def as_escaped_unicode(thing, escape_chars=escape_chars): escape_chars = list(escape_chars.keys()) else: escape_chars = escape_chars or tuple() + + result = str(thing) for c in escape_chars: result = result.replace(c, translate[c]) - - return str(result) + return result if hasattr(thing, "__next__"): return str(thing) @@ -224,11 +216,11 @@ def as_escaped_unicode(thing, escape_chars=escape_chars): max_seq_items=max_seq_items, ) elif isinstance(thing, str) and quote_strings: - result = "'{thing}'".format(thing=as_escaped_unicode(thing)) + result = "'{thing}'".format(thing=as_escaped_string(thing)) else: - result = as_escaped_unicode(thing) + result = as_escaped_string(thing) - return str(result) # always unicode + return result def pprint_thing_encoded( diff --git a/pandas/io/html.py b/pandas/io/html.py index 9d2647f226f00..490c574463b9b 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -1,4 +1,5 @@ -""":mod:`pandas.io.html` is a module containing functionality for dealing with +""" +:mod:`pandas.io.html` is a module containing functionality for dealing with HTML IO. """ @@ -58,7 +59,8 @@ def _importers(): def _remove_whitespace(s, regex=_RE_WHITESPACE): - """Replace extra whitespace inside of a string with a single space. + """ + Replace extra whitespace inside of a string with a single space. Parameters ---------- @@ -77,7 +79,8 @@ def _remove_whitespace(s, regex=_RE_WHITESPACE): def _get_skiprows(skiprows): - """Get an iterator given an integer, slice or container. + """ + Get an iterator given an integer, slice or container. Parameters ---------- @@ -107,7 +110,8 @@ def _get_skiprows(skiprows): def _read(obj): - """Try to read from a url, file or string. + """ + Try to read from a url, file or string. Parameters ---------- @@ -136,7 +140,8 @@ def _read(obj): class _HtmlFrameParser: - """Base class for parsers that parse HTML into DataFrames. + """ + Base class for parsers that parse HTML into DataFrames. Parameters ---------- @@ -515,7 +520,8 @@ def _handle_hidden_tables(self, tbl_list, attr_name): class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser): - """HTML to DataFrame parser that uses BeautifulSoup under the hood. + """ + HTML to DataFrame parser that uses BeautifulSoup under the hood. See Also -------- @@ -622,7 +628,8 @@ def _build_xpath_expr(attrs): class _LxmlFrameParser(_HtmlFrameParser): - """HTML to DataFrame parser that uses lxml under the hood. + """ + HTML to DataFrame parser that uses lxml under the hood. Warning ------- @@ -937,7 +944,8 @@ def read_html( keep_default_na=True, displayed_only=True, ): - r"""Read HTML tables into a ``list`` of ``DataFrame`` objects. + r""" + Read HTML tables into a ``list`` of ``DataFrame`` objects. 
Parameters ---------- diff --git a/pandas/io/msgpack/__init__.py b/pandas/io/msgpack/__init__.py index 9b09cffd83f75..7107263c180cb 100644 --- a/pandas/io/msgpack/__init__.py +++ b/pandas/io/msgpack/__init__.py @@ -2,8 +2,8 @@ from collections import namedtuple -from pandas.io.msgpack.exceptions import * # noqa -from pandas.io.msgpack._version import version # noqa +from pandas.io.msgpack.exceptions import * # noqa: F401,F403 isort:skip +from pandas.io.msgpack._version import version # noqa: F401 isort:skip class ExtType(namedtuple("ExtType", "code data")): @@ -19,10 +19,14 @@ def __new__(cls, code, data): return super().__new__(cls, code, data) -import os # noqa +import os # noqa: F401,E402 isort:skip -from pandas.io.msgpack._packer import Packer # noqa -from pandas.io.msgpack._unpacker import unpack, unpackb, Unpacker # noqa +from pandas.io.msgpack._unpacker import ( # noqa: F401,E402 isort:skip + Unpacker, + unpack, + unpackb, +) +from pandas.io.msgpack._packer import Packer # noqa: E402 isort:skip def pack(o, stream, **kwargs): diff --git a/pandas/io/msgpack/_packer.pyi b/pandas/io/msgpack/_packer.pyi new file mode 100644 index 0000000000000..e95a1622c5615 --- /dev/null +++ b/pandas/io/msgpack/_packer.pyi @@ -0,0 +1,22 @@ +# flake8: noqa + +class Packer: + def __cinit__(self): ... + def __init__( + self, + default=..., + encoding=..., + unicode_errors=..., + use_single_float=..., + autoreset: int = ..., + use_bin_type: int = ..., + ): ... + def __dealloc__(self): ... + def _pack(self, o, nest_limit: int = ...) -> int: ... + def pack(self, obj): ... + def pack_ext_type(self, typecode, data): ... + def pack_array_header(self, size): ... + def pack_map_header(self, size): ... + def pack_map_pairs(self, pairs): ... + def reset(self) -> None: ... + def bytes(self): ... diff --git a/pandas/io/msgpack/_unpacker.pyi b/pandas/io/msgpack/_unpacker.pyi new file mode 100644 index 0000000000000..9910895947fb6 --- /dev/null +++ b/pandas/io/msgpack/_unpacker.pyi @@ -0,0 +1,59 @@ +# flake8: noqa + +def unpackb( + packed, + object_hook=..., + list_hook=..., + use_list=..., + encoding=..., + unicode_errors=..., + object_pairs_hook=..., + ext_hook=..., + max_str_len=..., + max_bin_len=..., + max_array_len=..., + max_map_len=..., + max_ext_len=..., +): ... +def unpack( + stream, + object_hook=..., + list_hook=..., + use_list=..., + encoding=..., + unicode_errors=..., + object_pairs_hook=..., +): ... + +class Unpacker: + def __cinit__(self): ... + def __dealloc__(self): ... + def __init__( + self, + file_like=..., + read_size=..., + use_list=..., + object_hook=..., + object_pairs_hook=..., + list_hook=..., + encoding=..., + unicode_errors=..., + max_buffer_size: int = ..., + ext_hook=..., + max_str_len=..., + max_bin_len=..., + max_array_len=..., + max_map_len=..., + max_ext_len=..., + ): ... + def feed(self, next_bytes): ... + def append_buffer(self, _buf, _buf_len): ... + def read_from_file(self): ... + def _unpack(self, execute, write_bytes, iter=...): ... + def read_bytes(self, nbytes): ... + def unpack(self, write_bytes=...): ... + def skip(self, write_bytes=...): ... + def read_array_header(self, write_bytes=...): ... + def read_map_header(self, write_bytes=...): ... + def __iter__(self): ... + def __next__(self): ... 
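The two ``.pyi`` stubs above pin down the interface of the vendored msgpack
``Packer``/``Unpacker`` extension modules. As a hedged illustration only (this
snippet is not part of the diff, and the exact str/bytes round-trip behaviour
depends on the encoding options), a minimal round trip through that interface
looks like:

# Sketch using only names the package's __init__ imports above (Packer, unpackb).
from pandas.io.msgpack import Packer, unpackb

packed = Packer().pack([1, 2, 3])      # serialize a list to msgpack bytes
print(unpackb(packed, use_list=True))  # deserialize back -> [1, 2, 3]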
diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 04e49708ff082..ad47ba23b9221 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -846,7 +846,6 @@ def __init__( class Iterator: - """ manage the unpacking iteration, close the file on completion """ diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 82c460300582b..6fc70e9f4a737 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -184,12 +184,14 @@ def write( def read(self, path, columns=None, **kwargs): if is_s3_url(path): + from pandas.io.s3 import get_file_and_filesystem + # When path is s3:// an S3File is returned. # We need to retain the original path(str) while also # pass the S3File().open function to fsatparquet impl. - s3, _, _, should_close = get_filepath_or_buffer(path) + s3, filesystem = get_file_and_filesystem(path) try: - parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open) + parquet_file = self.api.ParquetFile(path, open_with=filesystem.open) finally: s3.close() else: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index f4b00b0aac5f7..a3ff837bc7f52 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1393,6 +1393,10 @@ def __init__(self, kwds): if isinstance(self.header, (list, tuple, np.ndarray)): if not all(map(is_integer, self.header)): raise ValueError("header must be integer or list of integers") + if any(i < 0 for i in self.header): + raise ValueError( + "cannot specify multi-index header with negative integers" + ) if kwds.get("usecols"): raise ValueError( "cannot specify usecols when specifying a multi-index header" @@ -1419,6 +1423,13 @@ def __init__(self, kwds): elif self.header is not None and not is_integer(self.header): raise ValueError("header must be integer or list of integers") + # GH 27779 + elif self.header is not None and self.header < 0: + raise ValueError( + "Passing negative integer to header is invalid. " + "For no header, use header=None instead" + ) + self._name_processed = False self._first_chunk = True diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index abc8a414eb37a..1ff3400323e54 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -429,10 +429,10 @@ def _is_metadata_of(group, parent_group): class HDFStore: - """ - Dict-like IO interface for storing pandas objects in PyTables - either Fixed or Table format. + Dict-like IO interface for storing pandas objects in PyTables. + + Either Fixed or Table format. Parameters ---------- @@ -564,13 +564,12 @@ def __exit__(self, exc_type, exc_value, traceback): def keys(self): """ - Return a (potentially unordered) list of the keys corresponding to the - objects stored in the HDFStore. These are ABSOLUTE path-names (e.g. - have the leading '/' + Return a list of keys corresponding to objects stored in HDFStore. Returns ------- list + List of ABSOLUTE path-names (e.g. have the leading '/'). """ return [n._v_pathname for n in self.groups()] @@ -703,7 +702,7 @@ def flush(self, fsync=False): def get(self, key): """ - Retrieve pandas object stored in file + Retrieve pandas object stored in file. Parameters ---------- @@ -711,7 +710,8 @@ def get(self, key): Returns ------- - obj : same type as object stored in file + object + Same type as object stored in file. """ group = self.get_node(key) if group is None: @@ -731,25 +731,31 @@ def select( **kwargs ): """ - Retrieve pandas object stored in file, optionally based on where - criteria + Retrieve pandas object stored in file, optionally based on where criteria. 
Parameters ---------- key : object - where : list of Term (or convertible) objects, optional - start : integer (defaults to None), row number to start selection - stop : integer (defaults to None), row number to stop selection - columns : a list of columns that if not None, will limit the return - columns - iterator : boolean, return an iterator, default False - chunksize : nrows to include in iteration, return an iterator - auto_close : boolean, should automatically close the store when - finished, default is False + Object being retrieved from file. + where : list, default None + List of Term (or convertible) objects, optional. + start : int, default None + Row number to start selection. + stop : int, default None + Row number to stop selection. + columns : list, default None + A list of columns that if not None, will limit the return columns. + iterator : bool, default False + Returns an iterator. + chunksize : int, default None + Number of rows to include in iteration, return an iterator. + auto_close : bool, default False + Should automatically close the store when finished. Returns ------- - The selected object + object + Retrieved object from file. """ group = self.get_node(key) if group is None: @@ -929,28 +935,30 @@ def func(_start, _stop, _where): def put(self, key, value, format=None, append=False, **kwargs): """ - Store object in HDFStore + Store object in HDFStore. Parameters ---------- - key : object - value : {Series, DataFrame} - format : 'fixed(f)|table(t)', default is 'fixed' + key : object + value : {Series, DataFrame} + format : 'fixed(f)|table(t)', default is 'fixed' fixed(f) : Fixed format - Fast writing/reading. Not-appendable, nor searchable + Fast writing/reading. Not-appendable, nor searchable. table(t) : Table format Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching - / selecting subsets of the data - append : boolean, default False + / selecting subsets of the data. + append : bool, default False This will force Table format, append the input data to the existing. - data_columns : list of columns to create as data columns, or True to + data_columns : list, default None + List of columns to create as data columns, or True to use all columns. See `here <http://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__. - encoding : default None, provide an encoding for strings - dropna : boolean, default False, do not write an ALL nan row to - the store settable by the option 'io.hdf.dropna_table' + encoding : str, default None + Provide an encoding for strings. + dropna : bool, default False + Do not write an ALL nan row to the store, settable by the option 'io.hdf.dropna_table'. """ if format is None: format = get_option("io.hdf.default_format") or "fixed" @@ -1165,12 +1173,15 @@ def create_table_index(self, key, **kwargs): s.create_index(**kwargs) def groups(self): - """return a list of all the top-level nodes (that are not themselves a - pandas storage object) + """ + Return a list of all the top-level nodes. + + Each node returned is not a pandas storage object. Returns ------- list + List of objects. """ _tables() self._check_if_open() @@ -1188,10 +1199,12 @@ def groups(self): ] def walk(self, where="/"): - """ Walk the pytables group hierarchy for pandas objects + """ + Walk the pytables group hierarchy for pandas objects. This generator will yield the group path, subgroups and pandas object names for each group. 
+ Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its @@ -1202,18 +1215,17 @@ def walk(self, where="/"): Parameters ---------- - where : str, optional + where : str, default "/" Group where to start walking. - If not supplied, the root group is used. Yields ------ path : str - Full path to a group (without trailing '/') - groups : list of str - names of the groups contained in `path` - leaves : list of str - names of the pandas objects contained in `path` + Full path to a group (without trailing '/'). + groups : list + Names (strings) of the groups contained in `path`. + leaves : list + Names (strings) of the pandas objects contained in `path`. """ _tables() self._check_if_open() @@ -1533,7 +1545,6 @@ def _read_group(self, group, **kwargs): class TableIterator: - """ define the iteration interface on a table Parameters @@ -1641,7 +1652,6 @@ def get_result(self, coordinates=False): class IndexCol: - """ an index column description class Parameters @@ -1955,7 +1965,6 @@ def write_metadata(self, handler): class GenericIndexCol(IndexCol): - """ an index which is not represented in the data of the table """ @property @@ -1993,7 +2002,6 @@ def set_attr(self): class DataCol(IndexCol): - """ a data holding column, by definition this is not indexable Parameters @@ -2443,7 +2451,6 @@ def set_attr(self): class DataIndexableCol(DataCol): - """ represent a data column that can be indexed """ is_data_indexable = True @@ -2466,7 +2473,6 @@ def get_atom_timedelta64(self, block): class GenericDataIndexableCol(DataIndexableCol): - """ represent a generic pytables data column """ def get_attr(self): @@ -2474,7 +2480,6 @@ def get_attr(self): class Fixed: - """ represent an object in my store facilitate read/write of various types of objects this is an abstract base class @@ -2642,7 +2647,6 @@ def delete(self, where=None, start=None, stop=None, **kwargs): class GenericFixed(Fixed): - """ a generified fixed version """ _index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"} @@ -2898,7 +2902,12 @@ def read_index_node(self, node, start=None, stop=None): kwargs["freq"] = node._v_attrs["freq"] if "tz" in node._v_attrs: - kwargs["tz"] = node._v_attrs["tz"] + if isinstance(node._v_attrs["tz"], bytes): + # created by python2 + kwargs["tz"] = node._v_attrs["tz"].decode("utf-8") + else: + # created by python3 + kwargs["tz"] = node._v_attrs["tz"] if kind in ("date", "datetime"): index = factory( @@ -3202,7 +3211,9 @@ def read(self, start=None, stop=None, **kwargs): values = self.read_array( "block{idx}_values".format(idx=i), start=_start, stop=_stop ) - blk = make_block(values, placement=items.get_indexer(blk_items)) + blk = make_block( + values, placement=items.get_indexer(blk_items), ndim=len(axes) + ) blocks.append(blk) return self.obj_type(BlockManager(blocks, axes)) @@ -3237,7 +3248,6 @@ class FrameFixed(BlockManagerFixed): class Table(Fixed): - """ represent a table: facilitate read/write of various types of tables @@ -4112,7 +4122,6 @@ def read_column(self, column, where=None, start=None, stop=None): class WORMTable(Table): - """ a write-once read-many table: this format DOES NOT ALLOW appending to a table. 
writing is a one-time operation the data are stored in a format that allows for searching the data on disk @@ -4134,7 +4143,6 @@ def write(self, **kwargs): class LegacyTable(Table): - """ an appendable table: allow append/query/delete operations to a (possibly) already existing appendable table this table ALLOWS append (but doesn't require them), and stores the data in a format @@ -4462,7 +4470,7 @@ def read(self, where=None, columns=None, **kwargs): if values.ndim == 1 and isinstance(values, np.ndarray): values = values.reshape((1, values.shape[0])) - block = make_block(values, placement=np.arange(len(cols_))) + block = make_block(values, placement=np.arange(len(cols_)), ndim=2) mgr = BlockManager([block], [cols_, index_]) frames.append(DataFrame(mgr)) @@ -4588,7 +4596,6 @@ def write(self, **kwargs): class AppendableMultiFrameTable(AppendableFrameTable): - """ a frame with a multi-index """ table_type = "appendable_multiframe" @@ -4947,7 +4954,6 @@ def _need_convert(kind): class Selection: - """ Carries out a selection operation on a tables.Table object. diff --git a/pandas/io/s3.py b/pandas/io/s3.py index 0a7c082fec51c..7e0a37e8cba20 100644 --- a/pandas/io/s3.py +++ b/pandas/io/s3.py @@ -1,8 +1,11 @@ """ s3 support for remote file interactivity """ +from typing import IO, Any, Optional, Tuple from urllib.parse import urlparse as parse_url from pandas.compat._optional import import_optional_dependency +from pandas._typing import FilePathOrBuffer + s3fs = import_optional_dependency( "s3fs", extra="The s3fs package is required to handle s3 files." ) @@ -14,9 +17,9 @@ def _strip_schema(url): return result.netloc + result.path -def get_filepath_or_buffer( - filepath_or_buffer, encoding=None, compression=None, mode=None -): +def get_file_and_filesystem( + filepath_or_buffer: FilePathOrBuffer, mode: Optional[str] = None +) -> Tuple[IO, Any]: from botocore.exceptions import NoCredentialsError if mode is None: @@ -24,7 +27,7 @@ def get_filepath_or_buffer( fs = s3fs.S3FileSystem(anon=False) try: - filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) + file = fs.open(_strip_schema(filepath_or_buffer), mode) except (FileNotFoundError, NoCredentialsError): # boto3 has troubles when trying to access a public file # when credentialed... @@ -33,5 +36,15 @@ def get_filepath_or_buffer( # A NoCredentialsError is raised if you don't have creds # for that bucket. fs = s3fs.S3FileSystem(anon=True) - filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) - return filepath_or_buffer, None, compression, True + file = fs.open(_strip_schema(filepath_or_buffer), mode) + return file, fs + + +def get_filepath_or_buffer( + filepath_or_buffer: FilePathOrBuffer, + encoding: Optional[str] = None, + compression: Optional[str] = None, + mode: Optional[str] = None, +) -> Tuple[IO, Optional[str], Optional[str], bool]: + file, _fs = get_file_and_filesystem(filepath_or_buffer, mode=mode) + return file, None, compression, True diff --git a/pandas/io/sql.py b/pandas/io/sql.py index f1f52a9198d29..44cb399336d62 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -269,7 +269,8 @@ def read_sql_query( parse_dates=None, chunksize=None, ): - """Read SQL query into a DataFrame. + """ + Read SQL query into a DataFrame. Returns a DataFrame corresponding to the result set of the query string. Optionally provide an `index_col` parameter to use one of the @@ -455,14 +456,14 @@ def to_sql( Parameters ---------- frame : DataFrame, Series - name : string + name : str Name of SQL table. 
con : SQLAlchemy connectable(engine/connection) or database string URI or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. - schema : string, default None + schema : str, optional Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). if_exists : {'fail', 'replace', 'append'}, default 'fail' @@ -471,18 +472,19 @@ def to_sql( - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column. - index_label : string or sequence, default None + index_label : str or sequence, optional Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. - chunksize : int, default None - If not None, then rows will be written in batches of this size at a - time. If None, all rows will be written at once. - dtype : single SQLtype or dict of column name to SQL type, default None - Optional specifying the datatype for columns. The SQL type should - be a SQLAlchemy type, or a string for sqlite3 fallback connection. - If all columns are of the same type, one single value can be used. - method : {None, 'multi', callable}, default None + chunksize : int, optional + Specify the number of rows in each batch to be written at a time. + By default, all rows will be written at once. + dtype : dict or scalar, optional + Specifying the datatype for columns. If a dictionary is used, the + keys should be the column names and the values should be the + SQLAlchemy types or strings for the sqlite3 fallback mode. If a + scalar is provided, it will be applied to all columns. + method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: - None : Uses standard SQL ``INSERT`` clause (one per row). diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 69bafc7749258..31fdaa5cc6735 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -138,7 +138,7 @@ _iterator_params, ) -_data_method_doc = """\ +_data_method_doc = """ Read observations from Stata file, converting them into a dataframe .. deprecated:: diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index a3c1499845c2a..2e6a401b49efc 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -53,7 +53,7 @@ def hist_series( rotation of y axis labels figsize : tuple, default None figure size in inches by default - bins : integer or sequence, default 10 + bins : int or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last @@ -116,7 +116,7 @@ def hist_frame( ---------- data : DataFrame The pandas object holding the data. - column : string or sequence + column : str or sequence If passed, will be used to limit data to a subset of columns. by : object, optional If passed, then used to form histograms for separate groups. @@ -148,7 +148,7 @@ def hist_frame( `matplotlib.rcParams` by default. layout : tuple, optional Tuple of (rows, columns) for the layout of the histograms. - bins : integer or sequence, default 10 + bins : int or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. 
If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last @@ -177,7 +177,7 @@ def hist_frame( >>> df = pd.DataFrame({ ... 'length': [1.5, 0.5, 1.2, 0.9, 3], ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1] - ... }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse']) + ... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse']) >>> hist = df.hist(bins=3) """ plot_backend = _get_plot_backend() @@ -370,8 +370,8 @@ def boxplot( If ``return_type`` is `None`, a NumPy array of axes with the same shape as ``layout`` is returned: - >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', - ... return_type=None) + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', + ... return_type=None) >>> type(boxplot) <class 'numpy.ndarray'> """ @@ -446,7 +446,7 @@ def boxplot_frame_groupby( * ``True`` - create a subplot for each group column : column name or list of names, or vector Can be any valid input to groupby - fontsize : int or string + fontsize : int or str rot : label rotation angle grid : Setting this to True will show the grid ax : Matplotlib axis object, default None @@ -530,7 +530,7 @@ class PlotAccessor(PandasObject): figsize : a tuple (width, height) in inches use_index : bool, default True Use index as ticks for x axis - title : string or list + title : str or list Title to use for the plot. If a string is passed, print the string at the top of the figure. If a list is passed and `subplots` is True, print each item in the list above the corresponding subplot. @@ -553,16 +553,16 @@ class PlotAccessor(PandasObject): .. versionchanged:: 0.25.0 xticks : sequence - Values to use for the xticks + Values to use for the xticks. yticks : sequence - Values to use for the yticks + Values to use for the yticks. xlim : 2-tuple/list ylim : 2-tuple/list rot : int, default None Rotation for ticks (xticks for vertical, yticks for horizontal plots) fontsize : int, default None - Font size for xticks and yticks + Font size for xticks and yticks. colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. @@ -586,8 +586,10 @@ class PlotAccessor(PandasObject): mark_right : bool, default True When using a secondary_y axis, automatically mark the column labels with "(right)" in the legend + include_bool : bool, default is False + If True, boolean values can be plotted. `**kwds` : keywords - Options to pass to matplotlib plotting method + Options to pass to matplotlib plotting method. Returns ------- @@ -983,7 +985,7 @@ def barh(self, x=None, y=None, **kwargs): .. plot:: :context: close-figs - >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]}) + >>> df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]}) >>> ax = df.plot.barh(x='lab', y='val') Plot a whole DataFrame to a horizontal bar plot @@ -1047,7 +1049,7 @@ def box(self, by=None, **kwargs): Parameters ---------- - by : string or sequence + by : str or sequence Column in the DataFrame to group by. 
**kwds : optional Additional keywords are documented in diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 15648d59c8f98..893854ab26e37 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -64,11 +64,12 @@ def register(explicit=True): pairs = get_pairs() for type_, cls in pairs: - converter = cls() - if type_ in units.registry: + # Cache previous converter if present + if type_ in units.registry and not isinstance(units.registry[type_], cls): previous = units.registry[type_] _mpl_units[type_] = previous - units.registry[type_] = converter + # Replace with pandas converter + units.registry[type_] = cls() def deregister(): diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index c2b37bb297ecb..6ff3f28440303 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -106,6 +106,7 @@ def __init__( colormap=None, table=False, layout=None, + include_bool=False, **kwds ): @@ -191,6 +192,7 @@ def __init__( self.colormap = colormap self.table = table + self.include_bool = include_bool self.kwds = kwds @@ -400,9 +402,20 @@ def _compute_plot_data(self): # GH16953, _convert is needed as fallback, for ``Series`` # with ``dtype == object`` data = data._convert(datetime=True, timedelta=True) - numeric_data = data.select_dtypes( - include=[np.number, "datetime", "datetimetz", "timedelta"] - ) + include_type = [np.number, "datetime", "datetimetz", "timedelta"] + + # GH23719, allow plotting boolean + if self.include_bool is True: + include_type.append(np.bool_) + + # GH22799, exclude datatime-like type for boxplot + exclude_type = None + if self._kind == "box": + # TODO: change after solving issue 27881 + include_type = [np.number] + exclude_type = ["timedelta"] + + numeric_data = data.select_dtypes(include=include_type, exclude=exclude_type) try: is_empty = numeric_data.empty @@ -549,7 +562,7 @@ def _add_legend_handle(self, handle, label, index=None): self.legend_labels.append(label) def _make_legend(self): - ax, leg = self._get_ax_legend(self.axes[0]) + ax, leg, handle = self._get_ax_legend_handle(self.axes[0]) handles = [] labels = [] @@ -558,7 +571,8 @@ def _make_legend(self): if not self.subplots: if leg is not None: title = leg.get_title().get_text() - handles = leg.legendHandles + # Replace leg.LegendHandles because it misses marker info + handles.extend(handle) labels = [x.get_text() for x in leg.get_texts()] if self.legend: @@ -568,6 +582,7 @@ def _make_legend(self): handles += self.legend_handles labels += self.legend_labels + if self.legend_title is not None: title = self.legend_title @@ -579,8 +594,14 @@ def _make_legend(self): if ax.get_visible(): ax.legend(loc="best") - def _get_ax_legend(self, ax): + def _get_ax_legend_handle(self, ax): + """ + Take in axes and return ax, legend and handle under different scenarios + """ leg = ax.get_legend() + + # Get handle from axes + handle, _ = ax.get_legend_handles_labels() other_ax = getattr(ax, "left_ax", None) or getattr(ax, "right_ax", None) other_leg = None if other_ax is not None: @@ -588,7 +609,7 @@ def _get_ax_legend(self, ax): if leg is None and other_leg is not None: leg = other_leg ax = other_ax - return ax, leg + return ax, leg, handle @cache_readonly def plt(self): @@ -1080,9 +1101,13 @@ def _make_plot(self): ) self._add_legend_handle(newlines[0], label, index=i) - lines = _get_all_lines(ax) - left, right = _get_xlim(lines) - ax.set_xlim(left, right) + if 
self._is_ts_plot(): + + # reset of xlim should be used for ts data + # TODO: GH28021, should find a way to change view limit on xaxis + lines = _get_all_lines(ax) + left, right = _get_xlim(lines) + ax.set_xlim(left, right) @classmethod def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds): diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 8472eb3a3d887..67fa79ad5da8c 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -343,6 +343,21 @@ def _flatten(axes): return np.array(axes) +def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None): + import matplotlib.pyplot as plt + + for ax in _flatten(axes): + if xlabelsize is not None: + plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) + if xrot is not None: + plt.setp(ax.get_xticklabels(), rotation=xrot) + if ylabelsize is not None: + plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) + if yrot is not None: + plt.setp(ax.get_yticklabels(), rotation=yrot) + return axes + + def _get_all_lines(ax): lines = ax.get_lines() @@ -362,18 +377,3 @@ def _get_xlim(lines): left = min(np.nanmin(x), left) right = max(np.nanmax(x), right) return left, right - - -def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None): - import matplotlib.pyplot as plt - - for ax in _flatten(axes): - if xlabelsize is not None: - plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) - if xrot is not None: - plt.setp(ax.get_xticklabels(), rotation=xrot) - if ylabelsize is not None: - plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) - if yrot is not None: - plt.setp(ax.get_yticklabels(), rotation=yrot) - return axes diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index 1cba0e7354182..7ed0ffc6d0115 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -329,7 +329,8 @@ def parallel_coordinates( sort_labels=False, **kwds ): - """Parallel coordinates plotting. + """ + Parallel coordinates plotting. Parameters ---------- @@ -392,7 +393,8 @@ def parallel_coordinates( def lag_plot(series, lag=1, ax=None, **kwds): - """Lag plot for time series. + """ + Lag plot for time series. Parameters ---------- diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 3920cfcc002d7..5931cd93cc8c5 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -30,6 +30,54 @@ import pandas.util.testing as tm +def assert_invalid_comparison(left, right, box): + """ + Assert that comparison operations with mismatched types behave correctly. 
+ + Parameters + ---------- + left : np.ndarray, ExtensionArray, Index, or Series + right : object + box : {pd.DataFrame, pd.Series, pd.Index, tm.to_array} + """ + # Not for tznaive-tzaware comparison + + # Note: not quite the same as how we do this for tm.box_expected + xbox = box if box is not pd.Index else np.array + + result = left == right + expected = xbox(np.zeros(result.shape, dtype=np.bool_)) + + tm.assert_equal(result, expected) + + result = right == left + tm.assert_equal(result, expected) + + result = left != right + tm.assert_equal(result, ~expected) + + result = right != left + tm.assert_equal(result, ~expected) + + msg = "Invalid comparison between" + with pytest.raises(TypeError, match=msg): + left < right + with pytest.raises(TypeError, match=msg): + left <= right + with pytest.raises(TypeError, match=msg): + left > right + with pytest.raises(TypeError, match=msg): + left >= right + with pytest.raises(TypeError, match=msg): + right < left + with pytest.raises(TypeError, match=msg): + right <= left + with pytest.raises(TypeError, match=msg): + right > left + with pytest.raises(TypeError, match=msg): + right >= left + + def assert_all(obj): """ Test helper to call obj.all() the appropriate number of times on @@ -47,7 +95,7 @@ class TestDatetime64ArrayLikeComparisons: # Comparison tests for datetime64 vectors fully parametrized over - # DataFrame/Series/DatetimeIndex/DateteimeArray. Ideally all comparison + # DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison # tests will eventually end up here. def test_compare_zerodim(self, tz_naive_fixture, box_with_array): @@ -59,36 +107,61 @@ def test_compare_zerodim(self, tz_naive_fixture, box_with_array): other = np.array(dti.to_numpy()[0]) - # FIXME: ValueError with transpose on tzaware - dtarr = tm.box_expected(dti, box, transpose=False) + dtarr = tm.box_expected(dti, box) result = dtarr <= other expected = np.array([True, False, False]) - expected = tm.box_expected(expected, xbox, transpose=False) + expected = tm.box_expected(expected, xbox) tm.assert_equal(result, expected) + def test_dt64arr_cmp_date_invalid(self, tz_naive_fixture, box_with_array): + # GH#19800, GH#19301 datetime.date comparison raises to + # match DatetimeIndex/Timestamp. 
This also matches the behavior + # of stdlib datetime.datetime + tz = tz_naive_fixture -class TestDatetime64DataFrameComparison: - @pytest.mark.parametrize( - "timestamps", - [ - [pd.Timestamp("2012-01-01 13:00:00+00:00")] * 2, - [pd.Timestamp("2012-01-01 13:00:00")] * 2, - ], - ) - def test_tz_aware_scalar_comparison(self, timestamps): - # GH#15966 - df = pd.DataFrame({"test": timestamps}) - expected = pd.DataFrame({"test": [False, False]}) - tm.assert_frame_equal(df == -1, expected) + dti = pd.date_range("20010101", periods=10, tz=tz) + date = dti[0].to_pydatetime().date() + + dtarr = tm.box_expected(dti, box_with_array) + assert_invalid_comparison(dtarr, date, box_with_array) + + @pytest.mark.parametrize("other", ["foo", -1, 99, 4.0, object(), timedelta(days=2)]) + def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array): + # GH#22074, GH#15966 + tz = tz_naive_fixture + + rng = date_range("1/1/2000", periods=10, tz=tz) + dtarr = tm.box_expected(rng, box_with_array) + assert_invalid_comparison(dtarr, other, box_with_array) + + @pytest.mark.parametrize("other", [None, np.nan]) + def test_dt64arr_cmp_na_scalar_invalid( + self, other, tz_naive_fixture, box_with_array + ): + # GH#19301 + tz = tz_naive_fixture + dti = pd.date_range("2016-01-01", periods=2, tz=tz) + dtarr = tm.box_expected(dti, box_with_array) + assert_invalid_comparison(dtarr, other, box_with_array) - def test_dt64_nat_comparison(self): + def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array): # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly - ts = pd.Timestamp.now() - df = pd.DataFrame([ts, pd.NaT]) - expected = pd.DataFrame([True, False]) + tz = tz_naive_fixture + box = box_with_array + xbox = box if box is not pd.Index else np.ndarray - result = df == ts - tm.assert_frame_equal(result, expected) + ts = pd.Timestamp.now(tz) + ser = pd.Series([ts, pd.NaT]) + + # FIXME: Can't transpose because that loses the tz dtype on + # the NaT column + obj = tm.box_expected(ser, box, transpose=False) + + expected = pd.Series([True, False], dtype=np.bool_) + expected = tm.box_expected(expected, xbox, transpose=False) + + result = obj == ts + tm.assert_equal(result, expected) class TestDatetime64SeriesComparison: @@ -142,35 +215,17 @@ def test_nat_comparisons(self, dtype, box, reverse, pair): expected = Series([False, False, True]) tm.assert_series_equal(left <= right, expected) - def test_comparison_invalid(self, box_with_array): + def test_comparison_invalid(self, tz_naive_fixture, box_with_array): # GH#4968 # invalid date/int comparisons - xbox = box_with_array if box_with_array is not pd.Index else np.ndarray - + tz = tz_naive_fixture ser = Series(range(5)) - ser2 = Series(pd.date_range("20010101", periods=5)) + ser2 = Series(pd.date_range("20010101", periods=5, tz=tz)) ser = tm.box_expected(ser, box_with_array) ser2 = tm.box_expected(ser2, box_with_array) - for (x, y) in [(ser, ser2), (ser2, ser)]: - - result = x == y - expected = tm.box_expected([False] * 5, xbox) - tm.assert_equal(result, expected) - - result = x != y - expected = tm.box_expected([True] * 5, xbox) - tm.assert_equal(result, expected) - msg = "Invalid comparison between" - with pytest.raises(TypeError, match=msg): - x >= y - with pytest.raises(TypeError, match=msg): - x > y - with pytest.raises(TypeError, match=msg): - x < y - with pytest.raises(TypeError, match=msg): - x <= y + assert_invalid_comparison(ser, ser2, box_with_array) @pytest.mark.parametrize( "data", @@ -227,26 +282,6 @@ def 
test_series_comparison_scalars(self): expected = Series([x > val for x in series]) tm.assert_series_equal(result, expected) - def test_dt64ser_cmp_date_invalid(self, box_with_array): - # GH#19800 datetime.date comparison raises to - # match DatetimeIndex/Timestamp. This also matches the behavior - # of stdlib datetime.datetime - - ser = pd.date_range("20010101", periods=10) - date = ser[0].to_pydatetime().date() - - ser = tm.box_expected(ser, box_with_array) - assert_all(~(ser == date)) - assert_all(ser != date) - with pytest.raises(TypeError): - ser > date - with pytest.raises(TypeError): - ser < date - with pytest.raises(TypeError): - ser >= date - with pytest.raises(TypeError): - ser <= date - @pytest.mark.parametrize( "left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")] ) @@ -388,57 +423,6 @@ def test_dti_cmp_datetimelike(self, other, tz_naive_fixture): expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) - def dt64arr_cmp_non_datetime(self, tz_naive_fixture, box_with_array): - # GH#19301 by convention datetime.date is not considered comparable - # to Timestamp or DatetimeIndex. This may change in the future. - tz = tz_naive_fixture - dti = pd.date_range("2016-01-01", periods=2, tz=tz) - dtarr = tm.box_expected(dti, box_with_array) - - other = datetime(2016, 1, 1).date() - assert not (dtarr == other).any() - assert (dtarr != other).all() - with pytest.raises(TypeError): - dtarr < other - with pytest.raises(TypeError): - dtarr <= other - with pytest.raises(TypeError): - dtarr > other - with pytest.raises(TypeError): - dtarr >= other - - @pytest.mark.parametrize("other", [None, np.nan, pd.NaT]) - def test_dti_eq_null_scalar(self, other, tz_naive_fixture): - # GH#19301 - tz = tz_naive_fixture - dti = pd.date_range("2016-01-01", periods=2, tz=tz) - assert not (dti == other).any() - - @pytest.mark.parametrize("other", [None, np.nan, pd.NaT]) - def test_dti_ne_null_scalar(self, other, tz_naive_fixture): - # GH#19301 - tz = tz_naive_fixture - dti = pd.date_range("2016-01-01", periods=2, tz=tz) - assert (dti != other).all() - - @pytest.mark.parametrize("other", [None, np.nan]) - def test_dti_cmp_null_scalar_inequality( - self, tz_naive_fixture, other, box_with_array - ): - # GH#19301 - tz = tz_naive_fixture - dti = pd.date_range("2016-01-01", periods=2, tz=tz) - dtarr = tm.box_expected(dti, box_with_array) - msg = "Invalid comparison between" - with pytest.raises(TypeError, match=msg): - dtarr < other - with pytest.raises(TypeError, match=msg): - dtarr <= other - with pytest.raises(TypeError, match=msg): - dtarr > other - with pytest.raises(TypeError, match=msg): - dtarr >= other - @pytest.mark.parametrize("dtype", [None, object]) def test_dti_cmp_nat(self, dtype, box_with_array): if box_with_array is tm.to_array and dtype is object: @@ -728,34 +712,6 @@ def test_dti_cmp_str(self, tz_naive_fixture): expected = np.array([True] * 10) tm.assert_numpy_array_equal(result, expected) - @pytest.mark.parametrize("other", ["foo", 99, 4.0, object(), timedelta(days=2)]) - def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array): - # GH#22074 - tz = tz_naive_fixture - xbox = box_with_array if box_with_array is not pd.Index else np.ndarray - - rng = date_range("1/1/2000", periods=10, tz=tz) - rng = tm.box_expected(rng, box_with_array) - - result = rng == other - expected = np.array([False] * 10) - expected = tm.box_expected(expected, xbox) - tm.assert_equal(result, expected) - - result = rng != other - expected = np.array([True] * 10) - 
expected = tm.box_expected(expected, xbox) - tm.assert_equal(result, expected) - msg = "Invalid comparison between" - with pytest.raises(TypeError, match=msg): - rng < other - with pytest.raises(TypeError, match=msg): - rng <= other - with pytest.raises(TypeError, match=msg): - rng > other - with pytest.raises(TypeError, match=msg): - rng >= other - def test_dti_cmp_list(self): rng = date_range("1/1/2000", periods=10) @@ -1097,7 +1053,13 @@ def test_dt64arr_add_timestamp_raises(self, box_with_array): def test_dt64arr_add_sub_float(self, other, box_with_array): dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D") dtarr = tm.box_expected(dti, box_with_array) - msg = "|".join(["unsupported operand type", "cannot (add|subtract)"]) + msg = "|".join( + [ + "unsupported operand type", + "cannot (add|subtract)", + "ufunc '?(add|subtract)'? cannot use operands with types", + ] + ) with pytest.raises(TypeError, match=msg): dtarr + other with pytest.raises(TypeError, match=msg): @@ -2570,24 +2532,3 @@ def test_shift_months(years, months): raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti] expected = DatetimeIndex(raw) tm.assert_index_equal(actual, expected) - - -# FIXME: this belongs in scalar tests -class SubDatetime(datetime): - pass - - -@pytest.mark.parametrize( - "lh,rh", - [ - (SubDatetime(2000, 1, 1), Timedelta(hours=1)), - (Timedelta(hours=1), SubDatetime(2000, 1, 1)), - ], -) -def test_dt_subclass_add_timedelta(lh, rh): - # GH 25851 - # ensure that subclassed datetime works for - # Timedelta operations - result = lh + rh - expected = SubDatetime(2000, 1, 1, 1) - assert result == expected diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 2b23790e4ccd3..d686d9f90a5a4 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -1227,3 +1227,36 @@ def test_addsub_arithmetic(self, dtype, delta): tm.assert_index_equal(index + index, 2 * index) tm.assert_index_equal(index - index, 0 * index) assert not (index - index).empty + + +def test_fill_value_inf_masking(): + # GH #27464 make sure we mask 0/1 with Inf and not NaN + df = pd.DataFrame({"A": [0, 1, 2], "B": [1.1, None, 1.1]}) + + other = pd.DataFrame({"A": [1.1, 1.2, 1.3]}, index=[0, 2, 3]) + + result = df.rfloordiv(other, fill_value=1) + + expected = pd.DataFrame( + {"A": [np.inf, 1.0, 0.0, 1.0], "B": [0.0, np.nan, 0.0, np.nan]} + ) + tm.assert_frame_equal(result, expected) + + +def test_dataframe_div_silenced(): + # GH#26793 + pdf1 = pd.DataFrame( + { + "A": np.arange(10), + "B": [np.nan, 1, 2, 3, 4] * 2, + "C": [np.nan] * 10, + "D": np.arange(10), + }, + index=list("abcdefghij"), + columns=list("ABCD"), + ) + pdf2 = pd.DataFrame( + np.random.randn(10, 4), index=list("abcdefghjk"), columns=list("ABCX") + ) + with tm.assert_produces_warning(None): + pdf1.div(pdf2, fill_value=0) diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index 4b58c290c3cea..ed693d873efb8 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -573,12 +573,19 @@ def test_parr_add_sub_float_raises(self, op, other, box_with_array): @pytest.mark.parametrize( "other", [ + # datetime scalars pd.Timestamp.now(), pd.Timestamp.now().to_pydatetime(), pd.Timestamp.now().to_datetime64(), + # datetime-like arrays + pd.date_range("2016-01-01", periods=3, freq="H"), + pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"), + pd.date_range("2016-01-01", periods=3, 
freq="S")._data, + pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data, + # Miscellaneous invalid types ], ) - def test_parr_add_sub_datetime_scalar(self, other, box_with_array): + def test_parr_add_sub_invalid(self, other, box_with_array): # GH#23215 rng = pd.period_range("1/1/2000", freq="D", periods=3) rng = tm.box_expected(rng, box_with_array) @@ -595,23 +602,6 @@ def test_parr_add_sub_datetime_scalar(self, other, box_with_array): # ----------------------------------------------------------------- # __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64] - def test_parr_add_sub_dt64_array_raises(self, box_with_array): - rng = pd.period_range("1/1/2000", freq="D", periods=3) - dti = pd.date_range("2016-01-01", periods=3) - dtarr = dti.values - - rng = tm.box_expected(rng, box_with_array) - - with pytest.raises(TypeError): - rng + dtarr - with pytest.raises(TypeError): - dtarr + rng - - with pytest.raises(TypeError): - rng - dtarr - with pytest.raises(TypeError): - dtarr - rng - def test_pi_add_sub_td64_array_non_tick_raises(self): rng = pd.period_range("1/1/2000", freq="Q", periods=3) tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 4f5e00bc5a37d..6d6b85a1e81e1 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -18,6 +18,7 @@ Timestamp, timedelta_range, ) +from pandas.tests.arithmetic.test_datetime64 import assert_invalid_comparison import pandas.util.testing as tm @@ -61,42 +62,33 @@ def test_compare_timedelta64_zerodim(self, box_with_array): # zero-dim of wrong dtype should still raise tdi >= np.array(4) - -class TestTimedelta64ArrayComparisons: - # TODO: All of these need to be parametrized over box - - def test_compare_timedelta_series(self): + @pytest.mark.parametrize( + "td_scalar", + [timedelta(days=1), Timedelta(days=1), Timedelta(days=1).to_timedelta64()], + ) + def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar): # regression test for GH#5963 - s = pd.Series([timedelta(days=1), timedelta(days=2)]) - actual = s > timedelta(days=1) + box = box_with_array + xbox = box if box is not pd.Index else np.ndarray + ser = pd.Series([timedelta(days=1), timedelta(days=2)]) + ser = tm.box_expected(ser, box) + actual = ser > td_scalar expected = pd.Series([False, True]) - tm.assert_series_equal(actual, expected) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(actual, expected) - def test_tdi_cmp_str_invalid(self, box_with_array): - # GH#13624 - xbox = box_with_array if box_with_array is not pd.Index else np.ndarray - tdi = TimedeltaIndex(["1 day", "2 days"]) - tdarr = tm.box_expected(tdi, box_with_array) + @pytest.mark.parametrize("invalid", [345600000000000, "a"]) + def test_td64_comparisons_invalid(self, box_with_array, invalid): + # GH#13624 for str + box = box_with_array + rng = timedelta_range("1 days", periods=10) + obj = tm.box_expected(rng, box) - for left, right in [(tdarr, "a"), ("a", tdarr)]: - with pytest.raises(TypeError): - left > right - with pytest.raises(TypeError): - left >= right - with pytest.raises(TypeError): - left < right - with pytest.raises(TypeError): - left <= right - - result = left == right - expected = np.array([False, False], dtype=bool) - expected = tm.box_expected(expected, xbox) - tm.assert_equal(result, expected) + assert_invalid_comparison(obj, invalid, box) - result = left != right - expected = np.array([True, True], dtype=bool) 
- expected = tm.box_expected(expected, xbox) - tm.assert_equal(result, expected) + +class TestTimedelta64ArrayComparisons: + # TODO: All of these need to be parametrized over box @pytest.mark.parametrize("dtype", [None, object]) def test_comp_nat(self, dtype): @@ -191,10 +183,6 @@ def test_comparisons_coverage(self): expected = np.array([True, True, True] + [False] * 7) tm.assert_numpy_array_equal(result, expected) - # raise TypeError for now - with pytest.raises(TypeError): - rng < rng[3].value - result = rng == list(rng) exp = rng == rng tm.assert_numpy_array_equal(result, exp) @@ -835,19 +823,10 @@ def test_timedelta64_ops_nat(self): # ------------------------------------------------------------- # Invalid Operations - def test_td64arr_add_str_invalid(self, box_with_array): - # GH#13624 + @pytest.mark.parametrize("other", ["a", 3.14, np.array([2.0, 3.0])]) + def test_td64arr_add_sub_invalid(self, box_with_array, other): + # GH#13624 for str tdi = TimedeltaIndex(["1 day", "2 days"]) - tdi = tm.box_expected(tdi, box_with_array) - - with pytest.raises(TypeError): - tdi + "a" - with pytest.raises(TypeError): - "a" + tdi - - @pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])]) - def test_td64arr_add_sub_float(self, box_with_array, other): - tdi = TimedeltaIndex(["-1 days", "-1 days"]) tdarr = tm.box_expected(tdi, box_with_array) with pytest.raises(TypeError): @@ -1399,8 +1378,12 @@ def test_td64arr_add_offset_array(self, box): @pytest.mark.parametrize( "names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")] ) - def test_td64arr_sub_offset_index(self, names, box): + def test_td64arr_sub_offset_index(self, names, box_with_array): # GH#18824, GH#19744 + box = box_with_array + xbox = box if box is not tm.to_array else pd.Index + exname = names[2] if box is not tm.to_array else names[1] + if box is pd.DataFrame and names[1] == "bar": pytest.skip( "Name propagation for DataFrame does not behave like " @@ -1411,11 +1394,11 @@ def test_td64arr_sub_offset_index(self, names, box): other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1]) expected = TimedeltaIndex( - [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=names[2] + [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname ) tdi = tm.box_expected(tdi, box) - expected = tm.box_expected(expected, box) + expected = tm.box_expected(expected, xbox) # The DataFrame operation is transposed and so operates as separate # scalar operations, which do not issue a PerformanceWarning @@ -1631,7 +1614,7 @@ def test_td64arr_div_nat_invalid(self, box_with_array): rng = timedelta_range("1 days", "10 days", name="foo") rng = tm.box_expected(rng, box_with_array) - with pytest.raises(TypeError, match="'?true_divide'? 
cannot use operands"): + with pytest.raises(TypeError, match="unsupported operand type"): rng / pd.NaT with pytest.raises(TypeError, match="Cannot divide NaTType by"): pd.NaT / rng diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py index 1b62479530d24..3037ac79cd592 100644 --- a/pandas/tests/arrays/categorical/test_missing.py +++ b/pandas/tests/arrays/categorical/test_missing.py @@ -5,7 +5,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas import Categorical, Index, isna +from pandas import Categorical, Index, Series, isna import pandas.util.testing as tm @@ -59,11 +59,13 @@ def test_set_item_nan(self): ), (dict(), "Must specify a fill 'value' or 'method'."), (dict(method="bad"), "Invalid fill method. Expecting .* bad"), + (dict(value=Series([1, 2, 3, 4, "a"])), "fill value must be in categories"), ], ) def test_fillna_raises(self, fillna_kwargs, msg): # https://github.com/pandas-dev/pandas/issues/19682 - cat = Categorical([1, 2, 3]) + # https://github.com/pandas-dev/pandas/issues/13628 + cat = Categorical([1, 2, 3, None, None]) with pytest.raises(ValueError, match=msg): cat.fillna(**fillna_kwargs) diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index 9a09ea8422b1f..22c1d5373372a 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -349,7 +349,9 @@ def test_numeric_like_ops(self): ("__mul__", r"\*"), ("__truediv__", "/"), ]: - msg = r"Series cannot perform the operation {}".format(str_rep) + msg = r"Series cannot perform the operation {}|unsupported operand".format( + str_rep + ) with pytest.raises(TypeError, match=msg): getattr(df, op)(df) @@ -375,7 +377,9 @@ def test_numeric_like_ops(self): ("__mul__", r"\*"), ("__truediv__", "/"), ]: - msg = r"Series cannot perform the operation {}".format(str_rep) + msg = r"Series cannot perform the operation {}|unsupported operand".format( + str_rep + ) with pytest.raises(TypeError, match=msg): getattr(s, op)(2) diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py index 57e5a35d99e48..cb5b437c962f9 100644 --- a/pandas/tests/arrays/sparse/test_arithmetics.py +++ b/pandas/tests/arrays/sparse/test_arithmetics.py @@ -441,6 +441,23 @@ def test_with_list(op): tm.assert_sp_array_equal(result, expected) +def test_with_dataframe(): + # GH#27910 + arr = pd.SparseArray([0, 1], fill_value=0) + df = pd.DataFrame([[1, 2], [3, 4]]) + result = arr.__add__(df) + assert result is NotImplemented + + +def test_with_zerodim_ndarray(): + # GH#27910 + arr = pd.SparseArray([0, 1], fill_value=0) + + result = arr * np.array(2) + expected = arr * 2 + tm.assert_sp_array_equal(result, expected) + + @pytest.mark.parametrize("ufunc", [np.abs, np.exp]) @pytest.mark.parametrize( "arr", [pd.SparseArray([0, 0, -1, 1]), pd.SparseArray([None, None, -1, 1])] diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index ffda2f4de2700..7c482664bca48 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -1,6 +1,8 @@ import numpy as np import pytest +from pandas._libs import OutOfBoundsDatetime + import pandas as pd from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray import pandas.util.testing as tm @@ -462,6 +464,13 @@ def test_concat_same_type_different_freq(self): tm.assert_datetime_array_equal(result, 
expected) + def test_strftime(self, datetime_index): + arr = DatetimeArray(datetime_index) + + result = arr.strftime("%Y %b") + expected = np.array(datetime_index.strftime("%Y %b")) + tm.assert_numpy_array_equal(result, expected) + class TestTimedeltaArray(SharedTests): index_cls = pd.TimedeltaIndex @@ -608,6 +617,15 @@ def test_to_timestamp(self, how, period_index): # an EA-specific tm.assert_ function tm.assert_index_equal(pd.Index(result), pd.Index(expected)) + def test_to_timestamp_out_of_bounds(self): + # GH#19643 previously overflowed silently + pi = pd.period_range("1500", freq="Y", periods=3) + with pytest.raises(OutOfBoundsDatetime): + pi.to_timestamp() + + with pytest.raises(OutOfBoundsDatetime): + pi._data.to_timestamp() + @pytest.mark.parametrize("propname", PeriodArray._bool_ops) def test_bool_properties(self, period_index, propname): # in this case _bool_ops is just `is_leap_year` @@ -652,6 +670,13 @@ def test_array_interface(self, period_index): expected = np.asarray(arr).astype("S20") tm.assert_numpy_array_equal(result, expected) + def test_strftime(self, period_index): + arr = PeriodArray(period_index) + + result = arr.strftime("%Y") + expected = np.array(period_index.strftime("%Y")) + tm.assert_numpy_array_equal(result, expected) + @pytest.mark.parametrize( "array,casting_nats", diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py index 8fbfb4c12f4b2..31a9a0483081e 100644 --- a/pandas/tests/arrays/test_integer.py +++ b/pandas/tests/arrays/test_integer.py @@ -280,7 +280,7 @@ def test_arith_coerce_scalar(self, data, all_arithmetic_operators): other = 0.01 self._check_op(s, op, other) - @pytest.mark.parametrize("other", [1.0, 1.0, np.array(1.0), np.array([1.0])]) + @pytest.mark.parametrize("other", [1.0, np.array(1.0)]) def test_arithmetic_conversion(self, all_arithmetic_operators, other): # if we have a float operand we should have a float result # if that is equal to an integer @@ -290,6 +290,15 @@ def test_arithmetic_conversion(self, all_arithmetic_operators, other): result = op(s, other) assert result.dtype is np.dtype("float") + def test_arith_len_mismatch(self, all_arithmetic_operators): + # operating with a list-like with non-matching length raises + op = self.get_op_from_name(all_arithmetic_operators) + other = np.array([1.0]) + + s = pd.Series([1, 2, 3], dtype="Int64") + with pytest.raises(ValueError, match="Lengths must match"): + op(s, other) + @pytest.mark.parametrize("other", [0, 0.5]) def test_arith_zero_dim_ndarray(self, other): arr = integer_array([1, None, 2]) @@ -322,8 +331,9 @@ def test_error(self, data, all_arithmetic_operators): ops(pd.Series(pd.date_range("20180101", periods=len(s)))) # 2d - with pytest.raises(NotImplementedError): - opa(pd.DataFrame({"A": s})) + result = opa(pd.DataFrame({"A": s})) + assert result is NotImplemented + with pytest.raises(NotImplementedError): opa(np.arange(len(s)).reshape(-1, len(s))) @@ -379,8 +389,6 @@ def test_compare_array(self, data, all_compare_operators): class TestCasting: - pass - @pytest.mark.parametrize("dropna", [True, False]) def test_construct_index(self, all_data, dropna): # ensure that we do not coerce to Float64Index, rather diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index c500760fa1390..b6ffd8a83e409 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -14,7 +14,7 @@ from pandas.core.dtypes.common import is_bool, is_list_like, is_scalar import pandas as pd -from pandas import 
DataFrame, Series, date_range +from pandas import DataFrame, Series, compat, date_range from pandas.core.computation import pytables from pandas.core.computation.check import _NUMEXPR_VERSION from pandas.core.computation.engines import NumExprClobberingError, _engines @@ -1267,7 +1267,10 @@ def test_assignment_column(self): msg = "left hand side of an assignment must be a single name" with pytest.raises(SyntaxError, match=msg): df.eval("d,c = a + b") - msg = "can't assign to function call" + if compat.PY38: + msg = "cannot assign to function call" + else: + msg = "can't assign to function call" with pytest.raises(SyntaxError, match=msg): df.eval('Timestamp("20131001") = a + b') @@ -1967,6 +1970,26 @@ def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser): pd.eval(ex, engine=engine, parser=parser) +@pytest.mark.parametrize( + "other", + [ + "'x'", + pytest.param( + "...", marks=pytest.mark.xfail(not compat.PY38, reason="GH-28116") + ), + ], +) +def test_equals_various(other): + df = DataFrame({"A": ["a", "b", "c"]}) + result = df.eval("A == {}".format(other)) + expected = Series([False, False, False], name="A") + if _USE_NUMEXPR: + # https://github.com/pandas-dev/pandas/issues/10239 + # lose name with numexpr engine. Remove when that's fixed. + expected.name = None + tm.assert_series_equal(result, expected) + + def test_inf(engine, parser): s = "inf + 1" expected = np.inf diff --git a/pandas/tests/config/test_config.py b/pandas/tests/config/test_config.py index 3f12d1d7a292d..efaeb7b1471ec 100644 --- a/pandas/tests/config/test_config.py +++ b/pandas/tests/config/test_config.py @@ -208,13 +208,16 @@ def test_set_option_multiple(self): def test_validation(self): self.cf.register_option("a", 1, "doc", validator=self.cf.is_int) + self.cf.register_option("d", 1, "doc", validator=self.cf.is_nonnegative_int) self.cf.register_option("b.c", "hullo", "doc2", validator=self.cf.is_text) + msg = "Value must have type '<class 'int'>'" with pytest.raises(ValueError, match=msg): self.cf.register_option("a.b.c.d2", "NO", "doc", validator=self.cf.is_int) self.cf.set_option("a", 2) # int is_int self.cf.set_option("b.c", "wurld") # str is_str + self.cf.set_option("d", 2) # None not is_int with pytest.raises(ValueError, match=msg): @@ -222,6 +225,16 @@ def test_validation(self): with pytest.raises(ValueError, match=msg): self.cf.set_option("a", "ab") + msg = "Value must be a nonnegative integer or None" + with pytest.raises(ValueError, match=msg): + self.cf.register_option( + "a.b.c.d3", "NO", "doc", validator=self.cf.is_nonnegative_int + ) + with pytest.raises(ValueError, match=msg): + self.cf.register_option( + "a.b.c.d3", -2, "doc", validator=self.cf.is_nonnegative_int + ) + msg = r"Value must be an instance of <class 'str'>\|<class 'bytes'>" with pytest.raises(ValueError, match=msg): self.cf.set_option("b.c", 1) diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index a688dec50bc95..bbc485ecf94f2 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -86,6 +86,10 @@ def test_isna_isnull(self, isna_f): assert not isna_f(np.inf) assert not isna_f(-np.inf) + # type + assert not isna_f(type(pd.Series())) + assert not isna_f(type(pd.DataFrame())) + # series for s in [ tm.makeFloatSeries(), diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py index cc0deca765b41..9c53210b75d6b 100644 --- a/pandas/tests/extension/arrow/test_bool.py +++ b/pandas/tests/extension/arrow/test_bool.py @@ 
-41,6 +41,10 @@ def test_copy(self, data): # __setitem__ does not work, so we only have a smoke-test data.copy() + def test_view(self, data): + # __setitem__ does not work, so we only have a smoke-test + data.view() + class TestConstructors(BaseArrowTests, base.BaseConstructorsTests): def test_from_dtype(self, data): diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py index e02586eacfea7..d56cc50f4739c 100644 --- a/pandas/tests/extension/base/getitem.py +++ b/pandas/tests/extension/base/getitem.py @@ -260,3 +260,9 @@ def test_reindex_non_na_fill_value(self, data_missing): expected = pd.Series(data_missing._from_sequence([na, valid, valid])) self.assert_series_equal(result, expected) + + def test_loc_len1(self, data): + # see GH-27785 take_nd with indexer of len 1 resulting in wrong ndim + df = pd.DataFrame({"A": data}) + res = df.loc[[0], "A"] + assert res._data._block.ndim == 1 diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py index dee8021f5375f..a29f6deeffae6 100644 --- a/pandas/tests/extension/base/interface.py +++ b/pandas/tests/extension/base/interface.py @@ -75,3 +75,18 @@ def test_copy(self, data): data[1] = data[0] assert result[1] != result[0] + + def test_view(self, data): + # view with no dtype should return a shallow copy, *not* the same + # object + assert data[1] != data[0] + + result = data.view() + assert result is not data + assert type(result) == type(data) + + result[1] = result[0] + assert data[1] == data[0] + + # check specifically that the `dtype` kwarg is accepted + data.view(dtype=None) diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index c28ff956a33a4..a1988744d76a1 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -137,11 +137,11 @@ def __setitem__(self, key, value): value = decimal.Decimal(value) self._data[key] = value - def __len__(self): + def __len__(self) -> int: return len(self._data) @property - def nbytes(self): + def nbytes(self) -> int: n = len(self) if n: return n * sys.getsizeof(self[0]) diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 9dec023f4073a..3ac9d37ccf4f3 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -392,17 +392,6 @@ def test_ufunc_fallback(data): tm.assert_series_equal(result, expected) -def test_formatting_values_deprecated(): - class DecimalArray2(DecimalArray): - def _formatting_values(self): - return np.array(self) - - ser = pd.Series(DecimalArray2([decimal.Decimal("1.0")])) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - repr(ser) - - def test_array_ufunc(): a = to_decimal([1, 2, 3]) result = np.exp(a) diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index 21c4ac8f055a2..b64ddbd6ac84d 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -80,6 +80,9 @@ def __getitem__(self, item): elif isinstance(item, abc.Iterable): # fancy indexing return type(self)([self.data[i] for i in item]) + elif isinstance(item, slice) and item == slice(None): + # Make sure we get a view + return type(self)(self.data) else: # slice return type(self)(self.data[item]) @@ -103,11 +106,11 @@ def __setitem__(self, key, value): assert isinstance(v, self.dtype.type) self.data[k] = v - def __len__(self): + def __len__(self) -> int: return 
len(self.data) @property - def nbytes(self): + def nbytes(self) -> int: return sys.getsizeof(self.data) def isna(self): diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index f7456d24ad6d3..0c0e8b0123c03 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -211,7 +211,7 @@ def test_arith_series_with_scalar(self, data, all_arithmetic_operators): def test_add_series_with_extension_array(self, data): ser = pd.Series(data) - with pytest.raises(TypeError, match="cannot perform"): + with pytest.raises(TypeError, match="cannot perform|unsupported operand"): ser + data def test_divmod_series_array(self): diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py index 1a4f84e2c0fd2..6311070cfe2bb 100644 --- a/pandas/tests/extension/test_external_block.py +++ b/pandas/tests/extension/test_external_block.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -from pandas.core.internals import BlockManager, SingleBlockManager +from pandas.core.internals import BlockManager from pandas.core.internals.blocks import Block, NonConsolidatableMixIn @@ -10,9 +10,6 @@ class CustomBlock(NonConsolidatableMixIn, Block): _holder = np.ndarray - def formatting_values(self): - return np.array(["Val: {}".format(i) for i in self.values]) - def concat_same_type(self, to_concat, placement=None): """ Always concatenate disregarding self.ndim as the values are @@ -35,22 +32,6 @@ def df(): return pd.DataFrame(block_manager) -def test_custom_repr(): - values = np.arange(3, dtype="int64") - - # series - block = CustomBlock(values, placement=slice(0, 3)) - - s = pd.Series(SingleBlockManager(block, pd.RangeIndex(3))) - assert repr(s) == "0 Val: 0\n1 Val: 1\n2 Val: 2\ndtype: int64" - - # dataframe - block = CustomBlock(values, placement=slice(0, 1)) - blk_mgr = BlockManager([block], [["col"], range(3)]) - df = pd.DataFrame(blk_mgr) - assert repr(df) == " col\n0 Val: 0\n1 Val: 1\n2 Val: 2" - - def test_concat_series(): # GH17728 values = np.arange(3, dtype="int64") diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py index 1aab71286b4a6..4fdcf930d224f 100644 --- a/pandas/tests/extension/test_interval.py +++ b/pandas/tests/extension/test_interval.py @@ -95,7 +95,10 @@ class TestGrouping(BaseInterval, base.BaseGroupbyTests): class TestInterface(BaseInterval, base.BaseInterfaceTests): - pass + def test_view(self, data): + # __setitem__ incorrectly makes a copy (GH#27147), so we only + # have a smoke-test + data.view() class TestReduce(base.BaseNoReduceTests): diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 84d59902d2aa7..6ebe71e173ec2 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -103,6 +103,10 @@ def test_copy(self, data): # __setitem__ does not work, so we only have a smoke-test data.copy() + def test_view(self, data): + # __setitem__ does not work, so we only have a smoke-test + data.view() + class TestConstructors(BaseSparseTests, base.BaseConstructorsTests): pass diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index ae14563e5952a..a78b2ab7d1c4c 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -821,6 +821,14 @@ def test_setitem_empty_frame_with_boolean(self, dtype, kwargs): df[df > df2] = 47 assert_frame_equal(df, df2) + def 
test_setitem_with_empty_listlike(self): + # GH #17101 + index = pd.Index([], name="idx") + result = pd.DataFrame(columns=["A"], index=index) + result["A"] = [] + expected = pd.DataFrame(columns=["A"], index=index) + tm.assert_index_equal(result.index, expected.index) + def test_setitem_scalars_no_index(self): # GH16823 / 17894 df = DataFrame() diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py index 2862615ef8585..b341ed6a52ca5 100644 --- a/pandas/tests/frame/test_replace.py +++ b/pandas/tests/frame/test_replace.py @@ -1069,18 +1069,24 @@ def test_replace_truthy(self): e = df assert_frame_equal(r, e) - def test_replace_int_to_int_chain(self): + def test_nested_dict_overlapping_keys_replace_int(self): + # GH 27660 keep behaviour consistent for simple dictionary and + # nested dictionary replacement df = DataFrame({"a": list(range(1, 5))}) - with pytest.raises(ValueError, match="Replacement not allowed .+"): - df.replace({"a": dict(zip(range(1, 5), range(2, 6)))}) - def test_replace_str_to_str_chain(self): + result = df.replace({"a": dict(zip(range(1, 5), range(2, 6)))}) + expected = df.replace(dict(zip(range(1, 5), range(2, 6)))) + assert_frame_equal(result, expected) + + def test_nested_dict_overlapping_keys_replace_str(self): + # GH 27660 a = np.arange(1, 5) astr = a.astype(str) bstr = np.arange(2, 6).astype(str) df = DataFrame({"a": astr}) - with pytest.raises(ValueError, match="Replacement not allowed .+"): - df.replace({"a": dict(zip(astr, bstr))}) + result = df.replace(dict(zip(astr, bstr))) + expected = df.replace({"a": dict(zip(astr, bstr))}) + assert_frame_equal(result, expected) def test_replace_swapping_bug(self): df = pd.DataFrame({"a": [True, False, True]}) diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index f3452e9a85fb3..84e343f07f990 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -984,7 +984,7 @@ def test_stack_preserve_categorical_dtype(self, ordered, labels): df = DataFrame([[10, 11, 12]], columns=cidx) result = df.stack() - # `MutliIndex.from_product` preserves categorical dtype - + # `MultiIndex.from_product` preserves categorical dtype - # it's tested elsewhere. 
midx = pd.MultiIndex.from_product([df.index, cidx]) expected = Series([10, 11, 12], index=midx) diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index e2e4a82ff581c..8fb028a0f0326 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -695,6 +695,20 @@ def _make_frame(names=None): tm.assert_index_equal(recons.columns, exp.columns) assert len(recons) == 0 + def test_to_csv_interval_index(self): + # GH 28210 + df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3)) + + with ensure_clean("__tmp_to_csv_interval_index__.csv") as path: + df.to_csv(path) + result = self.read_csv(path, index_col=0) + + # can't roundtrip intervalindex via read_csv so check string repr (GH 23595) + expected = df.copy() + expected.index = expected.index.astype(str) + + assert_frame_equal(result, expected) + def test_to_csv_float32_nanrep(self): df = DataFrame(np.random.randn(1, 4).astype(np.float32)) df[1] = np.nan diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 52d4fa76bf879..aa80c461a00e7 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, compat, concat from pandas.core.base import SpecificationError -from pandas.core.groupby.generic import _maybe_mangle_lambdas +from pandas.core.groupby.generic import _make_unique, _maybe_mangle_lambdas from pandas.core.groupby.grouper import Grouping import pandas.util.testing as tm @@ -560,3 +560,150 @@ def test_with_kwargs(self): result = pd.Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10) expected = pd.DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]}) tm.assert_frame_equal(result, expected) + + def test_agg_with_one_lambda(self): + # GH 25719, write tests for DataFrameGroupby.agg with only one lambda + df = pd.DataFrame( + { + "kind": ["cat", "dog", "cat", "dog"], + "height": [9.1, 6.0, 9.5, 34.0], + "weight": [7.9, 7.5, 9.9, 198.0], + } + ) + + # sort for 35 and earlier + columns = ["height_sqr_min", "height_max", "weight_max"] + if compat.PY35: + columns = ["height_max", "height_sqr_min", "weight_max"] + expected = pd.DataFrame( + { + "height_sqr_min": [82.81, 36.00], + "height_max": [9.5, 34.0], + "weight_max": [9.9, 198.0], + }, + index=pd.Index(["cat", "dog"], name="kind"), + columns=columns, + ) + + # check pd.NameAgg case + result1 = df.groupby(by="kind").agg( + height_sqr_min=pd.NamedAgg( + column="height", aggfunc=lambda x: np.min(x ** 2) + ), + height_max=pd.NamedAgg(column="height", aggfunc="max"), + weight_max=pd.NamedAgg(column="weight", aggfunc="max"), + ) + tm.assert_frame_equal(result1, expected) + + # check agg(key=(col, aggfunc)) case + result2 = df.groupby(by="kind").agg( + height_sqr_min=("height", lambda x: np.min(x ** 2)), + height_max=("height", "max"), + weight_max=("weight", "max"), + ) + tm.assert_frame_equal(result2, expected) + + def test_agg_multiple_lambda(self): + # GH25719, test for DataFrameGroupby.agg with multiple lambdas + # with mixed aggfunc + df = pd.DataFrame( + { + "kind": ["cat", "dog", "cat", "dog"], + "height": [9.1, 6.0, 9.5, 34.0], + "weight": [7.9, 7.5, 9.9, 198.0], + } + ) + # sort for 35 and earlier + columns = [ + "height_sqr_min", + "height_max", + "weight_max", + "height_max_2", + "weight_min", + ] + if compat.PY35: + columns = [ + "height_max", + "height_max_2", + "height_sqr_min", + "weight_max", 
+ "weight_min", + ] + expected = pd.DataFrame( + { + "height_sqr_min": [82.81, 36.00], + "height_max": [9.5, 34.0], + "weight_max": [9.9, 198.0], + "height_max_2": [9.5, 34.0], + "weight_min": [7.9, 7.5], + }, + index=pd.Index(["cat", "dog"], name="kind"), + columns=columns, + ) + + # check agg(key=(col, aggfunc)) case + result1 = df.groupby(by="kind").agg( + height_sqr_min=("height", lambda x: np.min(x ** 2)), + height_max=("height", "max"), + weight_max=("weight", "max"), + height_max_2=("height", lambda x: np.max(x)), + weight_min=("weight", lambda x: np.min(x)), + ) + tm.assert_frame_equal(result1, expected) + + # check pd.NamedAgg case + result2 = df.groupby(by="kind").agg( + height_sqr_min=pd.NamedAgg( + column="height", aggfunc=lambda x: np.min(x ** 2) + ), + height_max=pd.NamedAgg(column="height", aggfunc="max"), + weight_max=pd.NamedAgg(column="weight", aggfunc="max"), + height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)), + weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)), + ) + tm.assert_frame_equal(result2, expected) + + @pytest.mark.parametrize( + "order, expected_reorder", + [ + ( + [ + ("height", "<lambda>"), + ("height", "max"), + ("weight", "max"), + ("height", "<lambda>"), + ("weight", "<lambda>"), + ], + [ + ("height", "<lambda>_0"), + ("height", "max"), + ("weight", "max"), + ("height", "<lambda>_1"), + ("weight", "<lambda>"), + ], + ), + ( + [ + ("col2", "min"), + ("col1", "<lambda>"), + ("col1", "<lambda>"), + ("col1", "<lambda>"), + ], + [ + ("col2", "min"), + ("col1", "<lambda>_0"), + ("col1", "<lambda>_1"), + ("col1", "<lambda>_2"), + ], + ), + ( + [("col", "<lambda>"), ("col", "<lambda>"), ("col", "<lambda>")], + [("col", "<lambda>_0"), ("col", "<lambda>_1"), ("col", "<lambda>_2")], + ), + ], + ) + def test_make_unique(self, order, expected_reorder): + # GH 27519, test if make_unique function reorders correctly + result = _make_unique(order) + + assert result == expected_reorder diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index 2195686ee9c7f..b8f9ecd42bae3 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -2,7 +2,7 @@ from numpy import nan import pytest -from pandas._libs import groupby, lib, reduction +from pandas._libs import groupby, lib, reduction as libreduction from pandas.core.dtypes.common import ensure_int64 @@ -18,7 +18,7 @@ def test_series_grouper(): labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) - grouper = reduction.SeriesGrouper(obj, np.mean, labels, 2, dummy) + grouper = libreduction.SeriesGrouper(obj, np.mean, labels, 2, dummy) result, counts = grouper.get_result() expected = np.array([obj[3:6].mean(), obj[6:].mean()]) @@ -34,7 +34,7 @@ def test_series_bin_grouper(): bins = np.array([3, 6]) - grouper = reduction.SeriesBinGrouper(obj, np.mean, bins, dummy) + grouper = libreduction.SeriesBinGrouper(obj, np.mean, bins, dummy) result, counts = grouper.get_result() expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()]) @@ -120,31 +120,31 @@ class TestMoments: class TestReducer: def test_int_index(self): arr = np.random.randn(100, 4) - result = reduction.compute_reduction(arr, np.sum, labels=Index(np.arange(4))) + result = libreduction.compute_reduction(arr, np.sum, labels=Index(np.arange(4))) expected = arr.sum(0) assert_almost_equal(result, expected) - result = reduction.compute_reduction( + result = libreduction.compute_reduction( arr, np.sum, axis=1, 
labels=Index(np.arange(100)) ) expected = arr.sum(1) assert_almost_equal(result, expected) dummy = Series(0.0, index=np.arange(100)) - result = reduction.compute_reduction( + result = libreduction.compute_reduction( arr, np.sum, dummy=dummy, labels=Index(np.arange(4)) ) expected = arr.sum(0) assert_almost_equal(result, expected) dummy = Series(0.0, index=np.arange(4)) - result = reduction.compute_reduction( + result = libreduction.compute_reduction( arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100)) ) expected = arr.sum(1) assert_almost_equal(result, expected) - result = reduction.compute_reduction( + result = libreduction.compute_reduction( arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100)) ) assert_almost_equal(result, expected) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 9b8c8e6d8a077..e09af3fd48ee6 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -4,6 +4,8 @@ import numpy as np import pytest +from pandas.compat import PY37, is_platform_windows + import pandas as pd from pandas import ( Categorical, @@ -208,6 +210,9 @@ def test_level_get_group(observed): # GH#21636 previously flaky on py37 +@pytest.mark.xfail( + is_platform_windows() and PY37, reason="Flaky, GH-27902", strict=False +) @pytest.mark.parametrize("ordered", [True, False]) def test_apply(ordered): # GH 10138 @@ -429,6 +434,21 @@ def test_observed_groups_with_nan(observed): tm.assert_dict_equal(result, expected) +def test_observed_nth(): + # GH 26385 + cat = pd.Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"]) + ser = pd.Series([1, 2, 3]) + df = pd.DataFrame({"cat": cat, "ser": ser}) + + result = df.groupby("cat", observed=False)["ser"].nth(0) + + index = pd.Categorical(["a", "b", "c"], categories=["a", "b", "c"]) + expected = pd.Series([1, np.nan, np.nan], index=index, name="ser") + expected.index.name = "cat" + + tm.assert_series_equal(result, expected) + + def test_dataframe_categorical_with_nan(observed): # GH 21151 s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"]) @@ -506,7 +526,7 @@ def test_datetime(): desc_result = grouped.describe() idx = cats.codes.argsort() - ord_labels = cats.take_nd(idx) + ord_labels = cats.take(idx) ord_data = data.take(idx) expected = ord_data.groupby(ord_labels, observed=False).describe() assert_frame_equal(desc_result, expected) @@ -1163,3 +1183,13 @@ def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data): lambda x: OrderedDict([("min", x.min()), ("max", x.max())]) ) assert_series_equal(result, expected) + + +@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])]) +def test_groupby_categorical_axis_1(code): + # GH 13420 + df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]}) + cat = pd.Categorical.from_codes(code, categories=list("abc")) + result = df.groupby(cat, axis=1).mean() + expected = df.T.groupby(cat, axis=0).mean().T + assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index efc3142b25b82..d89233f2fd603 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1238,6 +1238,75 @@ def test_quantile(interpolation, a_vals, b_vals, q): tm.assert_frame_equal(result, expected) +def test_quantile_array(): + # https://github.com/pandas-dev/pandas/issues/27526 + df = pd.DataFrame({"A": [0, 1, 2, 3, 4]}) + result = df.groupby([0, 0, 1, 1, 1]).quantile([0.25]) + 
+ index = pd.MultiIndex.from_product([[0, 1], [0.25]]) + expected = pd.DataFrame({"A": [0.25, 2.50]}, index=index) + tm.assert_frame_equal(result, expected) + + df = pd.DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]}) + index = pd.MultiIndex.from_product([[0, 1], [0.25, 0.75]]) + + result = df.groupby([0, 0, 1, 1]).quantile([0.25, 0.75]) + expected = pd.DataFrame( + {"A": [0.25, 0.75, 2.25, 2.75], "B": [4.25, 4.75, 6.25, 6.75]}, index=index + ) + tm.assert_frame_equal(result, expected) + + +def test_quantile_array2(): + # https://github.com/pandas-dev/pandas/pull/28085#issuecomment-524066959 + df = pd.DataFrame( + np.random.RandomState(0).randint(0, 5, size=(10, 3)), columns=list("ABC") + ) + result = df.groupby("A").quantile([0.3, 0.7]) + expected = pd.DataFrame( + { + "B": [0.9, 2.1, 2.2, 3.4, 1.6, 2.4, 2.3, 2.7, 0.0, 0.0], + "C": [1.2, 2.8, 1.8, 3.0, 0.0, 0.0, 1.9, 3.1, 3.0, 3.0], + }, + index=pd.MultiIndex.from_product( + [[0, 1, 2, 3, 4], [0.3, 0.7]], names=["A", None] + ), + ) + tm.assert_frame_equal(result, expected) + + +def test_quantile_array_no_sort(): + df = pd.DataFrame({"A": [0, 1, 2], "B": [3, 4, 5]}) + result = df.groupby([1, 0, 1], sort=False).quantile([0.25, 0.5, 0.75]) + expected = pd.DataFrame( + {"A": [0.5, 1.0, 1.5, 1.0, 1.0, 1.0], "B": [3.5, 4.0, 4.5, 4.0, 4.0, 4.0]}, + index=pd.MultiIndex.from_product([[1, 0], [0.25, 0.5, 0.75]]), + ) + tm.assert_frame_equal(result, expected) + + result = df.groupby([1, 0, 1], sort=False).quantile([0.75, 0.25]) + expected = pd.DataFrame( + {"A": [1.5, 0.5, 1.0, 1.0], "B": [4.5, 3.5, 4.0, 4.0]}, + index=pd.MultiIndex.from_product([[1, 0], [0.75, 0.25]]), + ) + tm.assert_frame_equal(result, expected) + + +def test_quantile_array_multiple_levels(): + df = pd.DataFrame( + {"A": [0, 1, 2], "B": [3, 4, 5], "c": ["a", "a", "a"], "d": ["a", "a", "b"]} + ) + result = df.groupby(["c", "d"]).quantile([0.25, 0.75]) + index = pd.MultiIndex.from_tuples( + [("a", "a", 0.25), ("a", "a", 0.75), ("a", "b", 0.25), ("a", "b", 0.75)], + names=["c", "d", None], + ) + expected = pd.DataFrame( + {"A": [0.25, 0.75, 2.0, 2.0], "B": [3.25, 3.75, 5.0, 5.0]}, index=index + ) + tm.assert_frame_equal(result, expected) + + def test_quantile_raises(): df = pd.DataFrame( [["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"] @@ -1247,6 +1316,17 @@ def test_quantile_raises(): df.groupby("key").quantile() +def test_quantile_out_of_bounds_q_raises(): + # https://github.com/pandas-dev/pandas/issues/27470 + df = pd.DataFrame(dict(a=[0, 0, 0, 1, 1, 1], b=range(6))) + g = df.groupby([0, 0, 0, 1, 1, 1]) + with pytest.raises(ValueError, match="Got '50.0' instead"): + g.quantile(50) + + with pytest.raises(ValueError, match="Got '-1.0' instead"): + g.quantile(-1) + + # pipe # -------------------------------- diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 9459069f0ea2d..0e74c87388682 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -1,3 +1,5 @@ +import gc + import numpy as np import pytest @@ -908,3 +910,10 @@ def test_is_unique(self): # multiple NA should not be unique index_na_dup = index_na.insert(0, np.nan) assert index_na_dup.is_unique is False + + def test_engine_reference_cycle(self): + # GH27585 + index = self.create_index() + nrefs_pre = len(gc.get_referrers(index)) + index._engine + assert len(gc.get_referrers(index)) == nrefs_pre diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index 4ea32359b8d4a..ab3107a0798e5 100644 --- 
a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -377,3 +377,11 @@ def test_nanosecond_field(self): dti = DatetimeIndex(np.arange(10)) tm.assert_index_equal(dti.nanosecond, pd.Index(np.arange(10, dtype=np.int64))) + + +def test_iter_readonly(): + # GH#28055 ints_to_pydatetime with readonly array + arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")]) + arr.setflags(write=False) + dti = pd.to_datetime(arr) + list(dti) diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 8db15709da35d..9af0f47f6dce9 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1620,6 +1620,18 @@ def test_dayfirst(self, cache): tm.assert_index_equal(expected, idx5) tm.assert_index_equal(expected, idx6) + @pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray]) + def test_to_datetime_dta_tz(self, klass): + # GH#27733 + dti = date_range("2015-04-05", periods=3).rename("foo") + expected = dti.tz_localize("UTC") + + obj = klass(dti) + expected = klass(expected) + + result = to_datetime(obj, utc=True) + tm.assert_equal(result, expected) + class TestGuessDatetimeFormat: @td.skip_if_not_us_locale diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py index e2abb4531525a..82a10d24dad30 100644 --- a/pandas/tests/indexes/interval/test_construction.py +++ b/pandas/tests/indexes/interval/test_construction.py @@ -421,32 +421,3 @@ def test_index_mixed_closed(self): result = Index(intervals) expected = Index(intervals, dtype=object) tm.assert_index_equal(result, expected) - - -class TestFromIntervals(TestClassConstructors): - """ - Tests for IntervalIndex.from_intervals, which is deprecated in favor of the - IntervalIndex constructor. Same tests as the IntervalIndex constructor, - plus deprecation test. Should only need to delete this class when removed. 
- """ - - @pytest.fixture - def constructor(self): - def from_intervals_ignore_warnings(*args, **kwargs): - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - return IntervalIndex.from_intervals(*args, **kwargs) - - return from_intervals_ignore_warnings - - def test_deprecated(self): - ivs = [Interval(0, 1), Interval(1, 2)] - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - IntervalIndex.from_intervals(ivs) - - @pytest.mark.skip(reason="parent class test that is not applicable") - def test_index_object_dtype(self): - pass - - @pytest.mark.skip(reason="parent class test that is not applicable") - def test_index_mixed_closed(self): - pass diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index c1a21e6a7f152..eeb0f43f4b900 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -417,6 +417,46 @@ def test_repr_missing(self, constructor, expected): result = repr(obj) assert result == expected + @pytest.mark.parametrize( + "tuples, closed, expected_data", + [ + ([(0, 1), (1, 2), (2, 3)], "left", ["[0, 1)", "[1, 2)", "[2, 3)"]), + ( + [(0.5, 1.0), np.nan, (2.0, 3.0)], + "right", + ["(0.5, 1.0]", "NaN", "(2.0, 3.0]"], + ), + ( + [ + (Timestamp("20180101"), Timestamp("20180102")), + np.nan, + ((Timestamp("20180102"), Timestamp("20180103"))), + ], + "both", + ["[2018-01-01, 2018-01-02]", "NaN", "[2018-01-02, 2018-01-03]"], + ), + ( + [ + (Timedelta("0 days"), Timedelta("1 days")), + (Timedelta("1 days"), Timedelta("2 days")), + np.nan, + ], + "neither", + [ + "(0 days 00:00:00, 1 days 00:00:00)", + "(1 days 00:00:00, 2 days 00:00:00)", + "NaN", + ], + ), + ], + ) + def test_to_native_types(self, tuples, closed, expected_data): + # GH 28210 + index = IntervalIndex.from_tuples(tuples, closed=closed) + result = index.to_native_types() + expected = np.array(expected_data) + tm.assert_numpy_array_equal(result, expected) + def test_get_item(self, closed): i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), closed=closed) assert i[0] == Interval(0.0, 1.0, closed=closed) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index c40a9bce9385b..d1ed79118d2fa 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2805,3 +2805,17 @@ def test_deprecated_fastpath(): expected = pd.CategoricalIndex(["a", "b", "c"], name="test") tm.assert_index_equal(idx, expected) + + +def test_shape_of_invalid_index(): + # Currently, it is possible to create "invalid" index objects backed by + # a multi-dimensional array (see https://github.com/pandas-dev/pandas/issues/27125 + # about this). 
However, as long as this is not solved in general, this test ensures
+    # that the returned shape is consistent with this underlying array for
+    # compat with matplotlib (see https://github.com/pandas-dev/pandas/issues/27775)
+    a = np.arange(8).reshape(2, 2, 2)
+    idx = pd.Index(a)
+    assert idx.shape == a.shape
+
+    idx = pd.Index([0, 1, 2, 3])
+    assert idx[:, None].shape == (4, 1)
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index 280b0a99c7e68..67bf9bd20e716 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -823,6 +823,11 @@ def test_equals_categorical(self):
         msg = (
             "categorical index comparisons must have the same categories"
             " and ordered attributes"
+            "|"
+            "Categoricals can only be compared if 'categories' are the same. "
+            "Categories are different lengths"
+            "|"
+            "Categoricals can only be compared if 'ordered' is the same"
         )
         with pytest.raises(TypeError, match=msg):
             ci1 == ci2
diff --git a/pandas/tests/indexes/test_numpy_compat.py b/pandas/tests/indexes/test_numpy_compat.py
index f9ca1bca04165..645ad19ea4cc9 100644
--- a/pandas/tests/indexes/test_numpy_compat.py
+++ b/pandas/tests/indexes/test_numpy_compat.py
@@ -118,4 +118,7 @@ def test_elementwise_comparison_warning():
     # this test.
     idx = Index([1, 2])
     with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
-        idx == "a"
+        result = idx == "a"
+
+    expected = np.array([False, False])
+    tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index a08b2b4c66af2..8b48c2bf7169f 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -390,3 +390,26 @@ def test_loc_getitem_lowerdim_corner(multiindex_dataframe_random_data):
     expected = 0
     result = df.sort_index().loc[("bar", "three"), "B"]
     assert result == expected
+
+
+def test_loc_setitem_single_column_slice():
+    # case from https://github.com/pandas-dev/pandas/issues/27841
+    df = DataFrame(
+        "string",
+        index=list("abcd"),
+        columns=MultiIndex.from_product([["Main"], ("another", "one")]),
+    )
+    df["labels"] = "a"
+    df.loc[:, "labels"] = df.index
+    tm.assert_numpy_array_equal(np.asarray(df["labels"]), np.asarray(df.index))
+
+    # test with non-object block
+    df = DataFrame(
+        np.nan,
+        index=range(4),
+        columns=MultiIndex.from_tuples([("A", "1"), ("A", "2"), ("B", "1")]),
+    )
+    expected = df.copy()
+    df.loc[:, "B"] = np.arange(4)
+    expected.iloc[:, 2] = np.arange(4)
+    tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index ed80e249220fd..05b58b0eca9b8 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -1038,10 +1038,6 @@ def test_replace_series(self, how, to_key, from_key):
         "from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"]
     )
     def test_replace_series_datetime_tz(self, how, to_key, from_key):
-        how = "series"
-        from_key = "datetime64[ns, US/Eastern]"
-        to_key = "timedelta64[ns]"
-
         index = pd.Index([3, 4], name="xyz")
         obj = pd.Series(self.rep[from_key], index=index, name="yyy")
         assert obj.dtype == from_key
diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py
index 45ccd8d1b8fb3..6029db8ed66f6 100644
--- a/pandas/tests/indexing/test_ix.py
+++ b/pandas/tests/indexing/test_ix.py
@@ -343,3 +343,13 @@ def test_ix_duplicate_returns_series(self):
        r = df.ix[0.2, "a"]
        e =
df.loc[0.2, "a"] tm.assert_series_equal(r, e) + + def test_ix_intervalindex(self): + # https://github.com/pandas-dev/pandas/issues/27865 + df = DataFrame( + np.random.randn(5, 2), + index=pd.IntervalIndex.from_breaks([-np.inf, 0, 1, 2, 3, np.inf]), + ) + result = df.ix[0:2, 0] + expected = df.iloc[0:2, 0] + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index abe0cd86c90d7..9845b1ac3a4b9 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1070,6 +1070,16 @@ def test_series_indexing_zerodim_np_array(self): result = s.loc[np.array(0)] assert result == 1 + def test_loc_reverse_assignment(self): + # GH26939 + data = [1, 2, 3, 4, 5, 6] + [None] * 4 + expected = Series(data, index=range(2010, 2020)) + + result = pd.Series(index=range(2010, 2020)) + result.loc[2015:2010:-1] = [6, 5, 4, 3, 2, 1] + + tm.assert_series_equal(result, expected) + def test_series_loc_getitem_label_list_missing_values(): # gh-11428 diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index 68e93f06e43dc..c4505231932c6 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -442,10 +442,10 @@ def test_partial_set_empty_frame(self): # these work as they don't really change # anything but the index # GH5632 - expected = DataFrame(columns=["foo"], index=Index([], dtype="int64")) + expected = DataFrame(columns=["foo"], index=Index([], dtype="object")) def f(): - df = DataFrame() + df = DataFrame(index=Index([], dtype="object")) df["foo"] = Series([], dtype="object") return df @@ -469,22 +469,21 @@ def f(): expected["foo"] = expected["foo"].astype("float64") def f(): - df = DataFrame() + df = DataFrame(index=Index([], dtype="int64")) df["foo"] = [] return df tm.assert_frame_equal(f(), expected) def f(): - df = DataFrame() + df = DataFrame(index=Index([], dtype="int64")) df["foo"] = Series(np.arange(len(df)), dtype="float64") return df tm.assert_frame_equal(f(), expected) def f(): - df = DataFrame() - tm.assert_index_equal(df.index, Index([], dtype="object")) + df = DataFrame(index=Index([], dtype="int64")) df["foo"] = range(len(df)) return df diff --git a/pandas/tests/io/data/legacy_hdf/gh26443.h5 b/pandas/tests/io/data/legacy_hdf/gh26443.h5 new file mode 100644 index 0000000000000..45aa64324530f Binary files /dev/null and b/pandas/tests/io/data/legacy_hdf/gh26443.h5 differ diff --git a/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_12.html b/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_12.html new file mode 100644 index 0000000000000..4eb3f5319749d --- /dev/null +++ b/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_12.html @@ -0,0 +1,70 @@ +<div> +<style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } +</style> +<table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>0</td> + </tr> + <tr> + <th>1</th> + <td>1</td> + </tr> + <tr> + <th>2</th> + <td>2</td> + </tr> + <tr> + <th>3</th> + <td>3</td> + </tr> + <tr> + <th>4</th> + <td>4</td> + </tr> + <tr> + <th>...</th> + <td>...</td> + </tr> + <tr> + <th>56</th> + <td>56</td> + </tr> + <tr> + <th>57</th> + <td>57</td> + </tr> + <tr> + <th>58</th> + <td>58</td> + </tr> + <tr> + <th>59</th> + 
<td>59</td> + </tr> + <tr> + <th>60</th> + <td>60</td> + </tr> + </tbody> +</table> +<p>61 rows × 1 columns</p> +</div> diff --git a/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_4.html b/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_4.html new file mode 100644 index 0000000000000..2b1d97aec517c --- /dev/null +++ b/pandas/tests/io/formats/data/html/html_repr_max_rows_10_min_rows_4.html @@ -0,0 +1,46 @@ +<div> +<style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } +</style> +<table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>0</td> + </tr> + <tr> + <th>1</th> + <td>1</td> + </tr> + <tr> + <th>...</th> + <td>...</td> + </tr> + <tr> + <th>59</th> + <td>59</td> + </tr> + <tr> + <th>60</th> + <td>60</td> + </tr> + </tbody> +</table> +<p>61 rows × 1 columns</p> +</div> diff --git a/pandas/tests/io/formats/data/html/html_repr_max_rows_12_min_rows_None.html b/pandas/tests/io/formats/data/html/html_repr_max_rows_12_min_rows_None.html new file mode 100644 index 0000000000000..a539e5a4884a1 --- /dev/null +++ b/pandas/tests/io/formats/data/html/html_repr_max_rows_12_min_rows_None.html @@ -0,0 +1,78 @@ +<div> +<style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } +</style> +<table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>0</td> + </tr> + <tr> + <th>1</th> + <td>1</td> + </tr> + <tr> + <th>2</th> + <td>2</td> + </tr> + <tr> + <th>3</th> + <td>3</td> + </tr> + <tr> + <th>4</th> + <td>4</td> + </tr> + <tr> + <th>5</th> + <td>5</td> + </tr> + <tr> + <th>...</th> + <td>...</td> + </tr> + <tr> + <th>55</th> + <td>55</td> + </tr> + <tr> + <th>56</th> + <td>56</td> + </tr> + <tr> + <th>57</th> + <td>57</td> + </tr> + <tr> + <th>58</th> + <td>58</td> + </tr> + <tr> + <th>59</th> + <td>59</td> + </tr> + <tr> + <th>60</th> + <td>60</td> + </tr> + </tbody> +</table> +<p>61 rows × 1 columns</p> +</div> diff --git a/pandas/tests/io/formats/data/html/html_repr_max_rows_None_min_rows_12.html b/pandas/tests/io/formats/data/html/html_repr_max_rows_None_min_rows_12.html new file mode 100644 index 0000000000000..3e680a505c6d6 --- /dev/null +++ b/pandas/tests/io/formats/data/html/html_repr_max_rows_None_min_rows_12.html @@ -0,0 +1,269 @@ +<div> +<style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } +</style> +<table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>0</td> + </tr> + <tr> + <th>1</th> + <td>1</td> + </tr> + <tr> + <th>2</th> + <td>2</td> + </tr> + <tr> + <th>3</th> + <td>3</td> + </tr> + <tr> + <th>4</th> + <td>4</td> + </tr> + <tr> + <th>5</th> + <td>5</td> + </tr> + <tr> + <th>6</th> + <td>6</td> + </tr> + <tr> + <th>7</th> + <td>7</td> + </tr> + <tr> + <th>8</th> + <td>8</td> + </tr> + <tr> + <th>9</th> + <td>9</td> + </tr> + <tr> + <th>10</th> + <td>10</td> + </tr> + <tr> + <th>11</th> + <td>11</td> + </tr> + <tr> + <th>12</th> + 
<td>12</td> + </tr> + <tr> + <th>13</th> + <td>13</td> + </tr> + <tr> + <th>14</th> + <td>14</td> + </tr> + <tr> + <th>15</th> + <td>15</td> + </tr> + <tr> + <th>16</th> + <td>16</td> + </tr> + <tr> + <th>17</th> + <td>17</td> + </tr> + <tr> + <th>18</th> + <td>18</td> + </tr> + <tr> + <th>19</th> + <td>19</td> + </tr> + <tr> + <th>20</th> + <td>20</td> + </tr> + <tr> + <th>21</th> + <td>21</td> + </tr> + <tr> + <th>22</th> + <td>22</td> + </tr> + <tr> + <th>23</th> + <td>23</td> + </tr> + <tr> + <th>24</th> + <td>24</td> + </tr> + <tr> + <th>25</th> + <td>25</td> + </tr> + <tr> + <th>26</th> + <td>26</td> + </tr> + <tr> + <th>27</th> + <td>27</td> + </tr> + <tr> + <th>28</th> + <td>28</td> + </tr> + <tr> + <th>29</th> + <td>29</td> + </tr> + <tr> + <th>30</th> + <td>30</td> + </tr> + <tr> + <th>31</th> + <td>31</td> + </tr> + <tr> + <th>32</th> + <td>32</td> + </tr> + <tr> + <th>33</th> + <td>33</td> + </tr> + <tr> + <th>34</th> + <td>34</td> + </tr> + <tr> + <th>35</th> + <td>35</td> + </tr> + <tr> + <th>36</th> + <td>36</td> + </tr> + <tr> + <th>37</th> + <td>37</td> + </tr> + <tr> + <th>38</th> + <td>38</td> + </tr> + <tr> + <th>39</th> + <td>39</td> + </tr> + <tr> + <th>40</th> + <td>40</td> + </tr> + <tr> + <th>41</th> + <td>41</td> + </tr> + <tr> + <th>42</th> + <td>42</td> + </tr> + <tr> + <th>43</th> + <td>43</td> + </tr> + <tr> + <th>44</th> + <td>44</td> + </tr> + <tr> + <th>45</th> + <td>45</td> + </tr> + <tr> + <th>46</th> + <td>46</td> + </tr> + <tr> + <th>47</th> + <td>47</td> + </tr> + <tr> + <th>48</th> + <td>48</td> + </tr> + <tr> + <th>49</th> + <td>49</td> + </tr> + <tr> + <th>50</th> + <td>50</td> + </tr> + <tr> + <th>51</th> + <td>51</td> + </tr> + <tr> + <th>52</th> + <td>52</td> + </tr> + <tr> + <th>53</th> + <td>53</td> + </tr> + <tr> + <th>54</th> + <td>54</td> + </tr> + <tr> + <th>55</th> + <td>55</td> + </tr> + <tr> + <th>56</th> + <td>56</td> + </tr> + <tr> + <th>57</th> + <td>57</td> + </tr> + <tr> + <th>58</th> + <td>58</td> + </tr> + <tr> + <th>59</th> + <td>59</td> + </tr> + <tr> + <th>60</th> + <td>60</td> + </tr> + </tbody> +</table> +</div> diff --git a/pandas/tests/io/formats/data/html/html_repr_min_rows_default_no_truncation.html b/pandas/tests/io/formats/data/html/html_repr_min_rows_default_no_truncation.html new file mode 100644 index 0000000000000..10f6247e37def --- /dev/null +++ b/pandas/tests/io/formats/data/html/html_repr_min_rows_default_no_truncation.html @@ -0,0 +1,105 @@ +<div> +<style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } +</style> +<table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>0</td> + </tr> + <tr> + <th>1</th> + <td>1</td> + </tr> + <tr> + <th>2</th> + <td>2</td> + </tr> + <tr> + <th>3</th> + <td>3</td> + </tr> + <tr> + <th>4</th> + <td>4</td> + </tr> + <tr> + <th>5</th> + <td>5</td> + </tr> + <tr> + <th>6</th> + <td>6</td> + </tr> + <tr> + <th>7</th> + <td>7</td> + </tr> + <tr> + <th>8</th> + <td>8</td> + </tr> + <tr> + <th>9</th> + <td>9</td> + </tr> + <tr> + <th>10</th> + <td>10</td> + </tr> + <tr> + <th>11</th> + <td>11</td> + </tr> + <tr> + <th>12</th> + <td>12</td> + </tr> + <tr> + <th>13</th> + <td>13</td> + </tr> + <tr> + <th>14</th> + <td>14</td> + </tr> + <tr> + <th>15</th> + <td>15</td> + </tr> + <tr> + <th>16</th> + <td>16</td> + </tr> + <tr> + <th>17</th> + <td>17</td> 
+ </tr> + <tr> + <th>18</th> + <td>18</td> + </tr> + <tr> + <th>19</th> + <td>19</td> + </tr> + </tbody> +</table> +</div> diff --git a/pandas/tests/io/formats/data/html/html_repr_min_rows_default_truncated.html b/pandas/tests/io/formats/data/html/html_repr_min_rows_default_truncated.html new file mode 100644 index 0000000000000..4eb3f5319749d --- /dev/null +++ b/pandas/tests/io/formats/data/html/html_repr_min_rows_default_truncated.html @@ -0,0 +1,70 @@ +<div> +<style scoped> + .dataframe tbody tr th:only-of-type { + vertical-align: middle; + } + + .dataframe tbody tr th { + vertical-align: top; + } + + .dataframe thead th { + text-align: right; + } +</style> +<table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>a</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>0</td> + </tr> + <tr> + <th>1</th> + <td>1</td> + </tr> + <tr> + <th>2</th> + <td>2</td> + </tr> + <tr> + <th>3</th> + <td>3</td> + </tr> + <tr> + <th>4</th> + <td>4</td> + </tr> + <tr> + <th>...</th> + <td>...</td> + </tr> + <tr> + <th>56</th> + <td>56</td> + </tr> + <tr> + <th>57</th> + <td>57</td> + </tr> + <tr> + <th>58</th> + <td>58</td> + </tr> + <tr> + <th>59</th> + <td>59</td> + </tr> + <tr> + <th>60</th> + <td>60</td> + </tr> + </tbody> +</table> +<p>61 rows × 1 columns</p> +</div> diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index a048e3bb867bd..c0451a0672c89 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -471,28 +471,35 @@ def test_repr_min_rows(self): # default setting no truncation even if above min_rows assert ".." not in repr(df) + assert ".." not in df._repr_html_() df = pd.DataFrame({"a": range(61)}) # default of max_rows 60 triggers truncation if above assert ".." in repr(df) + assert ".." in df._repr_html_() with option_context("display.max_rows", 10, "display.min_rows", 4): # truncated after first two rows assert ".." in repr(df) assert "2 " not in repr(df) + assert "..." in df._repr_html_() + assert "<td>2</td>" not in df._repr_html_() with option_context("display.max_rows", 12, "display.min_rows", None): # when set to None, follow value of max_rows assert "5 5" in repr(df) + assert "<td>5</td>" in df._repr_html_() with option_context("display.max_rows", 10, "display.min_rows", 12): # when set value higher as max_rows, use the minimum assert "5 5" not in repr(df) + assert "<td>5</td>" not in df._repr_html_() with option_context("display.max_rows", None, "display.min_rows", 12): # max_rows of None -> never truncate assert ".." not in repr(df) + assert ".." not in df._repr_html_() def test_str_max_colwidth(self): # GH 7856 diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index ee236a8253b01..ab44b8b8059eb 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -514,3 +514,44 @@ def test_to_csv_compression(self, compression_only, read_infer, to_infer): df.to_csv(path, compression=to_compression) result = pd.read_csv(path, index_col=0, compression=read_compression) tm.assert_frame_equal(result, df) + + def test_to_csv_compression_dict(self, compression_only): + # GH 26023 + method = compression_only + df = DataFrame({"ABC": [1]}) + filename = "to_csv_compress_as_dict." 
+ filename += "gz" if method == "gzip" else method + with tm.ensure_clean(filename) as path: + df.to_csv(path, compression={"method": method}) + read_df = pd.read_csv(path, index_col=0) + tm.assert_frame_equal(read_df, df) + + def test_to_csv_compression_dict_no_method_raises(self): + # GH 26023 + df = DataFrame({"ABC": [1]}) + compression = {"some_option": True} + msg = "must have key 'method'" + + with tm.ensure_clean("out.zip") as path: + with pytest.raises(ValueError, match=msg): + df.to_csv(path, compression=compression) + + @pytest.mark.parametrize("compression", ["zip", "infer"]) + @pytest.mark.parametrize( + "archive_name", [None, "test_to_csv.csv", "test_to_csv.zip"] + ) + def test_to_csv_zip_arguments(self, compression, archive_name): + # GH 26023 + from zipfile import ZipFile + + df = DataFrame({"ABC": [1]}) + with tm.ensure_clean("to_csv_archive_name.zip") as path: + df.to_csv( + path, compression={"method": compression, "archive_name": archive_name} + ) + zp = ZipFile(path) + expected_arcname = path if archive_name is None else archive_name + expected_arcname = os.path.basename(expected_arcname) + assert len(zp.filelist) == 1 + archived_file = os.path.basename(zp.filelist[0].filename) + assert archived_file == expected_arcname diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index 448e869df950d..52c7b89220f06 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -713,3 +713,42 @@ def test_to_html_with_col_space_units(unit): for h in hdrs: expected = '<th style="min-width: {unit};">'.format(unit=unit) assert expected in h + + +def test_html_repr_min_rows_default(datapath): + # gh-27991 + + # default setting no truncation even if above min_rows + df = pd.DataFrame({"a": range(20)}) + result = df._repr_html_() + expected = expected_html(datapath, "html_repr_min_rows_default_no_truncation") + assert result == expected + + # default of max_rows 60 triggers truncation if above + df = pd.DataFrame({"a": range(61)}) + result = df._repr_html_() + expected = expected_html(datapath, "html_repr_min_rows_default_truncated") + assert result == expected + + +@pytest.mark.parametrize( + "max_rows,min_rows,expected", + [ + # truncated after first two rows + (10, 4, "html_repr_max_rows_10_min_rows_4"), + # when set to None, follow value of max_rows + (12, None, "html_repr_max_rows_12_min_rows_None"), + # when set value higher as max_rows, use the minimum + (10, 12, "html_repr_max_rows_10_min_rows_12"), + # max_rows of None -> never truncate + (None, 12, "html_repr_max_rows_None_min_rows_12"), + ], +) +def test_html_repr_min_rows(datapath, max_rows, min_rows, expected): + # gh-27991 + + df = pd.DataFrame({"a": range(61)}) + expected = expected_html(datapath, expected) + with option_context("display.max_rows", max_rows, "display.min_rows", min_rows): + result = df._repr_html_() + assert result == expected diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 9c687f036aa68..9842a706f43d7 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1012,60 +1012,70 @@ def test_convert_dates_infer(self): result = read_json(dumps(data))[["id", infer_word]] assert_frame_equal(result, expected) - def test_date_format_frame(self): + @pytest.mark.parametrize( + "date,date_unit", + [ + ("20130101 20:43:42.123", None), + ("20130101 20:43:42", "s"), + ("20130101 20:43:42.123", "ms"), + ("20130101 20:43:42.123456", "us"), + ("20130101 
20:43:42.123456789", "ns"), + ], + ) + def test_date_format_frame(self, date, date_unit): df = self.tsframe.copy() - def test_w_date(date, date_unit=None): - df["date"] = Timestamp(date) - df.iloc[1, df.columns.get_loc("date")] = pd.NaT - df.iloc[5, df.columns.get_loc("date")] = pd.NaT - if date_unit: - json = df.to_json(date_format="iso", date_unit=date_unit) - else: - json = df.to_json(date_format="iso") - result = read_json(json) - expected = df.copy() - expected.index = expected.index.tz_localize("UTC") - expected["date"] = expected["date"].dt.tz_localize("UTC") - assert_frame_equal(result, expected) - - test_w_date("20130101 20:43:42.123") - test_w_date("20130101 20:43:42", date_unit="s") - test_w_date("20130101 20:43:42.123", date_unit="ms") - test_w_date("20130101 20:43:42.123456", date_unit="us") - test_w_date("20130101 20:43:42.123456789", date_unit="ns") + df["date"] = Timestamp(date) + df.iloc[1, df.columns.get_loc("date")] = pd.NaT + df.iloc[5, df.columns.get_loc("date")] = pd.NaT + if date_unit: + json = df.to_json(date_format="iso", date_unit=date_unit) + else: + json = df.to_json(date_format="iso") + result = read_json(json) + expected = df.copy() + # expected.index = expected.index.tz_localize("UTC") + expected["date"] = expected["date"].dt.tz_localize("UTC") + assert_frame_equal(result, expected) + def test_date_format_frame_raises(self): + df = self.tsframe.copy() msg = "Invalid value 'foo' for option 'date_unit'" with pytest.raises(ValueError, match=msg): df.to_json(date_format="iso", date_unit="foo") - def test_date_format_series(self): - def test_w_date(date, date_unit=None): - ts = Series(Timestamp(date), index=self.ts.index) - ts.iloc[1] = pd.NaT - ts.iloc[5] = pd.NaT - if date_unit: - json = ts.to_json(date_format="iso", date_unit=date_unit) - else: - json = ts.to_json(date_format="iso") - result = read_json(json, typ="series") - expected = ts.copy() - expected.index = expected.index.tz_localize("UTC") - expected = expected.dt.tz_localize("UTC") - assert_series_equal(result, expected) - - test_w_date("20130101 20:43:42.123") - test_w_date("20130101 20:43:42", date_unit="s") - test_w_date("20130101 20:43:42.123", date_unit="ms") - test_w_date("20130101 20:43:42.123456", date_unit="us") - test_w_date("20130101 20:43:42.123456789", date_unit="ns") + @pytest.mark.parametrize( + "date,date_unit", + [ + ("20130101 20:43:42.123", None), + ("20130101 20:43:42", "s"), + ("20130101 20:43:42.123", "ms"), + ("20130101 20:43:42.123456", "us"), + ("20130101 20:43:42.123456789", "ns"), + ], + ) + def test_date_format_series(self, date, date_unit): + ts = Series(Timestamp(date), index=self.ts.index) + ts.iloc[1] = pd.NaT + ts.iloc[5] = pd.NaT + if date_unit: + json = ts.to_json(date_format="iso", date_unit=date_unit) + else: + json = ts.to_json(date_format="iso") + result = read_json(json, typ="series") + expected = ts.copy() + # expected.index = expected.index.tz_localize("UTC") + expected = expected.dt.tz_localize("UTC") + assert_series_equal(result, expected) + def test_date_format_series_raises(self): ts = Series(Timestamp("20130101 20:43:42.123"), index=self.ts.index) msg = "Invalid value 'foo' for option 'date_unit'" with pytest.raises(ValueError, match=msg): ts.to_json(date_format="iso", date_unit="foo") - def test_date_unit(self): + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + def test_date_unit(self, unit): df = self.tsframe.copy() df["date"] = Timestamp("20130101 20:43:42") dl = df.columns.get_loc("date") @@ -1073,16 +1083,15 @@ def test_date_unit(self): 
df.iloc[2, dl] = Timestamp("21460101 20:43:42") df.iloc[4, dl] = pd.NaT - for unit in ("s", "ms", "us", "ns"): - json = df.to_json(date_format="epoch", date_unit=unit) + json = df.to_json(date_format="epoch", date_unit=unit) - # force date unit - result = read_json(json, date_unit=unit) - assert_frame_equal(result, df) + # force date unit + result = read_json(json, date_unit=unit) + assert_frame_equal(result, df) - # detect date unit - result = read_json(json, date_unit=None) - assert_frame_equal(result, df) + # detect date unit + result = read_json(json, date_unit=None) + assert_frame_equal(result, df) def test_weird_nested_json(self): # this used to core dump the parser @@ -1611,3 +1620,30 @@ def test_read_timezone_information(self): ) expected = Series([88], index=DatetimeIndex(["2019-01-01 11:00:00"], tz="UTC")) assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")] + ) + def test_timedelta_as_label(self, date_format, key): + df = pd.DataFrame([[1]], columns=[pd.Timedelta("1D")]) + expected = '{{"{key}":{{"0":1}}}}'.format(key=key) + result = df.to_json(date_format=date_format) + + assert result == expected + + @pytest.mark.parametrize( + "orient,expected", + [ + ("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"), + ("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"), + # TODO: the below have separate encoding procedures + # They produce JSON but not in a consistent manner + pytest.param("split", "", marks=pytest.mark.skip), + pytest.param("table", "", marks=pytest.mark.skip), + ], + ) + def test_tuple_labels(self, orient, expected): + # GH 20500 + df = pd.DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")]) + result = df.to_json(orient=orient) + assert result == expected diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index b94d5cd497ccf..e04535df56663 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1898,7 +1898,10 @@ def test_null_byte_char(all_parsers): out = parser.read_csv(StringIO(data), names=names) tm.assert_frame_equal(out, expected) else: - msg = "NULL byte detected" + if compat.PY38: + msg = "line contains NUL" + else: + msg = "NULL byte detected" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), names=names) @@ -2020,9 +2023,34 @@ def test_file_handles_with_open(all_parsers, csv1): # Don't close user provided file handles. parser = all_parsers - with open(csv1, "r") as f: - parser.read_csv(f) - assert not f.closed + for mode in ["r", "rb"]: + with open(csv1, mode) as f: + parser.read_csv(f) + assert not f.closed + + +@pytest.mark.parametrize( + "fname,encoding", + [ + ("test1.csv", "utf-8"), + ("unicode_series.csv", "latin-1"), + ("sauron.SHIFT_JIS.csv", "shiftjis"), + ], +) +def test_binary_mode_file_buffers(all_parsers, csv_dir_path, fname, encoding): + # gh-23779: Python csv engine shouldn't error on files opened in binary. 
+ parser = all_parsers + + fpath = os.path.join(csv_dir_path, fname) + expected = parser.read_csv(fpath, encoding=encoding) + + with open(fpath, mode="r", encoding=encoding) as fa: + result = parser.read_csv(fa) + tm.assert_frame_equal(expected, result) + + with open(fpath, mode="rb") as fb: + result = parser.read_csv(fb, encoding=encoding) + tm.assert_frame_equal(expected, result) def test_invalid_file_buffer_class(all_parsers): diff --git a/pandas/tests/io/parser/test_header.py b/pandas/tests/io/parser/test_header.py index 99e0181741998..0ecd8be7ddc78 100644 --- a/pandas/tests/io/parser/test_header.py +++ b/pandas/tests/io/parser/test_header.py @@ -24,6 +24,35 @@ def test_read_with_bad_header(all_parsers): parser.read_csv(s, header=[10]) +def test_negative_header(all_parsers): + # see gh-27779 + parser = all_parsers + data = """1,2,3,4,5 +6,7,8,9,10 +11,12,13,14,15 +""" + with pytest.raises( + ValueError, + match="Passing negative integer to header is invalid. " + "For no header, use header=None instead", + ): + parser.read_csv(StringIO(data), header=-1) + + +@pytest.mark.parametrize("header", [([-1, 2, 4]), ([-5, 0])]) +def test_negative_multi_index_header(all_parsers, header): + # see gh-27779 + parser = all_parsers + data = """1,2,3,4,5 + 6,7,8,9,10 + 11,12,13,14,15 + """ + with pytest.raises( + ValueError, match="cannot specify multi-index header with negative integers" + ): + parser.read_csv(StringIO(data), header=header) + + @pytest.mark.parametrize("header", [True, False]) def test_bool_header_arg(all_parsers, header): # see gh-6114 diff --git a/pandas/tests/io/pytables/test_pytables.py b/pandas/tests/io/pytables/test_pytables.py index d67f2c3b7bd66..77cac00882771 100644 --- a/pandas/tests/io/pytables/test_pytables.py +++ b/pandas/tests/io/pytables/test_pytables.py @@ -37,7 +37,6 @@ import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal, assert_series_equal, set_timezone -from pandas.io import pytables as pytables # noqa:E402 from pandas.io.formats.printing import pprint_thing from pandas.io.pytables import ( ClosedFileError, @@ -46,7 +45,9 @@ Term, read_hdf, ) -from pandas.io.pytables import TableIterator # noqa:E402 + +from pandas.io import pytables as pytables # noqa: E402 isort:skip +from pandas.io.pytables import TableIterator # noqa: E402 isort:skip tables = pytest.importorskip("tables") @@ -5446,3 +5447,16 @@ def test_read_with_where_tz_aware_index(self): store.append(key, expected, format="table", append=True) result = pd.read_hdf(path, key, where="DATE > 20151130") assert_frame_equal(result, expected) + + def test_py2_created_with_datetimez(self, datapath): + # The test HDF5 file was created in Python 2, but could not be read in + # Python 3. 
+ # + # GH26443 + index = [pd.Timestamp("2019-01-01T18:00").tz_localize("America/New_York")] + expected = DataFrame({"data": 123}, index=index) + with ensure_clean_store( + datapath("io", "data", "legacy_hdf", "gh26443.h5"), mode="r" + ) as store: + result = store["key"] + assert_frame_equal(result, expected) diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index ce459ab24afe0..d68b6a1effaa0 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -1,5 +1,8 @@ import contextlib import os +import subprocess +import sys +import textwrap import warnings import pytest @@ -125,3 +128,33 @@ def test_compression_warning(compression_only): with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False): with f: df.to_csv(f, compression=compression_only) + + +def test_with_missing_lzma(): + """Tests if import pandas works when lzma is not present.""" + # https://github.com/pandas-dev/pandas/issues/27575 + code = textwrap.dedent( + """\ + import sys + sys.modules['lzma'] = None + import pandas + """ + ) + subprocess.check_output([sys.executable, "-c", code]) + + +def test_with_missing_lzma_runtime(): + """Tests if RuntimeError is hit when calling lzma without + having the module available.""" + code = textwrap.dedent( + """ + import sys + import pytest + sys.modules['lzma'] = None + import pandas + df = pandas.DataFrame() + with pytest.raises(RuntimeError, match='lzma module'): + df.to_csv('foo.csv', compression='xz') + """ + ) + subprocess.check_output([sys.executable, "-c", code]) diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index 87a2405a10dd5..ee668d6890756 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -8,7 +8,7 @@ import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal, ensure_clean -from pandas.io.feather_format import read_feather, to_feather # noqa:E402 +from pandas.io.feather_format import read_feather, to_feather # noqa: E402 isort:skip pyarrow = pytest.importorskip("pyarrow") diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 076d0c9f947c7..30555508f0998 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -13,7 +13,6 @@ import bz2 import glob import gzip -import lzma import os import pickle import shutil @@ -22,7 +21,7 @@ import pytest -from pandas.compat import is_platform_little_endian +from pandas.compat import _get_lzma_file, _import_lzma, is_platform_little_endian import pandas as pd from pandas import Index @@ -30,6 +29,8 @@ from pandas.tseries.offsets import Day, MonthEnd +lzma = _import_lzma() + @pytest.fixture(scope="module") def current_pickle_data(): @@ -270,7 +271,7 @@ def compress_file(self, src_path, dest_path, compression): with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_DEFLATED) as f: f.write(src_path, os.path.basename(src_path)) elif compression == "xz": - f = lzma.LZMAFile(dest_path, "w") + f = _get_lzma_file(lzma)(dest_path, "w") else: msg = "Unrecognized compression type: {}".format(compression) raise ValueError(msg) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index d8465a427eaea..25727447b4c6f 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -565,7 +565,6 @@ def _transaction_test(self): class _TestSQLApi(PandasSQLTest): - """ Base class to test the public API. 
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 4929422d20e8a..5a591f72d7361 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -103,6 +103,28 @@ def _check_legend_labels(self, axes, labels=None, visible=True):
         else:
             assert ax.get_legend() is None
 
+    def _check_legend_marker(self, ax, expected_markers=None, visible=True):
+        """
+        Check ax has expected legend markers
+
+        Parameters
+        ----------
+        ax : matplotlib Axes object
+        expected_markers : list-like
+            expected legend markers
+        visible : bool
+            expected legend visibility. markers are checked only when visible
+            is True
+        """
+        if visible and (expected_markers is None):
+            raise ValueError("Markers must be specified when visible is True")
+        if visible:
+            handles, _ = ax.get_legend_handles_labels()
+            markers = [handle.get_marker() for handle in handles]
+            assert markers == expected_markers
+        else:
+            assert ax.get_legend() is None
+
     def _check_data(self, xp, rs):
         """
         Check each axes has identical lines
diff --git a/pandas/tests/plotting/test_backend.py b/pandas/tests/plotting/test_backend.py
index e79e7b6239eb3..d126407cfd823 100644
--- a/pandas/tests/plotting/test_backend.py
+++ b/pandas/tests/plotting/test_backend.py
@@ -46,14 +46,18 @@ def test_backend_is_correct(monkeypatch):
 
 @td.skip_if_no_mpl
 def test_register_entrypoint():
+
+    dist = pkg_resources.get_distribution("pandas")
+    if dist.module_path not in pandas.__file__:
+        # We are running from a non-installed pandas, and this test is invalid
+        pytest.skip("Testing a non-installed pandas")
+
     mod = types.ModuleType("my_backend")
     mod.plot = lambda *args, **kwargs: 1
 
     backends = pkg_resources.get_entry_map("pandas")
     my_entrypoint = pkg_resources.EntryPoint(
-        "pandas_plotting_backend",
-        mod.__name__,
-        dist=pkg_resources.get_distribution("pandas"),
+        "pandas_plotting_backend", mod.__name__, dist=dist
     )
     backends["pandas_plotting_backends"]["my_backend"] = my_entrypoint
     # TODO: the docs recommend importlib.util.module_from_spec. But this works for now.
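The test_register_entrypoint hunk above exercises the plugin mechanism pandas uses for third-party plotting backends: pandas looks up the "pandas_plotting_backends" entry-point group and dispatches plotting calls to the registered module's top-level plot callable. As a minimal sketch of the other side of that contract (not part of this diff; the package and module names are hypothetical, mirroring the my_backend module the test fakes in-process):

# setup.py of a hypothetical third-party plotting backend
from setuptools import setup

setup(
    name="my-backend",
    version="0.1",
    py_modules=["my_backend"],
    entry_points={
        # pandas resolves pd.options.plotting.backend against this group
        "pandas_plotting_backends": ["my_backend = my_backend"]
    },
)

# my_backend.py only needs to expose a top-level ``plot`` callable;
# the test above stubs it with ``lambda *args, **kwargs: 1``.

Once such a distribution is installed, setting pd.options.plotting.backend = "my_backend" routes df.plot(...) through my_backend.plot, which is what the test simulates by injecting a pkg_resources.EntryPoint by hand.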
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index cab0efe53f1fc..5bbaff580c356 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -9,7 +9,7 @@ import pandas.util._test_decorators as td -from pandas import DataFrame, MultiIndex, Series +from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range from pandas.tests.plotting.common import TestPlotBase, _check_plot_works import pandas.util.testing as tm @@ -160,6 +160,21 @@ def test_fontsize(self): df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16 ) + def test_boxplot_numeric_data(self): + # GH 22799 + df = DataFrame( + { + "a": date_range("2012-01-01", periods=100), + "b": np.random.randn(100), + "c": np.random.randn(100) + 2, + "d": date_range("2012-01-01", periods=100).astype(str), + "e": date_range("2012-01-01", periods=100, tz="UTC"), + "f": timedelta_range("1 days", periods=100), + } + ) + ax = df.plot(kind="box") + assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"] + @td.skip_if_no_mpl class TestDataFrameGroupByPlots(TestPlotBase): diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index 35d12706f0590..7001264c41c05 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -40,6 +40,21 @@ def test_initial_warning(): assert "Using an implicitly" in out +def test_registry_mpl_resets(): + # Check that Matplotlib converters are properly reset (see issue #27481) + code = ( + "import matplotlib.units as units; " + "import matplotlib.dates as mdates; " + "n_conv = len(units.registry); " + "import pandas as pd; " + "pd.plotting.register_matplotlib_converters(); " + "pd.plotting.deregister_matplotlib_converters(); " + "assert len(units.registry) == n_conv" + ) + call = [sys.executable, "-c", code] + subprocess.check_output(call) + + def test_timtetonum_accepts_unicode(): assert converter.time2num("00:01") == converter.time2num("00:01") diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 69070ea11e478..e2b7f2819f957 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1410,7 +1410,7 @@ def test_plot_outofbounds_datetime(self): def test_format_timedelta_ticks_narrow(self): - expected_labels = ["00:00:00.0000000{:0>2d}".format(i) for i in range(10)] + expected_labels = ["00:00:00.0000000{:0>2d}".format(i) for i in np.arange(10)] rng = timedelta_range("0", periods=10, freq="ns") df = DataFrame(np.random.randn(len(rng), 3), rng) diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 65815bcedebfc..f672cd3a6aa58 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -1881,6 +1881,31 @@ def test_df_legend_labels(self): self._check_legend_labels(ax, labels=["LABEL_b", "LABEL_c"]) assert df5.columns.tolist() == ["b", "c"] + def test_missing_marker_multi_plots_on_same_ax(self): + # GH 18222 + df = pd.DataFrame( + data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"] + ) + fig, ax = self.plt.subplots(nrows=1, ncols=3) + # Left plot + df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[0]) + df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[0]) + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[0]) + self._check_legend_labels(ax[0], labels=["r", "g", "b"]) + 
self._check_legend_marker(ax[0], expected_markers=["o", "x", "o"]) + # Center plot + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[1]) + df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[1]) + df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[1]) + self._check_legend_labels(ax[1], labels=["b", "r", "g"]) + self._check_legend_marker(ax[1], expected_markers=["o", "o", "x"]) + # Right plot + df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[2]) + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[2]) + df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[2]) + self._check_legend_labels(ax[2], labels=["g", "b", "r"]) + self._check_legend_marker(ax[2], expected_markers=["x", "o", "o"]) + def test_legend_name(self): multi = DataFrame( randn(4, 4), @@ -3152,6 +3177,58 @@ def test_x_multiindex_values_ticks(self): assert labels_position["(2013, 1)"] == 2.0 assert labels_position["(2013, 2)"] == 3.0 + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_xlim_plot_line(self, kind): + # test if xlim is set correctly in plot.line and plot.area + # GH 27686 + df = pd.DataFrame([2, 4], index=[1, 2]) + ax = df.plot(kind=kind) + xlims = ax.get_xlim() + assert xlims[0] < 1 + assert xlims[1] > 2 + + def test_xlim_plot_line_correctly_in_mixed_plot_type(self): + # test if xlim is set correctly when ax contains multiple different kinds + # of plots, GH 27686 + fig, ax = self.plt.subplots() + + indexes = ["k1", "k2", "k3", "k4"] + df = pd.DataFrame( + { + "s1": [1000, 2000, 1500, 2000], + "s2": [900, 1400, 2000, 3000], + "s3": [1500, 1500, 1600, 1200], + "secondary_y": [1, 3, 4, 3], + }, + index=indexes, + ) + df[["s1", "s2", "s3"]].plot.bar(ax=ax, stacked=False) + df[["secondary_y"]].plot(ax=ax, secondary_y=True) + + xlims = ax.get_xlim() + assert xlims[0] < 0 + assert xlims[1] > 3 + + # make sure axis labels are plotted correctly as well + xticklabels = [t.get_text() for t in ax.get_xticklabels()] + assert xticklabels == indexes + + def test_subplots_sharex_false(self): + # test when sharex is set to False, two plots should have different + # labels, GH 25160 + df = pd.DataFrame(np.random.rand(10, 2)) + df.iloc[5:, 1] = np.nan + df.iloc[:5, 0] = np.nan + + figs, axs = self.plt.subplots(2, 1) + df.plot.line(ax=axs, subplots=True, sharex=False) + + expected_ax1 = np.arange(4.5, 10, 0.5) + expected_ax2 = np.arange(-0.5, 5, 0.5) + + tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1) + tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2) + def _generate_4_axes_via_gridspec(): import matplotlib.pyplot as plt diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 8b4a78e9195b5..2c4c8aa7461a3 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -167,6 +167,15 @@ def test_label(self): ax.legend() # draw it self._check_legend_labels(ax, labels=["LABEL"]) + def test_boolean(self): + # GH 23719 + s = Series([False, False, True]) + _check_plot_works(s.plot, include_bool=True) + + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + _check_plot_works(s.plot) + def test_line_area_nan_series(self): values = [1, 2, np.nan, 3] s = Series(values) @@ -888,3 +897,15 @@ def test_plot_accessor_updates_on_inplace(self): _, ax = self.plt.subplots() after = ax.xaxis.get_ticklocs() tm.assert_numpy_array_equal(before, after) + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_plot_xlim_for_series(self, 
kind): + # test if xlim is also correctly plotted in Series for line and area + # GH 27686 + s = Series([2, 3]) + _, ax = self.plt.subplots() + s.plot(kind=kind, ax=ax) + xlims = ax.get_xlim() + + assert xlims[0] < 0 + assert xlims[1] > 1 diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index b6c6f967333a8..a04f093ee7818 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1340,6 +1340,18 @@ def test_merge_take_missing_values_from_index_of_other_dtype(self): expected = expected.reindex(columns=["a", "key", "b"]) tm.assert_frame_equal(result, expected) + def test_merge_readonly(self): + # https://github.com/pandas-dev/pandas/issues/27943 + data1 = pd.DataFrame( + np.arange(20).reshape((4, 5)) + 1, columns=["a", "b", "c", "d", "e"] + ) + data2 = pd.DataFrame( + np.arange(20).reshape((5, 4)) + 1, columns=["a", "b", "x", "y"] + ) + + data1._data.blocks[0].values.flags.writeable = False + data1.merge(data2) # no error + def _check_merge(x, y): for how in ["inner", "left", "outer"]: diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 6b66386bafc5e..7412b1de643a1 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -1,3 +1,5 @@ +import datetime + import numpy as np import pytest import pytz @@ -588,14 +590,23 @@ def test_non_sorted(self): # ok, though has dupes merge_asof(trades, self.quotes, on="time", by="ticker") - def test_tolerance(self): + @pytest.mark.parametrize( + "tolerance", + [ + Timedelta("1day"), + pytest.param( + datetime.timedelta(days=1), + marks=pytest.mark.xfail(reason="not implemented", strict=True), + ), + ], + ids=["pd.Timedelta", "datetime.timedelta"], + ) + def test_tolerance(self, tolerance): trades = self.trades quotes = self.quotes - result = merge_asof( - trades, quotes, on="time", by="ticker", tolerance=Timedelta("1day") - ) + result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance) expected = self.tolerance assert_frame_equal(result, expected) @@ -1246,3 +1257,39 @@ def test_by_mixed_tz_aware(self): ) expected["value_y"] = np.array([np.nan], dtype=object) assert_frame_equal(result, expected) + + def test_timedelta_tolerance_nearest(self): + # GH 27642 + + left = pd.DataFrame( + list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])), + columns=["time", "left"], + ) + + left["time"] = pd.to_timedelta(left["time"], "ms") + + right = pd.DataFrame( + list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])), + columns=["time", "right"], + ) + + right["time"] = pd.to_timedelta(right["time"], "ms") + + expected = pd.DataFrame( + list( + zip( + [0, 5, 10, 15, 20, 25], + [0, 1, 2, 3, 4, 5], + [0, np.nan, 2, 4, np.nan, np.nan], + ) + ), + columns=["time", "left", "right"], + ) + + expected["time"] = pd.to_timedelta(expected["time"], "ms") + + result = pd.merge_asof( + left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest" + ) + + assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 6366bf0521fbc..13f0f14014a31 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -50,7 +50,6 @@ def sort_with_none(request): class TestConcatAppendCommon: - """ Test common dtype coercion rules between concat and append. 
""" diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index be82e7f595f8c..03b15d2df1a26 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -2447,3 +2447,84 @@ def test_crosstab_unsorted_order(self): [[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns ) tm.assert_frame_equal(result, expected) + + def test_margin_normalize(self): + # GH 27500 + df = pd.DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], + "C": [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], + } + ) + # normalize on index + result = pd.crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=0 + ) + expected = pd.DataFrame( + [[0.5, 0.5], [0.5, 0.5], [0.666667, 0.333333], [0, 1], [0.444444, 0.555556]] + ) + expected.index = MultiIndex( + levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]], + codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]], + names=["A", "B"], + ) + expected.columns = Index(["large", "small"], dtype="object", name="C") + tm.assert_frame_equal(result, expected) + + # normalize on columns + result = pd.crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=1 + ) + expected = pd.DataFrame( + [ + [0.25, 0.2, 0.222222], + [0.25, 0.2, 0.222222], + [0.5, 0.2, 0.333333], + [0, 0.4, 0.222222], + ] + ) + expected.columns = Index( + ["large", "small", "Sub-Total"], dtype="object", name="C" + ) + expected.index = MultiIndex( + levels=[["bar", "foo"], ["one", "two"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=["A", "B"], + ) + tm.assert_frame_equal(result, expected) + + # normalize on both index and column + result = pd.crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=True + ) + expected = pd.DataFrame( + [ + [0.111111, 0.111111, 0.222222], + [0.111111, 0.111111, 0.222222], + [0.222222, 0.111111, 0.333333], + [0.000000, 0.222222, 0.222222], + [0.444444, 0.555555, 1], + ] + ) + expected.columns = Index( + ["large", "small", "Sub-Total"], dtype="object", name="C" + ) + expected.index = MultiIndex( + levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]], + codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]], + names=["A", "B"], + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/scalar/period/test_asfreq.py b/pandas/tests/scalar/period/test_asfreq.py index 4cff061cabc40..357274e724c68 100644 --- a/pandas/tests/scalar/period/test_asfreq.py +++ b/pandas/tests/scalar/period/test_asfreq.py @@ -30,11 +30,8 @@ def test_asfreq_near_zero_weekly(self): assert week1.asfreq("D", "E") >= per1 assert week2.asfreq("D", "S") <= per2 - @pytest.mark.xfail( - reason="GH#19643 period_helper asfreq functions fail to check for overflows" - ) def test_to_timestamp_out_of_bounds(self): - # GH#19643, currently gives Timestamp('1754-08-30 22:43:41.128654848') + # GH#19643, used to incorrectly give Timestamp in 1754 per = Period("0001-01-01", freq="B") with pytest.raises(OutOfBoundsDatetime): per.to_timestamp() diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index b57b817461788..a1de205afc0e2 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -1298,23 +1298,13 @@ def test_add_offset_nat(self): timedelta(365), ]: assert p + o is 
NaT - - if isinstance(o, np.timedelta64): - with pytest.raises(TypeError): - o + p - else: - assert o + p is NaT + assert o + p is NaT for freq in ["M", "2M", "3M"]: p = Period("NaT", freq=freq) for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]: assert p + o is NaT - - if isinstance(o, np.timedelta64): - with pytest.raises(TypeError): - o + p - else: - assert o + p is NaT + assert o + p is NaT for o in [ offsets.YearBegin(2), @@ -1324,12 +1314,7 @@ def test_add_offset_nat(self): timedelta(365), ]: assert p + o is NaT - - if isinstance(o, np.timedelta64): - with pytest.raises(TypeError): - o + p - else: - assert o + p is NaT + assert o + p is NaT # freq is Tick for freq in ["D", "2D", "3D"]: @@ -1343,12 +1328,7 @@ def test_add_offset_nat(self): timedelta(hours=48), ]: assert p + o is NaT - - if isinstance(o, np.timedelta64): - with pytest.raises(TypeError): - o + p - else: - assert o + p is NaT + assert o + p is NaT for o in [ offsets.YearBegin(2), @@ -1358,12 +1338,7 @@ def test_add_offset_nat(self): timedelta(hours=23), ]: assert p + o is NaT - - if isinstance(o, np.timedelta64): - with pytest.raises(TypeError): - o + p - else: - assert o + p is NaT + assert o + p is NaT for freq in ["H", "2H", "3H"]: p = Period("NaT", freq=freq) @@ -1376,9 +1351,7 @@ def test_add_offset_nat(self): timedelta(days=4, minutes=180), ]: assert p + o is NaT - - if not isinstance(o, np.timedelta64): - assert o + p is NaT + assert o + p is NaT for o in [ offsets.YearBegin(2), @@ -1388,12 +1361,7 @@ def test_add_offset_nat(self): timedelta(hours=23, minutes=30), ]: assert p + o is NaT - - if isinstance(o, np.timedelta64): - with pytest.raises(TypeError): - o + p - else: - assert o + p is NaT + assert o + p is NaT def test_sub_offset(self): # freq is DateOffset @@ -1581,7 +1549,11 @@ def test_period_immutable(): @pytest.mark.xfail( - PY35, reason="Parsing as Period('0007-01-01', 'D') for reasons unknown", strict=True + # xpassing on MacPython with strict=False + # https://travis-ci.org/MacPython/pandas-wheels/jobs/574706922 + PY35, + reason="Parsing as Period('0007-01-01', 'D') for reasons unknown", + strict=False, ) def test_small_year_parsing(): per1 = Period("0001-01-07", "D") diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index e7ad76cf95ba0..5eb69fb2952dc 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -1,4 +1,5 @@ from datetime import datetime, timedelta +import operator import numpy as np import pytest @@ -21,6 +22,7 @@ isna, ) from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray +from pandas.core.ops import roperator from pandas.util import testing as tm @@ -250,6 +252,7 @@ def _get_overlap_public_nat_methods(klass, as_tuple=False): "day_name", "dst", "floor", + "fromisocalendar", "fromisoformat", "fromordinal", "fromtimestamp", @@ -294,6 +297,8 @@ def test_overlap_public_nat_methods(klass, expected): # "fromisoformat" was introduced in 3.7 if klass is Timestamp and not compat.PY37: expected.remove("fromisoformat") + if klass is Timestamp and not compat.PY38: + expected.remove("fromisocalendar") assert _get_overlap_public_nat_methods(klass) == expected @@ -333,8 +338,9 @@ def test_nat_doc_strings(compare): "value,val_type", [ (2, "scalar"), - (1.5, "scalar"), - (np.nan, "scalar"), + (1.5, "floating"), + (np.nan, "floating"), + ("foo", "str"), (timedelta(3600), "timedelta"), (Timedelta("5s"), "timedelta"), (datetime(2014, 1, 1), "timestamp"), @@ -348,6 +354,14 @@ def test_nat_arithmetic_scalar(op_name, value, 
val_type): # see gh-6873 invalid_ops = { "scalar": {"right_div_left"}, + "floating": { + "right_div_left", + "left_minus_right", + "right_minus_left", + "left_plus_right", + "right_plus_left", + }, + "str": set(_ops.keys()), "timedelta": {"left_times_right", "right_times_left"}, "timestamp": { "left_times_right", @@ -366,6 +380,16 @@ def test_nat_arithmetic_scalar(op_name, value, val_type): and isinstance(value, Timedelta) ): msg = "Cannot multiply" + elif val_type == "str": + # un-specific check here because the message comes from str + # and varies by method + msg = ( + "can only concatenate str|" + "unsupported operand type|" + "can't multiply sequence|" + "Can't convert 'NaTType'|" + "must be str, not NaTType" + ) else: msg = "unsupported operand type" @@ -435,6 +459,28 @@ def test_nat_arithmetic_td64_vector(op_name, box): tm.assert_equal(_ops[op_name](vec, NaT), box_nat) +@pytest.mark.parametrize( + "dtype,op,out_dtype", + [ + ("datetime64[ns]", operator.add, "datetime64[ns]"), + ("datetime64[ns]", roperator.radd, "datetime64[ns]"), + ("datetime64[ns]", operator.sub, "timedelta64[ns]"), + ("datetime64[ns]", roperator.rsub, "timedelta64[ns]"), + ("timedelta64[ns]", operator.add, "datetime64[ns]"), + ("timedelta64[ns]", roperator.radd, "datetime64[ns]"), + ("timedelta64[ns]", operator.sub, "datetime64[ns]"), + ("timedelta64[ns]", roperator.rsub, "timedelta64[ns]"), + ], +) +def test_nat_arithmetic_ndarray(dtype, op, out_dtype): + other = np.arange(10).astype(dtype) + result = op(NaT, other) + + expected = np.empty(other.shape, dtype=out_dtype) + expected.fill("NaT") + tm.assert_numpy_array_equal(result, expected) + + def test_nat_pinned_docstrings(): # see gh-17327 assert NaT.ctime.__doc__ == datetime.ctime.__doc__ diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 401fc285424fe..652dd34ca7ce2 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -1047,3 +1047,23 @@ def test_to_numpy_alias(self): # GH 24653: alias .to_numpy() for scalars ts = Timestamp(datetime.now()) assert ts.to_datetime64() == ts.to_numpy() + + +class SubDatetime(datetime): + pass + + +@pytest.mark.parametrize( + "lh,rh", + [ + (SubDatetime(2000, 1, 1), Timedelta(hours=1)), + (Timedelta(hours=1), SubDatetime(2000, 1, 1)), + ], +) +def test_dt_subclass_add_timedelta(lh, rh): + # GH#25851 + # ensure that subclassed datetime works for + # Timedelta operations + result = lh + rh + expected = SubDatetime(2000, 1, 1, 1) + assert result == expected diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 0a25d6ba203cb..5d74ad95be90d 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -267,6 +267,25 @@ def test_rename_axis_none(self, kwargs): expected = Series([1, 2, 3], index=expected_index) tm.assert_series_equal(result, expected) + def test_rename_with_custom_indexer(self): + # GH 27814 + class MyIndexer: + pass + + ix = MyIndexer() + s = Series([1, 2, 3]).rename(ix) + assert s.name is ix + + def test_rename_with_custom_indexer_inplace(self): + # GH 27814 + class MyIndexer: + pass + + ix = MyIndexer() + s = Series([1, 2, 3]) + s.rename(ix, inplace=True) + assert s.name is ix + def test_set_axis_inplace_axes(self, axis_series): # GH14636 ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64") diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 
3a5a387b919be..d6cb7f8d6a8be 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -20,6 +20,7 @@ from pandas.api.types import is_scalar from pandas.core.index import MultiIndex from pandas.core.indexes.datetimes import Timestamp +from pandas.core.indexes.timedeltas import TimedeltaIndex import pandas.util.testing as tm from pandas.util.testing import ( assert_almost_equal, @@ -237,6 +238,59 @@ def test_npdiff(self): r = np.diff(s) assert_series_equal(Series([nan, 0, 0, 0, nan]), r) + def test_dt_nm_bool_diff(self): + # Combined datetime diff, normal diff and boolean diff test + ts = tm.makeTimeSeries(name="ts") + ts.diff() + + # int dtype + a = 10000000000000000 + b = a + 1 + s = Series([a, b]) + + rs = s.diff() + assert rs[1] == 1 + + # neg n + rs = ts.diff(-1) + xp = ts - ts.shift(-1) + assert_series_equal(rs, xp) + + # 0 + rs = ts.diff(0) + xp = ts - ts + assert_series_equal(rs, xp) + + # datetime diff (GH3100) + s = Series(date_range("20130102", periods=5)) + rs = s - s.shift(1) + xp = s.diff() + assert_series_equal(rs, xp) + + # timedelta diff + nrs = rs - rs.shift(1) + nxp = xp.diff() + assert_series_equal(nrs, nxp) + + # with tz + s = Series( + date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo" + ) + result = s.diff() + assert_series_equal( + result, Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo") + ) + + # boolean series + s = Series([False, True, True, False, False]) + result = s.diff() + assert_series_equal(result, Series([nan, True, False, True, False])) + + # boolean nan series + s = Series([False, True, nan, False, False]) + result = s.diff() + assert_series_equal(result, Series([nan, 1, nan, nan, 0], dtype="object")) + def _check_accum_op(self, name, datetime_series_, check_dtype=True): func = getattr(np, name) tm.assert_numpy_array_equal( @@ -1482,16 +1536,7 @@ def test_value_counts_with_nan(self): @pytest.mark.parametrize( "dtype", - [ - "int_", - "uint", - "float_", - "unicode_", - "timedelta64[h]", - pytest.param( - "datetime64[D]", marks=pytest.mark.xfail(reason="GH#7996", strict=True) - ), - ], + ["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"], ) def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture): cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype)) @@ -1499,6 +1544,10 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture): # Test case 1 input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype)) tc1 = Series(Categorical(input1, categories=cat_array, ordered=ordered_fixture)) + if dtype == "datetime64[D]": + # pre-empty flaky xfail, tc1 values are seemingly-random + if not (np.array(tc1) == input1).all(): + pytest.xfail(reason="GH#7996") expected = Series([False, False, False, True]) tm.assert_series_equal(tc1.duplicated(), expected) @@ -1524,6 +1573,10 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture): # Test case 2 input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype)) tc2 = Series(Categorical(input2, categories=cat_array, ordered=ordered_fixture)) + if dtype == "datetime64[D]": + # pre-empty flaky xfail, tc2 values are seemingly-random + if not (np.array(tc2) == input2).all(): + pytest.xfail(reason="GH#7996") expected = Series([False, False, False, False, True, True, False]) tm.assert_series_equal(tc2.duplicated(), expected) diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 0686b397cbd81..0ddf1dfcabb59 100644 --- 
a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -191,6 +191,20 @@ def test_to_csv_compression(self, s, encoding, compression): s, pd.read_csv(fh, index_col=0, squeeze=True, encoding=encoding) ) + def test_to_csv_interval_index(self): + # GH 28210 + s = Series(["foo", "bar", "baz"], index=pd.interval_range(0, 3)) + + with ensure_clean("__tmp_to_csv_interval_index__.csv") as path: + s.to_csv(path, header=False) + result = self.read_csv(path, index_col=0, squeeze=True) + + # can't roundtrip intervalindex via read_csv so check string repr (GH 23595) + expected = s.copy() + expected.index = expected.index.astype(str) + + assert_series_equal(result, expected) + class TestSeriesIO: def test_to_frame(self, datetime_series): diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index f1b84acf68755..ddd2c566f4cda 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -578,6 +578,28 @@ def test_fillna_categorical(self, fill_value, expected_output): exp = Series(Categorical(expected_output, categories=["a", "b"])) tm.assert_series_equal(s.fillna(fill_value), exp) + @pytest.mark.parametrize( + "fill_value, expected_output", + [ + (Series(["a", "b", "c", "d", "e"]), ["a", "b", "b", "d", "e"]), + (Series(["b", "d", "a", "d", "a"]), ["a", "d", "b", "d", "a"]), + ( + Series( + Categorical( + ["b", "d", "a", "d", "a"], categories=["b", "c", "d", "e", "a"] + ) + ), + ["a", "d", "b", "d", "a"], + ), + ], + ) + def test_fillna_categorical_with_new_categories(self, fill_value, expected_output): + # GH 26215 + data = ["a", np.nan, "b", np.nan, np.nan] + s = Series(Categorical(data, categories=["a", "b", "c", "d", "e"])) + exp = Series(Categorical(expected_output, categories=["a", "b", "c", "d", "e"])) + tm.assert_series_equal(s.fillna(fill_value), exp) + def test_fillna_categorical_raise(self): data = ["a", np.nan, "b", np.nan, np.nan] s = Series(Categorical(data, categories=["a", "b"])) diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py index 9b34b52bf39b9..4aeb211170d8f 100644 --- a/pandas/tests/series/test_period.py +++ b/pandas/tests/series/test_period.py @@ -71,10 +71,9 @@ def test_NaT_scalar(self): series[2] = val assert pd.isna(series[2]) - @pytest.mark.xfail(reason="PeriodDtype Series not supported yet") def test_NaT_cast(self): result = Series([np.nan]).astype("period[D]") - expected = Series([pd.NaT]) + expected = Series([pd.NaT], dtype="period[D]") tm.assert_series_equal(result, expected) def test_set_none(self): diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index d0ca5d82c6b33..fbe3f929cf5b5 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -355,48 +355,6 @@ def test_asfreq_datetimeindex_empty_series(self): ) tm.assert_index_equal(expected.index, result.index) - def test_diff(self): - # Just run the function - self.ts.diff() - - # int dtype - a = 10000000000000000 - b = a + 1 - s = Series([a, b]) - - rs = s.diff() - assert rs[1] == 1 - - # neg n - rs = self.ts.diff(-1) - xp = self.ts - self.ts.shift(-1) - assert_series_equal(rs, xp) - - # 0 - rs = self.ts.diff(0) - xp = self.ts - self.ts - assert_series_equal(rs, xp) - - # datetime diff (GH3100) - s = Series(date_range("20130102", periods=5)) - rs = s - s.shift(1) - xp = s.diff() - assert_series_equal(rs, xp) - - # timedelta diff - nrs = rs - rs.shift(1) - nxp = xp.diff() - assert_series_equal(nrs, nxp) - - # with tz - s = 
Series( - date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo" - ) - result = s.diff() - assert_series_equal( - result, Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo") - ) - def test_pct_change(self): rs = self.ts.pct_change(fill_method=None) assert_series_equal(rs, self.ts / self.ts.shift(1) - 1) diff --git a/pandas/tests/series/test_ufunc.py b/pandas/tests/series/test_ufunc.py index c024e9caba156..8144a3931b9b8 100644 --- a/pandas/tests/series/test_ufunc.py +++ b/pandas/tests/series/test_ufunc.py @@ -252,10 +252,7 @@ def __add__(self, other): "values", [ pd.array([1, 3, 2]), - pytest.param( - pd.array([1, 10, 0], dtype="Sparse[int]"), - marks=pytest.mark.xfail(resason="GH-27080. Bug in SparseArray"), - ), + pd.array([1, 10, 0], dtype="Sparse[int]"), pd.to_datetime(["2000", "2010", "2001"]), pd.to_datetime(["2000", "2010", "2001"]).tz_localize("CET"), pd.to_datetime(["2000", "2010", "2001"]).to_period(freq="D"), diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 479e55c86fcd1..65b2dab1b02a8 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,4 +1,5 @@ import collections +from distutils.version import LooseVersion from functools import partial import string @@ -117,3 +118,13 @@ def test_git_version(): git_version = pd.__git_version__ assert len(git_version) == 40 assert all(c in string.hexdigits for c in git_version) + + +def test_version_tag(): + version = pd.__version__ + try: + version > LooseVersion("0.0.1") + except TypeError: + raise ValueError( + "No git tags exist, please sync tags between upstream and your repo" + ) diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 4070624985068..ca514f62f451d 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -66,7 +66,7 @@ def run_arithmetic(self, df, other, assert_func, check_dtype=False, test_flex=Tr operator_name = "truediv" if test_flex: - op = lambda x, y: getattr(df, arith)(y) + op = lambda x, y: getattr(x, arith)(y) op.__name__ = arith else: op = getattr(operator, operator_name) @@ -318,7 +318,6 @@ def testit(): for f in [self.frame, self.frame2, self.mixed, self.mixed2]: for cond in [True, False]: - c = np.empty(f.shape, dtype=np.bool_) c.fill(cond) result = expr.where(c, f.values, f.values + 1) @@ -431,3 +430,29 @@ def test_bool_ops_column_name_dtype(self, test_input, expected): # GH 22383 - .ne fails if columns containing column name 'dtype' result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]]) assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv") + ) + @pytest.mark.parametrize("axis", (0, 1)) + def test_frame_series_axis(self, axis, arith): + # GH#26736 Dataframe.floordiv(Series, axis=1) fails + if axis == 1 and arith == "floordiv": + pytest.xfail("'floordiv' does not succeed with axis=1 #27636") + + df = self.frame + if axis == 1: + other = self.frame.iloc[0, :] + else: + other = self.frame.iloc[:, 0] + + expr._MIN_ELEMENTS = 0 + + op_func = getattr(df, arith) + + expr.set_use_numexpr(False) + expected = op_func(other, axis=axis) + expr.set_use_numexpr(True) + + result = op_func(other, axis=axis) + assert_frame_equal(expected, result) diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py index a05b567adad7a..1683fda500f85 100644 --- a/pandas/tests/window/test_ewm.py +++ b/pandas/tests/window/test_ewm.py @@ -4,7 +4,7 @@ from pandas.errors import 
UnsupportedFunctionCall from pandas import DataFrame, Series -import pandas.core.window as rwindow +from pandas.core.window import EWM from pandas.tests.window.common import Base @@ -60,7 +60,7 @@ def test_constructor(self, which): @pytest.mark.parametrize("method", ["std", "mean", "var"]) def test_numpy_compat(self, method): # see gh-12811 - e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5) + e = EWM(Series([2, 4, 6]), alpha=0.5) msg = "numpy operations are not valid with window objects" diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py index 1e92c981964c5..098acdff93ac6 100644 --- a/pandas/tests/window/test_expanding.py +++ b/pandas/tests/window/test_expanding.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import DataFrame, Series -import pandas.core.window as rwindow +from pandas.core.window import Expanding from pandas.tests.window.common import Base import pandas.util.testing as tm @@ -42,7 +42,7 @@ def test_constructor(self, which): @pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"]) def test_numpy_compat(self, method): # see gh-12811 - e = rwindow.Expanding(Series([2, 4, 6]), window=2) + e = Expanding(Series([2, 4, 6]), window=2) msg = "numpy operations are not valid with window objects" diff --git a/pandas/tests/window/test_moments.py b/pandas/tests/window/test_moments.py index d860859958254..3d6cd7d10bd10 100644 --- a/pandas/tests/window/test_moments.py +++ b/pandas/tests/window/test_moments.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import DataFrame, Index, Series, concat, isna, notna -import pandas.core.window as rwindow +from pandas.core.window.common import _flex_binary_moment from pandas.tests.window.common import Base import pandas.util.testing as tm @@ -1878,7 +1878,7 @@ def test_flex_binary_moment(self): " np.ndarray/Series/DataFrame" ) with pytest.raises(TypeError, match=msg): - rwindow._flex_binary_moment(5, 6, None) + _flex_binary_moment(5, 6, None) def test_corr_sanity(self): # GH 3155 diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index c7177e1d3914f..b4787bf25e3bb 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -8,7 +8,7 @@ import pandas as pd from pandas import DataFrame, Series -import pandas.core.window as rwindow +from pandas.core.window import Rolling from pandas.tests.window.common import Base import pandas.util.testing as tm @@ -101,7 +101,7 @@ def test_constructor_timedelta_window_and_minperiods(self, window, raw): @pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"]) def test_numpy_compat(self, method): # see gh-12811 - r = rwindow.Rolling(Series([2, 4, 6]), window=2) + r = Rolling(Series([2, 4, 6]), window=2) msg = "numpy operations are not valid with window objects" @@ -326,3 +326,11 @@ def test_rolling_axis_count(self, axis_frame): result = df.rolling(2, axis=axis_frame).count() tm.assert_frame_equal(result, expected) + + def test_readonly_array(self): + # GH-27766 + arr = np.array([1, 3, np.nan, 3, 5]) + arr.setflags(write=False) + result = pd.Series(arr).rolling(2).mean() + expected = pd.Series([np.nan, 2, np.nan, np.nan, 4]) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py index a6a56c98a9377..5692404205012 100644 --- a/pandas/tests/window/test_window.py +++ b/pandas/tests/window/test_window.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import Series -import pandas.core.window as 
rwindow +from pandas.core.window import Window from pandas.tests.window.common import Base @@ -50,7 +50,7 @@ def test_constructor_with_win_type(self, which, win_types): @pytest.mark.parametrize("method", ["sum", "mean"]) def test_numpy_compat(self, method): # see gh-12811 - w = rwindow.Window(Series([2, 4, 6]), window=[0, 2]) + w = Window(Series([2, 4, 6]), window=[0, 2]) msg = "numpy operations are not valid with window objects" diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index a208d5ad2fea9..edf58ba3850a1 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -204,8 +204,7 @@ def __add__(date): normalize : bool, default False Whether to round the result of a DateOffset addition down to the previous midnight. - **kwds - Temporal parameter that add to or replace the offset value. + **kwds : Temporal parameter that add to or replace the offset value. Parameters that **add** to the offset (like Timedelta): @@ -233,16 +232,19 @@ def __add__(date): See Also -------- - dateutil.relativedelta.relativedelta + dateutil.relativedelta.relativedelta : The relativedelta type is designed + to be applied to an existing datetime an can replace specific components of + that datetime, or represents an interval of time. Examples -------- + >>> from pandas.tseries.offsets import DateOffset >>> ts = pd.Timestamp('2017-01-01 09:10:11') >>> ts + DateOffset(months=3) Timestamp('2017-04-01 09:10:11') >>> ts = pd.Timestamp('2017-01-01 09:10:11') - >>> ts + DateOffset(month=3) + >>> ts + DateOffset(months=2) Timestamp('2017-03-01 09:10:11') """ diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 5c7d481ff2586..8a25e511b5fc4 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -1,21 +1,35 @@ from functools import wraps import inspect from textwrap import dedent -from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union +from typing import ( + Any, + Callable, + Dict, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, + cast, +) import warnings from pandas._libs.properties import cache_readonly # noqa +FuncType = Callable[..., Any] +F = TypeVar("F", bound=FuncType) + def deprecate( name: str, - alternative: Callable, + alternative: Callable[..., Any], version: str, alt_name: Optional[str] = None, klass: Optional[Type[Warning]] = None, stacklevel: int = 2, msg: Optional[str] = None, -) -> Callable: +) -> Callable[..., Any]: """ Return a new function that emits a deprecation warning on use. @@ -47,7 +61,7 @@ def deprecate( warning_msg = msg or "{} is deprecated, use {} instead".format(name, alt_name) @wraps(alternative) - def wrapper(*args, **kwargs): + def wrapper(*args, **kwargs) -> Callable[..., Any]: warnings.warn(warning_msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) @@ -90,9 +104,9 @@ def wrapper(*args, **kwargs): def deprecate_kwarg( old_arg_name: str, new_arg_name: Optional[str], - mapping: Optional[Union[Dict, Callable[[Any], Any]]] = None, + mapping: Optional[Union[Dict[Any, Any], Callable[[Any], Any]]] = None, stacklevel: int = 2, -) -> Callable: +) -> Callable[..., Any]: """ Decorator to deprecate a keyword argument of a function. @@ -160,27 +174,27 @@ def deprecate_kwarg( "mapping from old to new argument values " "must be dict or callable!" 
) - def _deprecate_kwarg(func): + def _deprecate_kwarg(func: F) -> F: @wraps(func) - def wrapper(*args, **kwargs): + def wrapper(*args, **kwargs) -> Callable[..., Any]: old_arg_value = kwargs.pop(old_arg_name, None) - if new_arg_name is None and old_arg_value is not None: - msg = ( - "the '{old_name}' keyword is deprecated and will be " - "removed in a future version. " - "Please take steps to stop the use of '{old_name}'" - ).format(old_name=old_arg_name) - warnings.warn(msg, FutureWarning, stacklevel=stacklevel) - kwargs[old_arg_name] = old_arg_value - return func(*args, **kwargs) - if old_arg_value is not None: - if mapping is not None: - if hasattr(mapping, "get"): - new_arg_value = mapping.get(old_arg_value, old_arg_value) - else: + if new_arg_name is None: + msg = ( + "the '{old_name}' keyword is deprecated and will be " + "removed in a future version. " + "Please take steps to stop the use of '{old_name}'" + ).format(old_name=old_arg_name) + warnings.warn(msg, FutureWarning, stacklevel=stacklevel) + kwargs[old_arg_name] = old_arg_value + return func(*args, **kwargs) + + elif mapping is not None: + if callable(mapping): new_arg_value = mapping(old_arg_value) + else: + new_arg_value = mapping.get(old_arg_value, old_arg_value) msg = ( "the {old_name}={old_val!r} keyword is deprecated, " "use {new_name}={new_val!r} instead" @@ -198,7 +212,7 @@ def wrapper(*args, **kwargs): ).format(old_name=old_arg_name, new_name=new_arg_name) warnings.warn(msg, FutureWarning, stacklevel=stacklevel) - if kwargs.get(new_arg_name, None) is not None: + if kwargs.get(new_arg_name) is not None: msg = ( "Can only specify '{old_name}' or '{new_name}', " "not both" ).format(old_name=old_arg_name, new_name=new_arg_name) @@ -207,17 +221,17 @@ def wrapper(*args, **kwargs): kwargs[new_arg_name] = new_arg_value return func(*args, **kwargs) - return wrapper + return cast(F, wrapper) return _deprecate_kwarg def rewrite_axis_style_signature( name: str, extra_params: List[Tuple[str, Any]] -) -> Callable: - def decorate(func): +) -> Callable[..., Any]: + def decorate(func: F) -> F: @wraps(func) - def wrapper(*args, **kwargs): + def wrapper(*args, **kwargs) -> Callable[..., Any]: return func(*args, **kwargs) kind = inspect.Parameter.POSITIONAL_OR_KEYWORD @@ -234,8 +248,9 @@ def wrapper(*args, **kwargs): sig = inspect.Signature(params) - func.__signature__ = sig - return wrapper + # https://github.com/python/typing/issues/598 + func.__signature__ = sig # type: ignore + return cast(F, wrapper) return decorate @@ -279,18 +294,17 @@ def __init__(self, *args, **kwargs): self.params = args or kwargs - def __call__(self, func: Callable) -> Callable: + def __call__(self, func: F) -> F: func.__doc__ = func.__doc__ and func.__doc__ % self.params return func def update(self, *args, **kwargs) -> None: """ Update self.params with supplied args. - - If called, we assume self.params is a dict. 
""" - self.params.update(*args, **kwargs) + if isinstance(self.params, dict): + self.params.update(*args, **kwargs) class Appender: @@ -320,7 +334,7 @@ def __init__(self, addendum: Optional[str], join: str = "", indents: int = 0): self.addendum = addendum self.join = join - def __call__(self, func: Callable) -> Callable: + def __call__(self, func: F) -> F: func.__doc__ = func.__doc__ if func.__doc__ else "" self.addendum = self.addendum if self.addendum else "" docitems = [func.__doc__, self.addendum] diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 3de4e5d66d577..627757aaa3741 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -25,9 +25,8 @@ def test_foo(): """ from distutils.version import LooseVersion import locale -from typing import Optional +from typing import Callable, Optional -from _pytest.mark.structures import MarkDecorator import pytest from pandas.compat import is_platform_32bit, is_platform_windows @@ -103,7 +102,7 @@ def _skip_if_no_scipy(): ) -def skip_if_installed(package: str,) -> MarkDecorator: +def skip_if_installed(package: str,) -> Callable: """ Skip a test if a package is installed. @@ -117,7 +116,7 @@ def skip_if_installed(package: str,) -> MarkDecorator: ) -def skip_if_no(package: str, min_version: Optional[str] = None) -> MarkDecorator: +def skip_if_no(package: str, min_version: Optional[str] = None) -> Callable: """ Generic function to help skip tests when required packages are not present on the testing system. diff --git a/pandas/util/testing.py b/pandas/util/testing.py index cf8452cdd0c59..0d543f891a5f6 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -5,7 +5,6 @@ from functools import wraps import gzip import http.client -import lzma import os import re from shutil import rmtree @@ -26,7 +25,7 @@ ) import pandas._libs.testing as _testing -from pandas.compat import raise_with_traceback +from pandas.compat import _get_lzma_file, _import_lzma, raise_with_traceback from pandas.core.dtypes.common import ( is_bool, @@ -70,6 +69,8 @@ from pandas.io.common import urlopen from pandas.io.formats.printing import pprint_thing +lzma = _import_lzma() + N = 30 K = 4 _RAISE_NETWORK_ERROR_DEFAULT = False @@ -211,7 +212,7 @@ def decompress_file(path, compression): elif compression == "bz2": f = bz2.BZ2File(path, "rb") elif compression == "xz": - f = lzma.LZMAFile(path, "rb") + f = _get_lzma_file(lzma)(path, "rb") elif compression == "zip": zip_file = zipfile.ZipFile(path) zip_names = zip_file.namelist() @@ -264,9 +265,7 @@ def write_to_compressed(compression, path, data, dest="test"): compress_method = bz2.BZ2File elif compression == "xz": - import lzma - - compress_method = lzma.LZMAFile + compress_method = _get_lzma_file(lzma) else: msg = "Unrecognized compression type: {}".format(compression) raise ValueError(msg) @@ -581,7 +580,8 @@ def assert_index_equal( check_categorical: bool = True, obj: str = "Index", ) -> None: - """Check that left and right Index are equal. + """ + Check that left and right Index are equal. Parameters ---------- @@ -1082,7 +1082,8 @@ def assert_series_equal( check_categorical=True, obj="Series", ): - """Check that left and right Series are equal. + """ + Check that left and right Series are equal. 
Parameters ---------- diff --git a/requirements-dev.txt b/requirements-dev.txt index e49ad10bfc99d..cf11a3ee28258 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -45,7 +45,7 @@ html5lib lxml openpyxl pyarrow>=0.9.0 -pyqt +pyqt5>=5.9.2 tables>=3.4.2 python-snappy s3fs diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py index 1075a257d4270..95a892b822cff 100755 --- a/scripts/find_commits_touching_func.py +++ b/scripts/find_commits_touching_func.py @@ -10,11 +10,11 @@ Usage:: $ ./find_commits_touching_func.py (see arguments below) """ -import logging -import re -import os import argparse from collections import namedtuple +import logging +import os +import re from dateutil.parser import parse diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py index ac73859b22598..29fe8bf84c12b 100755 --- a/scripts/generate_pip_deps_from_conda.py +++ b/scripts/generate_pip_deps_from_conda.py @@ -16,11 +16,11 @@ import os import re import sys -import yaml +import yaml EXCLUDE = {"python=3"} -RENAME = {"pytables": "tables"} +RENAME = {"pytables": "tables", "pyqt": "pyqt5"} def conda_package_to_pip(package): diff --git a/scripts/merge-pr.py b/scripts/merge-pr.py index 95352751a23c6..300cb149f387f 100755 --- a/scripts/merge-pr.py +++ b/scripts/merge-pr.py @@ -22,14 +22,15 @@ # usage: ./apache-pr-merge.py (see config env vars below) # # Lightly modified from version of this script in incubator-parquet-format -from subprocess import check_output -from requests.auth import HTTPBasicAuth -import requests import os +from subprocess import check_output import sys import textwrap +import requests +from requests.auth import HTTPBasicAuth + PANDAS_HOME = "." PROJECT_NAME = "pandas" print("PANDAS_HOME = " + PANDAS_HOME) diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index f3364e6725a20..85e5bf239cbfa 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -2,12 +2,13 @@ import random import string import textwrap -import pytest -import numpy as np -import pandas as pd +import numpy as np +import pytest import validate_docstrings +import pandas as pd + validate_one = validate_docstrings.validate_one @@ -200,7 +201,7 @@ def contains(self, pat, case=True, na=np.nan): def mode(self, axis, numeric_only): """ - Ensure sphinx directives don't affect checks for trailing periods. + Ensure reST directives don't affect checks for leading periods. Parameters ---------- @@ -447,6 +448,27 @@ def deprecation_in_wrong_order(self): def method_wo_docstrings(self): pass + def directives_without_two_colons(self, first, second): + """ + Ensure reST directives have trailing colons. + + Parameters + ---------- + first : str + Sentence ending in period, followed by single directive w/o colons. + + .. versionchanged 0.1.2 + + second : bool + Sentence ending in period, followed by multiple directives w/o + colons. + + .. versionadded 0.1.2 + .. 
deprecated 0.00.0 + + """ + pass + class BadSummaries: def wrong_line(self): @@ -840,6 +862,7 @@ def test_bad_class(self, capsys): "plot", "method", "private_classes", + "directives_without_two_colons", ], ) def test_bad_generic_functions(self, capsys, func): @@ -879,6 +902,14 @@ def test_bad_generic_functions(self, capsys, func): "deprecation_in_wrong_order", ("Deprecation warning should precede extended summary",), ), + ( + "BadGenericDocStrings", + "directives_without_two_colons", + ( + "reST directives ['versionchanged', 'versionadded', " + "'deprecated'] must be followed by two colons", + ), + ), ( "BadSeeAlso", "desc_no_period", diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index 37623d32db685..401eaf8ff5ed5 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -13,20 +13,20 @@ $ ./validate_docstrings.py $ ./validate_docstrings.py pandas.DataFrame.head """ -import os -import sys -import json -import re -import glob -import functools -import collections import argparse -import pydoc -import inspect -import importlib +import ast +import collections import doctest +import functools +import glob +import importlib +import inspect +import json +import os +import pydoc +import re +import sys import tempfile -import ast import textwrap import flake8.main.application @@ -41,24 +41,25 @@ # script. Setting here before matplotlib is loaded. # We don't warn for the number of open plots, as none is actually being opened os.environ["MPLBACKEND"] = "Template" -import matplotlib +import matplotlib # noqa: E402 isort:skip matplotlib.rc("figure", max_open_warning=10000) -import numpy +import numpy # noqa: E402 isort:skip BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, os.path.join(BASE_PATH)) -import pandas +import pandas # noqa: E402 isort:skip sys.path.insert(1, os.path.join(BASE_PATH, "doc", "sphinxext")) -from numpydoc.docscrape import NumpyDocString -from pandas.io.formats.printing import pprint_thing +from numpydoc.docscrape import NumpyDocString # noqa: E402 isort:skip +from pandas.io.formats.printing import pprint_thing # noqa: E402 isort:skip PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"] DIRECTIVES = ["versionadded", "versionchanged", "deprecated"] +DIRECTIVE_PATTERN = re.compile(rf"^\s*\.\. ({'|'.join(DIRECTIVES)})(?!::)", re.I | re.M) ALLOWED_SECTIONS = [ "Parameters", "Attributes", @@ -93,6 +94,7 @@ "GL07": "Sections are in the wrong order. Correct order is: " "{correct_sections}", "GL08": "The object does not have a docstring", "GL09": "Deprecation warning should precede extended summary", + "GL10": "reST directives {directives} must be followed by two colons", "SS01": "No summary found (a short summary in a single line should be " "present at the beginning of the docstring)", "SS02": "Summary does not start with a capital letter", @@ -478,6 +480,10 @@ def parameter_mismatches(self): def correct_parameters(self): return not bool(self.parameter_mismatches) + @property + def directives_without_two_colons(self): + return DIRECTIVE_PATTERN.findall(self.raw_doc) + def parameter_type(self, param): return self.doc_parameters[param][0] @@ -697,6 +703,10 @@ def get_validation_data(doc): if doc.deprecated and not doc.extended_summary.startswith(".. 
deprecated:: "): errs.append(error("GL09")) + directives_without_two_colons = doc.directives_without_two_colons + if directives_without_two_colons: + errs.append(error("GL10", directives=directives_without_two_colons)) + if not doc.summary: errs.append(error("SS01")) else: diff --git a/setup.cfg b/setup.cfg index 716ff5d9d8853..43dbac15f5cfe 100644 --- a/setup.cfg +++ b/setup.cfg @@ -110,68 +110,25 @@ directory = coverage_html_report # To be kept consistent with "Import Formatting" section in contributing.rst [isort] -known_pre_libs=pandas._config -known_pre_core=pandas._libs,pandas.util._*,pandas.compat,pandas.errors -known_dtypes=pandas.core.dtypes -known_post_core=pandas.tseries,pandas.io,pandas.plotting -sections=FUTURE,STDLIB,THIRDPARTY,PRE_LIBS,PRE_CORE,DTYPES,FIRSTPARTY,POST_CORE,LOCALFOLDER - -known_first_party=pandas -known_third_party=Cython,numpy,dateutil,matplotlib,python-dateutil,pytz,pyarrow,pytest - -multi_line_output=3 -include_trailing_comma=True -force_grid_wrap=0 -combine_as_imports=True -line_length=88 -force_sort_within_sections=True -skip_glob=env, -skip= - pandas/__init__.py - pandas/core/api.py, - pandas/io/msgpack/__init__.py - asv_bench/benchmarks/attrs_caching.py, - asv_bench/benchmarks/binary_ops.py, - asv_bench/benchmarks/categoricals.py, - asv_bench/benchmarks/ctors.py, - asv_bench/benchmarks/eval.py, - asv_bench/benchmarks/frame_ctor.py, - asv_bench/benchmarks/frame_methods.py, - asv_bench/benchmarks/gil.py, - asv_bench/benchmarks/groupby.py, - asv_bench/benchmarks/index_object.py, - asv_bench/benchmarks/indexing.py, - asv_bench/benchmarks/inference.py, - asv_bench/benchmarks/io/csv.py, - asv_bench/benchmarks/io/excel.py, - asv_bench/benchmarks/io/hdf.py, - asv_bench/benchmarks/io/json.py, - asv_bench/benchmarks/io/msgpack.py, - asv_bench/benchmarks/io/pickle.py, - asv_bench/benchmarks/io/sql.py, - asv_bench/benchmarks/io/stata.py, - asv_bench/benchmarks/join_merge.py, - asv_bench/benchmarks/multiindex_object.py, - asv_bench/benchmarks/panel_ctor.py, - asv_bench/benchmarks/panel_methods.py, - asv_bench/benchmarks/plotting.py, - asv_bench/benchmarks/reindex.py, - asv_bench/benchmarks/replace.py, - asv_bench/benchmarks/reshape.py, - asv_bench/benchmarks/rolling.py, - asv_bench/benchmarks/series_methods.py, - asv_bench/benchmarks/sparse.py, - asv_bench/benchmarks/stat_ops.py, - asv_bench/benchmarks/timeseries.py - asv_bench/benchmarks/pandas_vb_common.py - asv_bench/benchmarks/offset.py - asv_bench/benchmarks/dtypes.py - asv_bench/benchmarks/strings.py - asv_bench/benchmarks/period.py +known_pre_libs = pandas._config +known_pre_core = pandas._libs,pandas.util._*,pandas.compat,pandas.errors +known_dtypes = pandas.core.dtypes +known_post_core = pandas.tseries,pandas.io,pandas.plotting +sections = FUTURE,STDLIB,THIRDPARTY,PRE_LIBS,PRE_CORE,DTYPES,FIRSTPARTY,POST_CORE,LOCALFOLDER +known_first_party = pandas +known_third_party = _pytest,announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,yaml +multi_line_output = 3 +include_trailing_comma = True +force_grid_wrap = 0 +combine_as_imports = True +line_length = 88 +force_sort_within_sections = True +skip_glob = env, +skip = pandas/__init__.py,pandas/core/api.py [mypy] ignore_missing_imports=True no_implicit_optional=True [mypy-pandas.conftest,pandas.tests.*] -ignore_errors=True \ No newline at end of file +ignore_errors=True diff --git a/setup.py b/setup.py index 
d2c6b18b892cd..a86527ace092b 100755 --- a/setup.py +++ b/setup.py @@ -6,16 +6,16 @@ BSD license. Parts are from lxml (https://github.com/lxml/lxml) """ +from distutils.sysconfig import get_config_vars +from distutils.version import LooseVersion import os from os.path import join as pjoin - -import pkg_resources import platform -from distutils.sysconfig import get_config_vars -import sys import shutil -from distutils.version import LooseVersion -from setuptools import setup, Command, find_packages +import sys + +import pkg_resources +from setuptools import Command, find_packages, setup # versioning import versioneer @@ -58,8 +58,8 @@ def is_platform_mac(): # The import of Extension must be after the import of Cython, otherwise # we do not get the appropriately patched class. # See https://cython.readthedocs.io/en/latest/src/reference/compilation.html -from distutils.extension import Extension # noqa:E402 -from distutils.command.build import build # noqa:E402 +from distutils.extension import Extension # noqa: E402 isort:skip +from distutils.command.build import build # noqa: E402 isort:skip try: if not _CYTHON_INSTALLED: @@ -831,9 +831,7 @@ def srcpath(name=None, suffix=".pyx", subdir="src"): ] }, entry_points={ - "pandas_plotting_backends": [ - "matplotlib = pandas:plotting._matplotlib", - ], + "pandas_plotting_backends": ["matplotlib = pandas:plotting._matplotlib"] }, **setuptools_kwargs )
This fixes #27755; for more than three years now, NumPy has not allowed subtraction of boolean Series.

TypeError Traceback (most recent call last)
<ipython-input-46-3da3b949c6bd> in <module>
      1 data = pd.Series([0,-1,-2,-3,-4,-3,-2,-1,0,-1,-1,0,-1,-2,-3,-2,0])
      2 filtered = data.between(-2,0, inclusive = True)
----> 3 filtered.diff()
      4 print(filtered)

~\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\core\series.py in diff(self, periods)
   2191         dtype: float64
   2192         """
-> 2193         result = algorithms.diff(com.values_from_object(self), periods)
   2194         return self._constructor(result, index=self.index).__finalize__(self)
   2195

~\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\core\algorithms.py in diff(arr, n, axis)
   1817             out_arr[res_indexer] = result
   1818         else:
-> 1819             out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
   1820
   1821     if is_timedelta:

TypeError: numpy boolean subtract, the `-` operator, is deprecated, use the bitwise_xor, the `^` operator, or the logical_xor function instead.

- [x] closes #17294
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
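For reference, a minimal sketch of the behavior this change establishes, mirroring the test added to `pandas/tests/series/test_analytics.py` in the diff above; the printed output assumes a pandas build that includes this fix:

```python
import pandas as pd

# Boolean Series.diff() used to raise TypeError because NumPy forbids
# boolean subtraction; the fix falls back to element-wise XOR instead.
s = pd.Series([False, True, True, False, False])
print(s.diff())
# Expected (per the test in this PR): NaN, True, False, True, False (object dtype)

# Workaround on pre-fix versions: take the diff on integers instead of booleans.
print(s.astype(int).diff())  # NaN, 1.0, 0.0, -1.0, 0.0
```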
https://api.github.com/repos/pandas-dev/pandas/pulls/27755
2019-08-05T12:34:44Z
2019-09-02T18:44:21Z
null
2019-09-30T11:25:24Z
Slightly rephrase SPSS doc
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 8e5352c337072..947bf15a49c7a 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5491,30 +5491,29 @@ The top-level function :func:`read_spss` can read (but not write) SPSS `sav` (.sav) and `zsav` (.zsav) format files. SPSS files contain column names. By default the -whole file is read, categorical columns are converted into ``pd.Categorical`` +whole file is read, categorical columns are converted into ``pd.Categorical``, and a ``DataFrame`` with all columns is returned. -Specify a ``usecols`` to obtain a subset of columns. Specify ``convert_categoricals=False`` +Specify the ``usecols`` parameter to obtain a subset of columns. Specify ``convert_categoricals=False`` to avoid converting categorical columns into ``pd.Categorical``. -Read a spss file: +Read an SPSS file: .. code-block:: python - df = pd.read_spss('spss_data.zsav') + df = pd.read_spss('spss_data.sav') -Extract a subset of columns ``usecols`` from SPSS file and +Extract a subset of columns contained in ``usecols`` from an SPSS file and avoid converting categorical columns into ``pd.Categorical``: .. code-block:: python - df = pd.read_spss('spss_data.zsav', usecols=['foo', 'bar'], + df = pd.read_spss('spss_data.sav', usecols=['foo', 'bar'], convert_categoricals=False) -More info_ about the sav and zsav file format is available from the IBM -web site. +More information about the `sav` and `zsav` file format is available here_. -.. _info: https://www.ibm.com/support/knowledgecenter/en/SSLVMB_22.0.0/com.ibm.spss.statistics.help/spss/base/savedatatypes.htm +.. _here: https://www.ibm.com/support/knowledgecenter/en/SSLVMB_22.0.0/com.ibm.spss.statistics.help/spss/base/savedatatypes.htm .. _io.other:
As a follow-up to #27594 (which I couldn't review in time), here are some minor improvements to the SPSS user guide text.
https://api.github.com/repos/pandas-dev/pandas/pulls/27754
2019-08-05T12:15:09Z
2019-08-05T13:27:42Z
2019-08-05T13:27:42Z
2019-08-05T13:29:11Z
Backport PR #27720 on branch 0.25.x (BUG: fix replace_list)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index b5bd83fd17530..4d9ee4c676759 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -152,7 +152,7 @@ ExtensionArray Other ^^^^^ - +- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when replacing timezone-aware timestamps using a dict-like replacer (:issue:`27720`) - - - diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 19f126c36cde7..9aced760725be 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6684,9 +6684,8 @@ def replace( else: # need a non-zero len on all axes - for a in self._AXIS_ORDERS: - if not len(self._get_axis(a)): - return self + if not self.size: + return self new_data = self._data if is_dict_like(to_replace): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 2e7280eeae0e2..c47f3909973ac 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -7,7 +7,7 @@ import numpy as np -from pandas._libs import internals as libinternals, lib +from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -602,9 +602,10 @@ def comp(s, regex=False): """ if isna(s): return isna(values) - if hasattr(s, "asm8"): + if isinstance(s, (Timedelta, Timestamp)) and getattr(s, "tz", None) is None: + return _compare_or_regex_search( - maybe_convert_objects(values), getattr(s, "asm8"), regex + maybe_convert_objects(values), s.asm8, regex ) return _compare_or_regex_search(values, s, regex) diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index dea1d5114f1b9..ed80e249220fd 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -1029,22 +1029,20 @@ def test_replace_series(self, how, to_key, from_key): tm.assert_series_equal(result, exp) - # TODO(jbrockmendel) commented out to only have a single xfail printed - @pytest.mark.xfail( - reason="GH #18376, tzawareness-compat bug in BlockManager.replace_list" + @pytest.mark.parametrize("how", ["dict", "series"]) + @pytest.mark.parametrize( + "to_key", + ["timedelta64[ns]", "bool", "object", "complex128", "float64", "int64"], ) - # @pytest.mark.parametrize('how', ['dict', 'series']) - # @pytest.mark.parametrize('to_key', ['timedelta64[ns]', 'bool', 'object', - # 'complex128', 'float64', 'int64']) - # @pytest.mark.parametrize('from_key', ['datetime64[ns, UTC]', - # 'datetime64[ns, US/Eastern]']) - # def test_replace_series_datetime_tz(self, how, to_key, from_key): - def test_replace_series_datetime_tz(self): + @pytest.mark.parametrize( + "from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"] + ) + def test_replace_series_datetime_tz(self, how, to_key, from_key): how = "series" from_key = "datetime64[ns, US/Eastern]" to_key = "timedelta64[ns]" - index = pd.Index([3, 4], name="xxx") + index = pd.Index([3, 4], name="xyz") obj = pd.Series(self.rep[from_key], index=index, name="yyy") assert obj.dtype == from_key @@ -1061,24 +1059,17 @@ def test_replace_series_datetime_tz(self): tm.assert_series_equal(result, exp) - # TODO(jreback) commented out to only have a single xfail printed - @pytest.mark.xfail( - reason="different tz, currently mask_missing raises SystemError", strict=False + @pytest.mark.parametrize("how", ["dict", "series"]) + @pytest.mark.parametrize( + "to_key", + ["datetime64[ns]", "datetime64[ns, UTC]", 
"datetime64[ns, US/Eastern]"], ) - # @pytest.mark.parametrize('how', ['dict', 'series']) - # @pytest.mark.parametrize('to_key', [ - # 'datetime64[ns]', 'datetime64[ns, UTC]', - # 'datetime64[ns, US/Eastern]']) - # @pytest.mark.parametrize('from_key', [ - # 'datetime64[ns]', 'datetime64[ns, UTC]', - # 'datetime64[ns, US/Eastern]']) - # def test_replace_series_datetime_datetime(self, how, to_key, from_key): - def test_replace_series_datetime_datetime(self): - how = "dict" - to_key = "datetime64[ns]" - from_key = "datetime64[ns]" - - index = pd.Index([3, 4], name="xxx") + @pytest.mark.parametrize( + "from_key", + ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"], + ) + def test_replace_series_datetime_datetime(self, how, to_key, from_key): + index = pd.Index([3, 4], name="xyz") obj = pd.Series(self.rep[from_key], index=index, name="yyy") assert obj.dtype == from_key
Backport PR #27720: BUG: fix replace_list
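For reference, a minimal sketch (illustrative timestamps, adapted from the whatsnew entry above) of the dict-like replacement path this fix repairs:

```python
import pandas as pd

ts = pd.Timestamp("2011-01-01", tz="US/Eastern")
s = pd.Series([ts, pd.Timestamp("2011-01-02", tz="US/Eastern")])

# a dict-like replacer goes through replace_list, which previously tried
# to take `.asm8` on the tz-aware key and misbehaved
result = s.replace({ts: pd.Timestamp("2011-01-03", tz="US/Eastern")})
print(result)
```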
https://api.github.com/repos/pandas-dev/pandas/pulls/27753
2019-08-05T11:58:58Z
2019-08-05T12:56:44Z
2019-08-05T12:56:44Z
2019-08-05T12:56:44Z
Backport PR #27715 on branch 0.25.x (TST: troubleshoot inconsistent xfails)
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index c9597505fa596..5ecd641fc68be 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -11,6 +11,7 @@ import struct import sys +PY35 = sys.version_info[:2] == (3, 5) PY36 = sys.version_info >= (3, 6) PY37 = sys.version_info >= (3, 7) PYPY = platform.python_implementation() == "PyPy" diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 6037273450a1c..5a1699c9292ef 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -705,6 +705,7 @@ def test_comparison_tzawareness_compat_scalars(self, op, box_with_array): # Raising in __eq__ will fallback to NumPy, which warns, fails, # then re-raises the original exception. So we just need to ignore. @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning") + @pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning") def test_scalar_comparison_tzawareness( self, op, other, tz_aware_fixture, box_with_array ): diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 8c0930c044838..c500760fa1390 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -1789,9 +1789,10 @@ def test_result_types(self): self.check_result_type(np.float32, np.float32) self.check_result_type(np.float64, np.float64) - def test_result_types2(self): + @td.skip_if_windows + def test_result_complex128(self): # xref https://github.com/pandas-dev/pandas/issues/12293 - pytest.skip("unreliable tests on complex128") + # this fails on Windows, apparently a floating point precision issue # Did not test complex64 because DataFrame is converting it to # complex128. Due to https://github.com/pandas-dev/pandas/issues/10952 diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index 9a7a43cff0c27..a60607d586ada 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -142,16 +142,6 @@ def test_divmod_series_array(self): # skipping because it is not implemented pass - @pytest.mark.xfail(reason="different implementation", strict=False) - def test_direct_arith_with_series_returns_not_implemented(self, data): - # Right now, we have trouble with this. Returning NotImplemented - # fails other tests like - # tests/arithmetic/test_datetime64::TestTimestampSeriesArithmetic:: - # test_dt64_seris_add_intlike - return super( - TestArithmeticOps, self - ).test_direct_arith_with_series_returns_not_implemented(data) - class TestCasting(BaseDatetimeTests, base.BaseCastingTests): pass @@ -163,12 +153,6 @@ def _compare_other(self, s, data, op_name, other): # with (some) integers, depending on the value. 
pass - @pytest.mark.xfail(reason="different implementation", strict=False) - def test_direct_arith_with_series_returns_not_implemented(self, data): - return super( - TestComparisonOps, self - ).test_direct_arith_with_series_returns_not_implemented(data) - class TestMissing(BaseDatetimeTests, base.BaseMissingTests): pass diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index d5c66f0c1dd64..e99208ac78e15 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1819,10 +1819,17 @@ def test_any_all_bool_only(self): (np.any, {"A": pd.Series([0, 1], dtype="category")}, True), (np.all, {"A": pd.Series([1, 2], dtype="category")}, True), (np.any, {"A": pd.Series([1, 2], dtype="category")}, True), - # # Mix - # GH 21484 - # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'), - # 'B': pd.Series([10, 20], dtype='m8[ns]')}, True), + # Mix GH#21484 + pytest.param( + np.all, + { + "A": pd.Series([10, 20], dtype="M8[ns]"), + "B": pd.Series([10, 20], dtype="m8[ns]"), + }, + True, + # In 1.13.3 and 1.14 np.all(df) returns a Timedelta here + marks=[td.skip_if_np_lt("1.15")], + ), ], ) def test_any_all_np_func(self, func, data, expected): diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 486b3b28b29a3..9b8c8e6d8a077 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -4,8 +4,6 @@ import numpy as np import pytest -from pandas.compat import PY37 - import pandas as pd from pandas import ( Categorical, @@ -209,7 +207,7 @@ def test_level_get_group(observed): assert_frame_equal(result, expected) -@pytest.mark.xfail(PY37, reason="flaky on 3.7, xref gh-21636", strict=False) +# GH#21636 previously flaky on py37 @pytest.mark.parametrize("ordered", [True, False]) def test_apply(ordered): # GH 10138 diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index 66a22ae7e9e46..88bc11c588673 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -759,6 +759,8 @@ def test_constructor_with_int_tz(self, klass, box, tz, dtype): assert result == expected # This is the desired future behavior + # Note: this xfail is not strict because the test passes with + # None or any of the UTC variants for tz_naive_fixture @pytest.mark.xfail(reason="Future behavior", strict=False) @pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning") def test_construction_int_rountrip(self, tz_naive_fixture): @@ -766,7 +768,7 @@ def test_construction_int_rountrip(self, tz_naive_fixture): # TODO(GH-24559): Remove xfail tz = tz_naive_fixture result = 1293858000000000000 - expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0] + expected = DatetimeIndex([result], tz=tz).asi8[0] assert result == expected def test_construction_from_replaced_timestamps_with_dst(self): diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 10d422e8aa52c..8db15709da35d 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -741,10 +741,7 @@ def test_to_datetime_tz_psycopg2(self, cache): ) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize( - "cache", - [pytest.param(True, marks=pytest.mark.skipif(True, reason="GH 18111")), False], - ) + @pytest.mark.parametrize("cache", [True, False]) def test_datetime_bool(self, cache): # 
GH13176 with pytest.raises(TypeError): diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index c6485ff21bcfb..ee236a8253b01 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -340,7 +340,6 @@ def test_to_csv_string_array_ascii(self): with open(path, "r") as f: assert f.read() == expected_ascii - @pytest.mark.xfail(strict=False) def test_to_csv_string_array_utf8(self): # GH 10813 str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}] diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index a04fb9fd50257..d634859e72d7b 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -33,6 +33,10 @@ except ImportError: _HAVE_FASTPARQUET = False +pytestmark = pytest.mark.filterwarnings( + "ignore:RangeIndex.* is deprecated:DeprecationWarning" +) + # setup engines & skips @pytest.fixture( @@ -408,8 +412,6 @@ def test_basic(self, pa, df_full): check_round_trip(df, pa) - # TODO: This doesn't fail on all systems; track down which - @pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)", strict=False) def test_basic_subset_columns(self, pa, df_full): # GH18628 diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index e3bc3d452f038..69070ea11e478 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1098,7 +1098,6 @@ def test_time(self): assert xp == rs @pytest.mark.slow - @pytest.mark.xfail(strict=False, reason="Unreliable test") def test_time_change_xlim(self): t = datetime(1, 1, 1, 3, 30, 0) deltas = np.random.randint(1, 20, 3).cumsum() diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 4404b93e86218..b57b817461788 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -10,6 +10,7 @@ from pandas._libs.tslibs.parsing import DateParseError from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz +from pandas.compat import PY35 from pandas.compat.numpy import np_datetime64_compat import pandas as pd @@ -1579,8 +1580,9 @@ def test_period_immutable(): per.freq = 2 * freq -# TODO: This doesn't fail on all systems; track down which -@pytest.mark.xfail(reason="Parses as Jan 1, 0007 on some systems", strict=False) +@pytest.mark.xfail( + PY35, reason="Parsing as Period('0007-01-01', 'D') for reasons unknown", strict=True +) def test_small_year_parsing(): per1 = Period("0001-01-07", "D") assert per1.year == 1 diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 32d32a5d14fb2..3a5a387b919be 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1489,7 +1489,7 @@ def test_value_counts_with_nan(self): "unicode_", "timedelta64[h]", pytest.param( - "datetime64[D]", marks=pytest.mark.xfail(reason="GH#7996", strict=False) + "datetime64[D]", marks=pytest.mark.xfail(reason="GH#7996", strict=True) ), ], ) diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/sparse/test_combine_concat.py index d7295c4bfe5f0..c553cd3fd1a7a 100644 --- a/pandas/tests/sparse/test_combine_concat.py +++ b/pandas/tests/sparse/test_combine_concat.py @@ -440,7 +440,7 @@ def test_concat_sparse_dense_rows(self, fill_value, sparse_idx, dense_idx): "fill_value,sparse_idx,dense_idx", itertools.product([None, 
0, 1, np.nan], [0, 1], [1, 0]), ) - @pytest.mark.xfail(reason="The iloc fails and I can't make expected", strict=False) + @pytest.mark.xfail(reason="The iloc fails and I can't make expected", strict=True) def test_concat_sparse_dense_cols(self, fill_value, sparse_idx, dense_idx): # See GH16874, GH18914 and #18686 for why this should be a DataFrame from pandas.core.dtypes.common import is_sparse diff --git a/pandas/tests/sparse/test_pivot.py b/pandas/tests/sparse/test_pivot.py index 85b899dfe76d5..880c1c55f9f79 100644 --- a/pandas/tests/sparse/test_pivot.py +++ b/pandas/tests/sparse/test_pivot.py @@ -2,7 +2,6 @@ import pytest import pandas as pd -from pandas import _np_version_under1p17 import pandas.util.testing as tm @@ -49,11 +48,6 @@ def test_pivot_table_with_nans(self): ) tm.assert_frame_equal(res_sparse, res_dense) - @pytest.mark.xfail( - not _np_version_under1p17, - reason="failing occasionally on numpy > 1.17", - strict=False, - ) def test_pivot_table_multi(self): res_sparse = pd.pivot_table( self.sparse, index="A", columns="B", values=["D", "E"]
Backport PR #27715: TST: troubleshoot inconsistent xfails
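The recurring change in this diff is flipping pytest's `strict` flag on `xfail` markers (or removing the markers entirely). For context, a small sketch of what `strict` controls:

```python
import pytest

# strict=True: an unexpected pass (XPASS) fails the suite, so a fixed bug
# forces the marker to be removed instead of rotting silently
@pytest.mark.xfail(reason="GH#7996", strict=True)
def test_known_bug():
    assert 1 == 2

# strict=False lets the test pass or fail without failing the run, which is
# exactly why inconsistent/flaky xfails were hard to notice
```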
https://api.github.com/repos/pandas-dev/pandas/pulls/27752
2019-08-05T11:55:58Z
2019-08-05T12:56:32Z
2019-08-05T12:56:32Z
2019-08-05T12:56:32Z
REF: combine dispatch_to_index_op into dispatch_to_extension_op
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 01bfbed1aab4c..261660dda6fdd 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -37,6 +37,7 @@ from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeArray, + ABCDatetimeIndex, ABCIndex, ABCIndexClass, ABCSeries, @@ -90,7 +91,7 @@ def get_op_result_name(left, right): name : object Usually a string """ - # `left` is always a pd.Series when called from within ops + # `left` is always a Series when called from within ops if isinstance(right, (ABCSeries, ABCIndexClass)): name = _maybe_match_name(left, right) else: @@ -609,42 +610,6 @@ def column_op(a, b): return result -def dispatch_to_index_op(op, left, right, index_class): - """ - Wrap Series left in the given index_class to delegate the operation op - to the index implementation. DatetimeIndex and TimedeltaIndex perform - type checking, timezone handling, overflow checks, etc. - - Parameters - ---------- - op : binary operator (operator.add, operator.sub, ...) - left : Series - right : object - index_class : DatetimeIndex or TimedeltaIndex - - Returns - ------- - result : object, usually DatetimeIndex, TimedeltaIndex, or Series - """ - left_idx = index_class(left) - - # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes, - # left_idx may inherit a freq from a cached DatetimeIndex. - # See discussion in GH#19147. - if getattr(left_idx, "freq", None) is not None: - left_idx = left_idx._shallow_copy(freq=None) - try: - result = op(left_idx, right) - except NullFrequencyError: - # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError - # on add/sub of integers (or int-like). We re-raise as a TypeError. - raise TypeError( - "incompatible type for a datetime/timedelta " - "operation [{name}]".format(name=op.__name__) - ) - return result - - def dispatch_to_extension_op(op, left, right): """ Assume that left or right is a Series backed by an ExtensionArray, @@ -665,13 +630,16 @@ def dispatch_to_extension_op(op, left, right): else: new_right = right - res_values = op(new_left, new_right) - res_name = get_op_result_name(left, right) - - if op.__name__ in ["divmod", "rdivmod"]: - return _construct_divmod_result(left, res_values, left.index, res_name) - - return _construct_result(left, res_values, left.index, res_name) + try: + res_values = op(new_left, new_right) + except NullFrequencyError: + # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError + # on add/sub of integers (or int-like). We re-raise as a TypeError. + raise TypeError( + "incompatible type for a datetime/timedelta " + "operation [{name}]".format(name=op.__name__) + ) + return res_values # ----------------------------------------------------------------------------- @@ -993,22 +961,22 @@ def wrapper(left, right): ) elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left): - # Give dispatch_to_index_op a chance for tests like - # test_dt64_series_add_intlike, which the index dispatching handles - # specifically. - result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex) - return construct_result( - left, result, index=left.index, name=res_name, dtype=result.dtype - ) + from pandas.core.arrays import DatetimeArray + + result = dispatch_to_extension_op(op, DatetimeArray(left), right) + return construct_result(left, result, index=left.index, name=res_name) elif is_extension_array_dtype(left) or ( is_extension_array_dtype(right) and not is_scalar(right) ): # GH#22378 disallow scalar to exclude e.g. 
"category", "Int64" - return dispatch_to_extension_op(op, left, right) + result = dispatch_to_extension_op(op, left, right) + return construct_result(left, result, index=left.index, name=res_name) elif is_timedelta64_dtype(left): - result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex) + from pandas.core.arrays import TimedeltaArray + + result = dispatch_to_extension_op(op, TimedeltaArray(left), right) return construct_result(left, result, index=left.index, name=res_name) elif is_timedelta64_dtype(right): @@ -1022,7 +990,7 @@ def wrapper(left, right): # does inference in the case where `result` has object-dtype. return construct_result(left, result, index=left.index, name=res_name) - elif isinstance(right, (ABCDatetimeArray, pd.DatetimeIndex)): + elif isinstance(right, (ABCDatetimeArray, ABCDatetimeIndex)): result = op(left._values, right) return construct_result(left, result, index=left.index, name=res_name) @@ -1129,20 +1097,23 @@ def wrapper(self, other, axis=None): raise ValueError("Can only compare identically-labeled Series objects") elif is_categorical_dtype(self): - # Dispatch to Categorical implementation; pd.CategoricalIndex + # Dispatch to Categorical implementation; CategoricalIndex # behavior is non-canonical GH#19513 - res_values = dispatch_to_index_op(op, self, other, pd.Categorical) + res_values = dispatch_to_extension_op(op, self, other) return self._constructor(res_values, index=self.index, name=res_name) elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self): # Dispatch to DatetimeIndex to ensure identical # Series/Index behavior + from pandas.core.arrays import DatetimeArray - res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex) + res_values = dispatch_to_extension_op(op, DatetimeArray(self), other) return self._constructor(res_values, index=self.index, name=res_name) elif is_timedelta64_dtype(self): - res_values = dispatch_to_index_op(op, self, other, pd.TimedeltaIndex) + from pandas.core.arrays import TimedeltaArray + + res_values = dispatch_to_extension_op(op, TimedeltaArray(self), other) return self._constructor(res_values, index=self.index, name=res_name) elif is_extension_array_dtype(self) or ( @@ -1150,7 +1121,8 @@ def wrapper(self, other, axis=None): ): # Note: the `not is_scalar(other)` condition rules out # e.g. other == "category" - return dispatch_to_extension_op(op, self, other) + res_values = dispatch_to_extension_op(op, self, other) + return self._constructor(res_values, index=self.index).rename(res_name) elif isinstance(other, ABCSeries): # By this point we have checked that self._indexed_same(other)
This took a couple of preliminary steps, but we can now merge these two dispatch functions together, and we're within striking distance of collapsing all of the Series ops into much simpler functions.
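Loosely, the unified path unwraps a Series to its backing ExtensionArray, applies the op there, and reboxes the result. A rough sketch of that shape (not the literal internals):

```python
import operator

import pandas as pd

left = pd.Series(pd.to_timedelta(["1 day", "2 days"]), name="x")
right = pd.Timedelta("12 hours")

# unwrap: let the ExtensionArray (here a TimedeltaArray) implement the op,
# including type checking and overflow handling
res_values = operator.add(left.array, right)

# rebox: wrap the raw result values with the original index and name
result = pd.Series(res_values, index=left.index, name=left.name)
print(result)
```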
https://api.github.com/repos/pandas-dev/pandas/pulls/27747
2019-08-04T23:48:14Z
2019-08-05T11:44:37Z
2019-08-05T11:44:37Z
2019-08-05T14:29:57Z
CLN: Move base.StringMixin to computations.common
diff --git a/pandas/core/base.py b/pandas/core/base.py index 38a8bf7171521..7d2a62318232c 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -46,30 +46,6 @@ ) -class StringMixin: - """ - Implements string methods so long as object defines a `__str__` method. - """ - - # side note - this could be made into a metaclass if more than one - # object needs - - # ---------------------------------------------------------------------- - # Formatting - - def __str__(self): - """ - Return a string representation for a particular Object - """ - raise AbstractMethodError(self) - - def __repr__(self): - """ - Return a string representation for a particular object. - """ - return str(self) - - class PandasObject(DirNamesMixin): """baseclass for various pandas objects""" diff --git a/pandas/core/computation/common.py b/pandas/core/computation/common.py index ddb1023479cba..b8e212fd2a32e 100644 --- a/pandas/core/computation/common.py +++ b/pandas/core/computation/common.py @@ -36,3 +36,8 @@ def _remove_spaces_column_name(name): class NameResolutionError(NameError): pass + + +class StringMixin: + # TODO: delete this class. Removing this ATM caused a failure. + pass diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index e10d189bc3c6f..d0d87c23e9346 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -13,7 +13,6 @@ import pandas as pd from pandas.core import common as com -from pandas.core.base import StringMixin from pandas.core.computation.common import ( _BACKTICK_QUOTED_STRING, _remove_spaces_column_name, @@ -799,7 +798,7 @@ def __init__(self, env, engine, parser, preparser=lambda x: x): super().__init__(env, engine, parser, preparser=preparser) -class Expr(StringMixin): +class Expr: """Object encapsulating an expression. 
@@ -831,7 +830,7 @@ def assigner(self): def __call__(self): return self.terms(self.env) - def __str__(self): + def __repr__(self): return printing.pprint_thing(self.terms) def __len__(self): diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 870acc3cc9956..2bf09a553ce18 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -12,7 +12,6 @@ from pandas.core.dtypes.common import is_list_like, is_scalar -from pandas.core.base import StringMixin import pandas.core.common as com from pandas.core.computation.common import _ensure_decoded, _result_type_many from pandas.core.computation.scope import _DEFAULT_GLOBALS @@ -63,7 +62,7 @@ def __init__(self, name, is_local): super().__init__(msg.format(name)) -class Term(StringMixin): +class Term: def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, str) else cls supr_new = super(Term, klass).__new__ @@ -82,7 +81,7 @@ def __init__(self, name, env, side=None, encoding=None): def local_name(self): return self.name.replace(_LOCAL_TAG, "") - def __str__(self): + def __repr__(self): return pprint_thing(self.name) def __call__(self, *args, **kwargs): @@ -182,7 +181,7 @@ def _resolve_name(self): def name(self): return self.value - def __str__(self): + def __repr__(self): # in python 2 str() of float # can truncate shorter than repr() return repr(self.name) @@ -191,7 +190,7 @@ def __str__(self): _bool_op_map = {"not": "~", "and": "&", "or": "|"} -class Op(StringMixin): +class Op: """Hold an operator of arbitrary arity """ @@ -204,7 +203,7 @@ def __init__(self, op, operands, *args, **kwargs): def __iter__(self): return iter(self.operands) - def __str__(self): + def __repr__(self): """Print a generic n-ary operator and its operands using infix notation""" # recurse over the operands @@ -537,7 +536,7 @@ def __call__(self, env): operand = self.operand(env) return self.func(operand) - def __str__(self): + def __repr__(self): return pprint_thing("{0}({1})".format(self.op, self.operand)) @property @@ -562,7 +561,7 @@ def __call__(self, env): with np.errstate(all="ignore"): return self.func.func(*operands) - def __str__(self): + def __repr__(self): operands = map(str, self.operands) return pprint_thing("{0}({1})".format(self.op, ",".join(operands))) diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 60cf35163bcf4..1523eb05ac41d 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -11,7 +11,6 @@ from pandas.core.dtypes.common import is_list_like import pandas as pd -from pandas.core.base import StringMixin import pandas.core.common as com from pandas.core.computation import expr, ops from pandas.core.computation.common import _ensure_decoded @@ -32,8 +31,7 @@ def __init__(self, level, global_dict=None, local_dict=None, queryables=None): class Term(ops.Term): def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, str) else cls - supr_new = StringMixin.__new__ - return supr_new(klass) + return object.__new__(klass) def __init__(self, name, env, side=None, encoding=None): super().__init__(name, env, side=side, encoding=encoding) @@ -231,7 +229,7 @@ def convert_values(self): class FilterBinOp(BinOp): - def __str__(self): + def __repr__(self): return pprint_thing( "[Filter : [{lhs}] -> [{op}]".format(lhs=self.filter[0], op=self.filter[1]) ) @@ -297,7 +295,7 @@ def evaluate(self): class ConditionBinOp(BinOp): - def __str__(self): + def __repr__(self): 
return pprint_thing("[Condition : [{cond}]]".format(cond=self.condition)) def invert(self): @@ -548,7 +546,7 @@ def __init__(self, where, queryables=None, encoding=None, scope_level=0): ) self.terms = self.parse() - def __str__(self): + def __repr__(self): if self.terms is not None: return pprint_thing(self.terms) return pprint_thing(self.expr) diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py index 4d5a523337f66..8ddd0dd7622e7 100644 --- a/pandas/core/computation/scope.py +++ b/pandas/core/computation/scope.py @@ -15,8 +15,8 @@ from pandas._libs.tslibs import Timestamp from pandas.compat.chainmap import DeepChainMap -from pandas.core.base import StringMixin import pandas.core.computation as compu +from pandas.core.computation.common import StringMixin def _ensure_scope( @@ -141,7 +141,7 @@ def __init__( self.resolvers = DeepChainMap(*resolvers) self.temps = {} - def __str__(self): + def __repr__(self): scope_keys = _get_pretty_string(list(self.scope.keys())) res_keys = _get_pretty_string(list(self.resolvers.keys())) unicode_str = "{name}(scope={scope_keys}, resolvers={res_keys})" diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index 3e8f653c47424..3c6da304dd68d 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -14,7 +14,6 @@ period_range, timedelta_range, ) -from pandas.core.base import StringMixin from pandas.core.index import MultiIndex import pandas.util.testing as tm @@ -226,11 +225,11 @@ class TestCategoricalRepr: def test_categorical_repr_unicode(self): # see gh-21002 - class County(StringMixin): + class County: name = "San Sebastián" state = "PR" - def __str__(self): + def __repr__(self): return self.name + ", " + self.state cat = pd.Categorical([County() for _ in range(61)])
``StringMixin`` should be removed, but I can't figure out how to remove it without breaking tests, so this moves ``StringMixin`` to core.computation, which is the only module where it is currently used. This makes ``core.base.py`` a bit cleaner.
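For context, the pattern being retired is small enough to sketch: the mixin derived `__repr__` from `__str__`, so subclasses only implemented `__str__`; after this change the affected classes (illustrated here with a toy `Term`) implement `__repr__` directly:

```python
# before: subclasses implemented __str__ and inherited __repr__ from the mixin
class StringMixin:
    def __str__(self):
        raise NotImplementedError

    def __repr__(self):
        return str(self)


# after: no mixin needed; the class defines __repr__ itself
class Term:
    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return str(self.name)


print(repr(Term("foo")))  # foo
```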
https://api.github.com/repos/pandas-dev/pandas/pulls/27746
2019-08-04T23:17:33Z
2019-08-05T11:41:53Z
2019-08-05T11:41:53Z
2019-08-05T13:25:11Z
Backport PR #27702 on branch 0.25.x (BUG: Concatenation warning still appears with sort=False)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index c80195af413f7..01e4046e8b743 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -125,7 +125,7 @@ Reshaping ^^^^^^^^^ - A ``KeyError`` is now raised if ``.unstack()`` is called on a :class:`Series` or :class:`DataFrame` with a flat :class:`Index` passing a name which is not the correct one (:issue:`18303`) -- +- :meth:`DataFrame.join` now suppresses the ``FutureWarning`` when the sort parameter is specified (:issue:`21952`) - Sparse diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 245e41ed16eb2..0570b9af2d256 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7274,10 +7274,14 @@ def _join_compat( # join indexes only using concat if can_concat: if how == "left": - res = concat(frames, axis=1, join="outer", verify_integrity=True) + res = concat( + frames, axis=1, join="outer", verify_integrity=True, sort=sort + ) return res.reindex(self.index, copy=False) else: - return concat(frames, axis=1, join=how, verify_integrity=True) + return concat( + frames, axis=1, join=how, verify_integrity=True, sort=sort + ) joined = frames[0] diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py index adace5e4784ae..220968d4b3d29 100644 --- a/pandas/tests/frame/test_join.py +++ b/pandas/tests/frame/test_join.py @@ -193,3 +193,32 @@ def test_join_left_sequence_non_unique_index(): ) tm.assert_frame_equal(joined, expected) + + +@pytest.mark.parametrize("sort_kw", [True, False, None]) +def test_suppress_future_warning_with_sort_kw(sort_kw): + a = DataFrame({"col1": [1, 2]}, index=["c", "a"]) + + b = DataFrame({"col2": [4, 5]}, index=["b", "a"]) + + c = DataFrame({"col3": [7, 8]}, index=["a", "b"]) + + expected = DataFrame( + { + "col1": {"a": 2.0, "b": float("nan"), "c": 1.0}, + "col2": {"a": 5.0, "b": 4.0, "c": float("nan")}, + "col3": {"a": 7.0, "b": 8.0, "c": float("nan")}, + } + ) + if sort_kw is False: + expected = expected.reindex(index=["c", "a", "b"]) + + if sort_kw is None: + # only warn if not explicitly specified + ctx = tm.assert_produces_warning(FutureWarning, check_stacklevel=False) + else: + ctx = tm.assert_produces_warning(None, check_stacklevel=False) + + with ctx: + result = a.join([b, c], how="outer", sort=sort_kw) + tm.assert_frame_equal(result, expected)
Backport PR #27702: BUG: Concatenation warning still appears with sort=False
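A minimal sketch of the fixed behavior, adapted from the test added in this backport:

```python
import pandas as pd

a = pd.DataFrame({"col1": [1, 2]}, index=["c", "a"])
b = pd.DataFrame({"col2": [4, 5]}, index=["b", "a"])
c = pd.DataFrame({"col3": [7, 8]}, index=["a", "b"])

# an explicit sort=... is now forwarded to concat, so no FutureWarning is
# raised; leaving sort unspecified still warns about the future default
result = a.join([b, c], how="outer", sort=False)
print(result)
```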
https://api.github.com/repos/pandas-dev/pandas/pulls/27743
2019-08-04T21:59:22Z
2019-08-05T06:35:41Z
2019-08-05T06:35:41Z
2019-08-05T06:35:41Z
Backport PR #27712 on branch 0.25.x (BUG: partial string indexing with scalar)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index c80195af413f7..3097bfa21f9e1 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -82,7 +82,7 @@ Interval Indexing ^^^^^^^^ -- +- Bug in partial-string indexing returning a NumPy array rather than a ``Series`` when indexing with a scalar like ``.loc['2015']`` (:issue:`27516`) - - diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 12923fd790972..17122d0981995 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -263,6 +263,9 @@ def _outer_indexer(self, left, right): _infer_as_myclass = False _engine_type = libindex.ObjectEngine + # whether we support partial string indexing. Overridden + # in DatetimeIndex and PeriodIndex + _supports_partial_string_indexing = False _accessors = {"str"} diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 5024eebe03bb4..ab4975c0b359a 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -238,6 +238,7 @@ def _join_i8_wrapper(joinf, **kwargs): ) _engine_type = libindex.DatetimeEngine + _supports_partial_string_indexing = True _tz = None _freq = None diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 47cf0f26f9ca5..96031645365c6 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -173,6 +173,7 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin): _data = None _engine_type = libindex.PeriodEngine + _supports_partial_string_indexing = True # ------------------------------------------------------------------------ # Index Constructors diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 5aee37bc3b833..8f242f0ae7d7c 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1726,6 +1726,11 @@ def _is_scalar_access(self, key: Tuple): if isinstance(ax, MultiIndex): return False + if isinstance(k, str) and ax._supports_partial_string_indexing: + # partial string indexing, df.loc['2000', 'A'] + # should not be considered scalar + return False + if not ax.is_unique: return False @@ -1741,7 +1746,10 @@ def _get_partial_string_timestamp_match_key(self, key, labels): """Translate any partial string timestamp matches in key, returning the new key (GH 10331)""" if isinstance(labels, MultiIndex): - if isinstance(key, str) and labels.levels[0].is_all_dates: + if ( + isinstance(key, str) + and labels.levels[0]._supports_partial_string_indexing + ): # Convert key '2016-01-01' to # ('2016-01-01'[, slice(None, None, None)]+) key = tuple([key] + [slice(None)] * (len(labels.levels) - 1)) @@ -1751,7 +1759,10 @@ def _get_partial_string_timestamp_match_key(self, key, labels): # (..., slice('2016-01-01', '2016-01-01', None), ...) 
new_key = [] for i, component in enumerate(key): - if isinstance(component, str) and labels.levels[i].is_all_dates: + if ( + isinstance(component, str) + and labels.levels[i]._supports_partial_string_indexing + ): new_key.append(slice(component, component, None)) else: new_key.append(component) @@ -2340,7 +2351,7 @@ def convert_to_index_sliceable(obj, key): # We might have a datetimelike string that we can translate to a # slice here via partial string indexing - if idx.is_all_dates: + if idx._supports_partial_string_indexing: try: return idx._get_string_slice(key) except (KeyError, ValueError, NotImplementedError): diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index 3095bf9657277..5660fa5ffed80 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -468,3 +468,14 @@ def test_getitem_with_datestring_with_UTC_offset(self, start, end): with pytest.raises(ValueError, match="The index must be timezone"): df = df.tz_localize(None) df[start:end] + + def test_slice_reduce_to_series(self): + # GH 27516 + df = pd.DataFrame( + {"A": range(24)}, index=pd.date_range("2000", periods=24, freq="M") + ) + expected = pd.Series( + range(12), index=pd.date_range("2000", periods=12, freq="M"), name="A" + ) + result = df.loc["2000", "A"] + tm.assert_series_equal(result, expected)
Backport PR #27712: BUG: partial string indexing with scalar
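Adapted from the regression test in the diff: scalar access combining a partial date string with a column label should reduce to a `Series`, not a bare NumPy array:

```python
import pandas as pd

df = pd.DataFrame(
    {"A": range(24)}, index=pd.date_range("2000", periods=24, freq="M")
)

result = df.loc["2000", "A"]  # partial-string row selection, scalar column
print(type(result))  # pandas Series holding the 12 rows from the year 2000
```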
https://api.github.com/repos/pandas-dev/pandas/pulls/27742
2019-08-04T21:55:22Z
2019-08-05T06:35:59Z
2019-08-05T06:35:59Z
2019-08-05T06:35:59Z
CLN: deprivatize names in pd.core.common
diff --git a/pandas/core/common.py b/pandas/core/common.py index c12bfecc46518..a507625ccfa01 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -165,51 +165,39 @@ def cast_scalar_indexer(val): return val -def _not_none(*args): +def not_none(*args): """ Returns a generator consisting of the arguments that are not None. """ return (arg for arg in args if arg is not None) -def _any_none(*args): +def any_none(*args): """ Returns a boolean indicating if any argument is None. """ - for arg in args: - if arg is None: - return True - return False + return any(arg is None for arg in args) -def _all_none(*args): +def all_none(*args): """ Returns a boolean indicating if all arguments are None. """ - for arg in args: - if arg is not None: - return False - return True + return all(arg is None for arg in args) -def _any_not_none(*args): +def any_not_none(*args): """ Returns a boolean indicating if any argument is not None. """ - for arg in args: - if arg is not None: - return True - return False + return any(arg is not None for arg in args) -def _all_not_none(*args): +def all_not_none(*args): """ Returns a boolean indicating if all arguments are not None. """ - for arg in args: - if arg is None: - return False - return True + return all(arg is not None for arg in args) def count_not_none(*args): @@ -447,7 +435,7 @@ def random_state(state=None): ) -def _pipe(obj, func, *args, **kwargs): +def pipe(obj, func, *args, **kwargs): """ Apply a function ``func`` to object ``obj`` either by passing obj as the first argument to the function or, in the case that the func is a tuple, @@ -482,7 +470,7 @@ def _pipe(obj, func, *args, **kwargs): return func(obj, *args, **kwargs) -def _get_rename_function(mapper): +def get_rename_function(mapper): """ Returns a function that will map names/labels, dependent if mapper is a dict, Series or just a function. diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1d87a6937ca34..9078e967b4d7e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1124,7 +1124,7 @@ def rename(self, *args, **kwargs): v = axes.get(self._AXIS_NAMES[axis]) if v is None: continue - f = com._get_rename_function(v) + f = com.get_rename_function(v) baxis = self._get_block_manager_axis(axis) if level is not None: level = self.axes[axis]._get_level_number(level) @@ -1312,7 +1312,7 @@ class name if non_mapper: newnames = v else: - f = com._get_rename_function(v) + f = com.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True) @@ -4993,7 +4993,7 @@ def sample( @Appender(_shared_docs["pipe"] % _shared_doc_kwargs) def pipe(self, func, *args, **kwargs): - return com._pipe(self, func, *args, **kwargs) + return com.pipe(self, func, *args, **kwargs) _shared_docs["aggregate"] = dedent( """ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 811836d0e8a4d..2ad85903b916b 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -361,7 +361,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # GH12824. 
def first_not_none(values): try: - return next(com._not_none(*values)) + return next(com.not_none(*values)) except StopIteration: return None diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 12b9cf25687cf..ec526b338eee1 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -590,7 +590,7 @@ def __getattr__(self, attr): ) @Appender(_pipe_template) def pipe(self, func, *args, **kwargs): - return com._pipe(self, func, *args, **kwargs) + return com.pipe(self, func, *args, **kwargs) plot = property(GroupByPlot) @@ -928,7 +928,7 @@ def _concat_objects(self, keys, values, not_indexed_same=False): def reset_identity(values): # reset the identities of the components # of the values to prevent aliasing - for v in com._not_none(*values): + for v in com.not_none(*values): ax = v._get_axis(self.axis) ax._reset_identity() return values diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index a17f74286d59f..86d55ce2e7cc3 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -283,7 +283,7 @@ def _get_consensus_names(indexes): # find the non-none names, need to tupleify to make # the set hashable, then reverse on return - consensus_names = {tuple(i.names) for i in indexes if com._any_not_none(*i.names)} + consensus_names = {tuple(i.names) for i in indexes if com.any_not_none(*i.names)} if len(consensus_names) == 1: return list(list(consensus_names)[0]) return [None] * indexes[0].nlevels diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ce7b73a92b18a..d0de995255b59 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3588,8 +3588,8 @@ def _join_multi(self, other, how, return_indexers=True): from pandas.core.reshape.merge import _restore_dropped_levels_multijoin # figure out join names - self_names = set(com._not_none(*self.names)) - other_names = set(com._not_none(*other.names)) + self_names = set(com.not_none(*self.names)) + other_names = set(com.not_none(*other.names)) overlap = self_names & other_names # need at least 1 in common diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index d6f0008a2646f..0f7f580e2c43e 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1569,7 +1569,7 @@ def date_range( dtype='datetime64[ns]', freq='D') """ - if freq is None and com._any_none(periods, start, end): + if freq is None and com.any_none(periods, start, end): freq = "D" dtarr = DatetimeArray._generate_range( diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index d941dc547befe..7a444683ffcb2 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1318,7 +1318,7 @@ def _is_type_compatible(a, b): (is_number(a) and is_number(b)) or (is_ts_compat(a) and is_ts_compat(b)) or (is_td_compat(a) and is_td_compat(b)) - or com._any_none(a, b) + or com.any_none(a, b) ) @@ -1416,7 +1416,7 @@ def interval_range( end = com.maybe_box_datetimelike(end) endpoint = start if start is not None else end - if freq is None and com._any_none(periods, start, end): + if freq is None and com.any_none(periods, start, end): freq = 1 if is_number(endpoint) else "D" if com.count_not_none(start, end, periods, freq) != 3: @@ -1463,7 +1463,7 @@ def interval_range( if is_number(endpoint): # force consistency between start/end/freq (lower end if freq skips it) - if com._all_not_none(start, end, freq): + if com.all_not_none(start, end, freq): end -= (end - start) % freq # 
compute the period/start/end if unspecified (at most one) @@ -1475,7 +1475,7 @@ def interval_range( end = start + (periods - 1) * freq breaks = np.linspace(start, end, periods) - if all(is_integer(x) for x in com._not_none(start, end, freq)): + if all(is_integer(x) for x in com.not_none(start, end, freq)): # np.linspace always produces float output breaks = maybe_downcast_to_dtype(breaks, "int64") else: diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 16098c474a473..1389b0e31b3bf 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -110,7 +110,7 @@ def __new__( return cls._simple_new(start, dtype=dtype, name=name) # validate the arguments - if com._all_none(start, stop, step): + if com.all_none(start, stop, step): raise TypeError("RangeIndex(...) must be called with integers") start = ensure_python_int(start) if start is not None else 0 diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index a9f49ec8bd75a..f2ce562536b95 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -780,7 +780,7 @@ def timedelta_range( '5 days 00:00:00'], dtype='timedelta64[ns]', freq=None) """ - if freq is None and com._any_none(periods, start, end): + if freq is None and com.any_none(periods, start, end): freq = "D" freq, freq_infer = dtl.maybe_infer_freq(freq) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 6d70fcfb62d52..2136d3d326db5 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -599,7 +599,7 @@ def _astype(self, dtype, copy=False, errors="raise", **kwargs): categories = kwargs.get("categories", None) ordered = kwargs.get("ordered", None) - if com._any_not_none(categories, ordered): + if com.any_not_none(categories, ordered): dtype = CategoricalDtype(categories, ordered) if is_categorical_dtype(self.values): diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index ce2d2ac41d3ec..4446f27da6be0 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -304,7 +304,7 @@ def __init__( raise ValueError("No objects to concatenate") if keys is None: - objs = list(com._not_none(*objs)) + objs = list(com.not_none(*objs)) else: # #1649 clean_keys = [] diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index fc32a8f0dd044..f45c7693bf6ed 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1958,7 +1958,7 @@ def _should_fill(lname, rname): def _any(x): - return x is not None and com._any_not_none(*x) + return x is not None and com.any_not_none(*x) def validate_operand(obj): diff --git a/pandas/core/series.py b/pandas/core/series.py index 106bb3c7d6cb4..9e317d365ccb8 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1182,7 +1182,7 @@ def _get_with(self, key): def _get_values_tuple(self, key): # mpl hackaround - if com._any_none(*key): + if com.any_none(*key): return self._get_values(key) if not isinstance(self.index, MultiIndex): diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 012d2d9358241..b9c847ad64c57 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -611,7 +611,7 @@ def _format_hierarchical_rows(self): self.rowcounter += 1 # if index labels are not empty go ahead and dump - if com._any_not_none(*index_labels) and self.header is not False: + if com.any_not_none(*index_labels) and self.header is not False: for cidx, name in enumerate(index_labels): yield 
ExcelCell(self.rowcounter - 1, cidx, name, self.header_style) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 23c07ea72d40f..a2a0e302de5dc 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1743,7 +1743,7 @@ def _cond(values): def _has_names(index: Index) -> bool: if isinstance(index, ABCMultiIndex): - return com._any_not_none(*index.names) + return com.any_not_none(*index.names) else: return index.name is not None diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index b736b978c87a5..033d93d1456c8 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -316,7 +316,7 @@ def format_attr(pair): if ( self.data.index.names - and com._any_not_none(*self.data.index.names) + and com.any_not_none(*self.data.index.names) and not hidden_index ): index_header_row = [] @@ -1405,7 +1405,7 @@ def pipe(self, func, *args, **kwargs): ... .pipe(format_conversion) ... .set_caption("Results with minimum conversion highlighted.")) """ - return com._pipe(self, func, *args, **kwargs) + return com.pipe(self, func, *args, **kwargs) def _is_visible(idx_row, idx_col, lengths): diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index 1e7cd54d9f4a0..b142dbf76e6b3 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -76,7 +76,7 @@ def as_json_table_type(x): def set_default_names(data): """Sets index names to 'index' for regular, or 'level_x' for Multi""" - if com._all_not_none(*data.index.names): + if com.all_not_none(*data.index.names): nms = data.index.names if len(nms) == 1 and data.index.name == "index": warnings.warn("Index name of 'index' is not round-trippable") diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 415cb50472a4c..abc8a414eb37a 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -998,7 +998,7 @@ def remove(self, key, where=None, start=None, stop=None): return None # remove the node - if com._all_none(where, start, stop): + if com.all_none(where, start, stop): s.group._f_remove(recursive=True) # delete from the table @@ -2634,7 +2634,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs): support fully deleting the node in its entirety (only) - where specification must be None """ - if com._all_none(where, start, stop): + if com.all_none(where, start, stop): self._handle.remove_node(self.group, recursive=True) return None diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 519465802085b..c2b37bb297ecb 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -654,7 +654,7 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): def _get_index_name(self): if isinstance(self.data.index, ABCMultiIndex): name = self.data.index.names - if com._any_not_none(*name): + if com.any_not_none(*name): name = ",".join(pprint_thing(x) for x in name) else: name = None @@ -1054,7 +1054,7 @@ def _make_plot(self): it = self._iter_data() stacking_id = self._get_stacking_id() - is_errorbar = com._any_not_none(*self.errors.values()) + is_errorbar = com.any_not_none(*self.errors.values()) colors = self._get_colors() for i, (label, y) in enumerate(it): diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 28051d9b7f3b9..e2e4a82ff581c 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -655,7 +655,7 @@ def _make_frame(names=None): df = _make_frame(True) df.to_csv(path, 
index=False) result = read_csv(path, header=[0, 1]) - assert com._all_none(*result.columns.names) + assert com.all_none(*result.columns.names) result.columns.names = df.columns.names assert_frame_equal(df, result) diff --git a/pandas/tests/scalar/interval/test_interval.py b/pandas/tests/scalar/interval/test_interval.py index e4987e4483fd9..b51429d0338e3 100644 --- a/pandas/tests/scalar/interval/test_interval.py +++ b/pandas/tests/scalar/interval/test_interval.py @@ -254,6 +254,6 @@ def test_constructor_errors_tz(self, tz_left, tz_right): # GH 18538 left = Timestamp("2017-01-01", tz=tz_left) right = Timestamp("2017-01-02", tz=tz_right) - error = TypeError if com._any_none(tz_left, tz_right) else ValueError + error = TypeError if com.any_none(tz_left, tz_right) else ValueError with pytest.raises(error): Interval(left, right) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index d96f806bc383f..479e55c86fcd1 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -33,14 +33,14 @@ def __call__(self): def test_any_none(): - assert com._any_none(1, 2, 3, None) - assert not com._any_none(1, 2, 3, 4) + assert com.any_none(1, 2, 3, None) + assert not com.any_none(1, 2, 3, 4) def test_all_not_none(): - assert com._all_not_none(1, 2, 3, 4) - assert not com._all_not_none(1, 2, 3, None) - assert not com._all_not_none(None, None, None, None) + assert com.all_not_none(1, 2, 3, 4) + assert not com.all_not_none(1, 2, 3, None) + assert not com.all_not_none(None, None, None, None) def test_random_state():
Some clean-up, as these functions don't need to start with an underscore.
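The renamed helpers are also simplified in passing: the explicit loops become thin wrappers over built-in `any()`/`all()`, as in this excerpt-style sketch:

```python
def any_none(*args):
    """Return True if any argument is None."""
    return any(arg is None for arg in args)


def all_not_none(*args):
    """Return True if all arguments are not None."""
    return all(arg is not None for arg in args)


assert any_none(1, 2, None)
assert all_not_none(1, 2, 3)
```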
https://api.github.com/repos/pandas-dev/pandas/pulls/27741
2019-08-04T20:06:22Z
2019-08-04T21:26:43Z
2019-08-04T21:26:43Z
2019-08-04T21:26:47Z
BUG: Fix NaT +/- DTA/TDA
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 7f35a11e57b71..6fab1b5c02be1 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -123,7 +123,9 @@ cdef class _NaT(datetime): return c_NaT elif getattr(other, '_typ', None) in ['dateoffset', 'series', 'period', 'datetimeindex', - 'timedeltaindex']: + 'datetimearray', + 'timedeltaindex', + 'timedeltaarray']: # Duplicate logic in _Timestamp.__add__ to avoid needing # to subclass; allows us to @final(_Timestamp.__add__) return NotImplemented @@ -151,9 +153,10 @@ cdef class _NaT(datetime): return self + neg_other elif getattr(other, '_typ', None) in ['period', 'series', - 'periodindex', 'dateoffset']: + 'periodindex', 'dateoffset', + 'datetimearray', + 'timedeltaarray']: return NotImplemented - return NaT def __pos__(self): diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index f935a7fa880c7..e7ad76cf95ba0 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -7,6 +7,8 @@ from pandas._libs.tslibs import iNaT import pandas.compat as compat +from pandas.core.dtypes.common import is_datetime64_any_dtype + from pandas import ( DatetimeIndex, Index, @@ -18,7 +20,7 @@ Timestamp, isna, ) -from pandas.core.arrays import PeriodArray +from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.util import testing as tm @@ -397,7 +399,9 @@ def test_nat_rfloordiv_timedelta(val, expected): "value", [ DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"), - DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"), + DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"), + DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"]), + DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"], tz="US/Pacific"), TimedeltaIndex(["1 day", "2 day"], name="x"), ], ) @@ -406,19 +410,24 @@ def test_nat_arithmetic_index(op_name, value): exp_name = "x" exp_data = [NaT] * 2 - if isinstance(value, DatetimeIndex) and "plus" in op_name: - expected = DatetimeIndex(exp_data, name=exp_name, tz=value.tz) + if is_datetime64_any_dtype(value.dtype) and "plus" in op_name: + expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name) else: expected = TimedeltaIndex(exp_data, name=exp_name) - tm.assert_index_equal(_ops[op_name](NaT, value), expected) + if not isinstance(value, Index): + expected = expected.array + + op = _ops[op_name] + result = op(NaT, value) + tm.assert_equal(result, expected) @pytest.mark.parametrize( "op_name", ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"], ) -@pytest.mark.parametrize("box", [TimedeltaIndex, Series]) +@pytest.mark.parametrize("box", [TimedeltaIndex, Series, TimedeltaArray._from_sequence]) def test_nat_arithmetic_td64_vector(op_name, box): # see gh-19124 vec = box(["1 day", "2 day"], dtype="timedelta64[ns]")
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry After this bugfix, we can change all existing uses of `dispatch_to_index_op` in `core.ops.__init__` to use `dispatch_to_extension_op`. Once we do that, we can collapse ~50 lines of type-checking dispatch code into a single `dispatch_to_extension_op` path.
https://api.github.com/repos/pandas-dev/pandas/pulls/27740
2019-08-04T16:28:31Z
2019-08-04T23:01:03Z
2019-08-04T23:01:02Z
2019-08-04T23:08:43Z
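A small sketch of the behavior this fix enables, adapted from the new tests in the diff above (the `_from_sequence` constructors are the semi-private ones the tests themselves use, and the result types follow the test expectations rather than any official API statement):

```python
import pandas as pd
from pandas.core.arrays import DatetimeArray, TimedeltaArray

dta = DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"])
tda = TimedeltaArray._from_sequence(["1 day", "2 day"])

# NaT now returns NotImplemented for array operands, so the array's
# reflected op runs and NaT propagates elementwise, instead of the whole
# expression collapsing to a single scalar NaT.
print(pd.NaT + dta)  # datetime64 array of [NaT, NaT]
print(pd.NaT - dta)  # timedelta64 array of [NaT, NaT]
print(pd.NaT + tda)  # timedelta64 array of [NaT, NaT]
```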
BUG: fix+test PA+all-NaT TDA
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 91dd853e78c77..6203cfdf6df6b 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -714,7 +714,12 @@ def _add_delta_tdi(self, other):
         """
         assert isinstance(self.freq, Tick)  # checked by calling function
 
-        delta = self._check_timedeltalike_freq_compat(other)
+        if not np.all(isna(other)):
+            delta = self._check_timedeltalike_freq_compat(other)
+        else:
+            # all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT
+            return self + np.timedelta64("NaT")
+
         return self._addsub_int_array(delta, operator.add).asi8
 
     def _add_delta(self, other):
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index a9d18c194889c..01bfbed1aab4c 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -5,7 +5,7 @@
 """
 import datetime
 import operator
-from typing import Any, Callable
+from typing import Any, Callable, Tuple
 
 import numpy as np
 
@@ -42,7 +42,6 @@
     ABCSeries,
     ABCSparseArray,
     ABCSparseSeries,
-    ABCTimedeltaArray,
 )
 from pandas.core.dtypes.missing import isna, notna
 
@@ -134,7 +133,7 @@ def _maybe_match_name(a, b):
     return None
 
 
-def maybe_upcast_for_op(obj):
+def maybe_upcast_for_op(obj, shape: Tuple[int, ...]):
     """
     Cast non-pandas objects to pandas types to unify behavior of arithmetic
     and comparison operations.
@@ -142,6 +141,7 @@ def maybe_upcast_for_op(obj):
     Parameters
     ----------
     obj: object
+    shape : tuple[int]
 
     Returns
     -------
@@ -157,13 +157,22 @@ def maybe_upcast_for_op(obj):
         # implementation; otherwise operation against numeric-dtype
         # raises TypeError
         return Timedelta(obj)
-    elif isinstance(obj, np.timedelta64) and not isna(obj):
+    elif isinstance(obj, np.timedelta64):
+        if isna(obj):
+            # wrapping timedelta64("NaT") in Timedelta returns NaT,
+            # which would incorrectly be treated as a datetime-NaT, so
+            # we broadcast and wrap in a Series
+            right = np.broadcast_to(obj, shape)
+
+            # Note: we use Series instead of TimedeltaIndex to avoid having
+            # to worry about catching NullFrequencyError.
+            return pd.Series(right)
+
         # In particular non-nanosecond timedelta64 needs to be cast to
         # nanoseconds, or else we get undesired behavior like
         # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
-        # The isna check is to avoid casting timedelta64("NaT"), which would
-        # return NaT and incorrectly be treated as a datetime-NaT.
         return Timedelta(obj)
+
     elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj):
         # GH#22390 Unfortunately we need to special-case right-hand
         # timedelta64 dtypes because numpy casts integer dtypes to
@@ -975,7 +984,7 @@ def wrapper(left, right):
         left, right = _align_method_SERIES(left, right)
         res_name = get_op_result_name(left, right)
-        right = maybe_upcast_for_op(right)
+        right = maybe_upcast_for_op(right, left.shape)
 
         if is_categorical_dtype(left):
             raise TypeError(
@@ -1003,31 +1012,11 @@ def wrapper(left, right):
             return construct_result(left, result, index=left.index, name=res_name)
 
         elif is_timedelta64_dtype(right):
-            # We should only get here with non-scalar or timedelta64('NaT')
-            # values for right
-            # Note: we cannot use dispatch_to_index_op because
-            # that may incorrectly raise TypeError when we
-            # should get NullFrequencyError
-            orig_right = right
-            if is_scalar(right):
-                # broadcast and wrap in a TimedeltaIndex
-                assert np.isnat(right)
-                right = np.broadcast_to(right, left.shape)
-                right = pd.TimedeltaIndex(right)
-
-            assert isinstance(right, (pd.TimedeltaIndex, ABCTimedeltaArray, ABCSeries))
-            try:
-                result = op(left._values, right)
-            except NullFrequencyError:
-                if orig_right is not right:
-                    # i.e. scalar timedelta64('NaT')
-                    # We get a NullFrequencyError because we broadcast to
-                    # TimedeltaIndex, but this should be TypeError.
-                    raise TypeError(
-                        "incompatible type for a datetime/timedelta "
-                        "operation [{name}]".format(name=op.__name__)
-                    )
-                raise
+            # We should only get here with non-scalar values for right
+            # upcast by maybe_upcast_for_op
+            assert not isinstance(right, (np.timedelta64, np.ndarray))
+
+            result = op(left._values, right)
 
             # We do not pass dtype to ensure that the Series constructor
             # does inference in the case where `result` has object-dtype.
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index e54c16c7a27a4..c1b32e8b13442 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -12,6 +12,7 @@
 import pandas as pd
 from pandas import Period, PeriodIndex, Series, period_range
 from pandas.core import ops
+from pandas.core.arrays import TimedeltaArray
 import pandas.util.testing as tm
 
 from pandas.tseries.frequencies import to_offset
@@ -1013,6 +1014,33 @@ def test_parr_add_sub_td64_nat(self, box_transpose_fail):
         with pytest.raises(TypeError):
             other - obj
 
+    @pytest.mark.parametrize(
+        "other",
+        [
+            np.array(["NaT"] * 9, dtype="m8[ns]"),
+            TimedeltaArray._from_sequence(["NaT"] * 9),
+        ],
+    )
+    def test_parr_add_sub_tdt64_nat_array(self, box_df_fail, other):
+        # FIXME: DataFrame fails because when operating column-wise
+        # timedelta64 entries become NaT and are treated like datetimes
+        box = box_df_fail
+
+        pi = pd.period_range("1994-04-01", periods=9, freq="19D")
+        expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
+
+        obj = tm.box_expected(pi, box)
+        expected = tm.box_expected(expected, box)
+
+        result = obj + other
+        tm.assert_equal(result, expected)
+        result = other + obj
+        tm.assert_equal(result, expected)
+        result = obj - other
+        tm.assert_equal(result, expected)
+        with pytest.raises(TypeError):
+            other - obj
+
 
 class TestPeriodSeriesArithmetic:
     def test_ops_series_timedelta(self):
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry In conjunction with one more branch coming up, we're going to simplify the tar out of Series arithmetic ops.
https://api.github.com/repos/pandas-dev/pandas/pulls/27739
2019-08-04T16:05:31Z
2019-08-04T21:40:45Z
2019-08-04T21:40:45Z
2019-08-04T23:11:29Z
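A sketch of the fixed behavior, mirroring the new `test_parr_add_sub_tdt64_nat_array` test above: adding or subtracting an all-NaT timedelta64 array to a period-dtype object now propagates NaT instead of raising, because an all-NaT timedelta array is treated like a single scalar `timedelta64("NaT")`:

```python
import numpy as np
import pandas as pd

pi = pd.period_range("1994-04-01", periods=9, freq="19D")
other = np.array(["NaT"] * 9, dtype="m8[ns]")

# Both directions propagate NaT while keeping the PeriodIndex freq
result = pi + other
print(result)  # PeriodIndex(['NaT', 'NaT', ..., 'NaT'], freq='19D')
assert (pi - other).isna().all()
```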
DOC: update docstrings following refactor of buffer handling
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1d87a6937ca34..d09d11d20e137 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -68,6 +68,7 @@
 from pandas.core.internals import BlockManager
 from pandas.core.ops import _align_method_FRAME
 
+from pandas.io.formats import format as fmt
 from pandas.io.formats.format import DataFrameFormatter, format_percentiles
 from pandas.io.formats.printing import pprint_thing
 from pandas.tseries.frequencies import to_offset
@@ -2881,6 +2882,7 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal'
         else:
             return xarray.Dataset.from_dataframe(self)
 
+    @Substitution(returns=fmt.return_docstring)
     def to_latex(
         self,
         buf=None,
@@ -2914,7 +2916,7 @@
         Parameters
         ----------
-        buf : file descriptor or None
+        buf : str, Path or StringIO-like, optional, default None
             Buffer to write to. If None, the output is returned as a string.
         columns : list of label, optional
             The subset of columns to write. Writes all columns by default.
@@ -2979,13 +2981,7 @@
             from the pandas config module.
 
             .. versionadded:: 0.20.0
-
-        Returns
-        -------
-        str or None
-            If buf is None, returns the resulting LateX format as a
-            string. Otherwise returns None.
-
+        %(returns)s
         See Also
         --------
         DataFrame.to_string : Render a DataFrame to a console-friendly
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 23c07ea72d40f..6cb224d7722d8 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -85,8 +85,8 @@
 common_docstring = """
         Parameters
         ----------
-        buf : StringIO-like, optional
-            Buffer to write to.
+        buf : str, Path or StringIO-like, optional, default None
+            Buffer to write to. If None, the output is returned as a string.
         columns : sequence, optional, default None
             The subset of columns to write. Writes all columns by default.
         col_space : %(col_space_type)s, optional
@@ -156,8 +156,9 @@
 return_docstring = """
         Returns
         -------
-        str (or unicode, depending on data and options)
-            String representation of the dataframe.
+        str or None
+            If buf is None, returns the result as a string. Otherwise returns
+            None.
     """
@@ -471,6 +472,10 @@ def _get_formatter(self, i: Union[str, int]) -> Optional[Callable]:
     def get_buffer(
         self, buf: Optional[FilePathOrBuffer[str]], encoding: Optional[str] = None
     ):
+        """
+        Context manager to open, yield and close buffer for filenames or Path-like
+        objects, otherwise yield buf unchanged.
+        """
         if buf is not None:
             buf = _stringify_path(buf)
         else:
@@ -488,6 +493,9 @@ def get_buffer(
             raise TypeError("buf is not a file name and it has no write method")
 
     def write_result(self, buf: IO[str]) -> None:
+        """
+        Write the result of serialization to buf.
+        """
         raise AbstractMethodError(self)
 
     def get_result(
@@ -495,6 +503,9 @@ def get_result(
         buf: Optional[FilePathOrBuffer[str]] = None,
         encoding: Optional[str] = None,
     ) -> Optional[str]:
+        """
+        Perform serialization. Write to buf or return as string if buf is None.
+        """
         with self.get_buffer(buf, encoding=encoding) as f:
             self.write_result(buf=f)
             if buf is None:
xref https://github.com/pandas-dev/pandas/pull/27598#discussion_r310128723
https://api.github.com/repos/pandas-dev/pandas/pulls/27738
2019-08-04T11:08:54Z
2019-08-04T21:28:23Z
2019-08-04T21:28:23Z
2019-08-05T09:35:04Z
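A quick illustration of the `buf` contract the updated docstrings above describe (standard public API, nothing beyond what the docstrings state): with `buf=None` the rendered output comes back as a `str`; with a buffer or path it is written out and `None` is returned:

```python
from io import StringIO

import pandas as pd

df = pd.DataFrame({"a": [1, 2]})

text = df.to_latex()  # buf=None -> rendered LaTeX returned as a str
assert isinstance(text, str)

buf = StringIO()
assert df.to_latex(buf) is None  # written to buf -> returns None
assert buf.getvalue() == text
```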
REF: pandas/core/window.py into multiple files
diff --git a/doc/source/reference/window.rst b/doc/source/reference/window.rst index 9e1374a3bd8e4..2f6addf607877 100644 --- a/doc/source/reference/window.rst +++ b/doc/source/reference/window.rst @@ -5,7 +5,6 @@ ====== Window ====== -.. currentmodule:: pandas.core.window Rolling objects are returned by ``.rolling`` calls: :func:`pandas.DataFrame.rolling`, :func:`pandas.Series.rolling`, etc. Expanding objects are returned by ``.expanding`` calls: :func:`pandas.DataFrame.expanding`, :func:`pandas.Series.expanding`, etc. @@ -13,6 +12,8 @@ EWM objects are returned by ``.ewm`` calls: :func:`pandas.DataFrame.ewm`, :func: Standard moving window functions -------------------------------- +.. currentmodule:: pandas.core.window.rolling + .. autosummary:: :toctree: api/ @@ -38,6 +39,8 @@ Standard moving window functions Standard expanding window functions ----------------------------------- +.. currentmodule:: pandas.core.window.expanding + .. autosummary:: :toctree: api/ @@ -59,6 +62,8 @@ Standard expanding window functions Exponentially-weighted moving window functions ---------------------------------------------- +.. currentmodule:: pandas.core.window.ewm + .. autosummary:: :toctree: api/ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1b39f9225a0ed..4d29f19cc01ed 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10683,9 +10683,9 @@ def _add_series_or_dataframe_operations(cls): the doc strings again. """ - from pandas.core import window as rwindow + from pandas.core.window import EWM, Expanding, Rolling, Window - @Appender(rwindow.rolling.__doc__) + @Appender(Rolling.__doc__) def rolling( self, window, @@ -10697,7 +10697,20 @@ def rolling( closed=None, ): axis = self._get_axis_number(axis) - return rwindow.rolling( + + if win_type is not None: + return Window( + self, + window=window, + min_periods=min_periods, + center=center, + win_type=win_type, + on=on, + axis=axis, + closed=closed, + ) + + return Rolling( self, window=window, min_periods=min_periods, @@ -10710,16 +10723,14 @@ def rolling( cls.rolling = rolling - @Appender(rwindow.expanding.__doc__) + @Appender(Expanding.__doc__) def expanding(self, min_periods=1, center=False, axis=0): axis = self._get_axis_number(axis) - return rwindow.expanding( - self, min_periods=min_periods, center=center, axis=axis - ) + return Expanding(self, min_periods=min_periods, center=center, axis=axis) cls.expanding = expanding - @Appender(rwindow.ewm.__doc__) + @Appender(EWM.__doc__) def ewm( self, com=None, @@ -10732,7 +10743,7 @@ def ewm( axis=0, ): axis = self._get_axis_number(axis) - return rwindow.ewm( + return EWM( self, com=com, span=span, diff --git a/pandas/core/window/__init__.py b/pandas/core/window/__init__.py new file mode 100644 index 0000000000000..dcf58a4c0dd5b --- /dev/null +++ b/pandas/core/window/__init__.py @@ -0,0 +1,3 @@ +from pandas.core.window.ewm import EWM # noqa:F401 +from pandas.core.window.expanding import Expanding, ExpandingGroupby # noqa:F401 +from pandas.core.window.rolling import Rolling, RollingGroupby, Window # noqa:F401 diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py new file mode 100644 index 0000000000000..0f2920b3558c9 --- /dev/null +++ b/pandas/core/window/common.py @@ -0,0 +1,276 @@ +"""Common utility functions for rolling operations""" +from collections import defaultdict +import warnings + +import numpy as np + +from pandas.core.dtypes.common import is_integer +from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries + +import pandas.core.common as 
com +from pandas.core.generic import _shared_docs +from pandas.core.groupby.base import GroupByMixin +from pandas.core.index import MultiIndex + +_shared_docs = dict(**_shared_docs) +_doc_template = """ + Returns + ------- + Series or DataFrame + Return type is determined by the caller. + + See Also + -------- + Series.%(name)s : Series %(name)s. + DataFrame.%(name)s : DataFrame %(name)s. +""" + + +class _GroupByMixin(GroupByMixin): + """ + Provide the groupby facilities. + """ + + def __init__(self, obj, *args, **kwargs): + parent = kwargs.pop("parent", None) # noqa + groupby = kwargs.pop("groupby", None) + if groupby is None: + groupby, obj = obj, obj.obj + self._groupby = groupby + self._groupby.mutated = True + self._groupby.grouper.mutated = True + super().__init__(obj, *args, **kwargs) + + count = GroupByMixin._dispatch("count") + corr = GroupByMixin._dispatch("corr", other=None, pairwise=None) + cov = GroupByMixin._dispatch("cov", other=None, pairwise=None) + + def _apply( + self, func, name=None, window=None, center=None, check_minp=None, **kwargs + ): + """ + Dispatch to apply; we are stripping all of the _apply kwargs and + performing the original function call on the grouped object. + """ + + def f(x, name=name, *args): + x = self._shallow_copy(x) + + if isinstance(name, str): + return getattr(x, name)(*args, **kwargs) + + return x.apply(name, *args, **kwargs) + + return self._groupby.apply(f) + + +def _flex_binary_moment(arg1, arg2, f, pairwise=False): + + if not ( + isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) + and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame)) + ): + raise TypeError( + "arguments to moment function must be of type " + "np.ndarray/Series/DataFrame" + ) + + if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance( + arg2, (np.ndarray, ABCSeries) + ): + X, Y = _prep_binary(arg1, arg2) + return f(X, Y) + + elif isinstance(arg1, ABCDataFrame): + from pandas import DataFrame + + def dataframe_from_int_dict(data, frame_template): + result = DataFrame(data, index=frame_template.index) + if len(result.columns) > 0: + result.columns = frame_template.columns[result.columns] + return result + + results = {} + if isinstance(arg2, ABCDataFrame): + if pairwise is False: + if arg1 is arg2: + # special case in order to handle duplicate column names + for i, col in enumerate(arg1.columns): + results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) + return dataframe_from_int_dict(results, arg1) + else: + if not arg1.columns.is_unique: + raise ValueError("'arg1' columns are not unique") + if not arg2.columns.is_unique: + raise ValueError("'arg2' columns are not unique") + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore", RuntimeWarning) + X, Y = arg1.align(arg2, join="outer") + X = X + 0 * Y + Y = Y + 0 * X + + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore", RuntimeWarning) + res_columns = arg1.columns.union(arg2.columns) + for col in res_columns: + if col in X and col in Y: + results[col] = f(X[col], Y[col]) + return DataFrame(results, index=X.index, columns=res_columns) + elif pairwise is True: + results = defaultdict(dict) + for i, k1 in enumerate(arg1.columns): + for j, k2 in enumerate(arg2.columns): + if j < i and arg2 is arg1: + # Symmetric case + results[i][j] = results[j][i] + else: + results[i][j] = f( + *_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]) + ) + + from pandas import concat + + result_index = arg1.index.union(arg2.index) + if len(result_index): + + # construct result frame + result = 
concat( + [ + concat( + [results[i][j] for j, c in enumerate(arg2.columns)], + ignore_index=True, + ) + for i, c in enumerate(arg1.columns) + ], + ignore_index=True, + axis=1, + ) + result.columns = arg1.columns + + # set the index and reorder + if arg2.columns.nlevels > 1: + result.index = MultiIndex.from_product( + arg2.columns.levels + [result_index] + ) + result = result.reorder_levels([2, 0, 1]).sort_index() + else: + result.index = MultiIndex.from_product( + [range(len(arg2.columns)), range(len(result_index))] + ) + result = result.swaplevel(1, 0).sort_index() + result.index = MultiIndex.from_product( + [result_index] + [arg2.columns] + ) + else: + + # empty result + result = DataFrame( + index=MultiIndex( + levels=[arg1.index, arg2.columns], codes=[[], []] + ), + columns=arg2.columns, + dtype="float64", + ) + + # reset our index names to arg1 names + # reset our column names to arg2 names + # careful not to mutate the original names + result.columns = result.columns.set_names(arg1.columns.names) + result.index = result.index.set_names( + result_index.names + arg2.columns.names + ) + + return result + + else: + raise ValueError("'pairwise' is not True/False") + else: + results = { + i: f(*_prep_binary(arg1.iloc[:, i], arg2)) + for i, col in enumerate(arg1.columns) + } + return dataframe_from_int_dict(results, arg1) + + else: + return _flex_binary_moment(arg2, arg1, f) + + +def _get_center_of_mass(comass, span, halflife, alpha): + valid_count = com.count_not_none(comass, span, halflife, alpha) + if valid_count > 1: + raise ValueError("comass, span, halflife, and alpha are mutually exclusive") + + # Convert to center of mass; domain checks ensure 0 < alpha <= 1 + if comass is not None: + if comass < 0: + raise ValueError("comass must satisfy: comass >= 0") + elif span is not None: + if span < 1: + raise ValueError("span must satisfy: span >= 1") + comass = (span - 1) / 2.0 + elif halflife is not None: + if halflife <= 0: + raise ValueError("halflife must satisfy: halflife > 0") + decay = 1 - np.exp(np.log(0.5) / halflife) + comass = 1 / decay - 1 + elif alpha is not None: + if alpha <= 0 or alpha > 1: + raise ValueError("alpha must satisfy: 0 < alpha <= 1") + comass = (1.0 - alpha) / alpha + else: + raise ValueError("Must pass one of comass, span, halflife, or alpha") + + return float(comass) + + +def _offset(window, center): + if not is_integer(window): + window = len(window) + offset = (window - 1) / 2.0 if center else 0 + try: + return int(offset) + except TypeError: + return offset.astype(int) + + +def _require_min_periods(p): + def _check_func(minp, window): + if minp is None: + return window + else: + return max(p, minp) + + return _check_func + + +def _use_window(minp, window): + if minp is None: + return window + else: + return minp + + +def _zsqrt(x): + with np.errstate(all="ignore"): + result = np.sqrt(x) + mask = x < 0 + + if isinstance(x, ABCDataFrame): + if mask.values.any(): + result[mask] = 0 + else: + if mask.any(): + result[mask] = 0 + + return result + + +def _prep_binary(arg1, arg2): + if not isinstance(arg2, type(arg1)): + raise Exception("Input arrays must be of the same type!") + + # mask out values, this also makes a common index... 
+ X = arg1 + 0 * arg2 + Y = arg2 + 0 * arg1 + + return X, Y diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py new file mode 100644 index 0000000000000..0ce6d5ddec2ad --- /dev/null +++ b/pandas/core/window/ewm.py @@ -0,0 +1,388 @@ +from textwrap import dedent + +import numpy as np + +import pandas._libs.window as libwindow +from pandas.compat.numpy import function as nv +from pandas.util._decorators import Appender, Substitution + +from pandas.core.dtypes.generic import ABCDataFrame + +from pandas.core.base import DataError +from pandas.core.window.common import _doc_template, _get_center_of_mass, _shared_docs +from pandas.core.window.rolling import _flex_binary_moment, _Rolling, _zsqrt + +_bias_template = """ + Parameters + ---------- + bias : bool, default False + Use a standard estimation bias correction. + *args, **kwargs + Arguments and keyword arguments to be passed into func. +""" + +_pairwise_template = """ + Parameters + ---------- + other : Series, DataFrame, or ndarray, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndex DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + bias : bool, default False + Use a standard estimation bias correction. + **kwargs + Keyword arguments to be passed into func. +""" + + +class EWM(_Rolling): + r""" + Provide exponential weighted functions. + + Parameters + ---------- + com : float, optional + Specify decay in terms of center of mass, + :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`. + span : float, optional + Specify decay in terms of span, + :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`. + halflife : float, optional + Specify decay in terms of half-life, + :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{for} halflife > 0`. + alpha : float, optional + Specify smoothing factor :math:`\alpha` directly, + :math:`0 < \alpha \leq 1`. + min_periods : int, default 0 + Minimum number of observations in window required to have a value + (otherwise result is NA). + adjust : bool, default True + Divide by decaying adjustment factor in beginning periods to account + for imbalance in relative weightings + (viewing EWMA as a moving average). + ignore_na : bool, default False + Ignore missing values when calculating weights; + specify True to reproduce pre-0.15.0 behavior. + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to use. The value 0 identifies the rows, and 1 + identifies the columns. + + Returns + ------- + DataFrame + A Window sub-classed for the particular operation. + + See Also + -------- + rolling : Provides rolling window calculations. + expanding : Provides expanding transformations. + + Notes + ----- + Exactly one of center of mass, span, half-life, and alpha must be provided. + Allowed values and relationship between the parameters are specified in the + parameter descriptions above; see the link at the end of this section for + a detailed explanation. + + When adjust is True (default), weighted averages are calculated using + weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1. 
+ + When adjust is False, weighted averages are calculated recursively as: + weighted_average[0] = arg[0]; + weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i]. + + When ignore_na is False (default), weights are based on absolute positions. + For example, the weights of x and y used in calculating the final weighted + average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and + (1-alpha)**2 and alpha (if adjust is False). + + When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based + on relative positions. For example, the weights of x and y used in + calculating the final weighted average of [x, None, y] are 1-alpha and 1 + (if adjust is True), and 1-alpha and alpha (if adjust is False). + + More details can be found at + http://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows + + Examples + -------- + + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + >>> df.ewm(com=0.5).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + """ + _attributes = ["com", "min_periods", "adjust", "ignore_na", "axis"] + + def __init__( + self, + obj, + com=None, + span=None, + halflife=None, + alpha=None, + min_periods=0, + adjust=True, + ignore_na=False, + axis=0, + ): + self.obj = obj + self.com = _get_center_of_mass(com, span, halflife, alpha) + self.min_periods = min_periods + self.adjust = adjust + self.ignore_na = ignore_na + self.axis = axis + self.on = None + + @property + def _constructor(self): + return EWM + + _agg_see_also_doc = dedent( + """ + See Also + -------- + pandas.DataFrame.rolling.aggregate + """ + ) + + _agg_examples_doc = dedent( + """ + Examples + -------- + + >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) + >>> df + A B C + 0 -2.385977 -0.102758 0.438822 + 1 -1.004295 0.905829 -0.954544 + 2 0.735167 -0.165272 -1.619346 + 3 -0.702657 -1.340923 -0.706334 + 4 -0.246845 0.211596 -0.901819 + 5 2.463718 3.157577 -1.380906 + 6 -1.142255 2.340594 -0.039875 + 7 1.396598 -1.647453 1.677227 + 8 -0.543425 1.761277 -0.220481 + 9 -0.640505 0.289374 -1.550670 + + >>> df.ewm(alpha=0.5).mean() + A B C + 0 -2.385977 -0.102758 0.438822 + 1 -1.464856 0.569633 -0.490089 + 2 -0.207700 0.149687 -1.135379 + 3 -0.471677 -0.645305 -0.906555 + 4 -0.355635 -0.203033 -0.904111 + 5 1.076417 1.503943 -1.146293 + 6 -0.041654 1.925562 -0.588728 + 7 0.680292 0.132049 0.548693 + 8 0.067236 0.948257 0.163353 + 9 -0.286980 0.618493 -0.694496 + """ + ) + + @Substitution( + see_also=_agg_see_also_doc, + examples=_agg_examples_doc, + versionadded="", + klass="Series/Dataframe", + axis="", + ) + @Appender(_shared_docs["aggregate"]) + def aggregate(self, arg, *args, **kwargs): + return super().aggregate(arg, *args, **kwargs) + + agg = aggregate + + def _apply(self, func, **kwargs): + """ + Rolling statistical measure using supplied function. Designed to be + used with passed-in Cython array-based functions. 
+ + Parameters + ---------- + func : str/callable to apply + + Returns + ------- + y : same type as input argument + """ + blocks, obj = self._create_blocks() + block_list = list(blocks) + + results = [] + exclude = [] + for i, b in enumerate(blocks): + try: + values = self._prep_values(b.values) + + except (TypeError, NotImplementedError): + if isinstance(obj, ABCDataFrame): + exclude.extend(b.columns) + del block_list[i] + continue + else: + raise DataError("No numeric types to aggregate") + + if values.size == 0: + results.append(values.copy()) + continue + + # if we have a string function name, wrap it + if isinstance(func, str): + cfunc = getattr(libwindow, func, None) + if cfunc is None: + raise ValueError( + "we do not support this function " + "in libwindow.{func}".format(func=func) + ) + + def func(arg): + return cfunc( + arg, + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + ) + + results.append(np.apply_along_axis(func, self.axis, values)) + + return self._wrap_results(results, block_list, obj, exclude) + + @Substitution(name="ewm") + @Appender(_doc_template) + def mean(self, *args, **kwargs): + """ + Exponential weighted moving average. + + Parameters + ---------- + *args, **kwargs + Arguments and keyword arguments to be passed into func. + """ + nv.validate_window_func("mean", args, kwargs) + return self._apply("ewma", **kwargs) + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_bias_template) + def std(self, bias=False, *args, **kwargs): + """ + Exponential weighted moving stddev. + """ + nv.validate_window_func("std", args, kwargs) + return _zsqrt(self.var(bias=bias, **kwargs)) + + vol = std + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_bias_template) + def var(self, bias=False, *args, **kwargs): + """ + Exponential weighted moving variance. + """ + nv.validate_window_func("var", args, kwargs) + + def f(arg): + return libwindow.ewmcov( + arg, + arg, + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + int(bias), + ) + + return self._apply(f, **kwargs) + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_pairwise_template) + def cov(self, other=None, pairwise=None, bias=False, **kwargs): + """ + Exponential weighted sample covariance. + """ + if other is None: + other = self._selected_obj + # only default unset + pairwise = True if pairwise is None else pairwise + other = self._shallow_copy(other) + + def _get_cov(X, Y): + X = self._shallow_copy(X) + Y = self._shallow_copy(Y) + cov = libwindow.ewmcov( + X._prep_values(), + Y._prep_values(), + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + int(bias), + ) + return X._wrap_result(cov) + + return _flex_binary_moment( + self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise) + ) + + @Substitution(name="ewm") + @Appender(_doc_template) + @Appender(_pairwise_template) + def corr(self, other=None, pairwise=None, **kwargs): + """ + Exponential weighted sample correlation. 
+ """ + if other is None: + other = self._selected_obj + # only default unset + pairwise = True if pairwise is None else pairwise + other = self._shallow_copy(other) + + def _get_corr(X, Y): + X = self._shallow_copy(X) + Y = self._shallow_copy(Y) + + def _cov(x, y): + return libwindow.ewmcov( + x, + y, + self.com, + int(self.adjust), + int(self.ignore_na), + int(self.min_periods), + 1, + ) + + x_values = X._prep_values() + y_values = Y._prep_values() + with np.errstate(all="ignore"): + cov = _cov(x_values, y_values) + x_var = _cov(x_values, x_values) + y_var = _cov(y_values, y_values) + corr = cov / _zsqrt(x_var * y_var) + return X._wrap_result(corr) + + return _flex_binary_moment( + self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise) + ) diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py new file mode 100644 index 0000000000000..c43ca6b0565f3 --- /dev/null +++ b/pandas/core/window/expanding.py @@ -0,0 +1,260 @@ +from textwrap import dedent + +from pandas.compat.numpy import function as nv +from pandas.util._decorators import Appender, Substitution + +from pandas.core.window.common import _doc_template, _GroupByMixin, _shared_docs +from pandas.core.window.rolling import _Rolling_and_Expanding + + +class Expanding(_Rolling_and_Expanding): + """ + Provide expanding transformations. + + Parameters + ---------- + min_periods : int, default 1 + Minimum number of observations in window required to have a value + (otherwise result is NA). + center : bool, default False + Set the labels at the center of the window. + axis : int or str, default 0 + + Returns + ------- + a Window sub-classed for the particular operation + + See Also + -------- + rolling : Provides rolling window calculations. + ewm : Provides exponential weighted functions. + + Notes + ----- + By default, the result is set to the right edge of the window. This can be + changed to the center of the window by setting ``center=True``. + + Examples + -------- + + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + >>> df.expanding(2).sum() + B + 0 NaN + 1 1.0 + 2 3.0 + 3 3.0 + 4 7.0 + """ + + _attributes = ["min_periods", "center", "axis"] + + def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs): + super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis) + + @property + def _constructor(self): + return Expanding + + def _get_window(self, other=None, **kwargs): + """ + Get the window length over which to perform some operation. + + Parameters + ---------- + other : object, default None + The other object that is involved in the operation. + Such an object is involved for operations like covariance. + + Returns + ------- + window : int + The window length. 
+ """ + axis = self.obj._get_axis(self.axis) + length = len(axis) + (other is not None) * len(axis) + + other = self.min_periods or -1 + return max(length, other) + + _agg_see_also_doc = dedent( + """ + See Also + -------- + DataFrame.expanding.aggregate + DataFrame.rolling.aggregate + DataFrame.aggregate + """ + ) + + _agg_examples_doc = dedent( + """ + Examples + -------- + + >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) + >>> df + A B C + 0 -2.385977 -0.102758 0.438822 + 1 -1.004295 0.905829 -0.954544 + 2 0.735167 -0.165272 -1.619346 + 3 -0.702657 -1.340923 -0.706334 + 4 -0.246845 0.211596 -0.901819 + 5 2.463718 3.157577 -1.380906 + 6 -1.142255 2.340594 -0.039875 + 7 1.396598 -1.647453 1.677227 + 8 -0.543425 1.761277 -0.220481 + 9 -0.640505 0.289374 -1.550670 + + >>> df.ewm(alpha=0.5).mean() + A B C + 0 -2.385977 -0.102758 0.438822 + 1 -1.464856 0.569633 -0.490089 + 2 -0.207700 0.149687 -1.135379 + 3 -0.471677 -0.645305 -0.906555 + 4 -0.355635 -0.203033 -0.904111 + 5 1.076417 1.503943 -1.146293 + 6 -0.041654 1.925562 -0.588728 + 7 0.680292 0.132049 0.548693 + 8 0.067236 0.948257 0.163353 + 9 -0.286980 0.618493 -0.694496 + """ + ) + + @Substitution( + see_also=_agg_see_also_doc, + examples=_agg_examples_doc, + versionadded="", + klass="Series/Dataframe", + axis="", + ) + @Appender(_shared_docs["aggregate"]) + def aggregate(self, arg, *args, **kwargs): + return super().aggregate(arg, *args, **kwargs) + + agg = aggregate + + @Substitution(name="expanding") + @Appender(_shared_docs["count"]) + def count(self, **kwargs): + return super().count(**kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["apply"]) + def apply(self, func, raw=None, args=(), kwargs={}): + return super().apply(func, raw=raw, args=args, kwargs=kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["sum"]) + def sum(self, *args, **kwargs): + nv.validate_expanding_func("sum", args, kwargs) + return super().sum(*args, **kwargs) + + @Substitution(name="expanding") + @Appender(_doc_template) + @Appender(_shared_docs["max"]) + def max(self, *args, **kwargs): + nv.validate_expanding_func("max", args, kwargs) + return super().max(*args, **kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["min"]) + def min(self, *args, **kwargs): + nv.validate_expanding_func("min", args, kwargs) + return super().min(*args, **kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["mean"]) + def mean(self, *args, **kwargs): + nv.validate_expanding_func("mean", args, kwargs) + return super().mean(*args, **kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["median"]) + def median(self, **kwargs): + return super().median(**kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["std"]) + def std(self, ddof=1, *args, **kwargs): + nv.validate_expanding_func("std", args, kwargs) + return super().std(ddof=ddof, **kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["var"]) + def var(self, ddof=1, *args, **kwargs): + nv.validate_expanding_func("var", args, kwargs) + return super().var(ddof=ddof, **kwargs) + + @Substitution(name="expanding") + @Appender(_doc_template) + @Appender(_shared_docs["skew"]) + def skew(self, **kwargs): + return super().skew(**kwargs) + + _agg_doc = dedent( + """ + Examples + -------- + + The example below will show an expanding calculation with a window size of + four matching the equivalent function call using `scipy.stats`. 
+ + >>> arr = [1, 2, 3, 4, 999] + >>> import scipy.stats + >>> fmt = "{0:.6f}" # limit the printed precision to 6 digits + >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False))) + -1.200000 + >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False))) + 4.999874 + >>> s = pd.Series(arr) + >>> s.expanding(4).kurt() + 0 NaN + 1 NaN + 2 NaN + 3 -1.200000 + 4 4.999874 + dtype: float64 + """ + ) + + @Appender(_agg_doc) + @Substitution(name="expanding") + @Appender(_shared_docs["kurt"]) + def kurt(self, **kwargs): + return super().kurt(**kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["quantile"]) + def quantile(self, quantile, interpolation="linear", **kwargs): + return super().quantile( + quantile=quantile, interpolation=interpolation, **kwargs + ) + + @Substitution(name="expanding") + @Appender(_doc_template) + @Appender(_shared_docs["cov"]) + def cov(self, other=None, pairwise=None, ddof=1, **kwargs): + return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs) + + @Substitution(name="expanding") + @Appender(_shared_docs["corr"]) + def corr(self, other=None, pairwise=None, **kwargs): + return super().corr(other=other, pairwise=pairwise, **kwargs) + + +class ExpandingGroupby(_GroupByMixin, Expanding): + """ + Provide a expanding groupby implementation. + """ + + @property + def _constructor(self): + return Expanding diff --git a/pandas/core/window.py b/pandas/core/window/rolling.py similarity index 66% rename from pandas/core/window.py rename to pandas/core/window/rolling.py index 3e3f17369db7b..323089b3fdf6b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window/rolling.py @@ -2,7 +2,6 @@ Provide a generic structure to support window functions, similar to how we have a Groupby object. """ -from collections import defaultdict from datetime import timedelta from textwrap import dedent from typing import Callable, List, Optional, Set, Union @@ -38,22 +37,17 @@ from pandas._typing import Axis, FrameOrSeries, Scalar from pandas.core.base import DataError, PandasObject, SelectionMixin import pandas.core.common as com -from pandas.core.generic import _shared_docs -from pandas.core.groupby.base import GroupByMixin -from pandas.core.index import Index, MultiIndex, ensure_index - -_shared_docs = dict(**_shared_docs) -_doc_template = """ - Returns - ------- - Series or DataFrame - Return type is determined by the caller. - - See Also - -------- - Series.%(name)s : Series %(name)s. - DataFrame.%(name)s : DataFrame %(name)s. -""" +from pandas.core.index import Index, ensure_index +from pandas.core.window.common import ( + _doc_template, + _flex_binary_moment, + _GroupByMixin, + _offset, + _require_min_periods, + _shared_docs, + _use_window, + _zsqrt, +) class _Window(PandasObject, SelectionMixin): @@ -121,6 +115,8 @@ def validate(self): "neither", ]: raise ValueError("closed must be 'right', 'left', 'both' or 'neither'") + if not isinstance(self.obj, (ABCSeries, ABCDataFrame)): + raise TypeError("invalid type: {}".format(type(self))) def _create_blocks(self): """ @@ -929,44 +925,6 @@ def mean(self, *args, **kwargs): return self._apply("roll_weighted_mean", **kwargs) -class _GroupByMixin(GroupByMixin): - """ - Provide the groupby facilities. 
- """ - - def __init__(self, obj, *args, **kwargs): - parent = kwargs.pop("parent", None) # noqa - groupby = kwargs.pop("groupby", None) - if groupby is None: - groupby, obj = obj, obj.obj - self._groupby = groupby - self._groupby.mutated = True - self._groupby.grouper.mutated = True - super().__init__(obj, *args, **kwargs) - - count = GroupByMixin._dispatch("count") - corr = GroupByMixin._dispatch("corr", other=None, pairwise=None) - cov = GroupByMixin._dispatch("cov", other=None, pairwise=None) - - def _apply( - self, func, name=None, window=None, center=None, check_minp=None, **kwargs - ): - """ - Dispatch to apply; we are stripping all of the _apply kwargs and - performing the original function call on the grouped object. - """ - - def f(x, name=name, *args): - x = self._shallow_copy(x) - - if isinstance(name, str): - return getattr(x, name)(*args, **kwargs) - - return x.apply(name, *args, **kwargs) - - return self._groupby.apply(f) - - class _Rolling(_Window): @property def _constructor(self): @@ -1949,6 +1907,9 @@ def corr(self, other=None, pairwise=None, **kwargs): return super().corr(other=other, pairwise=pairwise, **kwargs) +Rolling.__doc__ = Window.__doc__ + + class RollingGroupby(_GroupByMixin, Rolling): """ Provide a rolling groupby implementation. @@ -1976,883 +1937,3 @@ def _validate_monotonic(self): level. """ pass - - -class Expanding(_Rolling_and_Expanding): - """ - Provide expanding transformations. - - Parameters - ---------- - min_periods : int, default 1 - Minimum number of observations in window required to have a value - (otherwise result is NA). - center : bool, default False - Set the labels at the center of the window. - axis : int or str, default 0 - - Returns - ------- - a Window sub-classed for the particular operation - - See Also - -------- - rolling : Provides rolling window calculations. - ewm : Provides exponential weighted functions. - - Notes - ----- - By default, the result is set to the right edge of the window. This can be - changed to the center of the window by setting ``center=True``. - - Examples - -------- - - >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) - B - 0 0.0 - 1 1.0 - 2 2.0 - 3 NaN - 4 4.0 - - >>> df.expanding(2).sum() - B - 0 NaN - 1 1.0 - 2 3.0 - 3 3.0 - 4 7.0 - """ - - _attributes = ["min_periods", "center", "axis"] - - def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs): - super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis) - - @property - def _constructor(self): - return Expanding - - def _get_window(self, other=None, **kwargs): - """ - Get the window length over which to perform some operation. - - Parameters - ---------- - other : object, default None - The other object that is involved in the operation. - Such an object is involved for operations like covariance. - - Returns - ------- - window : int - The window length. 
- """ - axis = self.obj._get_axis(self.axis) - length = len(axis) + (other is not None) * len(axis) - - other = self.min_periods or -1 - return max(length, other) - - _agg_see_also_doc = dedent( - """ - See Also - -------- - DataFrame.expanding.aggregate - DataFrame.rolling.aggregate - DataFrame.aggregate - """ - ) - - _agg_examples_doc = dedent( - """ - Examples - -------- - - >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) - >>> df - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.004295 0.905829 -0.954544 - 2 0.735167 -0.165272 -1.619346 - 3 -0.702657 -1.340923 -0.706334 - 4 -0.246845 0.211596 -0.901819 - 5 2.463718 3.157577 -1.380906 - 6 -1.142255 2.340594 -0.039875 - 7 1.396598 -1.647453 1.677227 - 8 -0.543425 1.761277 -0.220481 - 9 -0.640505 0.289374 -1.550670 - - >>> df.ewm(alpha=0.5).mean() - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.464856 0.569633 -0.490089 - 2 -0.207700 0.149687 -1.135379 - 3 -0.471677 -0.645305 -0.906555 - 4 -0.355635 -0.203033 -0.904111 - 5 1.076417 1.503943 -1.146293 - 6 -0.041654 1.925562 -0.588728 - 7 0.680292 0.132049 0.548693 - 8 0.067236 0.948257 0.163353 - 9 -0.286980 0.618493 -0.694496 - """ - ) - - @Substitution( - see_also=_agg_see_also_doc, - examples=_agg_examples_doc, - versionadded="", - klass="Series/Dataframe", - axis="", - ) - @Appender(_shared_docs["aggregate"]) - def aggregate(self, arg, *args, **kwargs): - return super().aggregate(arg, *args, **kwargs) - - agg = aggregate - - @Substitution(name="expanding") - @Appender(_shared_docs["count"]) - def count(self, **kwargs): - return super().count(**kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["apply"]) - def apply(self, func, raw=None, args=(), kwargs={}): - return super().apply(func, raw=raw, args=args, kwargs=kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["sum"]) - def sum(self, *args, **kwargs): - nv.validate_expanding_func("sum", args, kwargs) - return super().sum(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_doc_template) - @Appender(_shared_docs["max"]) - def max(self, *args, **kwargs): - nv.validate_expanding_func("max", args, kwargs) - return super().max(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["min"]) - def min(self, *args, **kwargs): - nv.validate_expanding_func("min", args, kwargs) - return super().min(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["mean"]) - def mean(self, *args, **kwargs): - nv.validate_expanding_func("mean", args, kwargs) - return super().mean(*args, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["median"]) - def median(self, **kwargs): - return super().median(**kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["std"]) - def std(self, ddof=1, *args, **kwargs): - nv.validate_expanding_func("std", args, kwargs) - return super().std(ddof=ddof, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["var"]) - def var(self, ddof=1, *args, **kwargs): - nv.validate_expanding_func("var", args, kwargs) - return super().var(ddof=ddof, **kwargs) - - @Substitution(name="expanding") - @Appender(_doc_template) - @Appender(_shared_docs["skew"]) - def skew(self, **kwargs): - return super().skew(**kwargs) - - _agg_doc = dedent( - """ - Examples - -------- - - The example below will show an expanding calculation with a window size of - four matching the equivalent function call using `scipy.stats`. 
- - >>> arr = [1, 2, 3, 4, 999] - >>> import scipy.stats - >>> fmt = "{0:.6f}" # limit the printed precision to 6 digits - >>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False))) - -1.200000 - >>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False))) - 4.999874 - >>> s = pd.Series(arr) - >>> s.expanding(4).kurt() - 0 NaN - 1 NaN - 2 NaN - 3 -1.200000 - 4 4.999874 - dtype: float64 - """ - ) - - @Appender(_agg_doc) - @Substitution(name="expanding") - @Appender(_shared_docs["kurt"]) - def kurt(self, **kwargs): - return super().kurt(**kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["quantile"]) - def quantile(self, quantile, interpolation="linear", **kwargs): - return super().quantile( - quantile=quantile, interpolation=interpolation, **kwargs - ) - - @Substitution(name="expanding") - @Appender(_doc_template) - @Appender(_shared_docs["cov"]) - def cov(self, other=None, pairwise=None, ddof=1, **kwargs): - return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs) - - @Substitution(name="expanding") - @Appender(_shared_docs["corr"]) - def corr(self, other=None, pairwise=None, **kwargs): - return super().corr(other=other, pairwise=pairwise, **kwargs) - - -class ExpandingGroupby(_GroupByMixin, Expanding): - """ - Provide a expanding groupby implementation. - """ - - @property - def _constructor(self): - return Expanding - - -_bias_template = """ - Parameters - ---------- - bias : bool, default False - Use a standard estimation bias correction. - *args, **kwargs - Arguments and keyword arguments to be passed into func. -""" - -_pairwise_template = """ - Parameters - ---------- - other : Series, DataFrame, or ndarray, optional - If not supplied then will default to self and produce pairwise - output. - pairwise : bool, default None - If False then only matching columns between self and other will be - used and the output will be a DataFrame. - If True then all pairwise combinations will be calculated and the - output will be a MultiIndex DataFrame in the case of DataFrame - inputs. In the case of missing elements, only complete pairwise - observations will be used. - bias : bool, default False - Use a standard estimation bias correction. - **kwargs - Keyword arguments to be passed into func. -""" - - -class EWM(_Rolling): - r""" - Provide exponential weighted functions. - - Parameters - ---------- - com : float, optional - Specify decay in terms of center of mass, - :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`. - span : float, optional - Specify decay in terms of span, - :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`. - halflife : float, optional - Specify decay in terms of half-life, - :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{for} halflife > 0`. - alpha : float, optional - Specify smoothing factor :math:`\alpha` directly, - :math:`0 < \alpha \leq 1`. - min_periods : int, default 0 - Minimum number of observations in window required to have a value - (otherwise result is NA). - adjust : bool, default True - Divide by decaying adjustment factor in beginning periods to account - for imbalance in relative weightings - (viewing EWMA as a moving average). - ignore_na : bool, default False - Ignore missing values when calculating weights; - specify True to reproduce pre-0.15.0 behavior. - axis : {0 or 'index', 1 or 'columns'}, default 0 - The axis to use. The value 0 identifies the rows, and 1 - identifies the columns. - - Returns - ------- - DataFrame - A Window sub-classed for the particular operation. 
- - See Also - -------- - rolling : Provides rolling window calculations. - expanding : Provides expanding transformations. - - Notes - ----- - Exactly one of center of mass, span, half-life, and alpha must be provided. - Allowed values and relationship between the parameters are specified in the - parameter descriptions above; see the link at the end of this section for - a detailed explanation. - - When adjust is True (default), weighted averages are calculated using - weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1. - - When adjust is False, weighted averages are calculated recursively as: - weighted_average[0] = arg[0]; - weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i]. - - When ignore_na is False (default), weights are based on absolute positions. - For example, the weights of x and y used in calculating the final weighted - average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and - (1-alpha)**2 and alpha (if adjust is False). - - When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based - on relative positions. For example, the weights of x and y used in - calculating the final weighted average of [x, None, y] are 1-alpha and 1 - (if adjust is True), and 1-alpha and alpha (if adjust is False). - - More details can be found at - http://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows - - Examples - -------- - - >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) - >>> df - B - 0 0.0 - 1 1.0 - 2 2.0 - 3 NaN - 4 4.0 - - >>> df.ewm(com=0.5).mean() - B - 0 0.000000 - 1 0.750000 - 2 1.615385 - 3 1.615385 - 4 3.670213 - """ - _attributes = ["com", "min_periods", "adjust", "ignore_na", "axis"] - - def __init__( - self, - obj, - com=None, - span=None, - halflife=None, - alpha=None, - min_periods=0, - adjust=True, - ignore_na=False, - axis=0, - ): - self.obj = obj - self.com = _get_center_of_mass(com, span, halflife, alpha) - self.min_periods = min_periods - self.adjust = adjust - self.ignore_na = ignore_na - self.axis = axis - self.on = None - - @property - def _constructor(self): - return EWM - - _agg_see_also_doc = dedent( - """ - See Also - -------- - pandas.DataFrame.rolling.aggregate - """ - ) - - _agg_examples_doc = dedent( - """ - Examples - -------- - - >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) - >>> df - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.004295 0.905829 -0.954544 - 2 0.735167 -0.165272 -1.619346 - 3 -0.702657 -1.340923 -0.706334 - 4 -0.246845 0.211596 -0.901819 - 5 2.463718 3.157577 -1.380906 - 6 -1.142255 2.340594 -0.039875 - 7 1.396598 -1.647453 1.677227 - 8 -0.543425 1.761277 -0.220481 - 9 -0.640505 0.289374 -1.550670 - - >>> df.ewm(alpha=0.5).mean() - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.464856 0.569633 -0.490089 - 2 -0.207700 0.149687 -1.135379 - 3 -0.471677 -0.645305 -0.906555 - 4 -0.355635 -0.203033 -0.904111 - 5 1.076417 1.503943 -1.146293 - 6 -0.041654 1.925562 -0.588728 - 7 0.680292 0.132049 0.548693 - 8 0.067236 0.948257 0.163353 - 9 -0.286980 0.618493 -0.694496 - """ - ) - - @Substitution( - see_also=_agg_see_also_doc, - examples=_agg_examples_doc, - versionadded="", - klass="Series/Dataframe", - axis="", - ) - @Appender(_shared_docs["aggregate"]) - def aggregate(self, arg, *args, **kwargs): - return super().aggregate(arg, *args, **kwargs) - - agg = aggregate - - def _apply(self, func, **kwargs): - """ - Rolling statistical measure using supplied function. 
Designed to be - used with passed-in Cython array-based functions. - - Parameters - ---------- - func : str/callable to apply - - Returns - ------- - y : same type as input argument - """ - blocks, obj = self._create_blocks() - block_list = list(blocks) - - results = [] - exclude = [] - for i, b in enumerate(blocks): - try: - values = self._prep_values(b.values) - - except (TypeError, NotImplementedError): - if isinstance(obj, ABCDataFrame): - exclude.extend(b.columns) - del block_list[i] - continue - else: - raise DataError("No numeric types to aggregate") - - if values.size == 0: - results.append(values.copy()) - continue - - # if we have a string function name, wrap it - if isinstance(func, str): - cfunc = getattr(libwindow, func, None) - if cfunc is None: - raise ValueError( - "we do not support this function " - "in libwindow.{func}".format(func=func) - ) - - def func(arg): - return cfunc( - arg, - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - ) - - results.append(np.apply_along_axis(func, self.axis, values)) - - return self._wrap_results(results, block_list, obj, exclude) - - @Substitution(name="ewm") - @Appender(_doc_template) - def mean(self, *args, **kwargs): - """ - Exponential weighted moving average. - - Parameters - ---------- - *args, **kwargs - Arguments and keyword arguments to be passed into func. - """ - nv.validate_window_func("mean", args, kwargs) - return self._apply("ewma", **kwargs) - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_bias_template) - def std(self, bias=False, *args, **kwargs): - """ - Exponential weighted moving stddev. - """ - nv.validate_window_func("std", args, kwargs) - return _zsqrt(self.var(bias=bias, **kwargs)) - - vol = std - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_bias_template) - def var(self, bias=False, *args, **kwargs): - """ - Exponential weighted moving variance. - """ - nv.validate_window_func("var", args, kwargs) - - def f(arg): - return libwindow.ewmcov( - arg, - arg, - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - int(bias), - ) - - return self._apply(f, **kwargs) - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_pairwise_template) - def cov(self, other=None, pairwise=None, bias=False, **kwargs): - """ - Exponential weighted sample covariance. - """ - if other is None: - other = self._selected_obj - # only default unset - pairwise = True if pairwise is None else pairwise - other = self._shallow_copy(other) - - def _get_cov(X, Y): - X = self._shallow_copy(X) - Y = self._shallow_copy(Y) - cov = libwindow.ewmcov( - X._prep_values(), - Y._prep_values(), - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - int(bias), - ) - return X._wrap_result(cov) - - return _flex_binary_moment( - self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise) - ) - - @Substitution(name="ewm") - @Appender(_doc_template) - @Appender(_pairwise_template) - def corr(self, other=None, pairwise=None, **kwargs): - """ - Exponential weighted sample correlation. 
- """ - if other is None: - other = self._selected_obj - # only default unset - pairwise = True if pairwise is None else pairwise - other = self._shallow_copy(other) - - def _get_corr(X, Y): - X = self._shallow_copy(X) - Y = self._shallow_copy(Y) - - def _cov(x, y): - return libwindow.ewmcov( - x, - y, - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - 1, - ) - - x_values = X._prep_values() - y_values = Y._prep_values() - with np.errstate(all="ignore"): - cov = _cov(x_values, y_values) - x_var = _cov(x_values, x_values) - y_var = _cov(y_values, y_values) - corr = cov / _zsqrt(x_var * y_var) - return X._wrap_result(corr) - - return _flex_binary_moment( - self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise) - ) - - -# Helper Funcs - - -def _flex_binary_moment(arg1, arg2, f, pairwise=False): - - if not ( - isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) - and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame)) - ): - raise TypeError( - "arguments to moment function must be of type " - "np.ndarray/Series/DataFrame" - ) - - if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance( - arg2, (np.ndarray, ABCSeries) - ): - X, Y = _prep_binary(arg1, arg2) - return f(X, Y) - - elif isinstance(arg1, ABCDataFrame): - from pandas import DataFrame - - def dataframe_from_int_dict(data, frame_template): - result = DataFrame(data, index=frame_template.index) - if len(result.columns) > 0: - result.columns = frame_template.columns[result.columns] - return result - - results = {} - if isinstance(arg2, ABCDataFrame): - if pairwise is False: - if arg1 is arg2: - # special case in order to handle duplicate column names - for i, col in enumerate(arg1.columns): - results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) - return dataframe_from_int_dict(results, arg1) - else: - if not arg1.columns.is_unique: - raise ValueError("'arg1' columns are not unique") - if not arg2.columns.is_unique: - raise ValueError("'arg2' columns are not unique") - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - X, Y = arg1.align(arg2, join="outer") - X = X + 0 * Y - Y = Y + 0 * X - - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - res_columns = arg1.columns.union(arg2.columns) - for col in res_columns: - if col in X and col in Y: - results[col] = f(X[col], Y[col]) - return DataFrame(results, index=X.index, columns=res_columns) - elif pairwise is True: - results = defaultdict(dict) - for i, k1 in enumerate(arg1.columns): - for j, k2 in enumerate(arg2.columns): - if j < i and arg2 is arg1: - # Symmetric case - results[i][j] = results[j][i] - else: - results[i][j] = f( - *_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]) - ) - - from pandas import concat - - result_index = arg1.index.union(arg2.index) - if len(result_index): - - # construct result frame - result = concat( - [ - concat( - [results[i][j] for j, c in enumerate(arg2.columns)], - ignore_index=True, - ) - for i, c in enumerate(arg1.columns) - ], - ignore_index=True, - axis=1, - ) - result.columns = arg1.columns - - # set the index and reorder - if arg2.columns.nlevels > 1: - result.index = MultiIndex.from_product( - arg2.columns.levels + [result_index] - ) - result = result.reorder_levels([2, 0, 1]).sort_index() - else: - result.index = MultiIndex.from_product( - [range(len(arg2.columns)), range(len(result_index))] - ) - result = result.swaplevel(1, 0).sort_index() - result.index = MultiIndex.from_product( - [result_index] + 
[arg2.columns] - ) - else: - - # empty result - result = DataFrame( - index=MultiIndex( - levels=[arg1.index, arg2.columns], codes=[[], []] - ), - columns=arg2.columns, - dtype="float64", - ) - - # reset our index names to arg1 names - # reset our column names to arg2 names - # careful not to mutate the original names - result.columns = result.columns.set_names(arg1.columns.names) - result.index = result.index.set_names( - result_index.names + arg2.columns.names - ) - - return result - - else: - raise ValueError("'pairwise' is not True/False") - else: - results = { - i: f(*_prep_binary(arg1.iloc[:, i], arg2)) - for i, col in enumerate(arg1.columns) - } - return dataframe_from_int_dict(results, arg1) - - else: - return _flex_binary_moment(arg2, arg1, f) - - -def _get_center_of_mass(comass, span, halflife, alpha): - valid_count = com.count_not_none(comass, span, halflife, alpha) - if valid_count > 1: - raise ValueError("comass, span, halflife, and alpha are mutually exclusive") - - # Convert to center of mass; domain checks ensure 0 < alpha <= 1 - if comass is not None: - if comass < 0: - raise ValueError("comass must satisfy: comass >= 0") - elif span is not None: - if span < 1: - raise ValueError("span must satisfy: span >= 1") - comass = (span - 1) / 2.0 - elif halflife is not None: - if halflife <= 0: - raise ValueError("halflife must satisfy: halflife > 0") - decay = 1 - np.exp(np.log(0.5) / halflife) - comass = 1 / decay - 1 - elif alpha is not None: - if alpha <= 0 or alpha > 1: - raise ValueError("alpha must satisfy: 0 < alpha <= 1") - comass = (1.0 - alpha) / alpha - else: - raise ValueError("Must pass one of comass, span, halflife, or alpha") - - return float(comass) - - -def _offset(window, center): - if not is_integer(window): - window = len(window) - offset = (window - 1) / 2.0 if center else 0 - try: - return int(offset) - except TypeError: - return offset.astype(int) - - -def _require_min_periods(p): - def _check_func(minp, window): - if minp is None: - return window - else: - return max(p, minp) - - return _check_func - - -def _use_window(minp, window): - if minp is None: - return window - else: - return minp - - -def _zsqrt(x): - with np.errstate(all="ignore"): - result = np.sqrt(x) - mask = x < 0 - - if isinstance(x, ABCDataFrame): - if mask.values.any(): - result[mask] = 0 - else: - if mask.any(): - result[mask] = 0 - - return result - - -def _prep_binary(arg1, arg2): - if not isinstance(arg2, type(arg1)): - raise Exception("Input arrays must be of the same type!") - - # mask out values, this also makes a common index... 
- X = arg1 + 0 * arg2 - Y = arg2 + 0 * arg1 - - return X, Y - - -# Top-level exports - - -def rolling(obj, win_type=None, **kwds): - if not isinstance(obj, (ABCSeries, ABCDataFrame)): - raise TypeError("invalid type: %s" % type(obj)) - - if win_type is not None: - return Window(obj, win_type=win_type, **kwds) - - return Rolling(obj, **kwds) - - -rolling.__doc__ = Window.__doc__ - - -def expanding(obj, **kwds): - if not isinstance(obj, (ABCSeries, ABCDataFrame)): - raise TypeError("invalid type: %s" % type(obj)) - - return Expanding(obj, **kwds) - - -expanding.__doc__ = Expanding.__doc__ - - -def ewm(obj, **kwds): - if not isinstance(obj, (ABCSeries, ABCDataFrame)): - raise TypeError("invalid type: %s" % type(obj)) - - return EWM(obj, **kwds) - - -ewm.__doc__ = EWM.__doc__ diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py index a05b567adad7a..1683fda500f85 100644 --- a/pandas/tests/window/test_ewm.py +++ b/pandas/tests/window/test_ewm.py @@ -4,7 +4,7 @@ from pandas.errors import UnsupportedFunctionCall from pandas import DataFrame, Series -import pandas.core.window as rwindow +from pandas.core.window import EWM from pandas.tests.window.common import Base @@ -60,7 +60,7 @@ def test_constructor(self, which): @pytest.mark.parametrize("method", ["std", "mean", "var"]) def test_numpy_compat(self, method): # see gh-12811 - e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5) + e = EWM(Series([2, 4, 6]), alpha=0.5) msg = "numpy operations are not valid with window objects" diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py index 1e92c981964c5..098acdff93ac6 100644 --- a/pandas/tests/window/test_expanding.py +++ b/pandas/tests/window/test_expanding.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import DataFrame, Series -import pandas.core.window as rwindow +from pandas.core.window import Expanding from pandas.tests.window.common import Base import pandas.util.testing as tm @@ -42,7 +42,7 @@ def test_constructor(self, which): @pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"]) def test_numpy_compat(self, method): # see gh-12811 - e = rwindow.Expanding(Series([2, 4, 6]), window=2) + e = Expanding(Series([2, 4, 6]), window=2) msg = "numpy operations are not valid with window objects" diff --git a/pandas/tests/window/test_moments.py b/pandas/tests/window/test_moments.py index d860859958254..3d6cd7d10bd10 100644 --- a/pandas/tests/window/test_moments.py +++ b/pandas/tests/window/test_moments.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import DataFrame, Index, Series, concat, isna, notna -import pandas.core.window as rwindow +from pandas.core.window.common import _flex_binary_moment from pandas.tests.window.common import Base import pandas.util.testing as tm @@ -1878,7 +1878,7 @@ def test_flex_binary_moment(self): " np.ndarray/Series/DataFrame" ) with pytest.raises(TypeError, match=msg): - rwindow._flex_binary_moment(5, 6, None) + _flex_binary_moment(5, 6, None) def test_corr_sanity(self): # GH 3155 diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index f0787ab3d191f..b4787bf25e3bb 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -8,7 +8,7 @@ import pandas as pd from pandas import DataFrame, Series -import pandas.core.window as rwindow +from pandas.core.window import Rolling from pandas.tests.window.common import Base import pandas.util.testing as tm @@ -101,7 +101,7 @@ def 
test_constructor_timedelta_window_and_minperiods(self, window, raw): @pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"]) def test_numpy_compat(self, method): # see gh-12811 - r = rwindow.Rolling(Series([2, 4, 6]), window=2) + r = Rolling(Series([2, 4, 6]), window=2) msg = "numpy operations are not valid with window objects" diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py index a6a56c98a9377..5692404205012 100644 --- a/pandas/tests/window/test_window.py +++ b/pandas/tests/window/test_window.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import Series -import pandas.core.window as rwindow +from pandas.core.window import Window from pandas.tests.window.common import Base @@ -50,7 +50,7 @@ def test_constructor_with_win_type(self, which, win_types): @pytest.mark.parametrize("method", ["sum", "mean"]) def test_numpy_compat(self, method): # see gh-12811 - w = rwindow.Window(Series([2, 4, 6]), window=[0, 2]) + w = Window(Series([2, 4, 6]), window=[0, 2]) msg = "numpy operations are not valid with window objects"
- [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Splits `pandas/core/window.py` into a more logical division of modules: `pandas/core/window/rolling.py` `pandas/core/window/expanding.py` `pandas/core/window/ewm.py` `pandas/core/window/common.py`
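A minimal sketch of the unchanged public behavior (it assumes the classes remain importable from `pandas.core.window`, which the updated tests rely on):

```python
import pandas as pd
from pandas.core.window import EWM, Expanding, Rolling

s = pd.Series([2, 4, 6])

# Only the internal module layout moves; the public API is unchanged.
assert isinstance(s.rolling(2), Rolling)
assert isinstance(s.expanding(), Expanding)
assert isinstance(s.ewm(alpha=0.5), EWM)
```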
https://api.github.com/repos/pandas-dev/pandas/pulls/27736
2019-08-04T05:33:41Z
2019-08-07T13:32:48Z
2019-08-07T13:32:47Z
2019-08-07T16:53:20Z
REF: separate out invalid ops
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 2747b1d7dd9f1..770870a466aa9 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -44,9 +44,10 @@ from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna from pandas._typing import DatetimeLikeScalar -from pandas.core import missing, nanops, ops +from pandas.core import missing, nanops from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts import pandas.core.common as com +from pandas.core.ops.invalid import make_invalid_op from pandas.tseries import frequencies from pandas.tseries.offsets import DateOffset, Tick @@ -921,18 +922,18 @@ def _is_unique(self): # pow is invalid for all three subclasses; TimedeltaArray will override # the multiplication and division ops - __pow__ = ops.make_invalid_op("__pow__") - __rpow__ = ops.make_invalid_op("__rpow__") - __mul__ = ops.make_invalid_op("__mul__") - __rmul__ = ops.make_invalid_op("__rmul__") - __truediv__ = ops.make_invalid_op("__truediv__") - __rtruediv__ = ops.make_invalid_op("__rtruediv__") - __floordiv__ = ops.make_invalid_op("__floordiv__") - __rfloordiv__ = ops.make_invalid_op("__rfloordiv__") - __mod__ = ops.make_invalid_op("__mod__") - __rmod__ = ops.make_invalid_op("__rmod__") - __divmod__ = ops.make_invalid_op("__divmod__") - __rdivmod__ = ops.make_invalid_op("__rdivmod__") + __pow__ = make_invalid_op("__pow__") + __rpow__ = make_invalid_op("__rpow__") + __mul__ = make_invalid_op("__mul__") + __rmul__ = make_invalid_op("__rmul__") + __truediv__ = make_invalid_op("__truediv__") + __rtruediv__ = make_invalid_op("__rtruediv__") + __floordiv__ = make_invalid_op("__floordiv__") + __rfloordiv__ = make_invalid_op("__rfloordiv__") + __mod__ = make_invalid_op("__mod__") + __rmod__ = make_invalid_op("__rmod__") + __divmod__ = make_invalid_op("__divmod__") + __rdivmod__ = make_invalid_op("__rdivmod__") def _add_datetimelike_scalar(self, other): # Overriden by TimedeltaArray diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 061ee4b90d0e9..28537124536e7 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -53,6 +53,7 @@ from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com +from pandas.core.ops.invalid import invalid_comparison from pandas.tseries.frequencies import get_period_alias, to_offset from pandas.tseries.offsets import Day, Tick @@ -171,13 +172,13 @@ def wrapper(self, other): other = _to_M8(other, tz=self.tz) except ValueError: # string that cannot be parsed to Timestamp - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) result = op(self.asi8, other.view("i8")) if isna(other): result.fill(nat_result) elif lib.is_scalar(other) or np.ndim(other) == 0: - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") else: @@ -191,7 +192,7 @@ def wrapper(self, other): ): # Following Timestamp convention, __eq__ is all-False # and __ne__ is all True, others raise TypeError. 
- return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) if is_object_dtype(other): # We have to use _comp_method_OBJECT_ARRAY instead of numpy @@ -204,7 +205,7 @@ def wrapper(self, other): o_mask = isna(other) elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)): # e.g. is_timedelta64_dtype(other) - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) else: self._assert_tzawareness_compat(other) if isinstance(other, (ABCIndexClass, ABCSeries)): diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index afd1e8203059e..94dd561fc96f7 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -41,9 +41,9 @@ ) from pandas.core.dtypes.missing import isna -from pandas.core import ops from pandas.core.algorithms import checked_add_with_arr import pandas.core.common as com +from pandas.core.ops.invalid import invalid_comparison from pandas.tseries.frequencies import to_offset from pandas.tseries.offsets import Tick @@ -90,14 +90,14 @@ def wrapper(self, other): other = Timedelta(other) except ValueError: # failed to parse as timedelta - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) result = op(self.view("i8"), other.value) if isna(other): result.fill(nat_result) elif not is_list_like(other): - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") @@ -106,7 +106,7 @@ def wrapper(self, other): try: other = type(self)._from_sequence(other)._data except (ValueError, TypeError): - return ops.invalid_comparison(self, other, op) + return invalid_comparison(self, other, op) result = op(self.view("i8"), other.view("i8")) result = com.values_from_object(result) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2271ff643bc15..57e84282aed72 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -70,7 +70,8 @@ from pandas.core.indexers import maybe_convert_indices from pandas.core.indexes.frozen import FrozenList import pandas.core.missing as missing -from pandas.core.ops import get_op_result_name, make_invalid_op +from pandas.core.ops import get_op_result_name +from pandas.core.ops.invalid import make_invalid_op import pandas.core.sorting as sorting from pandas.core.strings import StringMethods diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 48b3d74e8d02c..4ab1941e3493f 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -49,15 +49,15 @@ import pandas as pd from pandas._typing import ArrayLike from pandas.core.construction import extract_array - -from . import missing -from .docstrings import ( +from pandas.core.ops import missing +from pandas.core.ops.docstrings import ( _arith_doc_FRAME, _flex_comp_doc_FRAME, _make_flex_doc, _op_descriptions, ) -from .roperator import ( # noqa:F401 +from pandas.core.ops.invalid import invalid_comparison +from pandas.core.ops.roperator import ( # noqa:F401 radd, rand_, rdiv, @@ -185,29 +185,6 @@ def maybe_upcast_for_op(obj, shape: Tuple[int, ...]): # ----------------------------------------------------------------------------- -def make_invalid_op(name): - """ - Return a binary method that always raises a TypeError. 
- - Parameters - ---------- - name : str - - Returns - ------- - invalid_op : function - """ - - def invalid_op(self, other=None): - raise TypeError( - "cannot perform {name} with this index type: " - "{typ}".format(name=name, typ=type(self).__name__) - ) - - invalid_op.__name__ = name - return invalid_op - - def _gen_eval_kwargs(name): """ Find the keyword arguments to pass to numexpr for the given operation. @@ -476,38 +453,6 @@ def masked_arith_op(x, y, op): return result -def invalid_comparison(left, right, op): - """ - If a comparison has mismatched types and is not necessarily meaningful, - follow python3 conventions by: - - - returning all-False for equality - - returning all-True for inequality - - raising TypeError otherwise - - Parameters - ---------- - left : array-like - right : scalar, array-like - op : operator.{eq, ne, lt, le, gt} - - Raises - ------ - TypeError : on inequality comparisons - """ - if op is operator.eq: - res_values = np.zeros(left.shape, dtype=bool) - elif op is operator.ne: - res_values = np.ones(left.shape, dtype=bool) - else: - raise TypeError( - "Invalid comparison between dtype={dtype} and {typ}".format( - dtype=left.dtype, typ=type(right).__name__ - ) - ) - return res_values - - # ----------------------------------------------------------------------------- # Dispatch logic diff --git a/pandas/core/ops/invalid.py b/pandas/core/ops/invalid.py new file mode 100644 index 0000000000000..013ff7689b221 --- /dev/null +++ b/pandas/core/ops/invalid.py @@ -0,0 +1,61 @@ +""" +Templates for invalid operations. +""" +import operator + +import numpy as np + + +def invalid_comparison(left, right, op): + """ + If a comparison has mismatched types and is not necessarily meaningful, + follow python3 conventions by: + + - returning all-False for equality + - returning all-True for inequality + - raising TypeError otherwise + + Parameters + ---------- + left : array-like + right : scalar, array-like + op : operator.{eq, ne, lt, le, gt} + + Raises + ------ + TypeError : on inequality comparisons + """ + if op is operator.eq: + res_values = np.zeros(left.shape, dtype=bool) + elif op is operator.ne: + res_values = np.ones(left.shape, dtype=bool) + else: + raise TypeError( + "Invalid comparison between dtype={dtype} and {typ}".format( + dtype=left.dtype, typ=type(right).__name__ + ) + ) + return res_values + + +def make_invalid_op(name: str): + """ + Return a binary method that always raises a TypeError. + + Parameters + ---------- + name : str + + Returns + ------- + invalid_op : function + """ + + def invalid_op(self, other=None): + raise TypeError( + "cannot perform {name} with this index type: " + "{typ}".format(name=name, typ=type(self).__name__) + ) + + invalid_op.__name__ = name + return invalid_op
We moved ops.py to `ops.__init__` a while back, but still need to get the bulk of it out of `__init__`. This PR separates out the templated invalid operations, which are the main things that outside modules import (this helps us move toward getting rid of the `import pandas as pd` in the main file).
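A minimal sketch of what the relocated helper does (`DemoIndex` is a hypothetical class used only for illustration):

```python
from pandas.core.ops.invalid import make_invalid_op

class DemoIndex:  # hypothetical stand-in for an index-like class
    __pow__ = make_invalid_op("__pow__")

try:
    DemoIndex() ** 2
except TypeError as err:
    # cannot perform __pow__ with this index type: DemoIndex
    print(err)
```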
https://api.github.com/repos/pandas-dev/pandas/pulls/27735
2019-08-04T00:35:40Z
2019-08-05T15:52:11Z
2019-08-05T15:52:11Z
2019-08-05T16:38:29Z
TYPING: type hints for io.formats.latex
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index dad099b747701..c60e15b733f0a 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -1,11 +1,13 @@ """ Module for formatting output data in Latex. """ +from typing import IO, List, Optional, Tuple + import numpy as np from pandas.core.dtypes.generic import ABCMultiIndex -from pandas.io.formats.format import TableFormatter +from pandas.io.formats.format import DataFrameFormatter, TableFormatter class LatexFormatter(TableFormatter): @@ -28,12 +30,12 @@ class LatexFormatter(TableFormatter): def __init__( self, - formatter, - column_format=None, - longtable=False, - multicolumn=False, - multicolumn_format=None, - multirow=False, + formatter: DataFrameFormatter, + column_format: Optional[str] = None, + longtable: bool = False, + multicolumn: bool = False, + multicolumn_format: Optional[str] = None, + multirow: bool = False, ): self.fmt = formatter self.frame = self.fmt.frame @@ -44,7 +46,7 @@ def __init__( self.multicolumn_format = multicolumn_format self.multirow = multirow - def write_result(self, buf): + def write_result(self, buf: IO[str]) -> None: """ Render a DataFrame to a LaTeX tabular/longtable environment output. """ @@ -124,7 +126,7 @@ def pad_empties(x): if self.fmt.has_index_names and self.fmt.show_index_names: nlevels += 1 strrows = list(zip(*strcols)) - self.clinebuf = [] + self.clinebuf = [] # type: List[List[int]] for i, row in enumerate(strrows): if i == nlevels and self.fmt.header: @@ -186,7 +188,7 @@ def pad_empties(x): else: buf.write("\\end{longtable}\n") - def _format_multicolumn(self, row, ilevels): + def _format_multicolumn(self, row: List[str], ilevels: int) -> List[str]: r""" Combine columns belonging to a group to a single multicolumn entry according to self.multicolumn_format @@ -227,7 +229,9 @@ def append_col(): append_col() return row2 - def _format_multirow(self, row, ilevels, i, rows): + def _format_multirow( + self, row: List[str], ilevels: int, i: int, rows: List[Tuple[str, ...]] + ) -> List[str]: r""" Check following rows, whether row should be a multirow @@ -254,7 +258,7 @@ def _format_multirow(self, row, ilevels, i, rows): self.clinebuf.append([i + nrow - 1, j + 1]) return row - def _print_cline(self, buf, i, icol): + def _print_cline(self, buf: IO[str], i: int, icol: int) -> None: """ Print clines after multirow-blocks are finished """
https://api.github.com/repos/pandas-dev/pandas/pulls/27734
2019-08-03T20:58:05Z
2019-08-04T21:21:56Z
2019-08-04T21:21:56Z
2019-08-05T09:34:26Z
BUG: fix to_datetime(dti, utc=True)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index b5bd83fd17530..51307d6771559 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -31,7 +31,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ - +- Bug in :func:`to_datetime` where passing a timezone-naive :class:`DatetimeArray` or :class:`DatetimeIndex` and ``utc=True`` would incorrectly return a timezone-naive result (:issue:`27733`) - - - diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 172084e97a959..b07647cf5b5fb 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -334,6 +334,9 @@ def _convert_listlike_datetimes( return DatetimeIndex(arg, tz=tz, name=name) except ValueError: pass + elif tz: + # DatetimeArray, DatetimeIndex + return arg.tz_localize(tz) return arg diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 10d422e8aa52c..23540041a3d70 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1623,6 +1623,18 @@ def test_dayfirst(self, cache): tm.assert_index_equal(expected, idx5) tm.assert_index_equal(expected, idx6) + @pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray]) + def test_to_datetime_dta_tz(self, klass): + # GH#27733 + dti = date_range("2015-04-05", periods=3).rename("foo") + expected = dti.tz_localize("UTC") + + obj = klass(dti) + expected = klass(expected) + + result = to_datetime(obj, utc=True) + tm.assert_equal(result, expected) + class TestGuessDatetimeFormat: @td.skip_if_not_us_locale
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry At the moment, `to_datetime(naive_dti, utc=True)` incorrectly returns a timezone-naive result; the same holds for a naive `DatetimeArray`.
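The fixed behavior, restated as a sketch based on the test added above:

```python
import pandas as pd

dti = pd.date_range("2015-04-05", periods=3)  # timezone-naive
result = pd.to_datetime(dti, utc=True)

# Previously this came back timezone-naive; it is now localized to UTC.
assert result.tz is not None
```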
https://api.github.com/repos/pandas-dev/pandas/pulls/27733
2019-08-03T20:26:27Z
2019-08-05T20:29:17Z
2019-08-05T20:29:17Z
2019-08-05T21:09:30Z
TYPING: more type hints for io.common
diff --git a/pandas/io/common.py b/pandas/io/common.py index 290022167e520..a505f28982aeb 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -9,6 +9,7 @@ import mmap import os import pathlib +from pathlib import Path from typing import ( IO, Any, @@ -21,6 +22,7 @@ Tuple, Type, Union, + overload, ) from urllib.error import URLError # noqa from urllib.parse import ( # noqa @@ -140,9 +142,33 @@ def _validate_header_arg(header) -> None: ) -def _stringify_path( - filepath_or_buffer: FilePathOrBuffer[AnyStr] -) -> FilePathOrBuffer[AnyStr]: +# Overload *variants* for '_stringify_path'. +# These variants give extra information to the type checker. +# They are ignored at runtime. + + +@overload +def _stringify_path(filepath_or_buffer: Union[str, Path]) -> str: + ... + + +@overload +def _stringify_path(filepath_or_buffer: IO[AnyStr]) -> IO[AnyStr]: + ... + + +# The actual *implementation* of '_stringify_path'. +# The implementation contains the actual runtime logic. +# +# It may or may not have type hints. If it does, mypy +# will check the body of the implementation against the +# type hints. +# +# Mypy will also check and make sure the signature is +# consistent with the provided variants. + + +def _stringify_path(filepath_or_buffer: FilePathOrBuffer): """Attempt to convert a path-like object to a string. Parameters @@ -188,6 +214,42 @@ def is_gcs_url(url) -> bool: return False +# Overload *variants* for 'get_filepath_or_buffer'. +# These variants give extra information to the type checker. +# They are ignored at runtime. + + +@overload +def get_filepath_or_buffer( + filepath_or_buffer: IO[AnyStr], + encoding: Optional[str] = None, + compression: Optional[str] = None, + mode: Optional[str] = None, +) -> Tuple[IO[AnyStr], Optional[str], Optional[str], bool]: + ... + + +@overload +def get_filepath_or_buffer( + filepath_or_buffer: Union[str, Path], + encoding: Optional[str] = None, + compression: Optional[str] = None, + mode: Optional[str] = None, +) -> Tuple[Union[str, IO], Optional[str], Optional[str], bool]: + ... + + +# The actual *implementation* of 'get_filepath_or_buffer'. +# The implementation contains the actual runtime logic. +# +# It may or may not have type hints. If it does, mypy +# will check the body of the implementation against the +# type hints. +# +# Mypy will also check and make sure the signature is +# consistent with the provided variants. 
+ + def get_filepath_or_buffer( filepath_or_buffer: FilePathOrBuffer, encoding: Optional[str] = None, @@ -213,10 +275,10 @@ def get_filepath_or_buffer( compression, str, should_close, bool) """ - filepath_or_buffer = _stringify_path(filepath_or_buffer) + fp_or_buf = _stringify_path(filepath_or_buffer) - if isinstance(filepath_or_buffer, str) and _is_url(filepath_or_buffer): - req = urlopen(filepath_or_buffer) + if isinstance(fp_or_buf, str) and _is_url(fp_or_buf): + req = urlopen(fp_or_buf) content_encoding = req.headers.get("Content-Encoding", None) if content_encoding == "gzip": # Override compression based on Content-Encoding header @@ -225,28 +287,28 @@ def get_filepath_or_buffer( req.close() return reader, encoding, compression, True - if is_s3_url(filepath_or_buffer): + if is_s3_url(fp_or_buf): from pandas.io import s3 return s3.get_filepath_or_buffer( - filepath_or_buffer, encoding=encoding, compression=compression, mode=mode + fp_or_buf, encoding=encoding, compression=compression, mode=mode ) - if is_gcs_url(filepath_or_buffer): + if is_gcs_url(fp_or_buf): from pandas.io import gcs return gcs.get_filepath_or_buffer( - filepath_or_buffer, encoding=encoding, compression=compression, mode=mode + fp_or_buf, encoding=encoding, compression=compression, mode=mode ) - if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)): - return _expand_user(filepath_or_buffer), None, compression, False + if isinstance(fp_or_buf, (str, bytes, mmap.mmap)): + return _expand_user(fp_or_buf), None, compression, False - if not is_file_like(filepath_or_buffer): + if not is_file_like(fp_or_buf): msg = "Invalid file path or buffer object type: {_type}" - raise ValueError(msg.format(_type=type(filepath_or_buffer))) + raise ValueError(msg.format(_type=type(fp_or_buf))) - return filepath_or_buffer, None, compression, False + return fp_or_buf, None, compression, False def file_path_to_url(path: str) -> str: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index a3ff837bc7f52..d790c8bb08597 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -458,7 +458,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): finally: parser.close() - if should_close: + if should_close and not isinstance(fp_or_buf, str): try: fp_or_buf.close() except ValueError:
https://api.github.com/repos/pandas-dev/pandas/pulls/27732
2019-08-03T19:25:40Z
2019-08-30T15:26:28Z
null
2019-08-30T15:26:28Z
BUG: update dict of sheets before check
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index d8f5da5ab5bc6..a0ff1bf640278 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -397,6 +397,11 @@ def write_cells( _style_cache = {} + # Update sheet list + self.sheets = {} + for wks in self.book.worksheets: + self.sheets[wks.title] = wks + if sheet_name in self.sheets: wks = self.sheets[sheet_name] else: diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py index 79fc87a62ad08..c19376b1a3438 100644 --- a/pandas/tests/io/excel/test_openpyxl.py +++ b/pandas/tests/io/excel/test_openpyxl.py @@ -78,10 +78,12 @@ def test_write_cells_merge_styled(ext): @pytest.mark.parametrize( - "mode,expected", [("w", ["baz"]), ("a", ["foo", "bar", "baz"])] + "mode,expected", [("w", ["new_sheet"]), + ("a", ["foo", "bar", "existing_sheet", "new_sheet"])] ) def test_write_append_mode(ext, mode, expected): - df = DataFrame([1], columns=["baz"]) + df_new_sheet = DataFrame([1], columns=["new_sheet"]) + df_existing_sheet = DataFrame([1], columns=["existing_sheet"]) with ensure_clean(ext) as f: wb = openpyxl.Workbook() @@ -89,10 +91,13 @@ def test_write_append_mode(ext, mode, expected): wb.worksheets[0]["A1"].value = "foo" wb.create_sheet("bar") wb.worksheets[1]["A1"].value = "bar" + wb.create_sheet("existing_sheet") wb.save(f) writer = ExcelWriter(f, engine="openpyxl", mode=mode) - df.to_excel(writer, sheet_name="baz", index=False) + if mode == "a": + df_existing_sheet.to_excel(writer, sheet_name="existing_sheet", index=False) + df_new_sheet.to_excel(writer, sheet_name="new_sheet", index=False) writer.save() wb2 = openpyxl.load_workbook(f)
```python import pandas as pd df = pd.DataFrame( { "id": ["1", "2", "3", "4", "5"], "Feature1": ["A", "C", "E", "G", "I"], "Feature2": ["B", "D", "F", "H", "J"], }, columns=["id", "Feature1", "Feature2"], ) writer = pd.ExcelWriter(path="testOutput.xlsx", mode="a", engine="openpyxl") df.to_excel(writer, sheet_name="Main") writer.save() writer.close() ``` #### Problem description I have an Excel file with an existing sheet "Main". When I try to append a dataframe to that sheet, the first `df.to_excel()` call creates a new sheet named "Main1" and writes the data there. Only once `to_excel()` has run does pandas add the sheet to its dictionary and write to the existing sheet on subsequent calls. In other words, at the start of an append pandas knows nothing about the sheets already in the workbook, so the check in **_openpyxl.py** does not actually work: ```python if sheet_name in self.sheets: wks = self.sheets[sheet_name] else: wks = self.book.create_sheet() wks.title = sheet_name self.sheets[sheet_name] = wks ``` This PR adds code to refresh the sheet-name list before that check. - [x] without issue - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] removes the ambiguous behavior of the function when a sheet already exists in the workbook. Previously, the first write of a DataFrame to an existing sheet "main" created a new sheet "main1" instead, and a second DataFrame written to "main" through the same writer also went to "main1", even though "main" was requested. Now, if the sheet exists the data is written to that sheet; if it does not, a sheet with the requested name is created.
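A sketch of the intended behavior after the patch (`demo.xlsx` is a placeholder path):

```python
import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3]})
df.to_excel("demo.xlsx", sheet_name="Main")  # the file now has a "Main" sheet

# With the fix, appending to an existing sheet name writes into that sheet
# instead of silently creating a new "Main1" sheet.
writer = pd.ExcelWriter("demo.xlsx", mode="a", engine="openpyxl")
df.to_excel(writer, sheet_name="Main")
writer.save()
writer.close()
```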
https://api.github.com/repos/pandas-dev/pandas/pulls/27730
2019-08-03T16:14:33Z
2019-11-07T21:09:42Z
null
2019-11-07T21:09:43Z
REF: define concat classmethods in the appropriate places
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 9c49e91134288..12f3fd2c75dc8 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -20,12 +20,11 @@ is_timedelta64_dtype, ) from pandas.core.dtypes.generic import ( + ABCCategoricalIndex, ABCDatetimeArray, - ABCDatetimeIndex, ABCIndexClass, - ABCPeriodIndex, ABCRangeIndex, - ABCTimedeltaIndex, + ABCSeries, ) @@ -285,14 +284,14 @@ def union_categoricals(to_union, sort_categories=False, ignore_order=False): [b, c, a, b] Categories (3, object): [b, c, a] """ - from pandas import Index, Categorical, CategoricalIndex, Series + from pandas import Index, Categorical from pandas.core.arrays.categorical import _recode_for_categories if len(to_union) == 0: raise ValueError("No Categoricals to union") def _maybe_unwrap(x): - if isinstance(x, (CategoricalIndex, Series)): + if isinstance(x, (ABCCategoricalIndex, ABCSeries)): return x.values elif isinstance(x, Categorical): return x @@ -450,31 +449,6 @@ def _concat_datetimetz(to_concat, name=None): return sample._concat_same_type(to_concat) -def _concat_index_same_dtype(indexes, klass=None): - klass = klass if klass is not None else indexes[0].__class__ - return klass(np.concatenate([x._values for x in indexes])) - - -def _concat_index_asobject(to_concat, name=None): - """ - concat all inputs as object. DatetimeIndex, TimedeltaIndex and - PeriodIndex are converted to object dtype before concatenation - """ - from pandas import Index - from pandas.core.arrays import ExtensionArray - - klasses = (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex, ExtensionArray) - to_concat = [x.astype(object) if isinstance(x, klasses) else x for x in to_concat] - - self = to_concat[0] - attribs = self._get_attributes_dict() - attribs["name"] = name - - to_concat = [x._values if isinstance(x, Index) else x for x in to_concat] - - return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs) - - def _concat_sparse(to_concat, axis=0, typs=None): """ provide concatenation of an sparse/dense array of arrays each of which is a @@ -505,52 +479,3 @@ def _concat_sparse(to_concat, axis=0, typs=None): ] return SparseArray._concat_same_type(to_concat) - - -def _concat_rangeindex_same_dtype(indexes): - """ - Concatenates multiple RangeIndex instances. All members of "indexes" must - be of type RangeIndex; result will be RangeIndex if possible, Int64Index - otherwise. 
E.g.: - indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6) - indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5]) - """ - from pandas import Int64Index, RangeIndex - - start = step = next_ = None - - # Filter the empty indexes - non_empty_indexes = [obj for obj in indexes if len(obj)] - - for obj in non_empty_indexes: - rng = obj._range # type: range - - if start is None: - # This is set by the first non-empty index - start = rng.start - if step is None and len(rng) > 1: - step = rng.step - elif step is None: - # First non-empty index had only one element - if rng.start == start: - return _concat_index_same_dtype(indexes, klass=Int64Index) - step = rng.start - start - - non_consecutive = (step != rng.step and len(rng) > 1) or ( - next_ is not None and rng.start != next_ - ) - if non_consecutive: - return _concat_index_same_dtype(indexes, klass=Int64Index) - - if step is not None: - next_ = rng[-1] + step - - if non_empty_indexes: - # Get the stop value from "next" or alternatively - # from the last non-empty index - stop = non_empty_indexes[-1].stop if next_ is None else next_ - return RangeIndex(start, stop, step) - - # Here all "indexes" had 0 length, i.e. were empty. - # In this case return an empty range index. - return RangeIndex(0, 0) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ce7b73a92b18a..b167f76d16445 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -51,6 +51,7 @@ ABCDataFrame, ABCDateOffset, ABCDatetimeArray, + ABCDatetimeIndex, ABCIndexClass, ABCMultiIndex, ABCPandasArray, @@ -4309,14 +4310,25 @@ def _concat(self, to_concat, name): if len(typs) == 1: return self._concat_same_dtype(to_concat, name=name) - return _concat._concat_index_asobject(to_concat, name=name) + return Index._concat_same_dtype(self, to_concat, name=name) def _concat_same_dtype(self, to_concat, name): """ Concatenate to_concat which has the same class. 
""" # must be overridden in specific classes - return _concat._concat_index_asobject(to_concat, name) + klasses = (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex, ExtensionArray) + to_concat = [ + x.astype(object) if isinstance(x, klasses) else x for x in to_concat + ] + + self = to_concat[0] + attribs = self._get_attributes_dict() + attribs["name"] = name + + to_concat = [x._values if isinstance(x, Index) else x for x in to_concat] + + return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs) def putmask(self, mask, value): """ diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 1a1f8ae826ca7..2cdf73788dd9b 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -17,7 +17,6 @@ needs_i8_conversion, pandas_dtype, ) -import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.generic import ( ABCFloat64Index, ABCInt64Index, @@ -129,7 +128,8 @@ def _assert_safe_casting(cls, data, subarr): pass def _concat_same_dtype(self, indexes, name): - return _concat._concat_index_same_dtype(indexes).rename(name) + result = type(indexes[0])(np.concatenate([x._values for x in indexes])) + return result.rename(name) @property def is_all_dates(self): diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 16098c474a473..a026f08a7560d 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -11,7 +11,6 @@ from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, cache_readonly -from pandas.core.dtypes import concat as _concat from pandas.core.dtypes.common import ( ensure_platform_int, ensure_python_int, @@ -647,7 +646,53 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False) return super().join(other, how, level, return_indexers, sort) def _concat_same_dtype(self, indexes, name): - return _concat._concat_rangeindex_same_dtype(indexes).rename(name) + """ + Concatenates multiple RangeIndex instances. All members of "indexes" must + be of type RangeIndex; result will be RangeIndex if possible, Int64Index + otherwise. E.g.: + indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6) + indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5]) + """ + start = step = next_ = None + + # Filter the empty indexes + non_empty_indexes = [obj for obj in indexes if len(obj)] + + for obj in non_empty_indexes: + rng = obj._range # type: range + + if start is None: + # This is set by the first non-empty index + start = rng.start + if step is None and len(rng) > 1: + step = rng.step + elif step is None: + # First non-empty index had only one element + if rng.start == start: + result = Int64Index(np.concatenate([x._values for x in indexes])) + return result.rename(name) + + step = rng.start - start + + non_consecutive = (step != rng.step and len(rng) > 1) or ( + next_ is not None and rng.start != next_ + ) + if non_consecutive: + result = Int64Index(np.concatenate([x._values for x in indexes])) + return result.rename(name) + + if step is not None: + next_ = rng[-1] + step + + if non_empty_indexes: + # Get the stop value from "next" or alternatively + # from the last non-empty index + stop = non_empty_indexes[-1].stop if next_ is None else next_ + return RangeIndex(start, stop, step).rename(name) + + # Here all "indexes" had 0 length, i.e. were empty. + # In this case return an empty range index. 
+ return RangeIndex(0, 0).rename(name) def __len__(self): """ diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index e79991f652154..280b0a99c7e68 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -411,7 +411,7 @@ def test_append(self): tm.assert_index_equal(result, expected, exact=True) def test_append_to_another(self): - # hits _concat_index_asobject + # hits Index._concat_same_dtype fst = Index(["a", "b"]) snd = CategoricalIndex(["d", "e"]) result = fst.append(snd)
cc @jorisvandenbossche we briefly discussed at the sprint the idea that `dtypes.concat` is a weird place to define these functions. This PR takes the subset of `dtypes.concat` functions that are a) private and b) equivalent to `klass._concat_same_dtype` for some `klass`, and moves the implementation to the appropriate class. The categorical one is left in place for now because a) it is public and b) it would be a pretty big move in and of itself.
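The behavior being relocated (not changed), sketched via the public `append`, following the examples in the moved docstring:

```python
import pandas as pd

# Consecutive ranges can remain a RangeIndex...
assert isinstance(pd.RangeIndex(3).append(pd.RangeIndex(3, 6)), pd.RangeIndex)

# ...while non-consecutive ones fall back to Int64Index.
result = pd.RangeIndex(3).append(pd.RangeIndex(4, 6))
assert list(result) == [0, 1, 2, 4, 5]
```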
https://api.github.com/repos/pandas-dev/pandas/pulls/27727
2019-08-03T02:44:17Z
2019-08-05T11:54:25Z
2019-08-05T11:54:25Z
2019-08-05T14:30:58Z
BUG: fix+test DTA/TDA/PA add/sub Index
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 47b138a9e1604..9f3249e14d851 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1216,7 +1216,7 @@ def _time_shift(self, periods, freq=None): def __add__(self, other): other = lib.item_from_zerodim(other) - if isinstance(other, (ABCSeries, ABCDataFrame)): + if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)): return NotImplemented # scalar others @@ -1282,7 +1282,7 @@ def __radd__(self, other): def __sub__(self, other): other = lib.item_from_zerodim(other) - if isinstance(other, (ABCSeries, ABCDataFrame)): + if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)): return NotImplemented # scalar others @@ -1349,7 +1349,7 @@ def __sub__(self, other): return result def __rsub__(self, other): - if is_datetime64_dtype(other) and is_timedelta64_dtype(self): + if is_datetime64_any_dtype(other) and is_timedelta64_dtype(self): # ndarray[datetime64] cannot be subtracted from self, so # we need to wrap in DatetimeArray/Index and flip the operation if not isinstance(other, DatetimeLikeArrayMixin): diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index cca6836acf626..0e01216af9ec0 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -2249,6 +2249,23 @@ def test_add_datetimelike_and_dti(self, addend, tz): # ------------------------------------------------------------- + def test_dta_add_sub_index(self, tz_naive_fixture): + # Check that DatetimeArray defers to Index classes + dti = date_range("20130101", periods=3, tz=tz_naive_fixture) + dta = dti.array + result = dta - dti + expected = dti - dti + tm.assert_index_equal(result, expected) + + tdi = result + result = dta + tdi + expected = dti + tdi + tm.assert_index_equal(result, expected) + + result = dta - tdi + expected = dti - tdi + tm.assert_index_equal(result, expected) + def test_sub_dti_dti(self): # previously performed setop (deprecated in 0.16.0), now changed to # return subtraction -> TimeDeltaIndex (GH ...) 
@@ -2554,6 +2571,7 @@ def test_shift_months(years, months): tm.assert_index_equal(actual, expected) +# FIXME: this belongs in scalar tests class SubDatetime(datetime): pass diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index c1b32e8b13442..4b58c290c3cea 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -1041,6 +1041,18 @@ def test_parr_add_sub_tdt64_nat_array(self, box_df_fail, other): with pytest.raises(TypeError): other - obj + # --------------------------------------------------------------- + # Unsorted + + def test_parr_add_sub_index(self): + # Check that PeriodArray defers to Index on arithmetic ops + pi = pd.period_range("2000-12-31", periods=3) + parr = pi.array + + result = parr - pi + expected = pi - pi + tm.assert_index_equal(result, expected) + class TestPeriodSeriesArithmetic: def test_ops_series_timedelta(self): diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 326c565308124..4f5e00bc5a37d 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -480,6 +480,25 @@ def test_timedelta(self, freq): tm.assert_index_equal(result1, result4) tm.assert_index_equal(result2, result3) + def test_tda_add_sub_index(self): + # Check that TimedeltaArray defers to Index on arithmetic ops + tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"]) + tda = tdi.array + + dti = pd.date_range("1999-12-31", periods=3, freq="D") + + result = tda + dti + expected = tdi + dti + tm.assert_index_equal(result, expected) + + result = tda + tdi + expected = tdi + tdi + tm.assert_index_equal(result, expected) + + result = tda - tdi + expected = tdi - tdi + tm.assert_index_equal(result, expected) + class TestAddSubNaTMasking: # TODO: parametrize over boxes
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Looking forward to doing a couple of test-cleanup passes.
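A sketch of the deferral, based on the tests added above:

```python
import pandas as pd

dti = pd.date_range("2013-01-01", periods=3)
dta = dti.array  # DatetimeArray

# The array now returns NotImplemented for Index operands, so Python
# dispatches to the Index and an Index (not an array) comes back.
result = dta - dti
assert isinstance(result, pd.TimedeltaIndex)
```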
https://api.github.com/repos/pandas-dev/pandas/pulls/27726
2019-08-02T23:27:33Z
2019-08-05T11:45:25Z
2019-08-05T11:45:25Z
2019-08-05T14:28:54Z
STYLE: Add flake8-comprehensions to pre-commit configuration
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 5f7143ef518bb..32ffb3330564c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,6 +9,7 @@ repos:
   hooks:
   - id: flake8
     language: python_venv
+    additional_dependencies: [flake8-comprehensions]
 - repo: https://github.com/pre-commit/mirrors-isort
   rev: v4.3.20
   hooks:
- [x] closes #27724 Running `pre-commit` with this change catches the error described in the issue.
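For context, a representative pattern the plugin flags (illustrative only; not necessarily the exact error from the issue):

```python
# C400: unnecessary generator - rewrite as a list comprehension
doubled = list(x * 2 for x in range(5))

# preferred form
doubled = [x * 2 for x in range(5)]
```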
https://api.github.com/repos/pandas-dev/pandas/pulls/27725
2019-08-02T22:19:58Z
2019-08-04T21:38:54Z
2019-08-04T21:38:53Z
2019-08-05T18:39:39Z
CLN: collected cleanups from other branches
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index f704ceffa662e..7424c4ddc3d92 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -47,10 +47,6 @@ cpdef get_value_at(ndarray arr, object loc, object tz=None): return util.get_value_at(arr, loc) -def get_value_box(arr: ndarray, loc: object) -> object: - return get_value_at(arr, loc, tz=None) - - # Don't populate hash tables in monotonic indexes larger than this _SIZE_CUTOFF = 1000000 diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 39529177b9e35..667fb4501ed95 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -125,7 +125,11 @@ def __init__(self, values, copy=False): if isinstance(values, type(self)): values = values._ndarray if not isinstance(values, np.ndarray): - raise ValueError("'values' must be a NumPy array.") + raise ValueError( + "'values' must be a NumPy array, not {typ}".format( + typ=type(values).__name__ + ) + ) if values.ndim != 1: raise ValueError("PandasArray must be 1-dimensional.") diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index afd1e8203059e..ec4419b02d8b1 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -173,8 +173,8 @@ class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps): "ceil", ] - # Needed so that NaT.__richcmp__(DateTimeArray) operates pointwise - ndim = 1 + # Note: ndim must be defined to ensure NaT.__richcmp(TimedeltaArray) + # operates pointwise. @property def _box_func(self): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2271ff643bc15..d93a95931dcc1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4713,7 +4713,7 @@ def get_value(self, series, key): raise try: - return libindex.get_value_box(s, key) + return libindex.get_value_at(s, key) except IndexError: raise except TypeError: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 6a2aebe5db246..9f3aa699cfaf4 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -434,7 +434,7 @@ def f(m, v, i): return self.split_and_operate(mask, f, inplace) - def split_and_operate(self, mask, f, inplace): + def split_and_operate(self, mask, f, inplace: bool): """ split the block per-column, and apply the callable f per-column, return a new block for each. 
Handle @@ -493,17 +493,15 @@ def make_a_block(nv, ref_loc): return new_blocks - def _maybe_downcast(self, blocks, downcast=None): + def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]: # no need to downcast our float # unless indicated - if downcast is None and self.is_float: - return blocks - elif downcast is None and (self.is_timedelta or self.is_datetime): + if downcast is None and ( + self.is_float or self.is_timedelta or self.is_datetime + ): return blocks - if not isinstance(blocks, list): - blocks = [blocks] return _extend_blocks([b.downcast(downcast) for b in blocks]) def downcast(self, dtypes=None): @@ -1343,7 +1341,15 @@ def shift(self, periods, axis=0, fill_value=None): return [self.make_block(new_values)] - def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0): + def where( + self, + other, + cond, + align=True, + errors="raise", + try_cast: bool = False, + axis: int = 0, + ) -> List["Block"]: """ evaluate the block; return result block(s) from the result @@ -1442,7 +1448,7 @@ def func(cond, values, other): if try_cast: result = self._try_cast_result(result) - return self.make_block(result) + return [self.make_block(result)] # might need to separate out blocks axis = cond.ndim - 1 @@ -1474,9 +1480,9 @@ def _unstack(self, unstacker_func, new_columns, n_rows, fill_value): new_columns : Index All columns of the unstacked BlockManager. n_rows : int - Only used in ExtensionBlock.unstack + Only used in ExtensionBlock._unstack fill_value : int - Only used in ExtensionBlock.unstack + Only used in ExtensionBlock._unstack Returns ------- @@ -1550,7 +1556,7 @@ def quantile(self, qs, interpolation="linear", axis=0): result = result[..., 0] result = lib.item_from_zerodim(result) - ndim = getattr(result, "ndim", None) or 0 + ndim = np.ndim(result) return make_block(result, placement=np.arange(len(result)), ndim=ndim) def _replace_coerce( @@ -1923,7 +1929,15 @@ def shift( ) ] - def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0): + def where( + self, + other, + cond, + align=True, + errors="raise", + try_cast: bool = False, + axis: int = 0, + ) -> List["Block"]: if isinstance(other, ABCDataFrame): # ExtensionArrays are 1-D, so if we get here then # `other` should be a DataFrame with a single column. @@ -1968,7 +1982,7 @@ def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0) np.where(cond, self.values, other), dtype=dtype ) - return self.make_block_same_class(result, placement=self.mgr_locs) + return [self.make_block_same_class(result, placement=self.mgr_locs)] @property def _ftype(self): @@ -2706,7 +2720,7 @@ def f(m, v, i): return blocks - def _maybe_downcast(self, blocks, downcast=None): + def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]: if downcast is not None: return blocks @@ -3031,7 +3045,15 @@ def concat_same_type(self, to_concat, placement=None): values, placement=placement or slice(0, len(values), 1), ndim=self.ndim ) - def where(self, other, cond, align=True, errors="raise", try_cast=False, axis=0): + def where( + self, + other, + cond, + align=True, + errors="raise", + try_cast: bool = False, + axis: int = 0, + ) -> List["Block"]: # TODO(CategoricalBlock.where): # This can all be deleted in favor of ExtensionBlock.where once # we enforce the deprecation. 
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index e5acd23b77d5d..b30ddbc383906 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1823,7 +1823,7 @@ def _simple_blockify(tuples, dtype): """ values, placement = _stack_arrays(tuples, dtype) - # CHECK DTYPE? + # TODO: CHECK DTYPE? if dtype is not None and values.dtype != dtype: # pragma: no cover values = values.astype(dtype) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 66878c3b1026c..a5d0e2cb3b58f 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1630,15 +1630,14 @@ def _get_period_bins(self, ax): def _take_new_index(obj, indexer, new_index, axis=0): - from pandas.core.api import Series, DataFrame - if isinstance(obj, Series): + if isinstance(obj, ABCSeries): new_values = algos.take_1d(obj.values, indexer) - return Series(new_values, index=new_index, name=obj.name) - elif isinstance(obj, DataFrame): + return obj._constructor(new_values, index=new_index, name=obj.name) + elif isinstance(obj, ABCDataFrame): if axis == 1: raise NotImplementedError("axis 1 is not supported") - return DataFrame( + return obj._constructor( obj._data.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1) ) else: diff --git a/pandas/core/window.py b/pandas/core/window.py index 4b6a1cf2e9a04..a7425bc1466c3 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -265,6 +265,8 @@ def _wrap_result(self, result, block=None, obj=None): # coerce if necessary if block is not None: if is_timedelta64_dtype(block.values.dtype): + # TODO: do we know what result.dtype is at this point? + # i.e. can we just do an astype? from pandas import to_timedelta result = to_timedelta(result.ravel(), unit="ns").values.reshape( diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 9b8c8e6d8a077..ce724f5a60beb 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -506,7 +506,7 @@ def test_datetime(): desc_result = grouped.describe() idx = cats.codes.argsort() - ord_labels = cats.take_nd(idx) + ord_labels = cats.take(idx) ord_data = data.take(idx) expected = ord_data.groupby(ord_labels, observed=False).describe() assert_frame_equal(desc_result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/27723
2019-08-02T21:41:51Z
2019-08-05T15:52:45Z
2019-08-05T15:52:45Z
2019-08-05T16:37:58Z
BUG: fix replace_list
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index c80195af413f7..a4e792091cb4b 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -152,7 +152,7 @@ ExtensionArray Other ^^^^^ - +- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when replacing timezone-aware timestamps using a dict-like replacer (:issue:`27720`) - - - diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 821c35e0cce2f..2b783e3e7aaf8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6658,9 +6658,8 @@ def replace( else: # need a non-zero len on all axes - for a in self._AXIS_ORDERS: - if not len(self._get_axis(a)): - return self + if not self.size: + return self new_data = self._data if is_dict_like(to_replace): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 8956821740bf3..b2019e3e59dda 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -7,7 +7,7 @@ import numpy as np -from pandas._libs import internals as libinternals, lib +from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -602,9 +602,10 @@ def comp(s, regex=False): """ if isna(s): return isna(values) - if hasattr(s, "asm8"): + if isinstance(s, (Timedelta, Timestamp)) and getattr(s, "tz", None) is None: + return _compare_or_regex_search( - maybe_convert_objects(values), getattr(s, "asm8"), regex + maybe_convert_objects(values), s.asm8, regex ) return _compare_or_regex_search(values, s, regex) diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index dea1d5114f1b9..ed80e249220fd 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -1029,22 +1029,20 @@ def test_replace_series(self, how, to_key, from_key): tm.assert_series_equal(result, exp) - # TODO(jbrockmendel) commented out to only have a single xfail printed - @pytest.mark.xfail( - reason="GH #18376, tzawareness-compat bug in BlockManager.replace_list" + @pytest.mark.parametrize("how", ["dict", "series"]) + @pytest.mark.parametrize( + "to_key", + ["timedelta64[ns]", "bool", "object", "complex128", "float64", "int64"], ) - # @pytest.mark.parametrize('how', ['dict', 'series']) - # @pytest.mark.parametrize('to_key', ['timedelta64[ns]', 'bool', 'object', - # 'complex128', 'float64', 'int64']) - # @pytest.mark.parametrize('from_key', ['datetime64[ns, UTC]', - # 'datetime64[ns, US/Eastern]']) - # def test_replace_series_datetime_tz(self, how, to_key, from_key): - def test_replace_series_datetime_tz(self): + @pytest.mark.parametrize( + "from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"] + ) + def test_replace_series_datetime_tz(self, how, to_key, from_key): how = "series" from_key = "datetime64[ns, US/Eastern]" to_key = "timedelta64[ns]" - index = pd.Index([3, 4], name="xxx") + index = pd.Index([3, 4], name="xyz") obj = pd.Series(self.rep[from_key], index=index, name="yyy") assert obj.dtype == from_key @@ -1061,24 +1059,17 @@ def test_replace_series_datetime_tz(self): tm.assert_series_equal(result, exp) - # TODO(jreback) commented out to only have a single xfail printed - @pytest.mark.xfail( - reason="different tz, currently mask_missing raises SystemError", strict=False + @pytest.mark.parametrize("how", ["dict", "series"]) + @pytest.mark.parametrize( + "to_key", + ["datetime64[ns]", "datetime64[ns, UTC]", 
"datetime64[ns, US/Eastern]"], ) - # @pytest.mark.parametrize('how', ['dict', 'series']) - # @pytest.mark.parametrize('to_key', [ - # 'datetime64[ns]', 'datetime64[ns, UTC]', - # 'datetime64[ns, US/Eastern]']) - # @pytest.mark.parametrize('from_key', [ - # 'datetime64[ns]', 'datetime64[ns, UTC]', - # 'datetime64[ns, US/Eastern]']) - # def test_replace_series_datetime_datetime(self, how, to_key, from_key): - def test_replace_series_datetime_datetime(self): - how = "dict" - to_key = "datetime64[ns]" - from_key = "datetime64[ns]" - - index = pd.Index([3, 4], name="xxx") + @pytest.mark.parametrize( + "from_key", + ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"], + ) + def test_replace_series_datetime_datetime(self, how, to_key, from_key): + index = pd.Index([3, 4], name="xyz") obj = pd.Series(self.rep[from_key], index=index, name="yyy") assert obj.dtype == from_key
- [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
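For reference, a minimal repro of the fixed behavior (the values here are illustrative):

```python
import pandas as pd

ts = pd.Timestamp("2019-01-01", tz="US/Eastern")
ser = pd.Series([ts, ts])

# A dict-like replacer with tz-aware timestamps previously tripped the
# tzawareness-compat bug in BlockManager.replace_list; now it round-trips.
result = ser.replace({ts: pd.Timestamp("2019-02-01", tz="US/Eastern")})
print(result)
```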
https://api.github.com/repos/pandas-dev/pandas/pulls/27720
2019-08-02T19:01:24Z
2019-08-05T11:58:49Z
2019-08-05T11:58:49Z
2019-08-05T14:44:19Z
Removed continued parametrization of FilePathOrBuffer
diff --git a/pandas/_typing.py b/pandas/_typing.py index 837a7a89e0b83..0a40e342eb8e9 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -1,5 +1,5 @@ from pathlib import Path -from typing import IO, TYPE_CHECKING, AnyStr, Optional, TypeVar, Union +from typing import IO, TYPE_CHECKING, Optional, TypeVar, Union import numpy as np @@ -22,7 +22,7 @@ ArrayLike = TypeVar("ArrayLike", "ExtensionArray", np.ndarray) DatetimeLikeScalar = TypeVar("DatetimeLikeScalar", "Period", "Timestamp", "Timedelta") Dtype = Union[str, np.dtype, "ExtensionDtype"] -FilePathOrBuffer = Union[str, Path, IO[AnyStr]] +FilePathOrBuffer = Union[str, Path, IO] FrameOrSeries = TypeVar("FrameOrSeries", "Series", "DataFrame") Scalar = Union[str, int, float] diff --git a/pandas/io/common.py b/pandas/io/common.py index e01e473047b88..7e787554f3e1a 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -10,7 +10,7 @@ import mmap import os import pathlib -from typing import IO, AnyStr, BinaryIO, Optional, TextIO, Type +from typing import IO, BinaryIO, Optional, TextIO, Type from urllib.error import URLError # noqa from urllib.parse import ( # noqa urlencode, @@ -96,9 +96,7 @@ def _is_url(url) -> bool: return False -def _expand_user( - filepath_or_buffer: FilePathOrBuffer[AnyStr] -) -> FilePathOrBuffer[AnyStr]: +def _expand_user(filepath_or_buffer: FilePathOrBuffer) -> FilePathOrBuffer: """Return the argument with an initial component of ~ or ~user replaced by that user's home directory. @@ -126,9 +124,7 @@ def _validate_header_arg(header) -> None: ) -def _stringify_path( - filepath_or_buffer: FilePathOrBuffer[AnyStr] -) -> FilePathOrBuffer[AnyStr]: +def _stringify_path(filepath_or_buffer: FilePathOrBuffer) -> FilePathOrBuffer: """Attempt to convert a path-like object to a string. Parameters diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 23c07ea72d40f..abe1bab3f57bb 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -469,7 +469,7 @@ def _get_formatter(self, i: Union[str, int]) -> Optional[Callable]: @contextmanager def get_buffer( - self, buf: Optional[FilePathOrBuffer[str]], encoding: Optional[str] = None + self, buf: Optional[FilePathOrBuffer], encoding: Optional[str] = None ): if buf is not None: buf = _stringify_path(buf) @@ -491,9 +491,7 @@ def write_result(self, buf: IO[str]) -> None: raise AbstractMethodError(self) def get_result( - self, - buf: Optional[FilePathOrBuffer[str]] = None, - encoding: Optional[str] = None, + self, buf: Optional[FilePathOrBuffer] = None, encoding: Optional[str] = None ) -> Optional[str]: with self.get_buffer(buf, encoding=encoding) as f: self.write_result(buf=f) @@ -861,12 +859,12 @@ def _join_multiline(self, *args) -> str: st = ed return "\n\n".join(str_lst) - def to_string(self, buf: Optional[FilePathOrBuffer[str]] = None) -> Optional[str]: + def to_string(self, buf: Optional[FilePathOrBuffer] = None) -> Optional[str]: return self.get_result(buf=buf) def to_latex( self, - buf: Optional[FilePathOrBuffer[str]] = None, + buf: Optional[FilePathOrBuffer] = None, column_format: Optional[str] = None, longtable: bool = False, encoding: Optional[str] = None, @@ -904,7 +902,7 @@ def _format_col(self, i: int) -> List[str]: def to_html( self, - buf: Optional[FilePathOrBuffer[str]] = None, + buf: Optional[FilePathOrBuffer] = None, classes: Optional[Union[str, List, Tuple]] = None, notebook: bool = False, border: Optional[int] = None,
Follow-up to #27598 to simplify things a bit. I think it was a mistake to add `IO[AnyStr]`, since `AnyStr` is a TypeVar and not something to actually parametrize with. The CPython source for `IO` already does this: https://github.com/python/cpython/blob/8990ac0ab0398bfb9c62031288030fe7c630c2c7/Lib/typing.py#L1452 I'm not sure why mypy doesn't error on this. @simonjayhawkins
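A small runnable sketch of the distinction; the alias names here are illustrative, not the ones in `pandas._typing`:

```python
from pathlib import Path
from typing import IO, AnyStr, Union

# IO[AnyStr] leaves the TypeVar unbound, so the alias itself stays generic;
# bare IO is treated like IO[Any] and covers text and binary handles alike.
Parametrized = Union[str, Path, IO[AnyStr]]  # what this PR removes
Plain = Union[str, Path, IO]                 # what this PR settles on

def read(source: Plain) -> None:
    ...
```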
https://api.github.com/repos/pandas-dev/pandas/pulls/27719
2019-08-02T17:54:39Z
2019-08-25T16:03:22Z
null
2020-01-16T00:35:08Z
remove confusing instructions and link.
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 80dc8b0d8782b..b38f7767ae073 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -133,22 +133,11 @@ Installing a C compiler Pandas uses C extensions (mostly written using Cython) to speed up certain operations. To install pandas from source, you need to compile these C extensions, which means you need a C compiler. This process depends on which -platform you're using. Follow the `CPython contributing guide -<https://devguide.python.org/setup/#compile-and-build>`_ for getting a -compiler installed. You don't need to do any of the ``./configure`` or ``make`` -steps; you only need to install the compiler. - -For Windows developers, when using Python 3.5 and later, it is sufficient to -install `Visual Studio 2017 <https://visualstudio.com/>`_ with the -**Python development workload** and the **Python native development tools** -option. Otherwise, the following links may be helpful. - -* https://blogs.msdn.microsoft.com/pythonengineering/2017/03/07/python-support-in-vs2017/ -* https://blogs.msdn.microsoft.com/pythonengineering/2016/04/11/unable-to-find-vcvarsall-bat/ -* https://github.com/conda/conda-recipes/wiki/Building-from-Source-on-Windows-32-bit-and-64-bit -* https://cowboyprogrammer.org/building-python-wheels-for-windows/ -* https://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/ -* https://support.enthought.com/hc/en-us/articles/204469260-Building-Python-extensions-with-Canopy +platform you're using. + +* Windows: https://devguide.python.org/setup/#windows-compiling +* Mac: https://devguide.python.org/setup/#macos +* Unix: https://devguide.python.org/setup/#unix-compiling Let us know if you have any difficulties by opening an issue or reaching out on `Gitter`_.
- [ ] closes #27707 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27717
2019-08-02T16:16:54Z
2019-08-05T13:32:32Z
2019-08-05T13:32:31Z
2019-08-12T21:24:56Z
pandas.PeriodIndex and pandas.DatetimeIndex docstring quotes fix
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 2e086c8ce8c34..6a4ca0ab4147a 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1414,17 +1414,69 @@ def date(self): return tslib.ints_to_pydatetime(timestamps, box="date") - year = _field_accessor("year", "Y", "The year of the datetime.") - month = _field_accessor("month", "M", "The month as January=1, December=12. ") - day = _field_accessor("day", "D", "The days of the datetime.") - hour = _field_accessor("hour", "h", "The hours of the datetime.") - minute = _field_accessor("minute", "m", "The minutes of the datetime.") - second = _field_accessor("second", "s", "The seconds of the datetime.") + year = _field_accessor( + "year", + "Y", + """ + The year of the datetime. + """, + ) + month = _field_accessor( + "month", + "M", + """ + The month as January=1, December=12. + """, + ) + day = _field_accessor( + "day", + "D", + """ + The month as January=1, December=12. + """, + ) + hour = _field_accessor( + "hour", + "h", + """ + The hours of the datetime. + """, + ) + minute = _field_accessor( + "minute", + "m", + """ + The minutes of the datetime. + """, + ) + second = _field_accessor( + "second", + "s", + """ + The seconds of the datetime. + """, + ) microsecond = _field_accessor( - "microsecond", "us", "The microseconds of the datetime." + "microsecond", + "us", + """ + The microseconds of the datetime. + """, + ) + nanosecond = _field_accessor( + "nanosecond", + "ns", + """ + The nanoseconds of the datetime. + """, + ) + weekofyear = _field_accessor( + "weekofyear", + "woy", + """ + The week ordinal of the year. + """, ) - nanosecond = _field_accessor("nanosecond", "ns", "The nanoseconds of the datetime.") - weekofyear = _field_accessor("weekofyear", "woy", "The week ordinal of the year.") week = weekofyear _dayofweek_doc = """ The day of the week with Monday=0, Sunday=6. @@ -1466,13 +1518,31 @@ def date(self): weekday_name = _field_accessor( "weekday_name", "weekday_name", - "The name of day in a week (ex: Friday)\n\n.. deprecated:: 0.23.0", + """ + The name of day in a week (ex: Friday)\n\n.. deprecated:: 0.23.0 + """, ) - dayofyear = _field_accessor("dayofyear", "doy", "The ordinal day of the year.") - quarter = _field_accessor("quarter", "q", "The quarter of the date.") + dayofyear = _field_accessor( + "dayofyear", + "doy", + """ + The ordinal day of the year. + """, + ) + quarter = _field_accessor( + "quarter", + "q", + """ + The quarter of the date. + """, + ) days_in_month = _field_accessor( - "days_in_month", "dim", "The number of days in the month." + "days_in_month", + "dim", + """ + The number of days in the month. 
+ """, ) daysinmonth = days_in_month _is_month_doc = """ diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index c290391278def..91dd853e78c77 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -342,25 +342,85 @@ def __array__(self, dtype=None): # -------------------------------------------------------------------- # Vectorized analogues of Period properties - year = _field_accessor("year", 0, "The year of the period") - month = _field_accessor("month", 3, "The month as January=1, December=12") - day = _field_accessor("day", 4, "The days of the period") - hour = _field_accessor("hour", 5, "The hour of the period") - minute = _field_accessor("minute", 6, "The minute of the period") - second = _field_accessor("second", 7, "The second of the period") - weekofyear = _field_accessor("week", 8, "The week ordinal of the year") + year = _field_accessor( + "year", + 0, + """ + The year of the period. + """, + ) + month = _field_accessor( + "month", + 3, + """ + The month as January=1, December=12. + """, + ) + day = _field_accessor( + "day", + 4, + """ + The days of the period. + """, + ) + hour = _field_accessor( + "hour", + 5, + """ + The hour of the period. + """, + ) + minute = _field_accessor( + "minute", + 6, + """ + The minute of the period. + """, + ) + second = _field_accessor( + "second", + 7, + """ + The second of the period. + """, + ) + weekofyear = _field_accessor( + "week", + 8, + """ + The week ordinal of the year. + """, + ) week = weekofyear dayofweek = _field_accessor( - "dayofweek", 10, "The day of the week with Monday=0, Sunday=6" + "dayofweek", + 10, + """ + The day of the week with Monday=0, Sunday=6. + """, ) weekday = dayofweek dayofyear = day_of_year = _field_accessor( - "dayofyear", 9, "The ordinal day of the year" + "dayofyear", + 9, + """ + The ordinal day of the year. + """, + ) + quarter = _field_accessor( + "quarter", + 2, + """ + The quarter of the date. + """, ) - quarter = _field_accessor("quarter", 2, "The quarter of the date") qyear = _field_accessor("qyear", 1) days_in_month = _field_accessor( - "days_in_month", 11, "The number of days in the month" + "days_in_month", + 11, + """ + The number of days in the month. + """, ) daysinmonth = days_in_month
- [x] closes #27713 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27716
2019-08-02T16:11:27Z
2019-08-02T21:12:01Z
2019-08-02T21:12:01Z
2019-08-02T21:12:03Z
TST: troubleshoot inconsistent xfails
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index c9597505fa596..5ecd641fc68be 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -11,6 +11,7 @@ import struct import sys +PY35 = sys.version_info[:2] == (3, 5) PY36 = sys.version_info >= (3, 6) PY37 = sys.version_info >= (3, 7) PYPY = platform.python_implementation() == "PyPy" diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index cca6836acf626..0b17ece14cbd1 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -666,6 +666,7 @@ def test_comparison_tzawareness_compat_scalars(self, op, box_with_array): # Raising in __eq__ will fallback to NumPy, which warns, fails, # then re-raises the original exception. So we just need to ignore. @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning") + @pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning") def test_scalar_comparison_tzawareness( self, op, other, tz_aware_fixture, box_with_array ): diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 8c0930c044838..c500760fa1390 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -1789,9 +1789,10 @@ def test_result_types(self): self.check_result_type(np.float32, np.float32) self.check_result_type(np.float64, np.float64) - def test_result_types2(self): + @td.skip_if_windows + def test_result_complex128(self): # xref https://github.com/pandas-dev/pandas/issues/12293 - pytest.skip("unreliable tests on complex128") + # this fails on Windows, apparently a floating point precision issue # Did not test complex64 because DataFrame is converting it to # complex128. Due to https://github.com/pandas-dev/pandas/issues/10952 diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index 9a7a43cff0c27..a60607d586ada 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -142,16 +142,6 @@ def test_divmod_series_array(self): # skipping because it is not implemented pass - @pytest.mark.xfail(reason="different implementation", strict=False) - def test_direct_arith_with_series_returns_not_implemented(self, data): - # Right now, we have trouble with this. Returning NotImplemented - # fails other tests like - # tests/arithmetic/test_datetime64::TestTimestampSeriesArithmetic:: - # test_dt64_seris_add_intlike - return super( - TestArithmeticOps, self - ).test_direct_arith_with_series_returns_not_implemented(data) - class TestCasting(BaseDatetimeTests, base.BaseCastingTests): pass @@ -163,12 +153,6 @@ def _compare_other(self, s, data, op_name, other): # with (some) integers, depending on the value. 
pass - @pytest.mark.xfail(reason="different implementation", strict=False) - def test_direct_arith_with_series_returns_not_implemented(self, data): - return super( - TestComparisonOps, self - ).test_direct_arith_with_series_returns_not_implemented(data) - class TestMissing(BaseDatetimeTests, base.BaseMissingTests): pass diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index d5c66f0c1dd64..e99208ac78e15 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1819,10 +1819,17 @@ def test_any_all_bool_only(self): (np.any, {"A": pd.Series([0, 1], dtype="category")}, True), (np.all, {"A": pd.Series([1, 2], dtype="category")}, True), (np.any, {"A": pd.Series([1, 2], dtype="category")}, True), - # # Mix - # GH 21484 - # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'), - # 'B': pd.Series([10, 20], dtype='m8[ns]')}, True), + # Mix GH#21484 + pytest.param( + np.all, + { + "A": pd.Series([10, 20], dtype="M8[ns]"), + "B": pd.Series([10, 20], dtype="m8[ns]"), + }, + True, + # In 1.13.3 and 1.14 np.all(df) returns a Timedelta here + marks=[td.skip_if_np_lt("1.15")], + ), ], ) def test_any_all_np_func(self, func, data, expected): diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 486b3b28b29a3..9b8c8e6d8a077 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -4,8 +4,6 @@ import numpy as np import pytest -from pandas.compat import PY37 - import pandas as pd from pandas import ( Categorical, @@ -209,7 +207,7 @@ def test_level_get_group(observed): assert_frame_equal(result, expected) -@pytest.mark.xfail(PY37, reason="flaky on 3.7, xref gh-21636", strict=False) +# GH#21636 previously flaky on py37 @pytest.mark.parametrize("ordered", [True, False]) def test_apply(ordered): # GH 10138 diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index 66a22ae7e9e46..88bc11c588673 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -759,6 +759,8 @@ def test_constructor_with_int_tz(self, klass, box, tz, dtype): assert result == expected # This is the desired future behavior + # Note: this xfail is not strict because the test passes with + # None or any of the UTC variants for tz_naive_fixture @pytest.mark.xfail(reason="Future behavior", strict=False) @pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning") def test_construction_int_rountrip(self, tz_naive_fixture): @@ -766,7 +768,7 @@ def test_construction_int_rountrip(self, tz_naive_fixture): # TODO(GH-24559): Remove xfail tz = tz_naive_fixture result = 1293858000000000000 - expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0] + expected = DatetimeIndex([result], tz=tz).asi8[0] assert result == expected def test_construction_from_replaced_timestamps_with_dst(self): diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 10d422e8aa52c..8db15709da35d 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -741,10 +741,7 @@ def test_to_datetime_tz_psycopg2(self, cache): ) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize( - "cache", - [pytest.param(True, marks=pytest.mark.skipif(True, reason="GH 18111")), False], - ) + @pytest.mark.parametrize("cache", [True, False]) def test_datetime_bool(self, cache): # 
GH13176 with pytest.raises(TypeError): diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index c6485ff21bcfb..ee236a8253b01 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -340,7 +340,6 @@ def test_to_csv_string_array_ascii(self): with open(path, "r") as f: assert f.read() == expected_ascii - @pytest.mark.xfail(strict=False) def test_to_csv_string_array_utf8(self): # GH 10813 str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}] diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index a04fb9fd50257..d634859e72d7b 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -33,6 +33,10 @@ except ImportError: _HAVE_FASTPARQUET = False +pytestmark = pytest.mark.filterwarnings( + "ignore:RangeIndex.* is deprecated:DeprecationWarning" +) + # setup engines & skips @pytest.fixture( @@ -408,8 +412,6 @@ def test_basic(self, pa, df_full): check_round_trip(df, pa) - # TODO: This doesn't fail on all systems; track down which - @pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)", strict=False) def test_basic_subset_columns(self, pa, df_full): # GH18628 diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index e3bc3d452f038..69070ea11e478 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1098,7 +1098,6 @@ def test_time(self): assert xp == rs @pytest.mark.slow - @pytest.mark.xfail(strict=False, reason="Unreliable test") def test_time_change_xlim(self): t = datetime(1, 1, 1, 3, 30, 0) deltas = np.random.randint(1, 20, 3).cumsum() diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 4404b93e86218..b57b817461788 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -10,6 +10,7 @@ from pandas._libs.tslibs.parsing import DateParseError from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz +from pandas.compat import PY35 from pandas.compat.numpy import np_datetime64_compat import pandas as pd @@ -1579,8 +1580,9 @@ def test_period_immutable(): per.freq = 2 * freq -# TODO: This doesn't fail on all systems; track down which -@pytest.mark.xfail(reason="Parses as Jan 1, 0007 on some systems", strict=False) +@pytest.mark.xfail( + PY35, reason="Parsing as Period('0007-01-01', 'D') for reasons unknown", strict=True +) def test_small_year_parsing(): per1 = Period("0001-01-07", "D") assert per1.year == 1 diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 32d32a5d14fb2..3a5a387b919be 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1489,7 +1489,7 @@ def test_value_counts_with_nan(self): "unicode_", "timedelta64[h]", pytest.param( - "datetime64[D]", marks=pytest.mark.xfail(reason="GH#7996", strict=False) + "datetime64[D]", marks=pytest.mark.xfail(reason="GH#7996", strict=True) ), ], ) diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/sparse/test_combine_concat.py index d7295c4bfe5f0..c553cd3fd1a7a 100644 --- a/pandas/tests/sparse/test_combine_concat.py +++ b/pandas/tests/sparse/test_combine_concat.py @@ -440,7 +440,7 @@ def test_concat_sparse_dense_rows(self, fill_value, sparse_idx, dense_idx): "fill_value,sparse_idx,dense_idx", itertools.product([None, 
0, 1, np.nan], [0, 1], [1, 0]), ) - @pytest.mark.xfail(reason="The iloc fails and I can't make expected", strict=False) + @pytest.mark.xfail(reason="The iloc fails and I can't make expected", strict=True) def test_concat_sparse_dense_cols(self, fill_value, sparse_idx, dense_idx): # See GH16874, GH18914 and #18686 for why this should be a DataFrame from pandas.core.dtypes.common import is_sparse diff --git a/pandas/tests/sparse/test_pivot.py b/pandas/tests/sparse/test_pivot.py index 85b899dfe76d5..880c1c55f9f79 100644 --- a/pandas/tests/sparse/test_pivot.py +++ b/pandas/tests/sparse/test_pivot.py @@ -2,7 +2,6 @@ import pytest import pandas as pd -from pandas import _np_version_under1p17 import pandas.util.testing as tm @@ -49,11 +48,6 @@ def test_pivot_table_with_nans(self): ) tm.assert_frame_equal(res_sparse, res_dense) - @pytest.mark.xfail( - not _np_version_under1p17, - reason="failing occasionally on numpy > 1.17", - strict=False, - ) def test_pivot_table_multi(self): res_sparse = pd.pivot_table( self.sparse, index="A", columns="B", values=["D", "E"]
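For context, a minimal illustration of the strict-xfail semantics this PR leans on; this is standard pytest behavior, not pandas-specific:

```python
import pytest

# With strict=True an unexpected pass (XPASS) fails the suite, so markers
# that only fail intermittently get flushed out instead of lingering.
@pytest.mark.xfail(reason="known bug, e.g. GH#7996", strict=True)
def test_known_bug():
    assert 0 == 1
```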
https://api.github.com/repos/pandas-dev/pandas/pulls/27715
2019-08-02T15:42:37Z
2019-08-05T11:55:36Z
2019-08-05T11:55:36Z
2019-08-05T14:28:35Z
REF/CLN: maybe_downcast_to_dtype
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index fd8536e38eee7..4bb1deffd9524 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -46,6 +46,7 @@ ) from .dtypes import DatetimeTZDtype, ExtensionDtype, PeriodDtype from .generic import ( + ABCDataFrame, ABCDatetimeArray, ABCDatetimeIndex, ABCPeriodArray, @@ -95,12 +96,13 @@ def maybe_downcast_to_dtype(result, dtype): """ try to cast to the specified dtype (e.g. convert back to bool/int or could be an astype of float64->float32 """ + do_round = False if is_scalar(result): return result - - def trans(x): - return x + elif isinstance(result, ABCDataFrame): + # occurs in pivot_table doctest + return result if isinstance(dtype, str): if dtype == "infer": @@ -118,83 +120,115 @@ def trans(x): elif inferred_type == "floating": dtype = "int64" if issubclass(result.dtype.type, np.number): - - def trans(x): # noqa - return x.round() + do_round = True else: dtype = "object" - if isinstance(dtype, str): dtype = np.dtype(dtype) - try: + converted = maybe_downcast_numeric(result, dtype, do_round) + if converted is not result: + return converted + + # a datetimelike + # GH12821, iNaT is casted to float + if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]: + try: + result = result.astype(dtype) + except Exception: + if dtype.tz: + # convert to datetime and change timezone + from pandas import to_datetime + + result = to_datetime(result).tz_localize("utc") + result = result.tz_convert(dtype.tz) + + elif dtype.type is Period: + # TODO(DatetimeArray): merge with previous elif + from pandas.core.arrays import PeriodArray + try: + return PeriodArray(result, freq=dtype.freq) + except TypeError: + # e.g. TypeError: int() argument must be a string, a + # bytes-like object or a number, not 'Period + pass + + return result + + +def maybe_downcast_numeric(result, dtype, do_round: bool = False): + """ + Subset of maybe_downcast_to_dtype restricted to numeric dtypes. + + Parameters + ---------- + result : ndarray or ExtensionArray + dtype : np.dtype or ExtensionDtype + do_round : bool + + Returns + ------- + ndarray or ExtensionArray + """ + if not isinstance(dtype, np.dtype): + # e.g. 
SparseDtype has no itemsize attr + return result + + if isinstance(result, list): + # reached via groupoby.agg _ohlc; really this should be handled + # earlier + result = np.array(result) + + def trans(x): + if do_round: + return x.round() + return x + + if dtype.kind == result.dtype.kind: # don't allow upcasts here (except if empty) - if dtype.kind == result.dtype.kind: - if result.dtype.itemsize <= dtype.itemsize and np.prod(result.shape): - return result + if result.dtype.itemsize <= dtype.itemsize and result.size: + return result - if is_bool_dtype(dtype) or is_integer_dtype(dtype): + if is_bool_dtype(dtype) or is_integer_dtype(dtype): + if not result.size: # if we don't have any elements, just astype it - if not np.prod(result.shape): - return trans(result).astype(dtype) + return trans(result).astype(dtype) - # do a test on the first element, if it fails then we are done - r = result.ravel() - arr = np.array([r[0]]) + # do a test on the first element, if it fails then we are done + r = result.ravel() + arr = np.array([r[0]]) + if isna(arr).any() or not np.allclose(arr, trans(arr).astype(dtype), rtol=0): # if we have any nulls, then we are done - if isna(arr).any() or not np.allclose( - arr, trans(arr).astype(dtype), rtol=0 - ): - return result + return result + elif not isinstance(r[0], (np.integer, np.floating, np.bool, int, float, bool)): # a comparable, e.g. a Decimal may slip in here - elif not isinstance( - r[0], (np.integer, np.floating, np.bool, int, float, bool) - ): - return result + return result - if ( - issubclass(result.dtype.type, (np.object_, np.number)) - and notna(result).all() - ): - new_result = trans(result).astype(dtype) - try: - if np.allclose(new_result, result, rtol=0): - return new_result - except Exception: - - # comparison of an object dtype with a number type could - # hit here - if (new_result == result).all(): - return new_result - elif issubclass(dtype.type, np.floating) and not is_bool_dtype(result.dtype): - return result.astype(dtype) - - # a datetimelike - # GH12821, iNaT is casted to float - elif dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]: + if ( + issubclass(result.dtype.type, (np.object_, np.number)) + and notna(result).all() + ): + new_result = trans(result).astype(dtype) try: - result = result.astype(dtype) + if np.allclose(new_result, result, rtol=0): + return new_result except Exception: - if dtype.tz: - # convert to datetime and change timezone - from pandas import to_datetime - - result = to_datetime(result).tz_localize("utc") - result = result.tz_convert(dtype.tz) - - elif dtype.type == Period: - # TODO(DatetimeArray): merge with previous elif - from pandas.core.arrays import PeriodArray - - return PeriodArray(result, freq=dtype.freq) - - except Exception: - pass + # comparison of an object dtype with a number type could + # hit here + if (new_result == result).all(): + return new_result + + elif ( + issubclass(dtype.type, np.floating) + and not is_bool_dtype(result.dtype) + and not is_string_dtype(result.dtype) + ): + return result.astype(dtype) return result
Separate out a numeric-only `maybe_downcast_numeric` from `maybe_downcast_to_dtype`. Avoid a giant try/except by checking the right things up front. The non-numeric portion of `maybe_downcast_to_dtype` casts to datetime64, datetime64tz, or PeriodArray. Following #27683, the next step in cleaning up internals is getting rid of `_try_cast_result`, which will involve replacing a usage of `maybe_downcast_to_dtype` with the less broadly scoped `maybe_downcast_numeric`.
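A rough sketch of the lossless round-trip check at the heart of the numeric path (simplified; the real function also handles bools, empty arrays, and object dtypes):

```python
import numpy as np

def downcast_if_lossless(result: np.ndarray, dtype: np.dtype) -> np.ndarray:
    # Only cast when the round trip through the target dtype is exact (rtol=0).
    candidate = result.astype(dtype)
    if np.allclose(candidate, result, rtol=0):
        return candidate
    return result

print(downcast_if_lossless(np.array([1.0, 2.0]), np.dtype("int64")))  # [1 2]
print(downcast_if_lossless(np.array([1.5]), np.dtype("int64")))       # [1.5]
```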
https://api.github.com/repos/pandas-dev/pandas/pulls/27714
2019-08-02T15:34:09Z
2019-08-04T21:52:45Z
2019-08-04T21:52:45Z
2019-08-04T23:12:38Z
BUG: partial string indexing with scalar
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index c80195af413f7..3097bfa21f9e1 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -82,7 +82,7 @@ Interval Indexing ^^^^^^^^ -- +- Bug in partial-string indexing returning a NumPy array rather than a ``Series`` when indexing with a scalar like ``.loc['2015']`` (:issue:`27516`) - - diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ce7b73a92b18a..a524de3002402 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -243,6 +243,9 @@ def _outer_indexer(self, left, right): _infer_as_myclass = False _engine_type = libindex.ObjectEngine + # whether we support partial string indexing. Overridden + # in DatetimeIndex and PeriodIndex + _supports_partial_string_indexing = False _accessors = {"str"} diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index d6f0008a2646f..c01acbeab1473 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -238,6 +238,7 @@ def _join_i8_wrapper(joinf, **kwargs): ) _engine_type = libindex.DatetimeEngine + _supports_partial_string_indexing = True _tz = None _freq = None diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 19fe1eb897f19..f6b3d1076043e 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -173,6 +173,7 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin): _data = None _engine_type = libindex.PeriodEngine + _supports_partial_string_indexing = True # ------------------------------------------------------------------------ # Index Constructors diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index df89dbe6db6dc..e308ae03730b3 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1704,6 +1704,11 @@ def _is_scalar_access(self, key: Tuple): if isinstance(ax, MultiIndex): return False + if isinstance(k, str) and ax._supports_partial_string_indexing: + # partial string indexing, df.loc['2000', 'A'] + # should not be considered scalar + return False + if not ax.is_unique: return False @@ -1719,7 +1724,10 @@ def _get_partial_string_timestamp_match_key(self, key, labels): """Translate any partial string timestamp matches in key, returning the new key (GH 10331)""" if isinstance(labels, MultiIndex): - if isinstance(key, str) and labels.levels[0].is_all_dates: + if ( + isinstance(key, str) + and labels.levels[0]._supports_partial_string_indexing + ): # Convert key '2016-01-01' to # ('2016-01-01'[, slice(None, None, None)]+) key = tuple([key] + [slice(None)] * (len(labels.levels) - 1)) @@ -1729,7 +1737,10 @@ def _get_partial_string_timestamp_match_key(self, key, labels): # (..., slice('2016-01-01', '2016-01-01', None), ...) 
new_key = [] for i, component in enumerate(key): - if isinstance(component, str) and labels.levels[i].is_all_dates: + if ( + isinstance(component, str) + and labels.levels[i]._supports_partial_string_indexing + ): new_key.append(slice(component, component, None)) else: new_key.append(component) @@ -2334,7 +2345,7 @@ def convert_to_index_sliceable(obj, key): # We might have a datetimelike string that we can translate to a # slice here via partial string indexing - if idx.is_all_dates: + if idx._supports_partial_string_indexing: try: return idx._get_string_slice(key) except (KeyError, ValueError, NotImplementedError): diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index 3095bf9657277..5660fa5ffed80 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -468,3 +468,14 @@ def test_getitem_with_datestring_with_UTC_offset(self, start, end): with pytest.raises(ValueError, match="The index must be timezone"): df = df.tz_localize(None) df[start:end] + + def test_slice_reduce_to_series(self): + # GH 27516 + df = pd.DataFrame( + {"A": range(24)}, index=pd.date_range("2000", periods=24, freq="M") + ) + expected = pd.Series( + range(12), index=pd.date_range("2000", periods=12, freq="M"), name="A" + ) + result = df.loc["2000", "A"] + tm.assert_series_equal(result, expected)
Closes #27516. cc @jreback if you have thoughts on this approach.
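The fixed behavior, lifted from the new test (frequency alias per the 0.25.x era):

```python
import pandas as pd

df = pd.DataFrame(
    {"A": range(24)}, index=pd.date_range("2000", periods=24, freq="M")
)

# Partial string indexing with a scalar column label now reduces to a Series
# instead of hitting the scalar fast path and returning a bare ndarray.
result = df.loc["2000", "A"]
print(type(result))  # pandas Series holding the 12 months of 2000
```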
https://api.github.com/repos/pandas-dev/pandas/pulls/27712
2019-08-02T14:22:02Z
2019-08-04T21:54:57Z
2019-08-04T21:54:57Z
2019-08-04T21:55:20Z
Backport PR #27710 on branch 0.25.x (DOC: Fixed a typo in the roadmap.rst (the word "uses" appeared twice))
diff --git a/doc/source/development/roadmap.rst b/doc/source/development/roadmap.rst index 88e0a18e6b81a..00598830e2fe9 100644 --- a/doc/source/development/roadmap.rst +++ b/doc/source/development/roadmap.rst @@ -96,7 +96,7 @@ Decoupling of indexing and internals The code for getting and setting values in pandas' data structures needs refactoring. In particular, we must clearly separate code that converts keys (e.g., the argument -to ``DataFrame.loc``) to positions from code that uses uses these positions to get +to ``DataFrame.loc``) to positions from code that uses these positions to get or set values. This is related to the proposed BlockManager rewrite. Currently, the BlockManager sometimes uses label-based, rather than position-based, indexing. We propose that it should only work with positional indexing, and the translation of keys
Backport PR #27710: DOC: Fixed a typo in the roadmap.rst (the word "uses" appeared twice)
https://api.github.com/repos/pandas-dev/pandas/pulls/27711
2019-08-02T13:28:58Z
2019-08-02T13:29:11Z
2019-08-02T13:29:11Z
2019-08-02T13:29:11Z
DOC: Fixed a typo in the roadmap.rst (the word "uses" appeared twice)
diff --git a/doc/source/development/roadmap.rst b/doc/source/development/roadmap.rst index 88e0a18e6b81a..00598830e2fe9 100644 --- a/doc/source/development/roadmap.rst +++ b/doc/source/development/roadmap.rst @@ -96,7 +96,7 @@ Decoupling of indexing and internals The code for getting and setting values in pandas' data structures needs refactoring. In particular, we must clearly separate code that converts keys (e.g., the argument -to ``DataFrame.loc``) to positions from code that uses uses these positions to get +to ``DataFrame.loc``) to positions from code that uses these positions to get or set values. This is related to the proposed BlockManager rewrite. Currently, the BlockManager sometimes uses label-based, rather than position-based, indexing. We propose that it should only work with positional indexing, and the translation of keys
https://api.github.com/repos/pandas-dev/pandas/pulls/27710
2019-08-02T13:14:59Z
2019-08-02T13:28:18Z
2019-08-02T13:28:18Z
2019-08-02T13:40:52Z
Linebreak is bleeding into the documentation page
diff --git a/pandas/core/base.py b/pandas/core/base.py index cfa8d25210129..38a8bf7171521 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -687,8 +687,9 @@ def transpose(self, *args, **kwargs): T = property( transpose, - doc="""\nReturn the transpose, which is by - definition self.\n""", + doc=""" + Return the transpose, which is by definition self. + """, ) @property
https://pandas.pydata.org/pandas-docs/stable/reference/series.html ```html <table><td> ... <tr class="row-even"><td><a class="reference internal" href="api/pandas.Series.T.html#pandas.Series.T" title="pandas.Series.T"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Series.T</span></code></a></td> <td>Return the transpose, which is by</td> </tr> ... </td> </table> ``` And looks weird on https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.T.html#pandas.Series.T - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
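A minimal illustration of why the break bled through, assuming the usual numpydoc convention that the first docstring line becomes the API-table summary:

```python
doc = "\nReturn the transpose, which is by\ndefinition self.\n"
summary = doc.strip().splitlines()[0]
print(summary)  # "Return the transpose, which is by"
```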
https://api.github.com/repos/pandas-dev/pandas/pulls/27708
2019-08-02T07:59:37Z
2019-08-03T16:05:38Z
2019-08-03T16:05:38Z
2019-08-04T11:33:26Z
CLN: rename reduce-->compute_reduction
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 739ac0ed397ca..f95685c337969 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -628,7 +628,7 @@ cdef class BlockSlider: arr.shape[1] = 0 -def reduce(arr, f, axis=0, dummy=None, labels=None): +def compute_reduction(arr, f, axis=0, dummy=None, labels=None): """ Parameters diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 6a32553fe2d38..d24aafae0967d 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1280,7 +1280,8 @@ class Timedelta(_Timedelta): else: raise ValueError( "Value must be Timedelta, string, integer, " - "float, timedelta or convertible") + "float, timedelta or convertible, not {typ}" + .format(typ=type(value).__name__)) if is_timedelta64_object(value): value = value.view('i8') diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 2246bbfde636d..5c8599dbb054b 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -221,7 +221,7 @@ def apply_raw(self): """ apply to the values as a numpy array """ try: - result = reduction.reduce(self.values, self.f, axis=self.axis) + result = reduction.compute_reduction(self.values, self.f, axis=self.axis) except Exception: result = np.apply_along_axis(self.f, self.axis, self.values) @@ -281,7 +281,7 @@ def apply_standard(self): dummy = Series(empty_arr, index=index, dtype=values.dtype) try: - result = reduction.reduce( + result = reduction.compute_reduction( values, self.f, axis=self.axis, dummy=dummy, labels=labels ) return self.obj._constructor_sliced(result, index=labels) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index b16217d5d0a32..d22b4bd4d3f2b 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2703,7 +2703,7 @@ def _convert_to_list_like(list_like): elif is_scalar(list_like): return [list_like] else: - # is this reached? + # TODO: is this reached? 
return [list_like] diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 47718fc39ca1d..9b516c1b6ae02 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -57,21 +57,10 @@ class AttributesMixin: _data = None # type: np.ndarray - @property - def _attributes(self): - # Inheriting subclass should implement _attributes as a list of strings - raise AbstractMethodError(self) - @classmethod def _simple_new(cls, values, **kwargs): raise AbstractMethodError(cls) - def _get_attributes_dict(self): - """ - return an attributes dict for my class - """ - return {k: getattr(self, k, None) for k in self._attributes} - @property def _scalar_type(self) -> Type[DatetimeLikeScalar]: """The scalar associated with this datelike diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 6a4ca0ab4147a..061ee4b90d0e9 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -328,7 +328,6 @@ class DatetimeArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps # ----------------------------------------------------------------- # Constructors - _attributes = ["freq", "tz"] _dtype = None # type: Union[np.dtype, DatetimeTZDtype] _freq = None diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 6203cfdf6df6b..20ce11c70c344 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -161,7 +161,6 @@ class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps): # array priority higher than numpy scalars __array_priority__ = 1000 - _attributes = ["freq"] _typ = "periodarray" # ABCPeriodArray _scalar_type = Period diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index dd0b9a79c6dca..afd1e8203059e 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -199,7 +199,6 @@ def dtype(self): # ---------------------------------------------------------------- # Constructors - _attributes = ["freq"] def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False): if isinstance(values, (ABCSeries, ABCIndexClass)): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ea49a13439bfb..010e5b890c5b5 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3556,7 +3556,7 @@ def _iget_item_cache(self, item): def _box_item_values(self, key, values): raise AbstractMethodError(self) - def _slice(self, slobj, axis=0, kind=None): + def _slice(self, slobj: slice, axis=0, kind=None): """ Construct a slice of this container. @@ -6183,8 +6183,6 @@ def fillna( axis = 0 axis = self._get_axis_number(axis) - from pandas import DataFrame - if value is None: if self._is_mixed_type and axis == 1: @@ -6247,7 +6245,7 @@ def fillna( new_data = self._data.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) - elif isinstance(value, DataFrame) and self.ndim == 2: + elif isinstance(value, ABCDataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value) else: raise ValueError("invalid fill value with a %s" % type(value)) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index ec526b338eee1..c5e81e21e9fd5 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -29,14 +29,16 @@ class providing the base-class of operations. 
from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas.core.dtypes.common import ( ensure_float, + is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, + is_integer_dtype, is_numeric_dtype, + is_object_dtype, is_scalar, ) from pandas.core.dtypes.missing import isna, notna -from pandas.api.types import is_datetime64_dtype, is_integer_dtype, is_object_dtype import pandas.core.algorithms as algorithms from pandas.core.arrays import Categorical from pandas.core.base import ( @@ -343,7 +345,7 @@ class _GroupBy(PandasObject, SelectionMixin): def __init__( self, - obj, + obj: NDFrame, keys=None, axis=0, level=None, @@ -360,8 +362,8 @@ def __init__( self._selection = selection - if isinstance(obj, NDFrame): - obj._consolidate_inplace() + assert isinstance(obj, NDFrame), type(obj) + obj._consolidate_inplace() self.level = level diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 5c32550af3883..143755a47b97b 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -25,6 +25,7 @@ from pandas.core.arrays import Categorical, ExtensionArray import pandas.core.common as com from pandas.core.frame import DataFrame +from pandas.core.generic import NDFrame from pandas.core.groupby.categorical import recode_for_groupby, recode_from_groupby from pandas.core.groupby.ops import BaseGrouper from pandas.core.index import CategoricalIndex, Index, MultiIndex @@ -423,7 +424,7 @@ def groups(self): def _get_grouper( - obj, + obj: NDFrame, key=None, axis=0, level=None, diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 1484feeeada64..f20c3f702e29d 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -906,7 +906,7 @@ def _get_sorted_data(self): return self.data.take(self.sort_idx, axis=self.axis) def _chop(self, sdata, slice_obj): - return sdata.iloc[slice_obj] + raise AbstractMethodError(self) def apply(self, f): raise AbstractMethodError(self) @@ -933,7 +933,7 @@ def _chop(self, sdata, slice_obj): if self.axis == 0: return sdata.iloc[slice_obj] else: - return sdata._slice(slice_obj, axis=1) # .loc[:, slice_obj] + return sdata._slice(slice_obj, axis=1) def get_splitter(data, *args, **kwargs): diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 01bfbed1aab4c..2c8006680626c 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -37,6 +37,7 @@ from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeArray, + ABCDatetimeIndex, ABCIndex, ABCIndexClass, ABCSeries, @@ -47,7 +48,7 @@ import pandas as pd from pandas._typing import ArrayLike -import pandas.core.common as com +from pandas.core.construction import extract_array from . import missing from .docstrings import ( @@ -1022,7 +1023,7 @@ def wrapper(left, right): # does inference in the case where `result` has object-dtype. 
return construct_result(left, result, index=left.index, name=res_name) - elif isinstance(right, (ABCDatetimeArray, pd.DatetimeIndex)): + elif isinstance(right, (ABCDatetimeArray, ABCDatetimeIndex)): result = op(left._values, right) return construct_result(left, result, index=left.index, name=res_name) @@ -1194,7 +1195,7 @@ def wrapper(self, other, axis=None): ) # always return a full value series here - res_values = com.values_from_object(res) + res_values = extract_array(res, extract_numpy=True) return self._constructor( res_values, index=self.index, name=res_name, dtype="bool" ) diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index b240876de92b1..2195686ee9c7f 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -6,15 +6,13 @@ from pandas.core.dtypes.common import ensure_int64 -from pandas import Index, isna +from pandas import Index, Series, isna from pandas.core.groupby.ops import generate_bins_generic import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal def test_series_grouper(): - from pandas import Series - obj = Series(np.random.randn(10)) dummy = obj[:0] @@ -31,8 +29,6 @@ def test_series_grouper(): def test_series_bin_grouper(): - from pandas import Series - obj = Series(np.random.randn(10)) dummy = obj[:0] @@ -123,30 +119,32 @@ class TestMoments: class TestReducer: def test_int_index(self): - from pandas.core.series import Series - arr = np.random.randn(100, 4) - result = reduction.reduce(arr, np.sum, labels=Index(np.arange(4))) + result = reduction.compute_reduction(arr, np.sum, labels=Index(np.arange(4))) expected = arr.sum(0) assert_almost_equal(result, expected) - result = reduction.reduce(arr, np.sum, axis=1, labels=Index(np.arange(100))) + result = reduction.compute_reduction( + arr, np.sum, axis=1, labels=Index(np.arange(100)) + ) expected = arr.sum(1) assert_almost_equal(result, expected) dummy = Series(0.0, index=np.arange(100)) - result = reduction.reduce(arr, np.sum, dummy=dummy, labels=Index(np.arange(4))) + result = reduction.compute_reduction( + arr, np.sum, dummy=dummy, labels=Index(np.arange(4)) + ) expected = arr.sum(0) assert_almost_equal(result, expected) dummy = Series(0.0, index=np.arange(4)) - result = reduction.reduce( + result = reduction.compute_reduction( arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100)) ) expected = arr.sum(1) assert_almost_equal(result, expected) - result = reduction.reduce( + result = reduction.compute_reduction( arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100)) ) assert_almost_equal(result, expected)
The current name makes it difficult to grep for. Assorted cleanups; added type annotations in a few places.
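Usage mirroring the updated tests; note that `pandas._libs.reduction` is internal and not a stable public API:

```python
import numpy as np
from pandas import Index
from pandas._libs import reduction

arr = np.random.randn(100, 4)
# The new name makes call sites like this easy to find with a plain grep.
result = reduction.compute_reduction(arr, np.sum, labels=Index(np.arange(4)))
```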
https://api.github.com/repos/pandas-dev/pandas/pulls/27706
2019-08-02T01:12:20Z
2019-08-05T06:31:49Z
2019-08-05T06:31:49Z
2019-08-05T14:32:37Z
BUG: Avoid try/except in blocks, fix setitem bug in datetimelike EA
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index f86b307e5ede3..2206fd3316685 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -473,6 +473,8 @@ def __setitem__( # to a period in from_sequence). For DatetimeArray, it's Timestamp... # I don't know if mypy can do that, possibly with Generics. # https://mypy.readthedocs.io/en/latest/generics.html + if lib.is_scalar(value) and not isna(value): + value = com.maybe_box_datetimelike(value) if is_list_like(value): is_slice = isinstance(key, slice) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 6d70fcfb62d52..563faab98d68b 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2230,7 +2230,9 @@ def _can_hold_element(self, element): if tipo is not None: if self.is_datetimetz: # require exact match, since non-nano does not exist - return is_dtype_equal(tipo, self.dtype) + return is_dtype_equal(tipo, self.dtype) or is_valid_nat_for_dtype( + element, self.dtype + ) # GH#27419 if we get a non-nano datetime64 object return is_datetime64_dtype(tipo) @@ -2500,26 +2502,28 @@ def concat_same_type(self, to_concat, placement=None): def fillna(self, value, limit=None, inplace=False, downcast=None): # We support filling a DatetimeTZ with a `value` whose timezone # is different by coercing to object. - try: + if self._can_hold_element(value): return super().fillna(value, limit, inplace, downcast) - except (ValueError, TypeError): - # different timezones, or a non-tz - return self.astype(object).fillna( - value, limit=limit, inplace=inplace, downcast=downcast - ) + + # different timezones, or a non-tz + return self.astype(object).fillna( + value, limit=limit, inplace=inplace, downcast=downcast + ) def setitem(self, indexer, value): # https://github.com/pandas-dev/pandas/issues/24020 # Need a dedicated setitem until #24020 (type promotion in setitem # for extension arrays) is designed and implemented. 
- try: + if self._can_hold_element(value) or ( + isinstance(indexer, np.ndarray) and indexer.size == 0 + ): return super().setitem(indexer, value) - except (ValueError, TypeError): - obj_vals = self.values.astype(object) - newb = make_block( - obj_vals, placement=self.mgr_locs, klass=ObjectBlock, ndim=self.ndim - ) - return newb.setitem(indexer, value) + + obj_vals = self.values.astype(object) + newb = make_block( + obj_vals, placement=self.mgr_locs, klass=ObjectBlock, ndim=self.ndim + ) + return newb.setitem(indexer, value) def equals(self, other): # override for significant performance improvement diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 58c2f3fc65bb2..d749d9bb47d25 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -179,6 +179,22 @@ def test_setitem_clears_freq(self): a[0] = pd.Timestamp("2000", tz="US/Central") assert a.freq is None + @pytest.mark.parametrize( + "obj", + [ + pd.Timestamp.now(), + pd.Timestamp.now().to_datetime64(), + pd.Timestamp.now().to_pydatetime(), + ], + ) + def test_setitem_objects(self, obj): + # make sure we accept datetime64 and datetime in addition to Timestamp + dti = pd.date_range("2000", periods=2, freq="D") + arr = dti._data + + arr[0] = obj + assert arr[0] == obj + def test_repeat_preserves_tz(self): dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central") arr = DatetimeArray(dti) diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index 5825f9f150eb8..540c3343b2a1b 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -125,6 +125,22 @@ def test_setitem_clears_freq(self): a[0] = pd.Timedelta("1H") assert a.freq is None + @pytest.mark.parametrize( + "obj", + [ + pd.Timedelta(seconds=1), + pd.Timedelta(seconds=1).to_timedelta64(), + pd.Timedelta(seconds=1).to_pytimedelta(), + ], + ) + def test_setitem_objects(self, obj): + # make sure we accept timedelta64 and timedelta in addition to Timedelta + tdi = pd.timedelta_range("2 Days", periods=4, freq="H") + arr = TimedeltaArray(tdi, freq=tdi.freq) + + arr[0] = obj + assert arr[0] == pd.Timedelta(seconds=1) + class TestReductions: def test_min_max(self): diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index d75016824d6cf..c760c75e44f6b 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -418,7 +418,7 @@ def test_value_counts_unique_nunique_null(self, null_obj): values = o._shallow_copy(v) else: o = o.copy() - o[0:2] = iNaT + o[0:2] = pd.NaT values = o._values elif needs_i8_conversion(o):
The initial impetus for this was avoiding two try/excepts in Block methods (i.e. the diff in internals.blocks). This uncovered the bug in DTA/TDA, which accounts for the rest of the diff.
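The uncovered DatetimeArray bug, per the new tests (a sketch; `._data` is an internal accessor):

```python
import pandas as pd

dti = pd.date_range("2000", periods=2, freq="D")
arr = dti._data  # DatetimeArray

# np.datetime64 and stdlib datetime are now accepted alongside Timestamp:
arr[0] = pd.Timestamp.now().to_datetime64()
arr[1] = pd.Timestamp.now().to_pydatetime()
```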
https://api.github.com/repos/pandas-dev/pandas/pulls/27704
2019-08-01T23:15:43Z
2019-08-04T21:58:00Z
2019-08-04T21:58:00Z
2019-08-04T23:12:01Z
Backport PR #27701 on branch 0.25.x (CI: Fixed CI)
diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml index 0d9a760914dab..7da4abb9283df 100644 --- a/ci/deps/travis-36-locale.yaml +++ b/ci/deps/travis-36-locale.yaml @@ -29,13 +29,13 @@ dependencies: - s3fs=0.0.8 - scipy - sqlalchemy=1.1.4 - - xarray=0.8.2 + - xarray=0.10 - xlrd - xlsxwriter - xlwt # universal - - pytest>=4.0.2 - - pytest-xdist + - pytest>=5.0.1 + - pytest-xdist>=1.29.0 - pytest-mock - pip - pip:
Backport PR #27701: CI: Fixed CI
https://api.github.com/repos/pandas-dev/pandas/pulls/27703
2019-08-01T22:43:02Z
2019-08-02T11:45:44Z
2019-08-02T11:45:44Z
2019-08-02T11:45:44Z
BUG: Concatenation warning still appears with sort=False
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index c80195af413f7..01e4046e8b743 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -125,7 +125,7 @@ Reshaping ^^^^^^^^^ - A ``KeyError`` is now raised if ``.unstack()`` is called on a :class:`Series` or :class:`DataFrame` with a flat :class:`Index` passing a name which is not the correct one (:issue:`18303`) -- +- :meth:`DataFrame.join` now suppresses the ``FutureWarning`` when the sort parameter is specified (:issue:`21952`) - Sparse diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5980e3d133374..2a93cd5937221 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7217,10 +7217,14 @@ def _join_compat( # join indexes only using concat if can_concat: if how == "left": - res = concat(frames, axis=1, join="outer", verify_integrity=True) + res = concat( + frames, axis=1, join="outer", verify_integrity=True, sort=sort + ) return res.reindex(self.index, copy=False) else: - return concat(frames, axis=1, join=how, verify_integrity=True) + return concat( + frames, axis=1, join=how, verify_integrity=True, sort=sort + ) joined = frames[0] diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py index adace5e4784ae..220968d4b3d29 100644 --- a/pandas/tests/frame/test_join.py +++ b/pandas/tests/frame/test_join.py @@ -193,3 +193,32 @@ def test_join_left_sequence_non_unique_index(): ) tm.assert_frame_equal(joined, expected) + + +@pytest.mark.parametrize("sort_kw", [True, False, None]) +def test_suppress_future_warning_with_sort_kw(sort_kw): + a = DataFrame({"col1": [1, 2]}, index=["c", "a"]) + + b = DataFrame({"col2": [4, 5]}, index=["b", "a"]) + + c = DataFrame({"col3": [7, 8]}, index=["a", "b"]) + + expected = DataFrame( + { + "col1": {"a": 2.0, "b": float("nan"), "c": 1.0}, + "col2": {"a": 5.0, "b": 4.0, "c": float("nan")}, + "col3": {"a": 7.0, "b": 8.0, "c": float("nan")}, + } + ) + if sort_kw is False: + expected = expected.reindex(index=["c", "a", "b"]) + + if sort_kw is None: + # only warn if not explicitly specified + ctx = tm.assert_produces_warning(FutureWarning, check_stacklevel=False) + else: + ctx = tm.assert_produces_warning(None, check_stacklevel=False) + + with ctx: + result = a.join([b, c], how="outer", sort=sort_kw) + tm.assert_frame_equal(result, expected)
- [x] closes #21952 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
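The behavior under test, as a standalone sketch:

```python
import pandas as pd

a = pd.DataFrame({"col1": [1, 2]}, index=["c", "a"])
b = pd.DataFrame({"col2": [4, 5]}, index=["b", "a"])
c = pd.DataFrame({"col3": [7, 8]}, index=["a", "b"])

# Passing sort explicitly now forwards it to concat, so no FutureWarning;
# omitting sort still warns about the future change of default.
result = a.join([b, c], how="outer", sort=False)
```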
https://api.github.com/repos/pandas-dev/pandas/pulls/27702
2019-08-01T21:14:26Z
2019-08-04T21:59:13Z
2019-08-04T21:59:13Z
2019-08-12T21:24:42Z
CI: Fixed CI
diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml index 0d9a760914dab..7da4abb9283df 100644 --- a/ci/deps/travis-36-locale.yaml +++ b/ci/deps/travis-36-locale.yaml @@ -29,13 +29,13 @@ dependencies: - s3fs=0.0.8 - scipy - sqlalchemy=1.1.4 - - xarray=0.8.2 + - xarray=0.10 - xlrd - xlsxwriter - xlwt # universal - - pytest>=4.0.2 - - pytest-xdist + - pytest>=5.0.1 + - pytest-xdist>=1.29.0 - pytest-mock - pip - pip:
cc @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/27701
2019-08-01T20:42:51Z
2019-08-01T22:42:51Z
2019-08-01T22:42:51Z
2019-08-01T22:42:52Z
BUG: groupby(axis=1) cannot select column names
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index cc4bab8b9a923..a266f79aed02c 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -166,6 +166,7 @@ Groupby/resample/rolling - - +- Bug in :meth:`DataFrame.groupby` not offering selection by column name when ``axis=1`` (:issue:`27614`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 1d88ebd26b1b6..5c32550af3883 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -606,10 +606,10 @@ def is_in_obj(gpr): elif is_in_axis(gpr): # df.groupby('name') if gpr in obj: if validate: - obj._check_label_or_level_ambiguity(gpr) + obj._check_label_or_level_ambiguity(gpr, axis=axis) in_axis, name, gpr = True, gpr, obj[gpr] exclusions.append(name) - elif obj._is_level_reference(gpr): + elif obj._is_level_reference(gpr, axis=axis): in_axis, name, level, gpr = False, None, gpr, None else: raise KeyError(gpr) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 2379d25ebe5aa..4556b22b57279 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1860,3 +1860,25 @@ def test_groupby_groups_in_BaseGrouper(): result = df.groupby(["beta", pd.Grouper(level="alpha")]) expected = df.groupby(["beta", "alpha"]) assert result.groups == expected.groups + + +@pytest.mark.parametrize("group_name", ["x", ["x"]]) +def test_groupby_axis_1(group_name): + # GH 27614 + df = pd.DataFrame( + np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20] + ) + df.index.name = "y" + df.columns.name = "x" + + results = df.groupby(group_name, axis=1).sum() + expected = df.T.groupby(group_name).sum().T + assert_frame_equal(results, expected) + + # test on MI column + iterables = [["bar", "baz", "foo"], ["one", "two"]] + mi = pd.MultiIndex.from_product(iterables=iterables, names=["x", "x1"]) + df = pd.DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi) + results = df.groupby(group_name, axis=1).sum() + expected = df.T.groupby(group_name).sum().T + assert_frame_equal(results, expected)
- [x] closes #27614 - [x] tests added / passed - [ ] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27700
2019-08-01T20:13:50Z
2019-08-02T13:31:00Z
2019-08-02T13:31:00Z
2019-08-02T13:44:13Z
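Condensed from the test added above, a sketch of the fix: `groupby(axis=1)` now accepts the columns' level name directly.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(
    np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20]
)
df.columns.name = "x"

# The column-axis name now resolves on axis=1 instead of raising KeyError;
# the result matches transposing, grouping on axis=0, and transposing back.
result = df.groupby("x", axis=1).sum()
expected = df.T.groupby("x").sum().T
assert result.equals(expected)
```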
BUG: DTA/TDA incorrectly accepting iNaT for setitem
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index f86b307e5ede3..47b138a9e1604 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -499,9 +499,6 @@ def __setitem__( value = self._unbox_scalar(value) elif is_valid_nat_for_dtype(value, self.dtype): value = iNaT - elif not isna(value) and lib.is_integer(value) and value == iNaT: - # exclude misc e.g. object() and any NAs not allowed above - value = iNaT else: msg = ( "'value' should be a '{scalar}', 'NaT', or array of those. " diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index d9646feaf661e..ffda2f4de2700 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -682,15 +682,15 @@ def test_casting_nat_setitem_array(array, casting_nats): [ ( pd.TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data, - (np.datetime64("NaT", "ns"),), + (np.datetime64("NaT", "ns"), pd.NaT.value), ), ( pd.date_range("2000-01-01", periods=3, freq="D")._data, - (np.timedelta64("NaT", "ns"),), + (np.timedelta64("NaT", "ns"), pd.NaT.value), ), ( pd.period_range("2000-01-01", periods=3, freq="D")._data, - (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns")), + (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), pd.NaT.value), ), ], ids=lambda x: type(x).__name__, diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index d75016824d6cf..c760c75e44f6b 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -418,7 +418,7 @@ def test_value_counts_unique_nunique_null(self, null_obj): values = o._shallow_copy(v) else: o = o.copy() - o[0:2] = iNaT + o[0:2] = pd.NaT values = o._values elif needs_i8_conversion(o):
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry First of several related PRs.
https://api.github.com/repos/pandas-dev/pandas/pulls/27699
2019-08-01T18:16:28Z
2019-08-02T15:59:17Z
2019-08-02T15:59:17Z
2019-08-02T16:03:45Z
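A hedged sketch of the behaviour change (the exception comes from the error path in `__setitem__` shown above): `pd.NaT` is still accepted when assigning into a datetime-like array, while the bare `iNaT` integer sentinel (`pd.NaT.value`) is now rejected like any other mismatched scalar.

```python
import pandas as pd

arr = pd.date_range("2000-01-01", periods=3)._data  # DatetimeArray
arr[0] = pd.NaT  # still accepted

try:
    arr[1] = pd.NaT.value  # a plain integer no longer masquerades as NaT
except (TypeError, ValueError):
    print("integer iNaT rejected")
```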
Backport PR #27478 on branch 0.25.x (Add a Roadmap)
diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst index a149f31118ed5..c7710ff19f078 100644 --- a/doc/source/development/index.rst +++ b/doc/source/development/index.rst @@ -16,3 +16,4 @@ Development internals extending developer + roadmap diff --git a/doc/source/development/roadmap.rst b/doc/source/development/roadmap.rst new file mode 100644 index 0000000000000..88e0a18e6b81a --- /dev/null +++ b/doc/source/development/roadmap.rst @@ -0,0 +1,193 @@ +.. _roadmap: + +======= +Roadmap +======= + +This page provides an overview of the major themes in pandas' development. Each of +these items requires a relatively large amount of effort to implement. These may +be achieved more quickly with dedicated funding or interest from contributors. + +An item being on the roadmap does not mean that it will *necessarily* happen, even +with unlimited funding. During the implementation period we may discover issues +preventing the adoption of the feature. + +Additionally, an item *not* being on the roadmap does not exclude it from inclusion +in pandas. The roadmap is intended for larger, fundamental changes to the project that +are likely to take months or years of developer time. Smaller-scoped items will continue +to be tracked on our `issue tracker <https://github.com/pandas-dev/pandas/issues>`__. + +See :ref:`roadmap.evolution` for proposing changes to this document. + +Extensibility +------------- + +Pandas :ref:`extending.extension-types` allow for extending NumPy types with custom +data types and array storage. Pandas uses extension types internally, and provides +an interface for 3rd-party libraries to define their own custom data types. + +Many parts of pandas still unintentionally convert data to a NumPy array. +These problems are especially pronounced for nested data. + +We'd like to improve the handling of extension arrays throughout the library, +making their behavior more consistent with the handling of NumPy arrays. We'll do this +by cleaning up pandas' internals and adding new methods to the extension array interface. + +String data type +---------------- + +Currently, pandas stores text data in an ``object`` -dtype NumPy array. +The current implementation has two primary drawbacks: First, ``object`` -dtype +is not specific to strings: any Python object can be stored in an ``object`` -dtype +array, not just strings. Second: this is not efficient. The NumPy memory model +isn't especially well-suited to variable width text data. + +To solve the first issue, we propose a new extension type for string data. This +will initially be opt-in, with users explicitly requesting ``dtype="string"``. +The array backing this string dtype may initially be the current implementation: +an ``object`` -dtype NumPy array of Python strings. + +To solve the second issue (performance), we'll explore alternative in-memory +array libraries (for example, Apache Arrow). As part of the work, we may +need to implement certain operations expected by pandas users (for example +the algorithm used in, ``Series.str.upper``). That work may be done outside of +pandas. + +Apache Arrow interoperability +----------------------------- + +`Apache Arrow <https://arrow.apache.org>`__ is a cross-language development +platform for in-memory data. The Arrow logical types are closely aligned with +typical pandas use cases. + +We'd like to provide better-integrated support for Arrow memory and data types +within pandas. 
This will let us take advantage of its I/O capabilities and +provide for better interoperability with other languages and libraries +using Arrow. + +Block manager rewrite +--------------------- + +We'd like to replace pandas current internal data structures (a collection of +1 or 2-D arrays) with a simpler collection of 1-D arrays. + +Pandas internal data model is quite complex. A DataFrame is made up of +one or more 2-dimensional "blocks", with one or more blocks per dtype. This +collection of 2-D arrays is managed by the BlockManager. + +The primary benefit of the BlockManager is improved performance on certain +operations (construction from a 2D array, binary operations, reductions across the columns), +especially for wide DataFrames. However, the BlockManager substantially increases the +complexity and maintenance burden of pandas. + +By replacing the BlockManager we hope to achieve + +* Substantially simpler code +* Easier extensibility with new logical types +* Better user control over memory use and layout +* Improved micro-performance +* Option to provide a C / Cython API to pandas' internals + +See `these design documents <https://dev.pandas.io/pandas2/internal-architecture.html#removal-of-blockmanager-new-dataframe-internals>`__ +for more. + +Decoupling of indexing and internals +------------------------------------ + +The code for getting and setting values in pandas' data structures needs refactoring. +In particular, we must clearly separate code that converts keys (e.g., the argument +to ``DataFrame.loc``) to positions from code that uses uses these positions to get +or set values. This is related to the proposed BlockManager rewrite. Currently, the +BlockManager sometimes uses label-based, rather than position-based, indexing. +We propose that it should only work with positional indexing, and the translation of keys +to positions should be entirely done at a higher level. + +Indexing is a complicated API with many subtleties. This refactor will require care +and attention. More details are discussed at +https://github.com/pandas-dev/pandas/wiki/(Tentative)-rules-for-restructuring-indexing-code + +Numba-accelerated operations +---------------------------- + +`Numba <https://numba.pydata.org>`__ is a JIT compiler for Python code. We'd like to provide +ways for users to apply their own Numba-jitted functions where pandas accepts user-defined functions +(for example, :meth:`Series.apply`, :meth:`DataFrame.apply`, :meth:`DataFrame.applymap`, +and in groupby and window contexts). This will improve the performance of +user-defined-functions in these operations by staying within compiled code. + + +Documentation improvements +-------------------------- + +We'd like to improve the content, structure, and presentation of the pandas documentation. +Some specific goals include + +* Overhaul the HTML theme with a modern, responsive design (:issue:`15556`) +* Improve the "Getting Started" documentation, designing and writing learning paths + for users different backgrounds (e.g. brand new to programming, familiar with + other languages like R, already familiar with Python). +* Improve the overall organization of the documentation and specific subsections + of the documentation to make navigation and finding content easier. + +Package docstring validation +---------------------------- + +To improve the quality and consistency of pandas docstrings, we've developed +tooling to check docstrings in a variety of ways. 
+https://github.com/pandas-dev/pandas/blob/master/scripts/validate_docstrings.py +contains the checks. + +Like many other projects, pandas uses the +`numpydoc <https://numpydoc.readthedocs.io/en/latest/>`__ style for writing +docstrings. With the collaboration of the numpydoc maintainers, we'd like to +move the checks to a package other than pandas so that other projects can easily +use them as well. + +Performance monitoring +---------------------- + +Pandas uses `airspeed velocity <https://asv.readthedocs.io/en/stable/>`__ to +monitor for performance regressions. ASV itself is a fabulous tool, but requires +some additional work to be integrated into an open source project's workflow. + +The `asv-runner <https://github.com/asv-runner>`__ organization, currently made up +of pandas maintainers, provides tools built on top of ASV. We have a physical +machine for running a number of project's benchmarks, and tools managing the +benchmark runs and reporting on results. + +We'd like to fund improvements and maintenance of these tools to + +* Be more stable. Currently, they're maintained on the nights and weekends when + a maintainer has free time. +* Tune the system for benchmarks to improve stability, following + https://pyperf.readthedocs.io/en/latest/system.html +* Build a GitHub bot to request ASV runs *before* a PR is merged. Currently, the + benchmarks are only run nightly. + +.. _roadmap.evolution: + +Roadmap Evolution +----------------- + +Pandas continues to evolve. The direction is primarily determined by community +interest. Everyone is welcome to review existing items on the roadmap and +to propose a new item. + +Each item on the roadmap should be a short summary of a larger design proposal. +The proposal should include + +1. Short summary of the changes, which would be appropriate for inclusion in + the roadmap if accepted. +2. Motivation for the changes. +3. An explanation of why the change is in scope for pandas. +4. Detailed design: Preferably with example-usage (even if not implemented yet) + and API documentation +5. API Change: Any API changes that may result from the proposal. + +That proposal may then be submitted as a GitHub issue, where the pandas maintainers +can review and comment on the design. The `pandas mailing list <https://mail.python.org/mailman/listinfo/pandas-dev>`__ +should be notified of the proposal. + +When there's agreement that an implementation +would be welcome, the roadmap should be updated to include the summary and a +link to the discussion issue.
Backport PR #27478: Add a Roadmap
https://api.github.com/repos/pandas-dev/pandas/pulls/27698
2019-08-01T18:04:23Z
2019-08-02T11:47:46Z
2019-08-02T11:47:46Z
2019-08-02T11:47:50Z
Replace with nested dict raises for overlapping keys
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 7fe358d3820f2..7a10447e3ad40 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -207,6 +207,7 @@ ExtensionArray Other ^^^^^ - Trying to set the ``display.precision``, ``display.max_rows`` or ``display.max_columns`` using :meth:`set_option` to anything but a ``None`` or a positive int will raise a ``ValueError`` (:issue:`23348`) +- Using :meth:`DataFrame.replace` with overlapping keys in a nested dictionary will no longer raise, now matching the behavior of a flat dictionary (:issue:`27660`) - :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now support dicts as ``compression`` argument with key ``'method'`` being the compression method and others as additional compression options when the compression method is ``'zip'``. (:issue:`26023`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fac5e0f085fc6..6ade69fb4ca9d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6669,11 +6669,7 @@ def replace( for k, v in items: keys, values = list(zip(*v.items())) or ([], []) - if set(keys) & set(values): - raise ValueError( - "Replacement not allowed with " - "overlapping keys and values" - ) + to_rep_dict[k] = list(keys) value_dict[k] = list(values) diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py index 2862615ef8585..b341ed6a52ca5 100644 --- a/pandas/tests/frame/test_replace.py +++ b/pandas/tests/frame/test_replace.py @@ -1069,18 +1069,24 @@ def test_replace_truthy(self): e = df assert_frame_equal(r, e) - def test_replace_int_to_int_chain(self): + def test_nested_dict_overlapping_keys_replace_int(self): + # GH 27660 keep behaviour consistent for simple dictionary and + # nested dictionary replacement df = DataFrame({"a": list(range(1, 5))}) - with pytest.raises(ValueError, match="Replacement not allowed .+"): - df.replace({"a": dict(zip(range(1, 5), range(2, 6)))}) - def test_replace_str_to_str_chain(self): + result = df.replace({"a": dict(zip(range(1, 5), range(2, 6)))}) + expected = df.replace(dict(zip(range(1, 5), range(2, 6)))) + assert_frame_equal(result, expected) + + def test_nested_dict_overlapping_keys_replace_str(self): + # GH 27660 a = np.arange(1, 5) astr = a.astype(str) bstr = np.arange(2, 6).astype(str) df = DataFrame({"a": astr}) - with pytest.raises(ValueError, match="Replacement not allowed .+"): - df.replace({"a": dict(zip(astr, bstr))}) + result = df.replace(dict(zip(astr, bstr))) + expected = df.replace({"a": dict(zip(astr, bstr))}) + assert_frame_equal(result, expected) def test_replace_swapping_bug(self): df = pd.DataFrame({"a": [True, False, True]})
- [x] closes #27660 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27696
2019-08-01T14:26:59Z
2019-08-27T14:09:42Z
2019-08-27T14:09:41Z
2019-08-27T14:09:55Z
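Assuming this patch, a quick sketch of the now-consistent behaviour: the nested and flat dictionary forms of `replace` both succeed and replace each original value exactly once (no chaining).

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3, 4]})
mapping = {1: 2, 2: 3, 3: 4, 4: 5}  # keys and values overlap

# The nested form previously raised ValueError; both now give [2, 3, 4, 5].
nested = df.replace({"a": mapping})
flat = df.replace(mapping)
assert nested.equals(flat)
```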
Backport PR #27689 on branch 0.25.x (DOC: 0.25 fixups)
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index 592b4748126c1..0509a370d7866 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -16,6 +16,7 @@ Version 0.25 .. toctree:: :maxdepth: 2 + v0.25.1 v0.25.0 Version 0.24 diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 5b8f980d27b9d..74232c41d8a8c 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1267,4 +1267,4 @@ Other Contributors ~~~~~~~~~~~~ -.. contributors:: 0.24.x..HEAD +.. contributors:: v0.24.x..HEAD diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 40fefe7ec43a8..c80195af413f7 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -1,7 +1,3 @@ -:orphan: - -.. TODO. Remove the orphan tag. - .. _whatsnew_0251: What's new in 0.25.1 (July XX, 2019) @@ -166,6 +162,4 @@ Other Contributors ~~~~~~~~~~~~ -.. TODO. Change to v0.25.0..HEAD - -.. contributors:: HEAD..HEAD +.. contributors:: v0.25.0..HEAD
Backport PR #27689: DOC: 0.25 fixups
https://api.github.com/repos/pandas-dev/pandas/pulls/27694
2019-08-01T13:57:13Z
2019-08-01T20:40:52Z
2019-08-01T20:40:52Z
2019-08-01T20:40:52Z
Backport PR #27653 on branch 0.25.x (BUG: Fix dir(interval_index))
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index fb67decb46b64..40fefe7ec43a8 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -78,7 +78,7 @@ Strings Interval ^^^^^^^^ - +- Bug in :class:`IntervalIndex` where `dir(obj)` would raise ``ValueError`` (:issue:`27571`) - - - diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 27ee685acfde7..fef17fd43a8e3 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -969,6 +969,7 @@ _TYPE_MAP = { 'M': 'datetime64', 'timedelta64[ns]': 'timedelta64', 'm': 'timedelta64', + 'interval': 'interval', } # types only exist on certain platform diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 7c293ca4e50b0..d4c6d780709a5 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1957,8 +1957,11 @@ def _validate(data): values = getattr(data, "values", data) # Series / Index values = getattr(values, "categories", values) # categorical / normal - # missing values obfuscate type inference -> skip - inferred_dtype = lib.infer_dtype(values, skipna=True) + try: + inferred_dtype = lib.infer_dtype(values, skipna=True) + except ValueError: + # GH#27571 mostly occurs with ExtensionArray + inferred_dtype = None if inferred_dtype not in allowed_types: raise AttributeError("Can only use .str accessor with string " "values!") diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 4d688976cd50b..6ab3830317059 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1134,6 +1134,17 @@ def test_categorical(self): result = lib.infer_dtype(Series(arr), skipna=True) assert result == "categorical" + def test_interval(self): + idx = pd.IntervalIndex.from_breaks(range(5), closed="both") + inferred = lib.infer_dtype(idx, skipna=False) + assert inferred == "interval" + + inferred = lib.infer_dtype(idx._data, skipna=False) + assert inferred == "interval" + + inferred = lib.infer_dtype(pd.Series(idx), skipna=False) + assert inferred == "interval" + class TestNumberScalar: def test_is_number(self): diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index c61af1ce70aed..c1a21e6a7f152 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -1095,3 +1095,10 @@ def test_is_all_dates(self): ) year_2017_index = pd.IntervalIndex([year_2017]) assert not year_2017_index.is_all_dates + + +def test_dir(): + # GH#27571 dir(interval_index) should not raise + index = IntervalIndex.from_arrays([0, 1], [1, 2]) + result = dir(index) + assert "str" not in result
Backport PR #27653: BUG: Fix dir(interval_index)
https://api.github.com/repos/pandas-dev/pandas/pulls/27693
2019-08-01T12:57:14Z
2019-08-01T13:56:31Z
2019-08-01T13:56:31Z
2019-08-01T13:56:31Z
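A minimal reproduction of the fix, taken from the new tests: introspecting an `IntervalIndex` no longer blows up inside the `.str` accessor validation, and interval data now has its own inferred dtype.

```python
import pandas as pd
from pandas._libs import lib

idx = pd.IntervalIndex.from_arrays([0, 1], [1, 2])

# dir() previously raised ValueError while probing the .str accessor;
# it now simply reports that there is no string accessor.
assert "str" not in dir(idx)
assert lib.infer_dtype(idx, skipna=False) == "interval"
```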
Backport PR #27580 on branch 0.25.x
diff --git a/doc/source/install.rst b/doc/source/install.rst index 352b56ebd3020..fc99b458fa0af 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -15,35 +15,10 @@ Instructions for installing from source, `PyPI <https://pypi.org/project/pandas>`__, `ActivePython <https://www.activestate.com/activepython/downloads>`__, various Linux distributions, or a `development version <http://github.com/pandas-dev/pandas>`__ are also provided. -.. _install.dropping-27: - -Plan for dropping Python 2.7 ----------------------------- - -The Python core team plans to stop supporting Python 2.7 on January 1st, 2020. -In line with `NumPy's plans`_, all pandas releases through December 31, 2018 -will support Python 2. - -The 0.24.x feature release will be the last release to -support Python 2. The released package will continue to be available on -PyPI and through conda. - - Starting **January 1, 2019**, all new feature releases (> 0.24) will be Python 3 only. - -If there are people interested in continued support for Python 2.7 past December -31, 2018 (either backporting bug fixes or funding) please reach out to the -maintainers on the issue tracker. - -For more information, see the `Python 3 statement`_ and the `Porting to Python 3 guide`_. - -.. _NumPy's plans: https://github.com/numpy/numpy/blob/master/doc/neps/nep-0014-dropping-python2.7-proposal.rst#plan-for-dropping-python-27-support -.. _Python 3 statement: http://python3statement.org/ -.. _Porting to Python 3 guide: https://docs.python.org/3/howto/pyporting.html - Python version support ---------------------- -Officially Python 2.7, 3.5, 3.6, and 3.7. +Officially Python 3.5.3 and above, 3.6, and 3.7. Installing pandas ----------------- diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst index 62cf977d8c8ac..f4c283ea742f7 100644 --- a/doc/source/whatsnew/v0.23.0.rst +++ b/doc/source/whatsnew/v0.23.0.rst @@ -31,7 +31,7 @@ Check the :ref:`API Changes <whatsnew_0230.api_breaking>` and :ref:`deprecations .. warning:: Starting January 1, 2019, pandas feature releases will support Python 3 only. - See :ref:`install.dropping-27` for more. + See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. .. contents:: What's new in v0.23.0 :local: diff --git a/doc/source/whatsnew/v0.23.1.rst b/doc/source/whatsnew/v0.23.1.rst index d730a57a01a60..03b7d9db6bc63 100644 --- a/doc/source/whatsnew/v0.23.1.rst +++ b/doc/source/whatsnew/v0.23.1.rst @@ -12,7 +12,7 @@ and bug fixes. We recommend that all users upgrade to this version. .. warning:: Starting January 1, 2019, pandas feature releases will support Python 3 only. - See :ref:`install.dropping-27` for more. + See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. .. contents:: What's new in v0.23.1 :local: diff --git a/doc/source/whatsnew/v0.23.2.rst b/doc/source/whatsnew/v0.23.2.rst index df8cc12e3385e..9f24092d1d4ae 100644 --- a/doc/source/whatsnew/v0.23.2.rst +++ b/doc/source/whatsnew/v0.23.2.rst @@ -17,7 +17,7 @@ and bug fixes. We recommend that all users upgrade to this version. .. warning:: Starting January 1, 2019, pandas feature releases will support Python 3 only. - See :ref:`install.dropping-27` for more. + See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. .. 
contents:: What's new in v0.23.2 :local: diff --git a/doc/source/whatsnew/v0.23.4.rst b/doc/source/whatsnew/v0.23.4.rst index 060d1fc8eba34..eadac6f569926 100644 --- a/doc/source/whatsnew/v0.23.4.rst +++ b/doc/source/whatsnew/v0.23.4.rst @@ -12,7 +12,7 @@ and bug fixes. We recommend that all users upgrade to this version. .. warning:: Starting January 1, 2019, pandas feature releases will support Python 3 only. - See :ref:`install.dropping-27` for more. + See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. .. contents:: What's new in v0.23.4 :local: diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index a66056f661de3..d9f41d2a75116 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -6,7 +6,7 @@ What's new in 0.24.0 (January 25, 2019) .. warning:: The 0.24.x series of releases will be the last to support Python 2. Future feature - releases will support Python 3 only. See :ref:`install.dropping-27` for more + releases will support Python 3 only. See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more details. {{ header }} diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst index 1b0232cad7476..aead8c48eb9b7 100644 --- a/doc/source/whatsnew/v0.24.1.rst +++ b/doc/source/whatsnew/v0.24.1.rst @@ -6,7 +6,7 @@ Whats new in 0.24.1 (February 3, 2019) .. warning:: The 0.24.x series of releases will be the last to support Python 2. Future feature - releases will support Python 3 only. See :ref:`install.dropping-27` for more. + releases will support Python 3 only. See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. {{ header }} diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst index da8064893e8a8..d1a893f99cff4 100644 --- a/doc/source/whatsnew/v0.24.2.rst +++ b/doc/source/whatsnew/v0.24.2.rst @@ -6,7 +6,7 @@ Whats new in 0.24.2 (March 12, 2019) .. warning:: The 0.24.x series of releases will be the last to support Python 2. Future feature - releases will support Python 3 only. See :ref:`install.dropping-27` for more. + releases will support Python 3 only. See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. {{ header }} diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 42e756635e739..5b8f980d27b9d 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -6,7 +6,7 @@ What's new in 0.25.0 (July 18, 2019) .. warning:: Starting with the 0.25.x series of releases, pandas only supports Python 3.5.3 and higher. - See :ref:`install.dropping-27` for more details. + See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more details. .. warning::
closes #27558
https://api.github.com/repos/pandas-dev/pandas/pulls/27691
2019-08-01T12:24:09Z
2019-08-01T13:56:20Z
2019-08-01T13:56:20Z
2019-10-29T06:51:25Z
Auto backport of PR #27580 on 0.25.x
diff --git a/Makefile b/Makefile index baceefe6d49ff..9e69eb7922925 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ lint-diff: git diff upstream/master --name-only -- "*.py" | xargs flake8 black: - black . --exclude '(asv_bench/env|\.egg|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)' + black . --exclude '(asv_bench/env|\.egg|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist|setup.py)' develop: build python setup.py develop diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 96a8440d85694..06d45e38bfcdb 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -56,7 +56,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then black --version MSG='Checking black formatting' ; echo $MSG - black . --check --exclude '(asv_bench/env|\.egg|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)' + black . --check --exclude '(asv_bench/env|\.egg|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist|setup.py)' RET=$(($RET + $?)) ; echo $MSG "DONE" # `setup.cfg` contains the list of error codes that are being ignored in flake8 diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst index b492a4edd70a4..e341dcb8318bc 100644 --- a/doc/source/development/extending.rst +++ b/doc/source/development/extending.rst @@ -441,5 +441,22 @@ This would be more or less equivalent to: The backend module can then use other visualization tools (Bokeh, Altair,...) to generate the plots. +Libraries implementing the plotting backend should use `entry points <https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`__ +to make their backend discoverable to pandas. The key is ``"pandas_plotting_backends"``. For example, pandas +registers the default "matplotlib" backend as follows. + +.. code-block:: python + + # in setup.py + setup( # noqa: F821 + ..., + entry_points={ + "pandas_plotting_backends": [ + "matplotlib = pandas:plotting._matplotlib", + ], + }, + ) + + More information on how to implement a third-party plotting backend can be found at https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py#L1. diff --git a/doc/source/install.rst b/doc/source/install.rst index 352b56ebd3020..fc99b458fa0af 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -15,35 +15,10 @@ Instructions for installing from source, `PyPI <https://pypi.org/project/pandas>`__, `ActivePython <https://www.activestate.com/activepython/downloads>`__, various Linux distributions, or a `development version <http://github.com/pandas-dev/pandas>`__ are also provided. -.. _install.dropping-27: - -Plan for dropping Python 2.7 ----------------------------- - -The Python core team plans to stop supporting Python 2.7 on January 1st, 2020. -In line with `NumPy's plans`_, all pandas releases through December 31, 2018 -will support Python 2. - -The 0.24.x feature release will be the last release to -support Python 2. The released package will continue to be available on -PyPI and through conda. - - Starting **January 1, 2019**, all new feature releases (> 0.24) will be Python 3 only. - -If there are people interested in continued support for Python 2.7 past December -31, 2018 (either backporting bug fixes or funding) please reach out to the -maintainers on the issue tracker. - -For more information, see the `Python 3 statement`_ and the `Porting to Python 3 guide`_. - -.. 
_NumPy's plans: https://github.com/numpy/numpy/blob/master/doc/neps/nep-0014-dropping-python2.7-proposal.rst#plan-for-dropping-python-27-support -.. _Python 3 statement: http://python3statement.org/ -.. _Porting to Python 3 guide: https://docs.python.org/3/howto/pyporting.html - Python version support ---------------------- -Officially Python 2.7, 3.5, 3.6, and 3.7. +Officially Python 3.5.3 and above, 3.6, and 3.7. Installing pandas ----------------- diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst index 62cf977d8c8ac..f4c283ea742f7 100644 --- a/doc/source/whatsnew/v0.23.0.rst +++ b/doc/source/whatsnew/v0.23.0.rst @@ -31,7 +31,7 @@ Check the :ref:`API Changes <whatsnew_0230.api_breaking>` and :ref:`deprecations .. warning:: Starting January 1, 2019, pandas feature releases will support Python 3 only. - See :ref:`install.dropping-27` for more. + See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. .. contents:: What's new in v0.23.0 :local: diff --git a/doc/source/whatsnew/v0.23.1.rst b/doc/source/whatsnew/v0.23.1.rst index d730a57a01a60..03b7d9db6bc63 100644 --- a/doc/source/whatsnew/v0.23.1.rst +++ b/doc/source/whatsnew/v0.23.1.rst @@ -12,7 +12,7 @@ and bug fixes. We recommend that all users upgrade to this version. .. warning:: Starting January 1, 2019, pandas feature releases will support Python 3 only. - See :ref:`install.dropping-27` for more. + See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. .. contents:: What's new in v0.23.1 :local: diff --git a/doc/source/whatsnew/v0.23.2.rst b/doc/source/whatsnew/v0.23.2.rst index df8cc12e3385e..9f24092d1d4ae 100644 --- a/doc/source/whatsnew/v0.23.2.rst +++ b/doc/source/whatsnew/v0.23.2.rst @@ -17,7 +17,7 @@ and bug fixes. We recommend that all users upgrade to this version. .. warning:: Starting January 1, 2019, pandas feature releases will support Python 3 only. - See :ref:`install.dropping-27` for more. + See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. .. contents:: What's new in v0.23.2 :local: diff --git a/doc/source/whatsnew/v0.23.4.rst b/doc/source/whatsnew/v0.23.4.rst index 060d1fc8eba34..eadac6f569926 100644 --- a/doc/source/whatsnew/v0.23.4.rst +++ b/doc/source/whatsnew/v0.23.4.rst @@ -12,7 +12,7 @@ and bug fixes. We recommend that all users upgrade to this version. .. warning:: Starting January 1, 2019, pandas feature releases will support Python 3 only. - See :ref:`install.dropping-27` for more. + See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. .. contents:: What's new in v0.23.4 :local: diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index a66056f661de3..d9f41d2a75116 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -6,7 +6,7 @@ What's new in 0.24.0 (January 25, 2019) .. warning:: The 0.24.x series of releases will be the last to support Python 2. Future feature - releases will support Python 3 only. See :ref:`install.dropping-27` for more + releases will support Python 3 only. See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more details. 
{{ header }} diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst index 1b0232cad7476..aead8c48eb9b7 100644 --- a/doc/source/whatsnew/v0.24.1.rst +++ b/doc/source/whatsnew/v0.24.1.rst @@ -6,7 +6,7 @@ Whats new in 0.24.1 (February 3, 2019) .. warning:: The 0.24.x series of releases will be the last to support Python 2. Future feature - releases will support Python 3 only. See :ref:`install.dropping-27` for more. + releases will support Python 3 only. See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. {{ header }} diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst index da8064893e8a8..d1a893f99cff4 100644 --- a/doc/source/whatsnew/v0.24.2.rst +++ b/doc/source/whatsnew/v0.24.2.rst @@ -6,7 +6,7 @@ Whats new in 0.24.2 (March 12, 2019) .. warning:: The 0.24.x series of releases will be the last to support Python 2. Future feature - releases will support Python 3 only. See :ref:`install.dropping-27` for more. + releases will support Python 3 only. See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more. {{ header }} diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 42e756635e739..5b8f980d27b9d 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -6,7 +6,7 @@ What's new in 0.25.0 (July 18, 2019) .. warning:: Starting with the 0.25.x series of releases, pandas only supports Python 3.5.3 and higher. - See :ref:`install.dropping-27` for more details. + See `Dropping Python 2.7 <https://pandas.pydata.org/pandas-docs/version/0.24/install.html#install-dropping-27>`_ for more details. .. warning:: diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 6234bc0f7bd35..fb67decb46b64 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -50,20 +50,21 @@ Timedelta Timezones ^^^^^^^^^ -- +- Bug in :class:`Index` where a numpy object array with a timezone aware :class:`Timestamp` and ``np.nan`` would not return a :class:`DatetimeIndex` (:issue:`27011`) - - Numeric ^^^^^^^ -- +- Bug in :meth:`Series.interpolate` when using a timezone aware :class:`DatetimeIndex` (:issue:`27548`) +- Bug when printing negative floating point complex numbers would raise an ``IndexError`` (:issue:`27484`) - - Conversion ^^^^^^^^^^ -- +- Improved the warnings for the deprecated methods :meth:`Series.real` and :meth:`Series.imag` (:issue:`27610`) - - @@ -113,21 +114,21 @@ I/O Plotting ^^^^^^^^ -- +- Added a pandas_plotting_backends entrypoint group for registering plot backends. See :ref:`extending.plotting-backends` for more (:issue:`26747`). - - Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- +- Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`) - - Reshaping ^^^^^^^^^ -- +- A ``KeyError`` is now raised if ``.unstack()`` is called on a :class:`Series` or :class:`DataFrame` with a flat :class:`Index` passing a name which is not the correct one (:issue:`18303`) - - diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c15f4ad8e1900..245e41ed16eb2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2601,12 +2601,12 @@ def memory_usage(self, index=True, deep=False): ... 
for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() - int64 float64 complex128 object bool - 0 1 1.0 1.0+0.0j 1 True - 1 1 1.0 1.0+0.0j 1 True - 2 1 1.0 1.0+0.0j 1 True - 3 1 1.0 1.0+0.0j 1 True - 4 1 1.0 1.0+0.0j 1 True + int64 float64 complex128 object bool + 0 1 1.0 1.000000+0.000000j 1 True + 1 1 1.0 1.000000+0.000000j 1 True + 2 1 1.0 1.000000+0.000000j 1 True + 3 1 1.0 1.000000+0.000000j 1 True + 4 1 1.0 1.000000+0.000000j 1 True >>> df.memory_usage() Index 128 diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f28f58b070368..19f126c36cde7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -30,7 +30,6 @@ is_bool, is_bool_dtype, is_datetime64_any_dtype, - is_datetime64_dtype, is_datetime64tz_dtype, is_dict_like, is_extension_array_dtype, @@ -7035,7 +7034,7 @@ def interpolate( methods = {"index", "values", "nearest", "time"} is_numeric_or_datetime = ( is_numeric_dtype(index) - or is_datetime64_dtype(index) + or is_datetime64_any_dtype(index) or is_timedelta64_dtype(index) ) if method not in methods and not is_numeric_or_datetime: diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 7fd0ca94e7997..5b9cec6903749 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -42,7 +42,7 @@ from pandas.core.base import DataError, SpecificationError import pandas.core.common as com from pandas.core.frame import DataFrame -from pandas.core.generic import NDFrame, _shared_docs +from pandas.core.generic import ABCDataFrame, ABCSeries, NDFrame, _shared_docs from pandas.core.groupby import base from pandas.core.groupby.groupby import GroupBy, _apply_docs, _transform_template from pandas.core.index import Index, MultiIndex @@ -1025,8 +1025,8 @@ def transform(self, func, *args, **kwargs): object.__setattr__(group, "name", name) res = wrapper(group) - if hasattr(res, "values"): - res = res.values + if isinstance(res, (ABCDataFrame, ABCSeries)): + res = res._values indexer = self._get_index(name) s = klass(res, indexer) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 33de8e41b2f65..12923fd790972 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -489,19 +489,15 @@ def __new__( pass elif inferred != "string": if inferred.startswith("datetime"): - if ( - lib.is_datetime_with_singletz_array(subarr) - or "tz" in kwargs - ): - # only when subarr has the same tz - from pandas import DatetimeIndex + from pandas import DatetimeIndex - try: - return DatetimeIndex( - subarr, copy=copy, name=name, **kwargs - ) - except OutOfBoundsDatetime: - pass + try: + return DatetimeIndex(subarr, copy=copy, name=name, **kwargs) + except (ValueError, OutOfBoundsDatetime): + # GH 27011 + # If we have mixed timezones, just send it + # down the base constructor + pass elif inferred.startswith("timedelta"): from pandas import TimedeltaIndex @@ -1550,7 +1546,11 @@ def _validate_index_level(self, level): "Too many levels:" " Index has only 1 level, not %d" % (level + 1) ) elif level != self.name: - raise KeyError("Level %s must be same as name (%s)" % (level, self.name)) + raise KeyError( + "Requested level ({}) does not match index name ({})".format( + level, self.name + ) + ) def _get_level_number(self, level): self._validate_index_level(level) diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 540a06caec220..a24900543b81a 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -12,6 +12,7 @@ ensure_platform_int, 
is_bool_dtype, is_extension_array_dtype, + is_integer, is_integer_dtype, is_list_like, is_object_dtype, @@ -402,6 +403,10 @@ def unstack(obj, level, fill_value=None): else: level = level[0] + # Prioritize integer interpretation (GH #21677): + if not is_integer(level) and not level == "__placeholder__": + level = obj.index._get_level_number(level) + if isinstance(obj, DataFrame): if isinstance(obj.index, MultiIndex): return _unstack_frame(obj, level, fill_value=fill_value) diff --git a/pandas/core/series.py b/pandas/core/series.py index 59ea8c6bd6c5d..42afb3537c5d8 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -958,7 +958,9 @@ def real(self): .. deprecated 0.25.0 """ warnings.warn( - "`real` has be deprecated and will be removed in a " "future verison", + "`real` is deprecated and will be removed in a future version. " + "To eliminate this warning for a Series `ser`, use " + "`np.real(ser.to_numpy())` or `ser.to_numpy().real`.", FutureWarning, stacklevel=2, ) @@ -976,7 +978,9 @@ def imag(self): .. deprecated 0.25.0 """ warnings.warn( - "`imag` has be deprecated and will be removed in a " "future verison", + "`imag` is deprecated and will be removed in a future version. " + "To eliminate this warning for a Series `ser`, use " + "`np.imag(ser.to_numpy())` or `ser.to_numpy().imag`.", FutureWarning, stacklevel=2, ) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 0e8ed7b25d665..11275973e2bb8 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -5,6 +5,7 @@ from functools import partial from io import StringIO +import re from shutil import get_terminal_size from unicodedata import east_asian_width @@ -1584,17 +1585,10 @@ def _trim_zeros_complex(str_complexes, na_rep="NaN"): Separates the real and imaginary parts from the complex number, and executes the _trim_zeros_float method on each of those. """ - - def separate_and_trim(str_complex, na_rep): - num_arr = str_complex.split("+") - return ( - _trim_zeros_float([num_arr[0]], na_rep) - + ["+"] - + _trim_zeros_float([num_arr[1][:-1]], na_rep) - + ["j"] - ) - - return ["".join(separate_and_trim(x, na_rep)) for x in str_complexes] + return [ + "".join(_trim_zeros_float(re.split(r"([j+-])", x), na_rep)) + for x in str_complexes + ] def _trim_zeros_float(str_floats, na_rep="NaN"): diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 0610780edb28d..a3c1499845c2a 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -1533,6 +1533,53 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs): return self(kind="hexbin", x=x, y=y, C=C, **kwargs) +_backends = {} + + +def _find_backend(backend: str): + """ + Find a pandas plotting backend> + + Parameters + ---------- + backend : str + The identifier for the backend. Either an entrypoint item registered + with pkg_resources, or a module name. + + Notes + ----- + Modifies _backends with imported backends as a side effect. + + Returns + ------- + types.ModuleType + The imported backend. + """ + import pkg_resources # Delay import for performance. + + for entry_point in pkg_resources.iter_entry_points("pandas_plotting_backends"): + if entry_point.name == "matplotlib": + # matplotlib is an optional dependency. When + # missing, this would raise. + continue + _backends[entry_point.name] = entry_point.load() + + try: + return _backends[backend] + except KeyError: + # Fall back to unregisted, module name approach. 
+ try: + module = importlib.import_module(backend) + except ImportError: + # We re-raise later on. + pass + else: + _backends[backend] = module + return module + + raise ValueError("No backend {}".format(backend)) + + def _get_plot_backend(backend=None): """ Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). @@ -1546,7 +1593,18 @@ def _get_plot_backend(backend=None): The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. """ - backend_str = backend or pandas.get_option("plotting.backend") - if backend_str == "matplotlib": - backend_str = "pandas.plotting._matplotlib" - return importlib.import_module(backend_str) + backend = backend or pandas.get_option("plotting.backend") + + if backend == "matplotlib": + # Because matplotlib is an optional dependency and first-party backend, + # we need to attempt an import here to raise an ImportError if needed. + import pandas.plotting._matplotlib as module + + _backends["matplotlib"] = module + + if backend in _backends: + return _backends[backend] + + module = _find_backend(backend) + _backends[backend] = module + return module diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py index 82409df5b46f7..6a86289b6fcc6 100644 --- a/pandas/tests/arrays/interval/test_interval.py +++ b/pandas/tests/arrays/interval/test_interval.py @@ -42,10 +42,9 @@ class TestAttributes: (0, 1), (Timedelta("0 days"), Timedelta("1 day")), (Timestamp("2018-01-01"), Timestamp("2018-01-02")), - pytest.param( + ( Timestamp("2018-01-01", tz="US/Eastern"), Timestamp("2018-01-02", tz="US/Eastern"), - marks=pytest.mark.xfail(strict=True, reason="GH 27011"), ), ], ) diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index c57b2a6964f39..a6fd980faefcd 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -1083,7 +1083,7 @@ def test_reset_index_level(self): # Missing levels - for both MultiIndex and single-level Index: for idx_lev in ["A", "B"], ["A"]: - with pytest.raises(KeyError, match="Level E "): + with pytest.raises(KeyError, match=r"(L|l)evel \(?E\)?"): df.set_index(idx_lev).reset_index(level=["A", "E"]) with pytest.raises(IndexError, match="Too many levels"): df.set_index(idx_lev).reset_index(level=[0, 1, 2]) diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 1eab3ba253f4d..9a8b7cf18f2c0 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -1001,3 +1001,27 @@ def test_ffill_not_in_axis(func, key, val): expected = df assert_frame_equal(result, expected) + + +def test_transform_lambda_with_datetimetz(): + # GH 27496 + df = DataFrame( + { + "time": [ + Timestamp("2010-07-15 03:14:45"), + Timestamp("2010-11-19 18:47:06"), + ], + "timezone": ["Etc/GMT+4", "US/Eastern"], + } + ) + result = df.groupby(["timezone"])["time"].transform( + lambda x: x.dt.tz_localize(x.name) + ) + expected = Series( + [ + Timestamp("2010-07-15 03:14:45", tz="Etc/GMT+4"), + Timestamp("2010-11-19 18:47:06", tz="US/Eastern"), + ], + name="time", + ) + assert_series_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index 6708feda7dd1e..66a22ae7e9e46 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -822,6 +822,12 @@ def 
test_constructor_wrong_precision_raises(self): with pytest.raises(ValueError): pd.DatetimeIndex(["2000"], dtype="datetime64[us]") + def test_index_constructor_with_numpy_object_array_and_timestamp_tz_with_nan(self): + # GH 27011 + result = Index(np.array([Timestamp("2019", tz="UTC"), np.nan], dtype=object)) + expected = DatetimeIndex([Timestamp("2019", tz="UTC"), pd.NaT]) + tm.assert_index_equal(result, expected) + class TestTimeSeries: def test_dti_constructor_preserve_dti_freq(self): diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index e75d80bec1fdf..c40a9bce9385b 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2004,7 +2004,7 @@ def test_isin_level_kwarg_bad_label_raises(self, label, indices): msg = "'Level {} not found'" else: index = index.rename("foo") - msg = r"'Level {} must be same as name \(foo\)'" + msg = r"Requested level \({}\) does not match index name \(foo\)" with pytest.raises(KeyError, match=msg.format(label)): index.isin([], level=label) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index 0e9aa07a4c05a..ae1a21e9b3980 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -35,7 +35,8 @@ def test_droplevel(self, indices): for level in "wrong", ["wrong"]: with pytest.raises( - KeyError, match=re.escape("'Level wrong must be same as name (None)'") + KeyError, + match=r"'Requested level \(wrong\) does not match index name \(None\)'", ): indices.droplevel(level) @@ -200,7 +201,7 @@ def test_unique(self, indices): with pytest.raises(IndexError, match=msg): indices.unique(level=3) - msg = r"Level wrong must be same as name \({}\)".format( + msg = r"Requested level \(wrong\) does not match index name \({}\)".format( re.escape(indices.name.__repr__()) ) with pytest.raises(KeyError, match=msg): diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 818bbc566aca8..ad47f714c9550 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1537,7 +1537,7 @@ def test_to_string_float_index(self): assert result == expected def test_to_string_complex_float_formatting(self): - # GH #25514 + # GH #25514, 25745 with pd.option_context("display.precision", 5): df = DataFrame( { @@ -1545,6 +1545,7 @@ def test_to_string_complex_float_formatting(self): (0.4467846931321966 + 0.0715185102060818j), (0.2739442392974528 + 0.23515228785438969j), (0.26974928742135185 + 0.3250604054898979j), + (-1j), ] } ) @@ -1552,7 +1553,8 @@ def test_to_string_complex_float_formatting(self): expected = ( " x\n0 0.44678+0.07152j\n" "1 0.27394+0.23515j\n" - "2 0.26975+0.32506j" + "2 0.26975+0.32506j\n" + "3 -0.00000-1.00000j" ) assert result == expected diff --git a/pandas/tests/plotting/test_backend.py b/pandas/tests/plotting/test_backend.py index 51f2abb6cc2f4..e79e7b6239eb3 100644 --- a/pandas/tests/plotting/test_backend.py +++ b/pandas/tests/plotting/test_backend.py @@ -1,5 +1,11 @@ +import sys +import types + +import pkg_resources import pytest +import pandas.util._test_decorators as td + import pandas @@ -36,3 +42,44 @@ def test_backend_is_correct(monkeypatch): pandas.set_option("plotting.backend", "matplotlib") except ImportError: pass + + +@td.skip_if_no_mpl +def test_register_entrypoint(): + mod = types.ModuleType("my_backend") + mod.plot = lambda *args, **kwargs: 1 + + backends = pkg_resources.get_entry_map("pandas") + my_entrypoint = pkg_resources.EntryPoint( + 
"pandas_plotting_backend", + mod.__name__, + dist=pkg_resources.get_distribution("pandas"), + ) + backends["pandas_plotting_backends"]["my_backend"] = my_entrypoint + # TODO: the docs recommend importlib.util.module_from_spec. But this works for now. + sys.modules["my_backend"] = mod + + result = pandas.plotting._core._get_plot_backend("my_backend") + assert result is mod + + # TODO: https://github.com/pandas-dev/pandas/issues/27517 + # Remove the td.skip_if_no_mpl + with pandas.option_context("plotting.backend", "my_backend"): + result = pandas.plotting._core._get_plot_backend() + + assert result is mod + + +def test_register_import(): + mod = types.ModuleType("my_backend2") + mod.plot = lambda *args, **kwargs: 1 + sys.modules["my_backend2"] = mod + + result = pandas.plotting._core._get_plot_backend("my_backend2") + assert result is mod + + +@td.skip_if_mpl +def test_no_matplotlib_ok(): + with pytest.raises(ImportError): + pandas.plotting._core._get_plot_backend("matplotlib") diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 63baa6af7c02a..11add8d61deeb 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -322,9 +322,9 @@ def test_reset_index_drop_errors(self): # KeyError raised for series index when passed level name is missing s = Series(range(4)) - with pytest.raises(KeyError, match="must be same as name"): + with pytest.raises(KeyError, match="does not match index name"): s.reset_index("wrong", drop=True) - with pytest.raises(KeyError, match="must be same as name"): + with pytest.raises(KeyError, match="does not match index name"): s.reset_index("wrong") # KeyError raised for series when level to be dropped is missing diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index c5fc52b9b0c41..10375719be8d2 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -1518,10 +1518,16 @@ def test_interp_nonmono_raise(self): s.interpolate(method="krogh") @td.skip_if_no_scipy - def test_interp_datetime64(self): - df = Series([1, np.nan, 3], index=date_range("1/1/2000", periods=3)) - result = df.interpolate(method="nearest") - expected = Series([1.0, 1.0, 3.0], index=date_range("1/1/2000", periods=3)) + @pytest.mark.parametrize("method", ["nearest", "pad"]) + def test_interp_datetime64(self, method, tz_naive_fixture): + df = Series( + [1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture) + ) + result = df.interpolate(method=method) + expected = Series( + [1.0, 1.0, 3.0], + index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture), + ) assert_series_equal(result, expected) def test_interp_limit_no_nans(self): diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index c97c69c323b56..dc4db6e7902a8 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -524,6 +524,22 @@ def test_stack_unstack_preserve_names(self): restacked = unstacked.stack() assert restacked.index.names == self.frame.index.names + @pytest.mark.parametrize("method", ["stack", "unstack"]) + def test_stack_unstack_wrong_level_name(self, method): + # GH 18303 - wrong level name should raise + + # A DataFrame with flat axes: + df = self.frame.loc["foo"] + + with pytest.raises(KeyError, match="does not match index name"): + getattr(df, method)("mistake") + + if method == "unstack": + # Same on a Series: + s = df.iloc[:, 0] + with pytest.raises(KeyError, match="does not match index name"): + 
getattr(s, method)("mistake") + def test_unstack_level_name(self): result = self.frame.unstack("second") expected = self.frame.unstack(level=1) diff --git a/setup.py b/setup.py index 53e12da53cdeb..d2c6b18b892cd 100755 --- a/setup.py +++ b/setup.py @@ -830,5 +830,10 @@ def srcpath(name=None, suffix=".pyx", subdir="src"): "hypothesis>=3.58", ] }, + entry_points={ + "pandas_plotting_backends": [ + "matplotlib = pandas:plotting._matplotlib", + ], + }, **setuptools_kwargs )
https://api.github.com/repos/pandas-dev/pandas/pulls/27690
2019-08-01T12:21:51Z
2019-08-01T12:22:26Z
null
2019-08-01T12:22:26Z
DOC: 0.25 fixups
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index b7555ed94a1ed..aeab2cf5809e7 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -24,6 +24,7 @@ Version 0.25 .. toctree:: :maxdepth: 2 + v0.25.1 v0.25.0 Version 0.24 diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index f4ef26dd8b740..51d05911829d6 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1268,4 +1268,4 @@ Other Contributors ~~~~~~~~~~~~ -.. contributors:: 0.24.x..HEAD +.. contributors:: v0.24.x..HEAD diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index fb67decb46b64..fabdde33f5660 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -1,7 +1,3 @@ -:orphan: - -.. TODO. Remove the orphan tag. - .. _whatsnew_0251: What's new in 0.25.1 (July XX, 2019) @@ -166,6 +162,4 @@ Other Contributors ~~~~~~~~~~~~ -.. TODO. Change to v0.25.0..HEAD - -.. contributors:: HEAD..HEAD +.. contributors:: v0.25.0..HEAD
* fix tag for contributors
* fix 0.25.1 contributors
* remove orphan

Closes https://github.com/pandas-dev/pandas/issues/27687
https://api.github.com/repos/pandas-dev/pandas/pulls/27689
2019-08-01T12:07:54Z
2019-08-01T13:57:00Z
2019-08-01T13:57:00Z
2019-10-22T12:10:11Z
CLN: Prune unnecessary internals
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index da3db1c18e534..811836d0e8a4d 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -671,7 +671,7 @@ def _transform_item_by_item(self, obj, wrapper): except Exception: pass - if len(output) == 0: # pragma: no cover + if len(output) == 0: raise TypeError("Transform function invalid for data types") columns = obj.columns diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 15b94e59c065c..12b9cf25687cf 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1206,7 +1206,7 @@ def mean(self, *args, **kwargs): ) except GroupByError: raise - except Exception: # pragma: no cover + except Exception: with _group_selection_context(self): f = lambda x: x.mean(axis=self.axis, **kwargs) return self._python_agg_general(f) @@ -1232,7 +1232,7 @@ def median(self, **kwargs): ) except GroupByError: raise - except Exception: # pragma: no cover + except Exception: def f(x): if isinstance(x, np.ndarray): @@ -2470,7 +2470,7 @@ def groupby(obj, by, **kwds): from pandas.core.groupby.generic import DataFrameGroupBy klass = DataFrameGroupBy - else: # pragma: no cover + else: raise TypeError("invalid type: {}".format(obj)) return klass(obj, by, **kwds) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 4a8ee8fa2c5f4..6d70fcfb62d52 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -760,7 +760,7 @@ def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs): values[mask] = na_rep return values - # block actions #### + # block actions # def copy(self, deep=True): """ copy constructor """ values = self.values @@ -1538,16 +1538,14 @@ def quantile(self, qs, interpolation="linear", axis=0): ).reshape(len(values), len(qs)) else: # asarray needed for Sparse, see GH#24600 - # Note: we use self.values below instead of values because the - # `asi8` conversion above will behave differently under `isna` - mask = np.asarray(isna(self.values)) + mask = np.asarray(isna(values)) result = nanpercentile( values, np.array(qs) * 100, axis=axis, na_value=self.fill_value, mask=mask, - ndim=self.ndim, + ndim=values.ndim, interpolation=interpolation, ) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 344d41ed26943..8956821740bf3 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -975,8 +975,6 @@ def iget(self, i): """ block = self.blocks[self._blknos[i]] values = block.iget(self._blklocs[i]) - if values.ndim != 1: - return values # shortcut for select a single-dim from a 2-dim BM return SingleBlockManager( diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index 0006824f09fe7..d38221d784273 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -121,7 +121,7 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover return except TypeError: warnings.warn( - "to_clipboard in excel mode requires a single " "character separator." + "to_clipboard in excel mode requires a single character separator." 
) elif sep is not None: warnings.warn("to_clipboard with excel=False ignores the sep argument") diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 7afc234446a71..154656fbb250b 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -297,7 +297,7 @@ def read_excel( for arg in ("sheet", "sheetname", "parse_cols"): if arg in kwds: raise TypeError( - "read_excel() got an unexpected keyword argument " "`{}`".format(arg) + "read_excel() got an unexpected keyword argument `{}`".format(arg) ) if not isinstance(io, ExcelFile): @@ -353,7 +353,7 @@ def __init__(self, filepath_or_buffer): self.book = self.load_workbook(filepath_or_buffer) else: raise ValueError( - "Must explicitly set engine if not passing in" " buffer or path for io." + "Must explicitly set engine if not passing in buffer or path for io." ) @property @@ -713,9 +713,7 @@ def _get_sheet_name(self, sheet_name): if sheet_name is None: sheet_name = self.cur_sheet if sheet_name is None: # pragma: no cover - raise ValueError( - "Must pass explicit sheet_name or set " "cur_sheet property" - ) + raise ValueError("Must pass explicit sheet_name or set cur_sheet property") return sheet_name def _value_with_fmt(self, val): @@ -851,7 +849,7 @@ def parse( """ if "chunksize" in kwds: raise NotImplementedError( - "chunksize keyword of read_excel " "is not implemented" + "chunksize keyword of read_excel is not implemented" ) return self._reader.parse( diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 35a62b627823a..6fe22f14c2c5b 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -53,7 +53,7 @@ def to_feather(df, path): if df.index.name is not None: raise ValueError( - "feather does not serialize index meta-data on a " "default index" + "feather does not serialize index meta-data on a default index" ) # validate columns diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index d86bf432b83c4..60daf311397e8 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -96,9 +96,7 @@ def __init__( # validate mi options if self.has_mi_columns: if cols is not None: - raise TypeError( - "cannot specify cols with a MultiIndex on the " "columns" - ) + raise TypeError("cannot specify cols with a MultiIndex on the columns") if cols is not None: if isinstance(cols, ABCIndexClass): @@ -158,7 +156,7 @@ def save(self): """ # GH21227 internal compression is not used when file-like passed. if self.compression and hasattr(self.path_or_buf, "write"): - msg = "compression has no effect when passing file-like " "object as input." + msg = "compression has no effect when passing file-like object as input." warnings.warn(msg, RuntimeWarning, stacklevel=2) # when zip compression is called. diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 6e4894bdb0f56..980fc4888d625 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -2,9 +2,10 @@ Internal module for formatting output data in csv, html, and latex files. This module also applies to display formatting. 
""" - +import decimal from functools import partial from io import StringIO +import math import re from shutil import get_terminal_size from typing import ( @@ -862,7 +863,7 @@ def to_latex( with codecs.open(self.buf, "w", encoding=encoding) as f: latex_renderer.write_result(f) else: - raise TypeError("buf is not a file name and it has no write " "method") + raise TypeError("buf is not a file name and it has no write method") def _format_col(self, i: int) -> List[str]: frame = self.tr_frame @@ -907,7 +908,7 @@ def to_html( with open(self.buf, "w") as f: buffer_put_lines(f, html) else: - raise TypeError("buf is not a file name and it has no write " " method") + raise TypeError("buf is not a file name and it has no write method") def _get_formatted_column_labels(self, frame: "DataFrame") -> List[List[str]]: from pandas.core.index import _sparsify @@ -1782,9 +1783,6 @@ def __call__(self, num: Union[int, float]) -> str: @return: engineering formatted string """ - import decimal - import math - dnum = decimal.Decimal(str(num)) if decimal.Decimal.is_nan(dnum): diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 3e5b200c4643b..f4b00b0aac5f7 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -687,7 +687,7 @@ def parser_f( read_csv = Appender( _doc_read_csv_and_table.format( func_name="read_csv", - summary=("Read a comma-separated values (csv) file " "into DataFrame."), + summary=("Read a comma-separated values (csv) file into DataFrame."), _default_sep="','", ) )(read_csv) @@ -770,7 +770,7 @@ def read_fwf( if colspecs is None and widths is None: raise ValueError("Must specify either colspecs or widths") elif colspecs not in (None, "infer") and widths is not None: - raise ValueError("You must specify only one of 'widths' and " "'colspecs'") + raise ValueError("You must specify only one of 'widths' and 'colspecs'") # Compute 'colspecs' from 'widths', if specified. if widths is not None: @@ -901,9 +901,7 @@ def _get_options_with_defaults(self, engine): # see gh-12935 if argname == "mangle_dupe_cols" and not value: - raise ValueError( - "Setting mangle_dupe_cols=False is " "not supported yet" - ) + raise ValueError("Setting mangle_dupe_cols=False is not supported yet") else: options[argname] = value @@ -942,7 +940,7 @@ def _check_file_or_buffer(self, f, engine): # needs to have that attribute ("next" for Python 2.x, "__next__" # for Python 3.x) if engine != "c" and not hasattr(f, next_attr): - msg = "The 'python' engine cannot iterate " "through this file buffer." + msg = "The 'python' engine cannot iterate through this file buffer." 
raise ValueError(msg) return engine @@ -959,7 +957,7 @@ def _clean_options(self, options, engine): # C engine not supported yet if engine == "c": if options["skipfooter"] > 0: - fallback_reason = "the 'c' engine does not support" " skipfooter" + fallback_reason = "the 'c' engine does not support skipfooter" engine = "python" encoding = sys.getfilesystemencoding() or "utf-8" @@ -1397,11 +1395,11 @@ def __init__(self, kwds): raise ValueError("header must be integer or list of integers") if kwds.get("usecols"): raise ValueError( - "cannot specify usecols when " "specifying a multi-index header" + "cannot specify usecols when specifying a multi-index header" ) if kwds.get("names"): raise ValueError( - "cannot specify names when " "specifying a multi-index header" + "cannot specify names when specifying a multi-index header" ) # validate index_col that only contains integers @@ -1611,7 +1609,7 @@ def _get_name(icol): if col_names is None: raise ValueError( - ("Must supply column order to use {icol!s} " "as index").format( + ("Must supply column order to use {icol!s} as index").format( icol=icol ) ) @@ -2379,7 +2377,7 @@ def _make_reader(self, f): if sep is None or len(sep) == 1: if self.lineterminator: raise ValueError( - "Custom line terminators not supported in " "python parser (yet)" + "Custom line terminators not supported in python parser (yet)" ) class MyDialect(csv.Dialect): @@ -2662,7 +2660,7 @@ def _infer_columns(self): "number of header fields in the file" ) if len(columns) > 1: - raise TypeError("Cannot pass names with multi-index " "columns") + raise TypeError("Cannot pass names with multi-index columns") if self.usecols is not None: # Set _use_cols. We don't store columns because they are @@ -2727,7 +2725,7 @@ def _handle_usecols(self, columns, usecols_key): elif any(isinstance(u, str) for u in self.usecols): if len(columns) > 1: raise ValueError( - "If using multiple headers, usecols must " "be integers." + "If using multiple headers, usecols must be integers." ) col_indices = [] diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index da9264557931d..415cb50472a4c 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -366,7 +366,7 @@ def read_hdf(path_or_buf, key=None, mode="r", **kwargs): path_or_buf = _stringify_path(path_or_buf) if not isinstance(path_or_buf, str): raise NotImplementedError( - "Support for generic buffers has not " "been implemented." + "Support for generic buffers has not been implemented." ) try: exists = os.path.exists(path_or_buf) @@ -1047,7 +1047,7 @@ def append( """ if columns is not None: raise TypeError( - "columns is not a supported keyword in append, " "try data_columns" + "columns is not a supported keyword in append, try data_columns" ) if dropna is None: @@ -2161,7 +2161,7 @@ def set_atom( # which is an error raise TypeError( - "too many timezones in this block, create separate " "data columns" + "too many timezones in this block, create separate data columns" ) elif inferred_type == "unicode": raise TypeError("[unicode] is not implemented as a table column") @@ -2338,9 +2338,7 @@ def validate_attr(self, append): if append: existing_fields = getattr(self.attrs, self.kind_attr, None) if existing_fields is not None and existing_fields != list(self.values): - raise ValueError( - "appended items do not match existing items" " in table!" 
- ) + raise ValueError("appended items do not match existing items in table!") existing_dtype = getattr(self.attrs, self.dtype_attr, None) if existing_dtype is not None and existing_dtype != self.dtype: @@ -2834,7 +2832,7 @@ def write_multi_index(self, key, index): # write the level if is_extension_type(lev): raise NotImplementedError( - "Saving a MultiIndex with an " "extension dtype is not supported." + "Saving a MultiIndex with an extension dtype is not supported." ) level_key = "{key}_level{idx}".format(key=key, idx=i) conv_level = _convert_index( @@ -3079,7 +3077,7 @@ def validate_read(self, kwargs): kwargs = super().validate_read(kwargs) if "start" in kwargs or "stop" in kwargs: raise NotImplementedError( - "start and/or stop are not supported " "in fixed Sparse reading" + "start and/or stop are not supported in fixed Sparse reading" ) return kwargs @@ -3376,7 +3374,7 @@ def validate_multiindex(self, obj): return obj.reset_index(), levels except ValueError: raise ValueError( - "duplicate names/columns in the multi-index when " "storing as a table" + "duplicate names/columns in the multi-index when storing as a table" ) @property @@ -4081,7 +4079,7 @@ def read_column(self, column, where=None, start=None, stop=None): return False if where is not None: - raise TypeError("read_column does not currently accept a where " "clause") + raise TypeError("read_column does not currently accept a where clause") # find the axes for a in self.axes: @@ -4990,7 +4988,7 @@ def __init__(self, table, where=None, start=None, stop=None): self.stop is not None and (where >= self.stop).any() ): raise ValueError( - "where must have index locations >= start and " "< stop" + "where must have index locations >= start and < stop" ) self.coordinates = where diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index 34b93d72d0e29..ea26a9b8efdbf 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -26,7 +26,7 @@ "000000000000000000000000000000 " ) _correct_header1 = ( - "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!" "000000000000000001600000000" + "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000" ) _correct_header2 = ( "HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!" diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 6fe34e4e9705a..f1f52a9198d29 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -233,7 +233,7 @@ def read_sql_table( con = _engine_builder(con) if not _is_sqlalchemy_connectable(con): raise NotImplementedError( - "read_sql_table only supported for " "SQLAlchemy connectable." + "read_sql_table only supported for SQLAlchemy connectable." ) import sqlalchemy from sqlalchemy.schema import MetaData @@ -503,7 +503,7 @@ def to_sql( frame = frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError( - "'frame' argument should be either a " "Series or a DataFrame" + "'frame' argument should be either a Series or a DataFrame" ) pandas_sql.to_sql( @@ -1756,7 +1756,7 @@ def has_table(self, name, schema=None): wld = "?" 
query = ( - "SELECT name FROM sqlite_master " "WHERE type='table' AND name={wld};" + "SELECT name FROM sqlite_master WHERE type='table' AND name={wld};" ).format(wld=wld) return len(self.execute(query, [name]).fetchall()) > 0 diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 32122a9daa1db..69bafc7749258 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -367,7 +367,7 @@ def convert_delta_safe(base, deltas, unit): conv_dates = convert_delta_safe(base, ms, "ms") elif fmt.startswith(("%tC", "tC")): - warnings.warn("Encountered %tC format. Leaving in Stata " "Internal Format.") + warnings.warn("Encountered %tC format. Leaving in Stata Internal Format.") conv_dates = Series(dates, dtype=np.object) if has_bad_values: conv_dates[bad_locs] = NaT @@ -856,7 +856,7 @@ def __init__(self, value): string = property( lambda self: self._str, - doc="The Stata representation of the missing value: " "'.', '.a'..'.z'", + doc="The Stata representation of the missing value: '.', '.a'..'.z'", ) value = property( lambda self: self._value, doc="The binary representation of the missing value." @@ -1959,7 +1959,7 @@ def _maybe_convert_to_int_keys(convert_dates, varlist): new_dict.update({varlist.index(key): convert_dates[key]}) else: if not isinstance(key, int): - raise ValueError("convert_dates key must be a " "column or an integer") + raise ValueError("convert_dates key must be a column or an integer") new_dict.update({key: convert_dates[key]}) return new_dict @@ -2533,9 +2533,7 @@ def _write_variable_labels(self): if col in self._variable_labels: label = self._variable_labels[col] if len(label) > 80: - raise ValueError( - "Variable labels must be 80 characters " "or fewer" - ) + raise ValueError("Variable labels must be 80 characters or fewer") is_latin1 = all(ord(c) < 256 for c in label) if not is_latin1: raise ValueError( @@ -3093,9 +3091,7 @@ def _write_variable_labels(self): if col in self._variable_labels: label = self._variable_labels[col] if len(label) > 80: - raise ValueError( - "Variable labels must be 80 characters " "or fewer" - ) + raise ValueError("Variable labels must be 80 characters or fewer") is_latin1 = all(ord(c) < 256 for c in label) if not is_latin1: raise ValueError(
Also some post-black cleanups in pandas.io (I think we're almost done with these). Removing "pragma: no cover" in a couple of places because I'm working in those areas; it would be easier if we caught more specific exceptions, so I want to see whether those branches are ever reached.
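Most of the string edits in the diff above collapse implicit adjacent-literal concatenation left over from pre-black line wrapping; a minimal before/after sketch of the pattern being cleaned up:

```python
# Leftover from manual line wrapping: Python silently concatenates the
# two adjacent literals, leaving an awkward mid-sentence split.
msg = "cannot specify cols with a MultiIndex on the " "columns"

# After the cleanup: a single literal with the same runtime value.
msg = "cannot specify cols with a MultiIndex on the columns"
```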
https://api.github.com/repos/pandas-dev/pandas/pulls/27685
2019-08-01T03:00:28Z
2019-08-01T12:15:17Z
2019-08-01T12:15:17Z
2019-08-01T14:09:08Z
CLN: remove _try_coerce_result altogether
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 1484feeeada64..b066629676e5d 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -25,6 +25,7 @@ is_categorical_dtype, is_complex_dtype, is_datetime64_any_dtype, + is_datetime64tz_dtype, is_integer_dtype, is_numeric_dtype, is_sparse, @@ -451,6 +452,7 @@ def wrapper(*args, **kwargs): def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs): assert kind in ["transform", "aggregate"] + orig_values = values # can we do this operation with our cython functions # if not raise NotImplementedError @@ -475,23 +477,11 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs): "timedelta64 type does not support {} operations".format(how) ) - arity = self._cython_arity.get(how, 1) - - vdim = values.ndim - swapped = False - if vdim == 1: - values = values[:, None] - out_shape = (self.ngroups, arity) - else: - if axis > 0: - swapped = True - assert axis == 1, axis - values = values.T - if arity > 1: - raise NotImplementedError( - "arity of more than 1 is not supported for the 'how' argument" - ) - out_shape = (self.ngroups,) + values.shape[1:] + if is_datetime64tz_dtype(values.dtype): + # Cast to naive; we'll cast back at the end of the function + # TODO: possible need to reshape? kludge can be avoided when + # 2D EA is allowed. + values = values.view("M8[ns]") is_datetimelike = needs_i8_conversion(values.dtype) is_numeric = is_numeric_dtype(values.dtype) @@ -513,6 +503,24 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs): else: values = values.astype(object) + arity = self._cython_arity.get(how, 1) + + vdim = values.ndim + swapped = False + if vdim == 1: + values = values[:, None] + out_shape = (self.ngroups, arity) + else: + if axis > 0: + swapped = True + assert axis == 1, axis + values = values.T + if arity > 1: + raise NotImplementedError( + "arity of more than 1 is not supported for the 'how' argument" + ) + out_shape = (self.ngroups,) + values.shape[1:] + try: func = self._get_cython_function(kind, how, values, is_numeric) except NotImplementedError: @@ -581,6 +589,9 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs): if swapped: result = result.swapaxes(0, axis) + if is_datetime64tz_dtype(orig_values.dtype): + result = type(orig_values)(result.astype(np.int64), dtype=orig_values.dtype) + return result, names def aggregate(self, values, how, axis=0, min_count=-1): diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 8ade489e71587..6a2aebe5db246 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -417,9 +417,6 @@ def fillna(self, value, limit=None, inplace=False, downcast=None): if self._can_hold_element(value): # equivalent: self._try_coerce_args(value) would not raise blocks = self.putmask(mask, value, inplace=inplace) - blocks = [ - b.make_block(values=self._try_coerce_result(b.values)) for b in blocks - ] return self._maybe_downcast(blocks, downcast) # we can't process the value, but nothing to do @@ -734,12 +731,7 @@ def _try_coerce_args(self, other): return other - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - return result - def _try_coerce_and_cast_result(self, result, dtype=None): - result = self._try_coerce_result(result) result = self._try_cast_result(result, dtype=dtype) return result @@ -1406,7 +1398,7 @@ def func(cond, values, other): try: fastres = expressions.where(cond, values, other) - 
return self._try_coerce_result(fastres) + return fastres except Exception as detail: if errors == "raise": raise TypeError( @@ -1692,7 +1684,6 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False) mask = _safe_reshape(mask, new_values.shape) new_values[mask] = new - new_values = self._try_coerce_result(new_values) return [self.make_block(values=new_values)] def _try_cast_result(self, result, dtype=None): @@ -1870,20 +1861,6 @@ def _slice(self, slicer): return self.values[slicer] - def _try_cast_result(self, result, dtype=None): - """ - if we have an operation that operates on for example floats - we want to try to cast back to our EA here if possible - - result could be a 2-D numpy array, e.g. the result of - a numeric operation; but it must be shape (1, X) because - we by-definition operate on the ExtensionBlocks one-by-one - - result could also be an EA Array itself, in which case it - is already a 1-D array - """ - return result - def formatting_values(self): # Deprecating the ability to override _formatting_values. # Do the warning here, it's only user in pandas, since we @@ -2443,20 +2420,6 @@ def _try_coerce_args(self, other): return other - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - if isinstance(result, np.ndarray): - if result.ndim == 2: - # kludge for 2D blocks with 1D EAs - result = result[0, :] - if result.dtype == np.float64: - # needed for post-groupby.median - result = self._holder._from_sequence( - result.astype(np.int64), freq=None, dtype=self.values.dtype - ) - - return result - def diff(self, n, axis=0): """1st discrete difference @@ -2619,10 +2582,6 @@ def _try_coerce_args(self, other): return other - def _try_coerce_result(self, result): - """ reverse of try_coerce_args / try_operate """ - return result - def should_store(self, value): return issubclass( value.dtype.type, np.timedelta64 @@ -3031,16 +2990,6 @@ def array_dtype(self): """ return np.object_ - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - - # GH12564: CategoricalBlock is 1-dim only - # while returned results could be any dim - if (not is_categorical_dtype(result)) and isinstance(result, np.ndarray): - result = _block_shape(result, ndim=self.ndim) - - return result - def to_dense(self): # Categorical.get_values returns a DatetimeIndex for datetime # categories, so we can't simply use `np.asarray(self.values)` like diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 8956821740bf3..c7318314b8af9 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -908,7 +908,7 @@ def fast_xs(self, loc): # Such assignment may incorrectly coerce NaT to None # result[blk.mgr_locs] = blk._slice((slice(None), loc)) for i, rl in enumerate(blk.mgr_locs): - result[rl] = blk._try_coerce_result(blk.iget((i, loc))) + result[rl] = blk.iget((i, loc)) if is_extension_array_dtype(dtype): result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)
Following #27628, we can now remove _try_coerce_result altogether. The step after this removes _try_cast_result.
https://api.github.com/repos/pandas-dev/pandas/pulls/27683
2019-07-31T20:44:49Z
2019-08-05T11:57:08Z
2019-08-05T11:57:08Z
2019-08-05T14:33:16Z
ENH: Implement weighted rolling var and std
diff --git a/doc/source/reference/window.rst b/doc/source/reference/window.rst index 2f6addf607877..d09ac0d1fa7f7 100644 --- a/doc/source/reference/window.rst +++ b/doc/source/reference/window.rst @@ -34,6 +34,8 @@ Standard moving window functions Rolling.quantile Window.mean Window.sum + Window.var + Window.std .. _api.functions_expanding: diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index cb1d80a34514c..36c1889e7df06 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -110,6 +110,7 @@ Other enhancements (depending on the presence of missing data) or object dtype column. (:issue:`28368`) - :meth:`DataFrame.to_json` now accepts an ``indent`` integer argument to enable pretty printing of JSON output (:issue:`12004`) - :meth:`read_stata` can read Stata 119 dta files. (:issue:`28250`) +- Implemented :meth:`pandas.core.window.Window.var` and :meth:`pandas.core.window.Window.std` functions (:issue:`26597`) - Added ``encoding`` argument to :meth:`DataFrame.to_string` for non-ascii text (:issue:`28766`) - Added ``encoding`` argument to :func:`DataFrame.to_html` for non-ascii text (:issue:`28663`) - :meth:`Styler.background_gradient` now accepts ``vmin`` and ``vmax`` arguments (:issue:`12145`) diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index b51d61d05ce98..62066c5f66ea3 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1752,6 +1752,226 @@ cdef ndarray[float64_t] _roll_weighted_sum_mean(float64_t[:] values, return np.asarray(output) +# ---------------------------------------------------------------------- +# Rolling var for weighted window + + +cdef inline float64_t calc_weighted_var(float64_t t, + float64_t sum_w, + Py_ssize_t win_n, + unsigned int ddof, + float64_t nobs, + int64_t minp) nogil: + """ + Calculate weighted variance for a window using West's method. + + Paper: https://dl.acm.org/citation.cfm?id=359153 + + Parameters + ---------- + t: float64_t + sum of weighted squared differences + sum_w: float64_t + sum of weights + win_n: Py_ssize_t + window size + ddof: unsigned int + delta degrees of freedom + nobs: float64_t + number of observations + minp: int64_t + minimum number of observations + + Returns + ------- + result : float64_t + weighted variance of the window + """ + + cdef: + float64_t result + + # Variance is unchanged if no observation is added or removed + if (nobs >= minp) and (nobs > ddof): + + # pathological case + if nobs == 1: + result = 0 + else: + result = t * win_n / ((win_n - ddof) * sum_w) + if result < 0: + result = 0 + else: + result = NaN + + return result + + +cdef inline void add_weighted_var(float64_t val, + float64_t w, + float64_t *t, + float64_t *sum_w, + float64_t *mean, + float64_t *nobs) nogil: + """ + Update weighted mean, sum of weights and sum of weighted squared + differences to include value and weight pair in weighted variance + calculation using West's method. 
+ + Paper: https://dl.acm.org/citation.cfm?id=359153 + + Parameters + ---------- + val: float64_t + window values + w: float64_t + window weights + t: float64_t + sum of weighted squared differences + sum_w: float64_t + sum of weights + mean: float64_t + weighted mean + nobs: float64_t + number of observations + """ + + cdef: + float64_t temp, q, r + + if isnan(val): + return + + nobs[0] = nobs[0] + 1 + + q = val - mean[0] + temp = sum_w[0] + w + r = q * w / temp + + mean[0] = mean[0] + r + t[0] = t[0] + r * sum_w[0] * q + sum_w[0] = temp + + +cdef inline void remove_weighted_var(float64_t val, + float64_t w, + float64_t *t, + float64_t *sum_w, + float64_t *mean, + float64_t *nobs) nogil: + """ + Update weighted mean, sum of weights and sum of weighted squared + differences to remove value and weight pair from weighted variance + calculation using West's method. + + Paper: https://dl.acm.org/citation.cfm?id=359153 + + Parameters + ---------- + val: float64_t + window values + w: float64_t + window weights + t: float64_t + sum of weighted squared differences + sum_w: float64_t + sum of weights + mean: float64_t + weighted mean + nobs: float64_t + number of observations + """ + + cdef: + float64_t temp, q, r + + if notnan(val): + nobs[0] = nobs[0] - 1 + + if nobs[0]: + q = val - mean[0] + temp = sum_w[0] - w + r = q * w / temp + + mean[0] = mean[0] - r + t[0] = t[0] - r * sum_w[0] * q + sum_w[0] = temp + + else: + t[0] = 0 + sum_w[0] = 0 + mean[0] = 0 + + +def roll_weighted_var(float64_t[:] values, float64_t[:] weights, + int64_t minp, unsigned int ddof): + """ + Calculates weighted rolling variance using West's online algorithm. + + Paper: https://dl.acm.org/citation.cfm?id=359153 + + Parameters + ---------- + values: float64_t[:] + values to roll window over + weights: float64_t[:] + array of weights whose lenght is window size + minp: int64_t + minimum number of observations to calculate + variance of a window + ddof: unsigned int + the divisor used in variance calculations + is the window size - ddof + + Returns + ------- + output: float64_t[:] + weighted variances of windows + """ + + cdef: + float64_t t = 0, sum_w = 0, mean = 0, nobs = 0 + float64_t val, pre_val, w, pre_w + Py_ssize_t i, n, win_n + float64_t[:] output + + n = len(values) + win_n = len(weights) + output = np.empty(n, dtype=float) + + with nogil: + + for i in range(win_n): + add_weighted_var(values[i], weights[i], &t, + &sum_w, &mean, &nobs) + + output[i] = calc_weighted_var(t, sum_w, win_n, + ddof, nobs, minp) + + for i in range(win_n, n): + val = values[i] + pre_val = values[i - win_n] + + w = weights[i % win_n] + pre_w = weights[(i - win_n) % win_n] + + if notnan(val): + if pre_val == pre_val: + remove_weighted_var(pre_val, pre_w, &t, + &sum_w, &mean, &nobs) + + add_weighted_var(val, w, &t, &sum_w, &mean, &nobs) + + elif pre_val == pre_val: + remove_weighted_var(pre_val, pre_w, &t, + &sum_w, &mean, &nobs) + + output[i] = calc_weighted_var(t, sum_w, win_n, + ddof, nobs, minp) + + return output + + # ---------------------------------------------------------------------- # Exponentially weighted moving average diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index 47bd8f2ec593b..c3b7531ce5904 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -181,13 +181,13 @@ def mean(self, *args, **kwargs): def median(self, **kwargs): return super().median(**kwargs) - @Substitution(name="expanding") + @Substitution(name="expanding", versionadded="") 
@Appender(_shared_docs["std"]) def std(self, ddof=1, *args, **kwargs): nv.validate_expanding_func("std", args, kwargs) return super().std(ddof=ddof, **kwargs) - @Substitution(name="expanding") + @Substitution(name="expanding", versionadded="") @Appender(_shared_docs["var"]) def var(self, ddof=1, *args, **kwargs): nv.validate_expanding_func("var", args, kwargs) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index bf5ea9c457e8a..ab59cb1170b75 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -4,7 +4,7 @@ """ from datetime import timedelta from textwrap import dedent -from typing import Callable, List, Optional, Set, Union +from typing import Callable, Dict, List, Optional, Set, Tuple, Union import warnings import numpy as np @@ -169,13 +169,30 @@ def __getattr__(self, attr): def _dir_additions(self): return self.obj._dir_additions() - def _get_window(self, other=None, **kwargs) -> int: + def _get_win_type(self, kwargs: Dict): """ - Returns window length + Exists for compatibility, overriden by subclass Window. Parameters ---------- - other: + kwargs : dict + ignored, exists for compatibility + + Returns + ------- + None + """ + return None + + def _get_window(self, other=None, win_type: Optional[str] = None) -> int: + """ + Return window length. + + Parameters + ---------- + other : + ignored, exists for compatibility + win_type : ignored, exists for compatibility Returns @@ -405,6 +422,7 @@ def _apply( ------- y : type of input """ + if center is None: center = self.center @@ -412,7 +430,8 @@ def _apply( check_minp = _use_window if window is None: - window = self._get_window(**kwargs) + win_type = self._get_win_type(kwargs) + window = self._get_window(win_type=win_type) blocks, obj = self._create_blocks() block_list = list(blocks) @@ -612,6 +631,126 @@ def aggregate(self, func, *args, **kwargs): """ ) + _shared_docs["var"] = dedent( + """ + Calculate unbiased %(name)s variance. + %(versionadded)s + Normalized by N-1 by default. This can be changed using the `ddof` + argument. + + Parameters + ---------- + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + *args, **kwargs + For NumPy compatibility. No additional arguments are used. + + Returns + ------- + Series or DataFrame + Returns the same object type as the caller of the %(name)s calculation. + + See Also + -------- + Series.%(name)s : Calling object with Series data. + DataFrame.%(name)s : Calling object with DataFrames. + Series.var : Equivalent method for Series. + DataFrame.var : Equivalent method for DataFrame. + numpy.var : Equivalent method for Numpy array. + + Notes + ----- + The default `ddof` of 1 used in :meth:`Series.var` is different than the + default `ddof` of 0 in :func:`numpy.var`. + + A minimum of 1 period is required for the rolling calculation. + + Examples + -------- + >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) + >>> s.rolling(3).var() + 0 NaN + 1 NaN + 2 0.333333 + 3 1.000000 + 4 1.000000 + 5 1.333333 + 6 0.000000 + dtype: float64 + + >>> s.expanding(3).var() + 0 NaN + 1 NaN + 2 0.333333 + 3 0.916667 + 4 0.800000 + 5 0.700000 + 6 0.619048 + dtype: float64 + """ + ) + + _shared_docs["std"] = dedent( + """ + Calculate %(name)s standard deviation. + %(versionadded)s + Normalized by N-1 by default. This can be changed using the `ddof` + argument. + + Parameters + ---------- + ddof : int, default 1 + Delta Degrees of Freedom. 
The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + *args, **kwargs + For NumPy compatibility. No additional arguments are used. + + Returns + ------- + Series or DataFrame + Returns the same object type as the caller of the %(name)s calculation. + + See Also + -------- + Series.%(name)s : Calling object with Series data. + DataFrame.%(name)s : Calling object with DataFrames. + Series.std : Equivalent method for Series. + DataFrame.std : Equivalent method for DataFrame. + numpy.std : Equivalent method for Numpy array. + + Notes + ----- + The default `ddof` of 1 used in Series.std is different than the default + `ddof` of 0 in numpy.std. + + A minimum of one period is required for the rolling calculation. + + Examples + -------- + >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) + >>> s.rolling(3).std() + 0 NaN + 1 NaN + 2 0.577350 + 3 1.000000 + 4 1.000000 + 5 1.154701 + 6 0.000000 + dtype: float64 + + >>> s.expanding(3).std() + 0 NaN + 1 NaN + 2 0.577350 + 3 0.957427 + 4 0.894427 + 5 0.836660 + 6 0.786796 + dtype: float64 + """ + ) + class Window(_Window): """ @@ -783,15 +922,63 @@ def validate(self): else: raise ValueError("Invalid window {0}".format(window)) - def _get_window(self, other=None, **kwargs) -> np.ndarray: + def _get_win_type(self, kwargs: Dict) -> Union[str, Tuple]: """ - Provide validation for the window type, return the window - which has already been validated. + Extract arguments for the window type, provide validation for it + and return the validated window type. Parameters ---------- - other: + kwargs : dict + + Returns + ------- + win_type : str, or tuple + """ + # the below may pop from kwargs + def _validate_win_type(win_type, kwargs): + arg_map = { + "kaiser": ["beta"], + "gaussian": ["std"], + "general_gaussian": ["power", "width"], + "slepian": ["width"], + "exponential": ["tau"], + } + + if win_type in arg_map: + win_args = _pop_args(win_type, arg_map[win_type], kwargs) + if win_type == "exponential": + # exponential window requires the first arg (center) + # to be set to None (necessary for symmetric window) + win_args.insert(0, None) + + return tuple([win_type] + win_args) + + return win_type + + def _pop_args(win_type, arg_names, kwargs): + msg = "%s window requires %%s" % win_type + all_args = [] + for n in arg_names: + if n not in kwargs: + raise ValueError(msg % n) + all_args.append(kwargs.pop(n)) + return all_args + + return _validate_win_type(self.win_type, kwargs) + + def _get_window( + self, other=None, win_type: Optional[Union[str, Tuple]] = None + ) -> np.ndarray: + """ + Get the window, weights. 
+ + Parameters + ---------- + other : ignored, exists for compatibility + win_type : str, or tuple + type of window to create Returns ------- @@ -805,37 +992,6 @@ def _get_window(self, other=None, **kwargs) -> np.ndarray: elif is_integer(window): import scipy.signal as sig - # the below may pop from kwargs - def _validate_win_type(win_type, kwargs): - arg_map = { - "kaiser": ["beta"], - "gaussian": ["std"], - "general_gaussian": ["power", "width"], - "slepian": ["width"], - "exponential": ["tau"], - } - - if win_type in arg_map: - win_args = _pop_args(win_type, arg_map[win_type], kwargs) - if win_type == "exponential": - # exponential window requires the first arg (center) - # to be set to None (necessary for symmetric window) - win_args.insert(0, None) - - return tuple([win_type] + win_args) - - return win_type - - def _pop_args(win_type, arg_names, kwargs): - msg = "%s window requires %%s" % win_type - all_args = [] - for n in arg_names: - if n not in kwargs: - raise ValueError(msg % n) - all_args.append(kwargs.pop(n)) - return all_args - - win_type = _validate_win_type(self.win_type, kwargs) # GH #15662. `False` makes symmetric window, rather than periodic. return sig.get_window(win_type, window, False).astype(float) @@ -844,7 +1000,7 @@ def _get_roll_func( ) -> Callable: def func(arg, window, min_periods=None, closed=None): minp = check_minp(min_periods, len(window)) - return cfunc(arg, window, minp) + return cfunc(arg, window, minp, **kwargs) return func @@ -922,6 +1078,18 @@ def mean(self, *args, **kwargs): nv.validate_window_func("mean", args, kwargs) return self._apply("roll_weighted_mean", **kwargs) + @Substitution(name="window", versionadded="\n.. versionadded:: 1.0.0\n") + @Appender(_shared_docs["var"]) + def var(self, ddof=1, *args, **kwargs): + nv.validate_window_func("var", args, kwargs) + return self._apply("roll_weighted_var", ddof=ddof, **kwargs) + + @Substitution(name="window", versionadded="\n.. versionadded:: 1.0.0\n") + @Appender(_shared_docs["std"]) + def std(self, ddof=1, *args, **kwargs): + nv.validate_window_func("std", args, kwargs) + return _zsqrt(self.var(ddof=ddof, **kwargs)) + class _Rolling(_Window): @property @@ -1176,66 +1344,6 @@ def mean(self, *args, **kwargs): def median(self, **kwargs): return self._apply("roll_median_c", "median", **kwargs) - _shared_docs["std"] = dedent( - """ - Calculate %(name)s standard deviation. - - Normalized by N-1 by default. This can be changed using the `ddof` - argument. - - Parameters - ---------- - ddof : int, default 1 - Delta Degrees of Freedom. The divisor used in calculations - is ``N - ddof``, where ``N`` represents the number of elements. - *args, **kwargs - For NumPy compatibility. No additional arguments are used. - - Returns - ------- - Series or DataFrame - Returns the same object type as the caller of the %(name)s calculation. - - See Also - -------- - Series.%(name)s : Calling object with Series data. - DataFrame.%(name)s : Calling object with DataFrames. - Series.std : Equivalent method for Series. - DataFrame.std : Equivalent method for DataFrame. - numpy.std : Equivalent method for Numpy array. - - Notes - ----- - The default `ddof` of 1 used in Series.std is different than the default - `ddof` of 0 in numpy.std. - - A minimum of one period is required for the rolling calculation. 
- - Examples - -------- - >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) - >>> s.rolling(3).std() - 0 NaN - 1 NaN - 2 0.577350 - 3 1.000000 - 4 1.000000 - 5 1.154701 - 6 0.000000 - dtype: float64 - - >>> s.expanding(3).std() - 0 NaN - 1 NaN - 2 0.577350 - 3 0.957427 - 4 0.894427 - 5 0.836660 - 6 0.786796 - dtype: float64 - """ - ) - def std(self, ddof=1, *args, **kwargs): nv.validate_window_func("std", args, kwargs) window = self._get_window() @@ -1251,66 +1359,6 @@ def f(arg, *args, **kwargs): f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs ) - _shared_docs["var"] = dedent( - """ - Calculate unbiased %(name)s variance. - - Normalized by N-1 by default. This can be changed using the `ddof` - argument. - - Parameters - ---------- - ddof : int, default 1 - Delta Degrees of Freedom. The divisor used in calculations - is ``N - ddof``, where ``N`` represents the number of elements. - *args, **kwargs - For NumPy compatibility. No additional arguments are used. - - Returns - ------- - Series or DataFrame - Returns the same object type as the caller of the %(name)s calculation. - - See Also - -------- - Series.%(name)s : Calling object with Series data. - DataFrame.%(name)s : Calling object with DataFrames. - Series.var : Equivalent method for Series. - DataFrame.var : Equivalent method for DataFrame. - numpy.var : Equivalent method for Numpy array. - - Notes - ----- - The default `ddof` of 1 used in :meth:`Series.var` is different than the - default `ddof` of 0 in :func:`numpy.var`. - - A minimum of 1 period is required for the rolling calculation. - - Examples - -------- - >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) - >>> s.rolling(3).var() - 0 NaN - 1 NaN - 2 0.333333 - 3 1.000000 - 4 1.000000 - 5 1.333333 - 6 0.000000 - dtype: float64 - - >>> s.expanding(3).var() - 0 NaN - 1 NaN - 2 0.333333 - 3 0.916667 - 4 0.800000 - 5 0.700000 - 6 0.619048 - dtype: float64 - """ - ) - def var(self, ddof=1, *args, **kwargs): nv.validate_window_func("var", args, kwargs) return self._apply( @@ -1845,13 +1893,13 @@ def mean(self, *args, **kwargs): def median(self, **kwargs): return super().median(**kwargs) - @Substitution(name="rolling") + @Substitution(name="rolling", versionadded="") @Appender(_shared_docs["std"]) def std(self, ddof=1, *args, **kwargs): nv.validate_rolling_func("std", args, kwargs) return super().std(ddof=ddof, **kwargs) - @Substitution(name="rolling") + @Substitution(name="rolling", versionadded="") @Appender(_shared_docs["var"]) def var(self, ddof=1, *args, **kwargs): nv.validate_rolling_func("var", args, kwargs) diff --git a/pandas/tests/window/test_moments.py b/pandas/tests/window/test_moments.py index 3d6cd7d10bd10..36a0ddb3e02d7 100644 --- a/pandas/tests/window/test_moments.py +++ b/pandas/tests/window/test_moments.py @@ -119,64 +119,95 @@ def test_cmov_window_corner(self): assert len(result) == 5 @td.skip_if_no_scipy - def test_cmov_window_frame(self): + @pytest.mark.parametrize( + "f,xp", + [ + ( + "mean", + [ + [np.nan, np.nan], + [np.nan, np.nan], + [9.252, 9.392], + [8.644, 9.906], + [8.87, 10.208], + [6.81, 8.588], + [7.792, 8.644], + [9.05, 7.824], + [np.nan, np.nan], + [np.nan, np.nan], + ], + ), + ( + "std", + [ + [np.nan, np.nan], + [np.nan, np.nan], + [3.789706, 4.068313], + [3.429232, 3.237411], + [3.589269, 3.220810], + [3.405195, 2.380655], + [3.281839, 2.369869], + [3.676846, 1.801799], + [np.nan, np.nan], + [np.nan, np.nan], + ], + ), + ( + "var", + [ + [np.nan, np.nan], + [np.nan, np.nan], + [14.36187, 16.55117], + [11.75963, 10.48083], + [12.88285, 10.37362], 
+ [11.59535, 5.66752], + [10.77047, 5.61628], + [13.51920, 3.24648], + [np.nan, np.nan], + [np.nan, np.nan], + ], + ), + ( + "sum", + [ + [np.nan, np.nan], + [np.nan, np.nan], + [46.26, 46.96], + [43.22, 49.53], + [44.35, 51.04], + [34.05, 42.94], + [38.96, 43.22], + [45.25, 39.12], + [np.nan, np.nan], + [np.nan, np.nan], + ], + ), + ], + ) + def test_cmov_window_frame(self, f, xp): # Gh 8238 - vals = np.array( - [ - [12.18, 3.64], - [10.18, 9.16], - [13.24, 14.61], - [4.51, 8.11], - [6.15, 11.44], - [9.14, 6.21], - [11.31, 10.67], - [2.94, 6.51], - [9.42, 8.39], - [12.44, 7.34], - ] - ) - - xp = np.array( - [ - [np.nan, np.nan], - [np.nan, np.nan], - [9.252, 9.392], - [8.644, 9.906], - [8.87, 10.208], - [6.81, 8.588], - [7.792, 8.644], - [9.05, 7.824], - [np.nan, np.nan], - [np.nan, np.nan], - ] + df = DataFrame( + np.array( + [ + [12.18, 3.64], + [10.18, 9.16], + [13.24, 14.61], + [4.51, 8.11], + [6.15, 11.44], + [9.14, 6.21], + [11.31, 10.67], + [2.94, 6.51], + [9.42, 8.39], + [12.44, 7.34], + ] + ) ) + xp = DataFrame(np.array(xp)) - # DataFrame - rs = DataFrame(vals).rolling(5, win_type="boxcar", center=True).mean() - tm.assert_frame_equal(DataFrame(xp), rs) - - # invalid method - with pytest.raises(AttributeError): - (DataFrame(vals).rolling(5, win_type="boxcar", center=True).std()) - - # sum - xp = np.array( - [ - [np.nan, np.nan], - [np.nan, np.nan], - [46.26, 46.96], - [43.22, 49.53], - [44.35, 51.04], - [34.05, 42.94], - [38.96, 43.22], - [45.25, 39.12], - [np.nan, np.nan], - [np.nan, np.nan], - ] - ) + roll = df.rolling(5, win_type="boxcar", center=True) + rs = getattr(roll, f)() - rs = DataFrame(vals).rolling(5, win_type="boxcar", center=True).sum() - tm.assert_frame_equal(DataFrame(xp), rs) + tm.assert_frame_equal(xp, rs) @td.skip_if_no_scipy def test_cmov_window_na_min_periods(self): diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py index f42c507e51511..39ab3ffd9319e 100644 --- a/pandas/tests/window/test_window.py +++ b/pandas/tests/window/test_window.py @@ -60,7 +60,7 @@ def test_numpy_compat(self, method): getattr(w, method)(dtype=np.float64) @td.skip_if_no_scipy - @pytest.mark.parametrize("arg", ["median", "var", "std", "kurt", "skew"]) + @pytest.mark.parametrize("arg", ["median", "kurt", "skew"]) def test_agg_function_support(self, arg): df = pd.DataFrame({"A": np.arange(5)}) roll = df.rolling(2, win_type="triang")
- [x] closes #26597
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

Used West's online [algorithm](https://dl.acm.org/citation.cfm?id=359153). [Here](http://www.nowozin.net/sebastian/blog/streaming-mean-and-variance-computation.html) is an explanation of the algorithm and a link to a pdf of the paper.

Tested the implementation with `win_type="boxcar"`, comparing it with the result of `Rolling.std`. Additionally, `_get_window` is split in two: `_get_win_type` extracts and validates the window-type arguments from kwargs, keeping them separate from the rolling function's own arguments (since `std` and `var` take an additional `ddof` argument). Shared docs for `std` and `var` are moved so they can be used with the `Window` functions.
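As a quick sanity check of the behaviour described above: after this change, uniform (boxcar) weights should reproduce the plain rolling variance and standard deviation. A usage sketch, assuming scipy is installed since `win_type` windows are generated via `scipy.signal`:

```python
import pandas as pd

s = pd.Series([5.0, 5, 6, 7, 5, 5, 5])

# Unweighted rolling variance / standard deviation.
plain_var = s.rolling(3).var()
plain_std = s.rolling(3).std()

# Weighted equivalents with uniform weights (requires scipy).
weighted_var = s.rolling(3, win_type="boxcar").var()
weighted_std = s.rolling(3, win_type="boxcar").std()

# Should agree up to floating-point tolerance.
pd.testing.assert_series_equal(plain_var, weighted_var)
pd.testing.assert_series_equal(plain_std, weighted_std)
```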
https://api.github.com/repos/pandas-dev/pandas/pulls/27682
2019-07-31T20:14:26Z
2019-11-08T14:45:02Z
2019-11-08T14:45:01Z
2019-11-09T17:15:26Z
BUG: fixes formatted value error for missing sheet (#27676)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 8c1ce1195369d..2b4dea2e91f7d 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -356,6 +356,7 @@ I/O - Bug in :func:`read_hdf` closing stores that it didn't open when Exceptions are raised (:issue:`28699`) - Bug in :meth:`DataFrame.read_json` where using ``orient="index"`` would not maintain the order (:issue:`28557`) - Bug in :meth:`DataFrame.to_html` where the length of the ``formatters`` argument was not verified (:issue:`28469`) +- Bug in :meth:`DataFrame.read_excel` with ``engine='ods'`` when ``sheet_name`` argument references a non-existent sheet (:issue:`27676`) - Bug in :meth:`pandas.io.formats.style.Styler` formatting for floating values not displaying decimals correctly (:issue:`13257`) Plotting diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 3be36663bac79..dc1d1e71ad686 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -60,7 +60,7 @@ def get_sheet_by_name(self, name: str): if table.getAttribute("name") == name: return table - raise ValueError("sheet {name} not found".format(name)) + raise ValueError("sheet {} not found".format(name)) def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: """Parse an ODF Table into a list of lists diff --git a/pandas/tests/io/excel/test_odf.py b/pandas/tests/io/excel/test_odf.py index 76871eddf1cee..47e610562a388 100644 --- a/pandas/tests/io/excel/test_odf.py +++ b/pandas/tests/io/excel/test_odf.py @@ -36,3 +36,11 @@ def test_read_writer_table(): result = pd.read_excel("writertable.odt", "Table1", index_col=0) tm.assert_frame_equal(result, expected) + + +def test_nonexistent_sheetname_raises(read_ext): + # GH-27676 + # Specifying a non-existent sheet_name parameter should throw an error + # with the sheet name. + with pytest.raises(ValueError, match="sheet xyz not found"): + pd.read_excel("blank.ods", sheet_name="xyz")
Fixes a formatted message at [io.excel._odfreader line 62](https://github.com/pandas-dev/pandas/blob/master/pandas/io/excel/_odfreader.py#L63)

- [x] closes #27676
- [x] tests added / passed
- [x] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
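The underlying mistake is easy to reproduce in isolation: a named placeholder paired with a positional argument raises `KeyError`, so the original "sheet not found" path crashed before it could report the missing sheet:

```python
name = "xyz"

# Buggy pattern: named placeholder, positional argument.
"sheet {name} not found".format(name)  # raises KeyError: 'name'

# Fixed pattern from the patch: positional placeholder.
"sheet {} not found".format(name)      # -> 'sheet xyz not found'
```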
https://api.github.com/repos/pandas-dev/pandas/pulls/27677
2019-07-31T15:19:32Z
2019-10-25T17:01:30Z
2019-10-25T17:01:30Z
2019-10-28T13:56:51Z
DOC: Fix length typo
diff --git a/pandas/core/window.py b/pandas/core/window.py index 2199daa743655..4b6a1cf2e9a04 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -175,7 +175,7 @@ def _dir_additions(self): def _get_window(self, other=None, **kwargs) -> int: """ - Returns window lenght + Returns window length Parameters ---------- @@ -395,7 +395,7 @@ def _apply( name : str, optional name of this function window : int/str, default to _get_window() - window lenght or offset + window length or offset center : bool, default to self.center check_minp : function, default to _use_window **kwargs
https://api.github.com/repos/pandas-dev/pandas/pulls/27675
2019-07-31T14:09:27Z
2019-07-31T15:40:49Z
2019-07-31T15:40:49Z
2019-07-31T15:40:50Z
Handle construction of string ExtensionArray from lists
diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 9528723a6dc0f..0c25cdf121cbb 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -468,30 +468,27 @@ def sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False else: subarr = com.asarray_tuplesafe(data, dtype=dtype) - # This is to prevent mixed-type Series getting all casted to - # NumPy string type, e.g. NaN --> '-1#IND'. - if issubclass(subarr.dtype.type, str): - # GH#16605 - # If not empty convert the data to dtype - # GH#19853: If data is a scalar, subarr has already the result - if not lib.is_scalar(data): - if not np.all(isna(data)): - data = np.array(data, dtype=dtype, copy=False) - subarr = np.array(data, dtype=object, copy=copy) - - if ( - not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)) - and is_object_dtype(subarr.dtype) - and not is_object_dtype(dtype) - ): - inferred = lib.infer_dtype(subarr, skipna=False) - if inferred == "period": - from pandas.core.arrays import period_array + if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)): + # This is to prevent mixed-type Series getting all casted to + # NumPy string type, e.g. NaN --> '-1#IND'. + if issubclass(subarr.dtype.type, str): + # GH#16605 + # If not empty convert the data to dtype + # GH#19853: If data is a scalar, subarr has already the result + if not lib.is_scalar(data): + if not np.all(isna(data)): + data = np.array(data, dtype=dtype, copy=False) + subarr = np.array(data, dtype=object, copy=copy) - try: - subarr = period_array(subarr) - except IncompatibleFrequency: - pass + if is_object_dtype(subarr.dtype) and not is_object_dtype(dtype): + inferred = lib.infer_dtype(subarr, skipna=False) + if inferred == "period": + from pandas.core.arrays import period_array + + try: + subarr = period_array(subarr) + except IncompatibleFrequency: + pass return subarr diff --git a/pandas/tests/extension/arrow/bool.py b/pandas/tests/extension/arrow/arrays.py similarity index 80% rename from pandas/tests/extension/arrow/bool.py rename to pandas/tests/extension/arrow/arrays.py index eb75d6d968073..6a28f76e474cc 100644 --- a/pandas/tests/extension/arrow/bool.py +++ b/pandas/tests/extension/arrow/arrays.py @@ -43,18 +43,27 @@ def _is_boolean(self): return True -class ArrowBoolArray(ExtensionArray): - def __init__(self, values): - if not isinstance(values, pa.ChunkedArray): - raise ValueError +@register_extension_dtype +class ArrowStringDtype(ExtensionDtype): - assert values.type == pa.bool_() - self._data = values - self._dtype = ArrowBoolDtype() + type = str + kind = "U" + name = "arrow_string" + na_value = pa.NULL + + @classmethod + def construct_from_string(cls, string): + if string == cls.name: + return cls() + else: + raise TypeError("Cannot construct a '{}' from '{}'".format(cls, string)) + + @classmethod + def construct_array_type(cls): + return ArrowStringArray - def __repr__(self): - return "ArrowBoolArray({})".format(repr(self._data)) +class ArrowExtensionArray(ExtensionArray): @classmethod def from_scalars(cls, values): arr = pa.chunked_array([pa.array(np.asarray(values))]) @@ -69,6 +78,9 @@ def from_array(cls, arr): def _from_sequence(cls, scalars, dtype=None, copy=False): return cls.from_scalars(scalars) + def __repr__(self): + return "{cls}({data})".format(cls=type(self).__name__, data=repr(self._data)) + def __getitem__(self, item): if pd.api.types.is_scalar(item): return self._data.to_pandas()[item] @@ -142,3 +154,23 @@ def any(self, axis=0, 
out=None): def all(self, axis=0, out=None): return self._data.to_pandas().all() + + +class ArrowBoolArray(ArrowExtensionArray): + def __init__(self, values): + if not isinstance(values, pa.ChunkedArray): + raise ValueError + + assert values.type == pa.bool_() + self._data = values + self._dtype = ArrowBoolDtype() + + +class ArrowStringArray(ArrowExtensionArray): + def __init__(self, values): + if not isinstance(values, pa.ChunkedArray): + raise ValueError + + assert values.type == pa.string() + self._data = values + self._dtype = ArrowStringDtype() diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py index 205edf5da5b74..cc0deca765b41 100644 --- a/pandas/tests/extension/arrow/test_bool.py +++ b/pandas/tests/extension/arrow/test_bool.py @@ -7,7 +7,7 @@ pytest.importorskip("pyarrow", minversion="0.10.0") -from .bool import ArrowBoolArray, ArrowBoolDtype # isort:skip +from .arrays import ArrowBoolArray, ArrowBoolDtype # isort:skip @pytest.fixture diff --git a/pandas/tests/extension/arrow/test_string.py b/pandas/tests/extension/arrow/test_string.py new file mode 100644 index 0000000000000..06f149aa4b75f --- /dev/null +++ b/pandas/tests/extension/arrow/test_string.py @@ -0,0 +1,13 @@ +import pytest + +import pandas as pd + +pytest.importorskip("pyarrow", minversion="0.10.0") + +from .arrays import ArrowStringDtype # isort:skip + + +def test_constructor_from_list(): + # GH 27673 + result = pd.Series(["E"], dtype=ArrowStringDtype()) + assert isinstance(result.dtype, ArrowStringDtype)
I had to add a string-based Arrow extension array to trigger the bug, but did not run the same test suite we use for the boolean array, since at the moment that would only add runtime without adding coverage; a minimal sketch of the fixed path is shown below. - [x] closes #27673 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
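For reference, a minimal sketch of the construction path this PR fixes, mirroring the new test; it assumes pyarrow >= 0.10.0 and imports the test-only dtype, which is not part of the public pandas API:

```python
import pandas as pd

# Test-only dtype added by this PR in pandas/tests/extension/arrow/arrays.py;
# requires pyarrow >= 0.10.0.
from pandas.tests.extension.arrow.arrays import ArrowStringDtype

# Before the fix, sanitize_array pushed extension-dtype data through the
# NumPy string-coercion path, so constructing from a plain list failed.
result = pd.Series(["E"], dtype=ArrowStringDtype())
assert isinstance(result.dtype, ArrowStringDtype)
```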
https://api.github.com/repos/pandas-dev/pandas/pulls/27674
2019-07-31T13:18:15Z
2019-08-02T11:36:40Z
2019-08-02T11:36:40Z
2019-08-02T11:39:16Z
PERF: Improve performance of cut with IntervalIndex bins
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index 1aed756b841a5..cc373f413fb88 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -214,6 +214,7 @@ def setup(self, bins): self.datetime_series = pd.Series( np.random.randint(N, size=N), dtype="datetime64[ns]" ) + self.interval_bins = pd.IntervalIndex.from_breaks(np.linspace(0, N, bins)) def time_cut_int(self, bins): pd.cut(self.int_series, bins) @@ -239,6 +240,14 @@ def time_qcut_timedelta(self, bins): def time_qcut_datetime(self, bins): pd.qcut(self.datetime_series, bins) + def time_cut_interval(self, bins): + # GH 27668 + pd.cut(self.int_series, self.interval_bins) + + def peakmem_cut_interval(self, bins): + # GH 27668 + pd.cut(self.int_series, self.interval_bins) + class Explode: param_names = ["n_rows", "max_list_length"] diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index cc4bab8b9a923..97b4c6cd464e5 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -71,6 +71,7 @@ Performance improvements - Performance improvement in indexing with a non-unique :class:`IntervalIndex` (:issue:`27489`) - Performance improvement in `MultiIndex.is_monotonic` (:issue:`27495`) +- Performance improvement in :func:`cut` when ``bins`` is an :class:`IntervalIndex` (:issue:`27668`) .. _whatsnew_1000.bug_fixes: diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 949cad6073913..ab354a21a33df 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -373,8 +373,7 @@ def _bins_to_cuts( if isinstance(bins, IntervalIndex): # we have a fast-path here ids = bins.get_indexer(x) - result = algos.take_nd(bins, ids) - result = Categorical(result, categories=bins, ordered=True) + result = Categorical.from_codes(ids, categories=bins, ordered=True) return result, bins unique_bins = algos.unique(bins)
- [X] closes #27668 - [X] benchmarks added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry ASV output: ``` before after ratio [143bc34a] [d531e7b0] <master> <perf-cut-ii> - 198M 122M 0.62 reshape.Cut.peakmem_cut_interval(1000) - 197M 122M 0.62 reshape.Cut.peakmem_cut_interval(4) - 197M 122M 0.62 reshape.Cut.peakmem_cut_interval(10) - 2.11±0.02s 910±2ms 0.43 reshape.Cut.time_cut_interval(1000) - 1.95±0.01s 761±2ms 0.39 reshape.Cut.time_cut_interval(4) - 1.95±0.03s 763±10ms 0.39 reshape.Cut.time_cut_interval(10) SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. PERFORMANCE INCREASED. ```
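For illustration, a small sketch of the benchmarked call (the values are arbitrary):

```python
import numpy as np
import pandas as pd

# When bins is an IntervalIndex, cut now builds the result directly with
# Categorical.from_codes on the indexer, skipping the take_nd pass and the
# full Categorical constructor the old fast path used.
values = pd.Series(np.random.randint(0, 100, size=10_000))
bins = pd.IntervalIndex.from_breaks(np.linspace(0, 100, num=5))
result = pd.cut(values, bins)
print(result.dtype)  # category, with interval-valued categories
```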
https://api.github.com/repos/pandas-dev/pandas/pulls/27669
2019-07-31T04:06:28Z
2019-07-31T12:05:34Z
2019-07-31T12:05:34Z
2019-07-31T13:32:56Z
BUG: fix nested meta path bug (GH 27220)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index e1fe2f7fe77e2..2111fd2f1dd37 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -83,6 +83,9 @@ Performance improvements Bug fixes ~~~~~~~~~ +- Fix bug in :meth:`io.json.json_normalize` when nested meta paths with a nested record path. (:issue:`27220`) +- +- Categorical ^^^^^^^^^^^ diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index 24a255c78f3c0..b5aabea0957b0 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -288,12 +288,14 @@ def _recursive_extract(data, path, seen_meta, level=0): if len(path) > 1: for obj in data: for val, key in zip(meta, meta_keys): - if level + 1 == len(val): - seen_meta[key] = _pull_field(obj, val[-1]) + + # Pull value for all the keys in case meta path and + # record path are on two branches + seen_meta[key] = _pull_field(obj, val[0]) _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1) else: - for obj in data: + for ind, obj in enumerate(data): recs = _pull_field(obj, path[0]) recs = [ nested_to_record(r, sep=sep, max_level=max_level) @@ -305,8 +307,26 @@ def _recursive_extract(data, path, seen_meta, level=0): # For repeating the metadata later lengths.append(len(recs)) for val, key in zip(meta, meta_keys): + + # Extract the value of the key when the level + # is at the meta path end if level + 1 > len(val): meta_val = seen_meta[key] + + # Extract the value of the key from seen_meta when + # meta path and record path are on two branches + elif seen_meta: + meta_val_obj = seen_meta[key] + + # Both the list case and the dict case are covered + meta_val = ( + meta_val_obj[ind][val[level]] + if isinstance(meta_val_obj, list) + else meta_val_obj[val[level]] + ) + + # At top level, seen_meta is empty, pull from data + # directly and raise KeyError if not found else: try: meta_val = _pull_field(obj, val[level:]) @@ -320,6 +340,7 @@ def _recursive_extract(data, path, seen_meta, level=0): "{err} is not always present".format(err=e) ) meta_vals[key].append(meta_val) + records.extend(recs) _recursive_extract(data, record_path, {}, level=0) diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 3ceddfc3c1db4..4e63673642abe 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -287,6 +287,31 @@ def test_shallow_nested(self): expected = DataFrame(ex_data, columns=result.columns) tm.assert_frame_equal(result, expected) + @pytest.mark.skipif(not PY36, reason="drop support for 3.5 soon") + def test_nested_meta_path_with_nested_record_path(self, state_data): + # GH 27220 + result = json_normalize( + data=state_data, + record_path=["counties", "name"], + meta=["state", "shortname", ["info", "governor"]], + errors="ignore", + ) + + ex_data = [ + [ + i + for word in ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"] + for i in word + ], + ["Florida"] * 21 + ["Ohio"] * 14, + ["FL"] * 21 + ["OH"] * 14, + ["Rick Scott"] * 21 + ["John Kasich"] * 14, + ] + expected = DataFrame(ex_data).T + expected.columns = [0, "state", "shortname", "info.governor"] + + tm.assert_frame_equal(result, expected) + def test_meta_name_conflict(self): data = [ {
- [x] closes #27220 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR attempts to solve the missing-value problems when using a nested meta path together with a nested record path. The assumption is that we want the value of a nested meta field even if it is not on the record path. For example, in GH 27220, ['info', 'governor'] and ['counties', 'name'] are not on the same path. The current behavior is to raise a KeyError; this PR changes it to return the value of the key whenever it can be found in the JSON. Let me know how we want to move forward with this. Thanks~
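To make the two-branch case concrete, here is a sketch with a hypothetical payload shape (the field names are illustrative, not taken from GH 27220):

```python
from pandas.io.json import json_normalize

data = [
    {
        "state": "Florida",
        "info": {"governor": "Rick Scott"},
        "counties": [
            {"name": "Dade", "cities": [{"city": "Miami"}]},
            {"name": "Broward", "cities": [{"city": "Fort Lauderdale"}]},
        ],
    }
]

# ['info', 'governor'] sits on a different branch than the record path, so
# the old code raised a KeyError once recursion descended into 'counties';
# the fix pulls the value from seen_meta instead.
result = json_normalize(
    data,
    record_path=["counties", "cities"],
    meta=["state", ["info", "governor"]],
)
```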
https://api.github.com/repos/pandas-dev/pandas/pulls/27667
2019-07-31T03:14:30Z
2019-12-02T00:57:36Z
null
2019-12-02T00:57:36Z
BUG: Allow plotting boolean values
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 58918f2d8c40e..974d14a4b424c 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -163,7 +163,7 @@ I/O Plotting ^^^^^^^^ -- +- Bug in :meth:`Series.plot` not able to plot boolean values (:issue:`23719`) - Groupby/resample/rolling diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index a3c1499845c2a..ec5c609c1b267 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -586,6 +586,8 @@ class PlotAccessor(PandasObject): mark_right : bool, default True When using a secondary_y axis, automatically mark the column labels with "(right)" in the legend + include_bool : bool, default is False + If True, boolean values can be plotted `**kwds` : keywords Options to pass to matplotlib plotting method diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index c2b37bb297ecb..50f0d16631a15 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -106,6 +106,7 @@ def __init__( colormap=None, table=False, layout=None, + include_bool=False, **kwds ): @@ -191,6 +192,7 @@ def __init__( self.colormap = colormap self.table = table + self.include_bool = include_bool self.kwds = kwds @@ -400,9 +402,12 @@ def _compute_plot_data(self): # GH16953, _convert is needed as fallback, for ``Series`` # with ``dtype == object`` data = data._convert(datetime=True, timedelta=True) - numeric_data = data.select_dtypes( - include=[np.number, "datetime", "datetimetz", "timedelta"] - ) + select_include_type = [np.number, "datetime", "datetimetz", "timedelta"] + + # GH23719, allow plotting boolean + if self.include_bool is True: + select_include_type.append(np.bool_) + numeric_data = data.select_dtypes(include=select_include_type) try: is_empty = numeric_data.empty diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 8b4a78e9195b5..111c3a70fc09c 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -167,6 +167,15 @@ def test_label(self): ax.legend() # draw it self._check_legend_labels(ax, labels=["LABEL"]) + def test_boolean(self): + # GH 23719 + s = Series([False, False, True]) + _check_plot_works(s.plot, include_bool=True) + + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + _check_plot_works(s.plot) + def test_line_area_nan_series(self): values = [1, 2, np.nan, 3] s = Series(values)
- [ ] closes #23719 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
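For illustration, a minimal sketch of the new keyword (assumes matplotlib is installed):

```python
import pandas as pd

s = pd.Series([False, False, True])

# Default behavior is unchanged: without include_bool=True, boolean data
# is still filtered out and s.plot() raises "no numeric data to plot".
ax = s.plot(include_bool=True)
```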
https://api.github.com/repos/pandas-dev/pandas/pulls/27665
2019-07-30T21:05:20Z
2019-08-12T19:00:03Z
2019-08-12T19:00:03Z
2020-01-12T21:49:40Z
issue#27482 - added a check for if obj is instance of type in _isna-new
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index fb67decb46b64..fb791eaafe25f 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -93,7 +93,7 @@ Indexing Missing ^^^^^^^ -- +- Bug in :func:`pandas.isnull` or :func:`pandas.isna` when the input is a type e.g. `type(pandas.Series())` (:issue:`27482`) - - diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 6f599a6be6021..056cd2222af3c 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -133,6 +133,8 @@ def _isna_new(obj): # hack (for now) because MI registers as ndarray elif isinstance(obj, ABCMultiIndex): raise NotImplementedError("isna is not defined for MultiIndex") + elif isinstance(obj, type): + return False elif isinstance( obj, ( @@ -171,6 +173,8 @@ def _isna_old(obj): # hack (for now) because MI registers as ndarray elif isinstance(obj, ABCMultiIndex): raise NotImplementedError("isna is not defined for MultiIndex") + elif isinstance(obj, type): + return False elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)): return _isna_ndarraylike_old(obj) elif isinstance(obj, ABCGeneric): diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index a688dec50bc95..bbc485ecf94f2 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -86,6 +86,10 @@ def test_isna_isnull(self, isna_f): assert not isna_f(np.inf) assert not isna_f(-np.inf) + # type + assert not isna_f(type(pd.Series())) + assert not isna_f(type(pd.DataFrame())) + # series for s in [ tm.makeFloatSeries(),
- [x] closes #27482 - [x] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
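A minimal sketch of the fixed behavior, mirroring the new tests:

```python
import pandas as pd

# Classes (rather than instances) now take the early isinstance(obj, type)
# branch and are reported as not-null instead of raising.
assert not pd.isna(type(pd.Series()))
assert not pd.isnull(type(pd.DataFrame()))
```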
https://api.github.com/repos/pandas-dev/pandas/pulls/27664
2019-07-30T20:25:02Z
2019-08-20T14:00:09Z
2019-08-20T14:00:08Z
2019-08-20T14:00:16Z
BUG: pd.crosstab not working when margin and normalize are set together
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 943a6adb7944e..792dfe4be055e 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -125,6 +125,7 @@ Reshaping ^^^^^^^^^ - A ``KeyError`` is now raised if ``.unstack()`` is called on a :class:`Series` or :class:`DataFrame` with a flat :class:`Index` passing a name which is not the correct one (:issue:`18303`) +- Bug in :meth:`DataFrame.crosstab` when ``margins`` set to ``True`` and ``normalize`` is not ``False``, an error is raised. (:issue:`27500`) - :meth:`DataFrame.join` now suppresses the ``FutureWarning`` when the sort parameter is specified (:issue:`21952`) - diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 79716520f6654..d653dd87308cf 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -611,13 +611,21 @@ def _normalize(table, normalize, margins, margins_name="All"): table = table.fillna(0) elif margins is True: - - column_margin = table.loc[:, margins_name].drop(margins_name) - index_margin = table.loc[margins_name, :].drop(margins_name) - table = table.drop(margins_name, axis=1).drop(margins_name) - # to keep index and columns names - table_index_names = table.index.names - table_columns_names = table.columns.names + # keep index and column of pivoted table + table_index = table.index + table_columns = table.columns + + # check if margin name is in (for MI cases) or equal to last + # index/column and save the column and index margin + if (margins_name not in table.iloc[-1, :].name) | ( + margins_name != table.iloc[:, -1].name + ): + raise ValueError("{} not in pivoted DataFrame".format(margins_name)) + column_margin = table.iloc[:-1, -1] + index_margin = table.iloc[-1, :-1] + + # keep the core table + table = table.iloc[:-1, :-1] # Normalize core table = _normalize(table, normalize=normalize, margins=False) @@ -627,11 +635,13 @@ def _normalize(table, normalize, margins, margins_name="All"): column_margin = column_margin / column_margin.sum() table = concat([table, column_margin], axis=1) table = table.fillna(0) + table.columns = table_columns elif normalize == "index": index_margin = index_margin / index_margin.sum() table = table.append(index_margin) table = table.fillna(0) + table.index = table_index elif normalize == "all" or normalize is True: column_margin = column_margin / column_margin.sum() @@ -641,13 +651,12 @@ def _normalize(table, normalize, margins, margins_name="All"): table = table.append(index_margin) table = table.fillna(0) + table.index = table_index + table.columns = table_columns else: raise ValueError("Not a valid normalize argument") - table.index.names = table_index_names - table.columns.names = table_columns_names - else: raise ValueError("Not a valid margins argument") diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index be82e7f595f8c..03b15d2df1a26 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -2447,3 +2447,84 @@ def test_crosstab_unsorted_order(self): [[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns ) tm.assert_frame_equal(result, expected) + + def test_margin_normalize(self): + # GH 27500 + df = pd.DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], + "C": [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + "D": [1, 2, 2, 3, 
3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], + } + ) + # normalize on index + result = pd.crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=0 + ) + expected = pd.DataFrame( + [[0.5, 0.5], [0.5, 0.5], [0.666667, 0.333333], [0, 1], [0.444444, 0.555556]] + ) + expected.index = MultiIndex( + levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]], + codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]], + names=["A", "B"], + ) + expected.columns = Index(["large", "small"], dtype="object", name="C") + tm.assert_frame_equal(result, expected) + + # normalize on columns + result = pd.crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=1 + ) + expected = pd.DataFrame( + [ + [0.25, 0.2, 0.222222], + [0.25, 0.2, 0.222222], + [0.5, 0.2, 0.333333], + [0, 0.4, 0.222222], + ] + ) + expected.columns = Index( + ["large", "small", "Sub-Total"], dtype="object", name="C" + ) + expected.index = MultiIndex( + levels=[["bar", "foo"], ["one", "two"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=["A", "B"], + ) + tm.assert_frame_equal(result, expected) + + # normalize on both index and column + result = pd.crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=True + ) + expected = pd.DataFrame( + [ + [0.111111, 0.111111, 0.222222], + [0.111111, 0.111111, 0.222222], + [0.222222, 0.111111, 0.333333], + [0.000000, 0.222222, 0.222222], + [0.444444, 0.555555, 1], + ] + ) + expected.columns = Index( + ["large", "small", "Sub-Total"], dtype="object", name="C" + ) + expected.index = MultiIndex( + levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]], + codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]], + names=["A", "B"], + ) + tm.assert_frame_equal(result, expected)
- [ ] closes #27500 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
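For illustration, a small sketch of the combination this PR makes work (the data is arbitrary):

```python
import pandas as pd

df = pd.DataFrame(
    {
        "A": ["foo", "foo", "bar", "bar"],
        "B": ["one", "two", "one", "two"],
        "C": ["small", "large", "small", "large"],
    }
)

# The margin row/column is sliced off positionally, the core table is
# normalized, and the margins are normalized and re-attached with the
# original index/columns restored afterwards.
result = pd.crosstab(
    [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize="index"
)
```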
https://api.github.com/repos/pandas-dev/pandas/pulls/27663
2019-07-30T19:58:02Z
2019-08-06T15:45:31Z
2019-08-06T15:45:31Z
2019-08-06T15:45:32Z
DOC: add anonymizeIp for Google analytics in docs
diff --git a/doc/source/themes/nature_with_gtoc/layout.html b/doc/source/themes/nature_with_gtoc/layout.html index b3f13f99f44d4..6e7d8ece35133 100644 --- a/doc/source/themes/nature_with_gtoc/layout.html +++ b/doc/source/themes/nature_with_gtoc/layout.html @@ -94,15 +94,15 @@ <h3 style="margin-top: 1.5em;">{{ _('Search') }}</h3> }); }); </script> -<script type="text/javascript"> - var _gaq = _gaq || []; - _gaq.push(['_setAccount', 'UA-27880019-2']); - _gaq.push(['_trackPageview']); - (function() { - var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true; - ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js'; - var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s); - })(); +<!-- Google Analytics --> +<script> +window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date; +ga('create', 'UA-27880019-2', 'auto'); +ga('set', 'anonymizeIp', true); +ga('send', 'pageview'); </script> +<script async src='https://www.google-analytics.com/analytics.js'></script> +<!-- End Google Analytics --> + {% endblock %}
Using a more recent snippet (following Google's current docs) and adding anonymization of the IP (according to those docs, this should only "slightly reduce the accuracy of geolocation"). If we do this, we should do the same for the main website as well.
https://api.github.com/repos/pandas-dev/pandas/pulls/27662
2019-07-30T19:50:58Z
2019-08-01T20:42:19Z
2019-08-01T20:42:19Z
2020-02-03T09:29:21Z
BUG: pd.crosstab not working when margin and normalize are set together
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index eb60272246ebb..c0484f9217396 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -128,7 +128,7 @@ Groupby/resample/rolling Reshaping ^^^^^^^^^ -- +- Bug in :meth:`DataFrame.crosstab` when margins set to True and normalize is not False, an error is raised. (:issue:`27500`) - - diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 79716520f6654..1112f0393908a 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -611,13 +611,17 @@ def _normalize(table, normalize, margins, margins_name="All"): table = table.fillna(0) elif margins is True: - + # keep index and column of pivoted table + table_index = table.index + table_columns = table.columns + # drop margins created in pivot_table and only keep the core column_margin = table.loc[:, margins_name].drop(margins_name) - index_margin = table.loc[margins_name, :].drop(margins_name) + # separate cases between multiindex and index + if isinstance(table_index, MultiIndex): + index_margin = table.loc[margins_name, :].drop(margins_name, axis=1) + else: + index_margin = table.loc[margins_name, :].drop(margins_name) table = table.drop(margins_name, axis=1).drop(margins_name) - # to keep index and columns names - table_index_names = table.index.names - table_columns_names = table.columns.names # Normalize core table = _normalize(table, normalize=normalize, margins=False) @@ -627,11 +631,19 @@ def _normalize(table, normalize, margins, margins_name="All"): column_margin = column_margin / column_margin.sum() table = concat([table, column_margin], axis=1) table = table.fillna(0) + table.columns = table_columns elif normalize == "index": - index_margin = index_margin / index_margin.sum() + # index_margin is a dataframe, and use a hacky way: sum(axis=1)[0] + # to get the normalized result, and use sum() instead for series + if isinstance(index_margin, ABCDataFrame): + sum_index_margin = index_margin.sum(axis=1)[0] + else: + sum_index_margin = index_margin.sum() + index_margin = index_margin / sum_index_margin table = table.append(index_margin) table = table.fillna(0) + table.index = table_index elif normalize == "all" or normalize is True: column_margin = column_margin / column_margin.sum() @@ -641,13 +653,12 @@ def _normalize(table, normalize, margins, margins_name="All"): table = table.append(index_margin) table = table.fillna(0) + table.index = table_index + table.columns = table_columns else: raise ValueError("Not a valid normalize argument") - table.index.names = table_index_names - table.columns.names = table_columns_names - else: raise ValueError("Not a valid margins argument") diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index be82e7f595f8c..ac12165aa0fd1 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -2447,3 +2447,38 @@ def test_crosstab_unsorted_order(self): [[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns ) tm.assert_frame_equal(result, expected) + + def test_margin_normalize(self): + df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", + "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", + "one", "one", "two", "two"], + "C": ["small", "large", "large", "small", + "small", "large", "small", "small", + "large"], + "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}) + # normalize on index + result = pd.crosstab([df.A, df.B], df.C, 
margins=True, margins_name='Sub-Total', + normalize=0) + expected = pd.DataFrame([[0.5, 0.5], [0.5, 0.5], + [0.666667, 0.333333], [0, 1], + [0.444444, 0.555556]]) + expected.index = MultiIndex(levels=[['Sub-Total', 'bar', 'foo'], + ['', 'one', 'two']], + codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]], + names=['A', 'B']) + expected.columns = Index(['large', 'small'], dtype='object', name='C') + tm.assert_frame_equal(result, expected) + + # normalize on columns + result = pd.crosstab([df.A, df.B], df.C, margins=True, margins_name='Sub-Total', + normalize=1) + expected = pd.DataFrame([[0.25, 0.2, 0.222222], [0.25, 0.2, 0.222222], + [0.5, 0.2, 0.333333], [0, 0.4, 0.222222]]) + expected.columns = Index(['large', 'small', 'Sub-Total'], dtype='object', + name='C') + expected.index = MultiIndex(levels=[['bar', 'foo'], ['one', 'two']], + codes=[[1, 1, 2, 2], [1, 2, 1, 2]], + names=['A, B']) + tm.assert_frame_equal(result, expected)
- [ ] closes #27500 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27661
2019-07-30T18:46:31Z
2019-07-30T19:58:19Z
null
2019-07-30T19:58:19Z
Backport PR #27651: DOC: improve warnings for Series.{real,imag}
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index fa9ca98f9c8d8..fb67decb46b64 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -64,7 +64,7 @@ Numeric Conversion ^^^^^^^^^^ -- +- Improved the warnings for the deprecated methods :meth:`Series.real` and :meth:`Series.imag` (:issue:`27610`) - - diff --git a/pandas/core/series.py b/pandas/core/series.py index 59ea8c6bd6c5d..42afb3537c5d8 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -958,7 +958,9 @@ def real(self): .. deprecated 0.25.0 """ warnings.warn( - "`real` has be deprecated and will be removed in a " "future verison", + "`real` is deprecated and will be removed in a future version. " + "To eliminate this warning for a Series `ser`, use " + "`np.real(ser.to_numpy())` or `ser.to_numpy().real`.", FutureWarning, stacklevel=2, ) @@ -976,7 +978,9 @@ def imag(self): .. deprecated 0.25.0 """ warnings.warn( - "`imag` has be deprecated and will be removed in a " "future verison", + "`imag` is deprecated and will be removed in a future version. " + "To eliminate this warning for a Series `ser`, use " + "`np.imag(ser.to_numpy())` or `ser.to_numpy().imag`.", FutureWarning, stacklevel=2, )
Manual backport of #27651
https://api.github.com/repos/pandas-dev/pandas/pulls/27659
2019-07-30T13:33:56Z
2019-07-30T14:46:07Z
2019-07-30T14:46:07Z
2019-07-30T14:56:16Z
BUG: Fix dir(interval_index)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index fb67decb46b64..40fefe7ec43a8 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -78,7 +78,7 @@ Strings Interval ^^^^^^^^ - +- Bug in :class:`IntervalIndex` where `dir(obj)` would raise ``ValueError`` (:issue:`27571`) - - - diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index d430cb3d3913f..abaa1c639f048 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -925,6 +925,7 @@ _TYPE_MAP = { 'M': 'datetime64', 'timedelta64[ns]': 'timedelta64', 'm': 'timedelta64', + 'interval': 'interval', } # types only exist on certain platform diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 54882d039f135..f52f98db9fc84 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1953,8 +1953,11 @@ def _validate(data): values = getattr(data, "values", data) # Series / Index values = getattr(values, "categories", values) # categorical / normal - # missing values obfuscate type inference -> skip - inferred_dtype = lib.infer_dtype(values, skipna=True) + try: + inferred_dtype = lib.infer_dtype(values, skipna=True) + except ValueError: + # GH#27571 mostly occurs with ExtensionArray + inferred_dtype = None if inferred_dtype not in allowed_types: raise AttributeError("Can only use .str accessor with string values!") diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index ff48ae9b3c2e5..2933dfca736be 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1153,6 +1153,17 @@ def test_categorical(self): result = lib.infer_dtype(Series(arr), skipna=True) assert result == "categorical" + def test_interval(self): + idx = pd.IntervalIndex.from_breaks(range(5), closed="both") + inferred = lib.infer_dtype(idx, skipna=False) + assert inferred == "interval" + + inferred = lib.infer_dtype(idx._data, skipna=False) + assert inferred == "interval" + + inferred = lib.infer_dtype(pd.Series(idx), skipna=False) + assert inferred == "interval" + class TestNumberScalar: def test_is_number(self): diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index c61af1ce70aed..c1a21e6a7f152 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -1095,3 +1095,10 @@ def test_is_all_dates(self): ) year_2017_index = pd.IntervalIndex([year_2017]) assert not year_2017_index.is_all_dates + + +def test_dir(): + # GH#27571 dir(interval_index) should not raise + index = IntervalIndex.from_arrays([0, 1], [1, 2]) + result = dir(index) + assert "str" not in result
- [x] closes #27571 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
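A minimal sketch of the fixed behavior, mirroring the new test:

```python
import pandas as pd

idx = pd.IntervalIndex.from_arrays([0, 1], [1, 2])

# dir() probes the .str accessor, whose validation calls lib.infer_dtype;
# interval data now infers as "interval" instead of raising ValueError.
attrs = dir(idx)
assert "str" not in attrs
```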
https://api.github.com/repos/pandas-dev/pandas/pulls/27653
2019-07-30T02:31:56Z
2019-08-01T12:56:49Z
2019-08-01T12:56:49Z
2019-08-01T14:07:24Z
improve warnings for Series.{real,imag}
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index fa9ca98f9c8d8..fb67decb46b64 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -64,7 +64,7 @@ Numeric Conversion ^^^^^^^^^^ -- +- Improved the warnings for the deprecated methods :meth:`Series.real` and :meth:`Series.imag` (:issue:`27610`) - - diff --git a/pandas/core/series.py b/pandas/core/series.py index b445ff5f944de..106bb3c7d6cb4 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -955,7 +955,9 @@ def real(self): .. deprecated:: 0.25.0 """ warnings.warn( - "`real` has be deprecated and will be removed in a future version", + "`real` is deprecated and will be removed in a future version. " + "To eliminate this warning for a Series `ser`, use " + "`np.real(ser.to_numpy())` or `ser.to_numpy().real`.", FutureWarning, stacklevel=2, ) @@ -973,7 +975,9 @@ def imag(self): .. deprecated:: 0.25.0 """ warnings.warn( - "`imag` has be deprecated and will be removed in a future version", + "`imag` is deprecated and will be removed in a future version. " + "To eliminate this warning for a Series `ser`, use " + "`np.imag(ser.to_numpy())` or `ser.to_numpy().imag`.", FutureWarning, stacklevel=2, )
- [x] closes #27610 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
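For reference, the replacements now spelled out in the warning text:

```python
import numpy as np
import pandas as pd

ser = pd.Series([1 + 2j, 3 - 4j])

# Recommended in the new deprecation message:
real_part = np.real(ser.to_numpy())  # or ser.to_numpy().real
imag_part = np.imag(ser.to_numpy())  # or ser.to_numpy().imag
```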
https://api.github.com/repos/pandas-dev/pandas/pulls/27651
2019-07-29T22:57:10Z
2019-07-30T09:10:49Z
2019-07-30T09:10:49Z
2019-08-21T21:09:15Z
issue #27642 - timedelta merge asof with tolerance
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 680d69a9862cd..33296045fa05c 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -95,6 +95,7 @@ Reshaping ^^^^^^^^^ - A ``KeyError`` is now raised if ``.unstack()`` is called on a :class:`Series` or :class:`DataFrame` with a flat :class:`Index` passing a name which is not the correct one (:issue:`18303`) +- Bug :meth:`merge_asof` could not merge :class:`Timedelta` objects when passing `tolerance` kwarg (:issue:`27642`) - Bug in :meth:`DataFrame.crosstab` when ``margins`` set to ``True`` and ``normalize`` is not ``False``, an error is raised. (:issue:`27500`) - :meth:`DataFrame.join` now suppresses the ``FutureWarning`` when the sort parameter is specified (:issue:`21952`) - Bug in :meth:`DataFrame.join` raising with readonly arrays (:issue:`27943`) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index f45c7693bf6ed..225de3f11cf7d 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -22,7 +22,6 @@ is_bool, is_bool_dtype, is_categorical_dtype, - is_datetime64_dtype, is_datetime64tz_dtype, is_datetimelike, is_dtype_equal, @@ -1635,7 +1634,7 @@ def _get_merge_keys(self): ) ) - if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt): + if is_datetimelike(lt): if not isinstance(self.tolerance, Timedelta): raise MergeError(msg) if self.tolerance < Timedelta(0): diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 6b66386bafc5e..7412b1de643a1 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -1,3 +1,5 @@ +import datetime + import numpy as np import pytest import pytz @@ -588,14 +590,23 @@ def test_non_sorted(self): # ok, though has dupes merge_asof(trades, self.quotes, on="time", by="ticker") - def test_tolerance(self): + @pytest.mark.parametrize( + "tolerance", + [ + Timedelta("1day"), + pytest.param( + datetime.timedelta(days=1), + marks=pytest.mark.xfail(reason="not implemented", strict=True), + ), + ], + ids=["pd.Timedelta", "datetime.timedelta"], + ) + def test_tolerance(self, tolerance): trades = self.trades quotes = self.quotes - result = merge_asof( - trades, quotes, on="time", by="ticker", tolerance=Timedelta("1day") - ) + result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance) expected = self.tolerance assert_frame_equal(result, expected) @@ -1246,3 +1257,39 @@ def test_by_mixed_tz_aware(self): ) expected["value_y"] = np.array([np.nan], dtype=object) assert_frame_equal(result, expected) + + def test_timedelta_tolerance_nearest(self): + # GH 27642 + + left = pd.DataFrame( + list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])), + columns=["time", "left"], + ) + + left["time"] = pd.to_timedelta(left["time"], "ms") + + right = pd.DataFrame( + list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])), + columns=["time", "right"], + ) + + right["time"] = pd.to_timedelta(right["time"], "ms") + + expected = pd.DataFrame( + list( + zip( + [0, 5, 10, 15, 20, 25], + [0, 1, 2, 3, 4, 5], + [0, np.nan, 2, 4, np.nan, np.nan], + ) + ), + columns=["time", "left", "right"], + ) + + expected["time"] = pd.to_timedelta(expected["time"], "ms") + + result = pd.merge_asof( + left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest" + ) + + assert_frame_equal(result, expected)
- [X] closes #27642 - [x] tests 2 / 1 [added / passed] - [X] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry - [ ] \(Optional) allow users to pass datetime.timedelta objects for tolerance
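A minimal sketch of the newly supported call, condensed from the added test:

```python
import pandas as pd

left = pd.DataFrame({"time": pd.to_timedelta([0, 5, 10], "ms"), "left": [0, 1, 2]})
right = pd.DataFrame({"time": pd.to_timedelta([0, 3, 9], "ms"), "right": [0, 1, 2]})

# timedelta64 'on' columns now pass the is_datetimelike check, so a
# pd.Timedelta tolerance is accepted; a plain datetime.timedelta still is
# not (see the xfail in the parametrized test).
result = pd.merge_asof(
    left, right, on="time", tolerance=pd.Timedelta("1ms"), direction="nearest"
)
```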
https://api.github.com/repos/pandas-dev/pandas/pulls/27650
2019-07-29T21:35:20Z
2019-08-22T13:09:50Z
2019-08-22T13:09:49Z
2021-06-27T18:50:43Z
CLN: all the things
diff --git a/pandas/_config/config.py b/pandas/_config/config.py index 61e926035c3f2..4f0720abd1445 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -110,7 +110,7 @@ def _set_option(*args, **kwargs): # must at least 1 arg deal with constraints later nargs = len(args) if not nargs or nargs % 2 != 0: - raise ValueError("Must provide an even number of non-keyword " "arguments") + raise ValueError("Must provide an even number of non-keyword arguments") # default to false silent = kwargs.pop("silent", False) @@ -395,7 +395,7 @@ class option_context: def __init__(self, *args): if not (len(args) % 2 == 0 and len(args) >= 2): raise ValueError( - "Need to invoke as" " option_context(pat, val, [(pat, val), ...])." + "Need to invoke as option_context(pat, val, [(pat, val), ...])." ) self.ops = list(zip(args[::2], args[1::2])) diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index 89f7d71e21e9d..c2fe7d1dd12f4 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -59,7 +59,7 @@ def __call__(self, args, kwargs, fname=None, max_fname_arg_count=None, method=No ) else: raise ValueError( - "invalid validation method " "'{method}'".format(method=method) + "invalid validation method '{method}'".format(method=method) ) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 8e76ad8a375f7..2e086c8ce8c34 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -496,7 +496,7 @@ def _generate_range( if start is None and end is None: if closed is not None: raise ValueError( - "Closed has to be None if not both of startand end are defined" + "Closed has to be None if not both of start and end are defined" ) if start is NaT or end is NaT: raise ValueError("Neither `start` nor `end` can be NaT") diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 4dc1dfcae0777..da3db1c18e534 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -227,7 +227,7 @@ def aggregate(self, func, *args, **kwargs): kwargs = {} elif func is None: # nicer error message - raise TypeError("Must provide 'func' or tuples of " "'(column, aggfunc).") + raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).") func = _maybe_mangle_lambdas(func) @@ -836,9 +836,7 @@ def aggregate(self, func_or_funcs=None, *args, **kwargs): relabeling = func_or_funcs is None columns = None - no_arg_message = ( - "Must provide 'func_or_funcs' or named " "aggregation **kwargs." - ) + no_arg_message = "Must provide 'func_or_funcs' or named aggregation **kwargs." 
if relabeling: columns = list(kwargs) if not PY36: diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 5961a7ff72832..15b94e59c065c 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -462,7 +462,7 @@ def get_converter(s): name_sample = names[0] if isinstance(index_sample, tuple): if not isinstance(name_sample, tuple): - msg = "must supply a tuple to get_group with multiple" " grouping keys" + msg = "must supply a tuple to get_group with multiple grouping keys" raise ValueError(msg) if not len(name_sample) == len(index_sample): try: @@ -715,7 +715,7 @@ def f(g): else: raise ValueError( - "func must be a callable if args or " "kwargs are supplied" + "func must be a callable if args or kwargs are supplied" ) else: f = func @@ -1872,7 +1872,7 @@ def quantile(self, q=0.5, interpolation="linear"): def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): raise TypeError( - "'quantile' cannot be performed against " "'object' dtypes!" + "'quantile' cannot be performed against 'object' dtypes!" ) inference = None @@ -2201,9 +2201,7 @@ def _get_cythonized_result( `Series` or `DataFrame` with filled values """ if result_is_index and aggregate: - raise ValueError( - "'result_is_index' and 'aggregate' cannot both " "be True!" - ) + raise ValueError("'result_is_index' and 'aggregate' cannot both be True!") if post_processing: if not callable(pre_processing): raise ValueError("'post_processing' must be a callable!") @@ -2212,7 +2210,7 @@ def _get_cythonized_result( raise ValueError("'pre_processing' must be a callable!") if not needs_values: raise ValueError( - "Cannot use 'pre_processing' without " "specifying 'needs_values'!" + "Cannot use 'pre_processing' without specifying 'needs_values'!" 
) labels, _, ngroups = grouper.group_info diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index f8417c3f01eac..1d88ebd26b1b6 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -25,6 +25,7 @@ from pandas.core.arrays import Categorical, ExtensionArray import pandas.core.common as com from pandas.core.frame import DataFrame +from pandas.core.groupby.categorical import recode_for_groupby, recode_from_groupby from pandas.core.groupby.ops import BaseGrouper from pandas.core.index import CategoricalIndex, Index, MultiIndex from pandas.core.series import Series @@ -310,8 +311,6 @@ def __init__( # a passed Categorical elif is_categorical_dtype(self.grouper): - from pandas.core.groupby.categorical import recode_for_groupby - self.grouper, self.all_grouper = recode_for_groupby( self.grouper, self.sort, observed ) @@ -361,13 +360,10 @@ def __init__( # Timestamps like if getattr(self.grouper, "dtype", None) is not None: if is_datetime64_dtype(self.grouper): - from pandas import to_datetime - - self.grouper = to_datetime(self.grouper) + self.grouper = self.grouper.astype("datetime64[ns]") elif is_timedelta64_dtype(self.grouper): - from pandas import to_timedelta - self.grouper = to_timedelta(self.grouper) + self.grouper = self.grouper.astype("timedelta64[ns]") def __repr__(self): return "Grouping({0})".format(self.name) @@ -400,8 +396,6 @@ def labels(self): @cache_readonly def result_index(self): if self.all_grouper is not None: - from pandas.core.groupby.categorical import recode_from_groupby - return recode_from_groupby(self.all_grouper, self.sort, self.group_index) return self.group_index @@ -493,12 +487,12 @@ def _get_grouper( elif nlevels == 0: raise ValueError("No group keys passed!") else: - raise ValueError("multiple levels only valid with " "MultiIndex") + raise ValueError("multiple levels only valid with MultiIndex") if isinstance(level, str): if obj.index.name != level: raise ValueError( - "level name {} is not the name of the " "index".format(level) + "level name {} is not the name of the index".format(level) ) elif level > 0 or level < -1: raise ValueError("level > 0 or level < -1 only valid with MultiIndex") diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index cc8aec4cc243b..1484feeeada64 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -467,12 +467,12 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs): elif is_datetime64_any_dtype(values): if how in ["add", "prod", "cumsum", "cumprod"]: raise NotImplementedError( - "datetime64 type does not support {} " "operations".format(how) + "datetime64 type does not support {} operations".format(how) ) elif is_timedelta64_dtype(values): if how in ["prod", "cumprod"]: raise NotImplementedError( - "timedelta64 type does not support {} " "operations".format(how) + "timedelta64 type does not support {} operations".format(how) ) arity = self._cython_arity.get(how, 1) @@ -489,7 +489,7 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, **kwargs): values = values.T if arity > 1: raise NotImplementedError( - "arity of more than 1 is not " "supported for the 'how' argument" + "arity of more than 1 is not supported for the 'how' argument" ) out_shape = (self.ngroups,) + values.shape[1:] @@ -604,9 +604,7 @@ def _aggregate( ): if values.ndim > 3: # punting for now - raise NotImplementedError( - "number of dimensions is currently " "limited to 3" - ) + raise NotImplementedError("number of dimensions is 
currently limited to 3") elif values.ndim > 2: for i, chunk in enumerate(values.transpose(2, 0, 1)): @@ -631,9 +629,7 @@ def _transform( comp_ids, _, ngroups = self.group_info if values.ndim > 3: # punting for now - raise NotImplementedError( - "number of dimensions is currently " "limited to 3" - ) + raise NotImplementedError("number of dimensions is currently limited to 3") elif values.ndim > 2: for i, chunk in enumerate(values.transpose(2, 0, 1)): diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 5ba23990cbd51..2036728e702f3 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -340,4 +340,4 @@ def __new__(cls, data): except Exception: pass # we raise an attribute error anyway - raise AttributeError("Can only use .dt accessor with datetimelike " "values") + raise AttributeError("Can only use .dt accessor with datetimelike values") diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 745f8f3c90ea8..8cacd22fb1cb1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -376,9 +376,7 @@ def __new__( data = maybe_cast_to_integer_array(data, dtype, copy=copy) elif inferred in ["floating", "mixed-integer-float"]: if isna(data).any(): - raise ValueError( - "cannot convert float " "NaN to integer" - ) + raise ValueError("cannot convert float NaN to integer") if inferred == "mixed-integer-float": data = maybe_cast_to_integer_array(data, dtype) @@ -1182,7 +1180,7 @@ def summary(self, name=None): .. deprecated:: 0.23.0 """ warnings.warn( - "'summary' is deprecated and will be removed in a " "future version.", + "'summary' is deprecated and will be removed in a future version.", FutureWarning, stacklevel=2, ) @@ -1521,7 +1519,7 @@ def _validate_index_level(self, level): ) elif level > 0: raise IndexError( - "Too many levels:" " Index has only 1 level, not %d" % (level + 1) + "Too many levels: Index has only 1 level, not %d" % (level + 1) ) elif level != self.name: raise KeyError( @@ -2953,7 +2951,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): if not self.is_unique: raise InvalidIndexError( - "Reindexing only valid with uniquely" " valued Index objects" + "Reindexing only valid with uniquely valued Index objects" ) if method == "pad" or method == "backfill": @@ -2980,7 +2978,7 @@ def _convert_tolerance(self, tolerance, target): # override this method on subclasses tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: - raise ValueError("list-like tolerance size must match " "target index size") + raise ValueError("list-like tolerance size must match target index size") return tolerance def _get_fill_indexer(self, target, method, limit=None, tolerance=None): @@ -3712,9 +3710,7 @@ def _get_leaf_sorter(labels): return lib.get_level_sorter(lab, ensure_int64(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): - raise TypeError( - "Join on level between two MultiIndex objects " "is ambiguous" - ) + raise TypeError("Join on level between two MultiIndex objects is ambiguous") left, right = self, other @@ -3728,7 +3724,7 @@ def _get_leaf_sorter(labels): if not right.is_unique: raise NotImplementedError( - "Index._join_level on non-unique index " "is not implemented" + "Index._join_level on non-unique index is not implemented" ) new_level, left_lev_indexer, right_lev_indexer = old_level.join( @@ -4554,9 +4550,7 @@ def sort(self, *args, **kwargs): """ Use sort_values instead. 
""" - raise TypeError( - "cannot sort an Index object in-place, use " "sort_values instead" - ) + raise TypeError("cannot sort an Index object in-place, use sort_values instead") def shift(self, periods=1, freq=None): """ @@ -5205,7 +5199,7 @@ def slice_locs(self, start=None, end=None, step=None, kind=None): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): - raise ValueError("Both dates must have the " "same UTC offset") + raise ValueError("Both dates must have the same UTC offset") start_slice = None if start is not None: @@ -5397,12 +5391,10 @@ def _validate_for_numeric_binop(self, other, op): if isinstance(other, (Index, ABCSeries, np.ndarray)): if len(self) != len(other): - raise ValueError("cannot evaluate a numeric op with " "unequal lengths") + raise ValueError("cannot evaluate a numeric op with unequal lengths") other = com.values_from_object(other) if other.dtype.kind not in ["f", "i", "u"]: - raise TypeError( - "cannot evaluate a numeric op " "with a non-numeric dtype" - ) + raise TypeError("cannot evaluate a numeric op with a non-numeric dtype") elif isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)): # higher up to handle pass @@ -5571,7 +5563,7 @@ def logical_func(self, *args, **kwargs): return logical_func cls.all = _make_logical_function( - "all", "Return whether all elements " "are True.", np.all + "all", "Return whether all elements are True.", np.all ) cls.any = _make_logical_function( "any", "Return whether any element is True.", np.any diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index e14bf7f86c0be..0f6aa711adc90 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -7,6 +7,7 @@ from pandas._config import get_option from pandas._libs import index as libindex +from pandas._libs.hashtable import duplicated_int64 import pandas.compat as compat from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, cache_readonly @@ -25,7 +26,7 @@ from pandas._typing import AnyArrayLike from pandas.core import accessor from pandas.core.algorithms import take_1d -from pandas.core.arrays.categorical import Categorical, contains +from pandas.core.arrays.categorical import Categorical, _recode_for_categories, contains import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs @@ -290,7 +291,7 @@ def _is_dtype_compat(self, other): other = other._values if not other.is_dtype_equal(self): raise TypeError( - "categories must match existing categories " "when appending" + "categories must match existing categories when appending" ) else: values = other @@ -299,7 +300,7 @@ def _is_dtype_compat(self, other): other = CategoricalIndex(self._create_categorical(other, dtype=self.dtype)) if not other.isin(values).all(): raise TypeError( - "cannot append a non-category item to a " "CategoricalIndex" + "cannot append a non-category item to a CategoricalIndex" ) return other @@ -473,8 +474,6 @@ def unique(self, level=None): @Appender(Index.duplicated.__doc__) def duplicated(self, keep="first"): - from pandas._libs.hashtable import duplicated_int64 - codes = self.codes.astype("i8") return duplicated_int64(codes, keep) @@ -581,15 +580,15 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None): if method is not None: raise NotImplementedError( - "argument method is not implemented for " "CategoricalIndex.reindex" + "argument method is not implemented for CategoricalIndex.reindex" ) 
if level is not None: raise NotImplementedError( - "argument level is not implemented for " "CategoricalIndex.reindex" + "argument level is not implemented for CategoricalIndex.reindex" ) if limit is not None: raise NotImplementedError( - "argument limit is not implemented for " "CategoricalIndex.reindex" + "argument limit is not implemented for CategoricalIndex.reindex" ) target = ibase.ensure_index(target) @@ -657,8 +656,6 @@ def _reindex_non_unique(self, target): @Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): - from pandas.core.arrays.categorical import _recode_for_categories - method = missing.clean_reindex_fill_method(method) target = ibase.ensure_index(target) @@ -672,7 +669,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): ) elif method == "nearest": raise NotImplementedError( - "method='nearest' not implemented yet " "for CategoricalIndex" + "method='nearest' not implemented yet for CategoricalIndex" ) if isinstance(target, CategoricalIndex) and self.values.is_dtype_equal(target): diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 0fb8f6823ac18..af99c7a2754e5 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -325,7 +325,7 @@ def asobject(self): *this is an internal non-public method* """ warnings.warn( - "'asobject' is deprecated. Use 'astype(object)'" " instead", + "'asobject' is deprecated. Use 'astype(object)' instead", FutureWarning, stacklevel=2, ) @@ -335,7 +335,7 @@ def _convert_tolerance(self, tolerance, target): tolerance = np.asarray(to_timedelta(tolerance).to_numpy()) if target.size != tolerance.size and tolerance.size > 1: - raise ValueError("list-like tolerance size must match " "target index size") + raise ValueError("list-like tolerance size must match target index size") return tolerance def tolist(self): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e9296eea2b8a3..04522fde4521c 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -803,11 +803,9 @@ def _maybe_utc_convert(self, other): if isinstance(other, DatetimeIndex): if self.tz is not None: if other.tz is None: - raise TypeError( - "Cannot join tz-naive with tz-aware " "DatetimeIndex" - ) + raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") elif other.tz is not None: - raise TypeError("Cannot join tz-naive with tz-aware " "DatetimeIndex") + raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") if not timezones.tz_compare(self.tz, other.tz): this = self.tz_convert("UTC") @@ -1048,7 +1046,7 @@ def get_loc(self, key, method=None, tolerance=None): if isinstance(key, time): if method is not None: raise NotImplementedError( - "cannot yet lookup inexact labels " "when key is a time object" + "cannot yet lookup inexact labels when key is a time object" ) return self.indexer_at_time(key) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 66290ae54e626..d941dc547befe 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1058,7 +1058,7 @@ def insert(self, loc, item): if isinstance(item, Interval): if item.closed != self.closed: raise ValueError( - "inserted item must be closed on the same " "side as the index" + "inserted item must be closed on the same side as the index" ) left_insert = item.left right_insert = item.right @@ -1067,7 +1067,7 @@ def 
insert(self, loc, item): left_insert = right_insert = item else: raise ValueError( - "can only insert Interval objects and NA into " "an IntervalIndex" + "can only insert Interval objects and NA into an IntervalIndex" ) new_left = self.left.insert(loc, left_insert) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index a7c3449615299..488107690fbd6 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -8,6 +8,7 @@ from pandas._config import get_option from pandas._libs import Timestamp, algos as libalgos, index as libindex, lib, tslibs +from pandas._libs.hashtable import duplicated_int64 from pandas.compat.numpy import function as nv from pandas.errors import PerformanceWarning, UnsortedIndexError from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg @@ -29,6 +30,8 @@ from pandas.core.dtypes.missing import array_equivalent, isna import pandas.core.algorithms as algos +from pandas.core.arrays import Categorical +from pandas.core.arrays.categorical import _factorize_from_iterables import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( @@ -39,6 +42,12 @@ ) from pandas.core.indexes.frozen import FrozenList, _ensure_frozen import pandas.core.missing as missing +from pandas.core.sorting import ( + get_group_index, + indexer_from_factorized, + lexsort_indexer, +) +from pandas.core.util.hashing import hash_tuple, hash_tuples from pandas.io.formats.printing import ( format_object_attrs, @@ -415,8 +424,6 @@ def from_arrays(cls, arrays, sortorder=None, names=None): if len(arrays[i]) != len(arrays[i - 1]): raise ValueError("all arrays must be same length") - from pandas.core.arrays.categorical import _factorize_from_iterables - codes, levels = _factorize_from_iterables(arrays) if names is None: names = [getattr(arr, "name", None) for arr in arrays] @@ -527,7 +534,6 @@ def from_product(cls, iterables, sortorder=None, names=None): (2, 'purple')], names=['number', 'color']) """ - from pandas.core.arrays.categorical import _factorize_from_iterables from pandas.core.reshape.util import cartesian_product if not is_list_like(iterables): @@ -772,7 +778,7 @@ def codes(self): @property def labels(self): warnings.warn( - (".labels was deprecated in version 0.24.0. " "Use .codes instead."), + (".labels was deprecated in version 0.24.0. Use .codes instead."), FutureWarning, stacklevel=2, ) @@ -1213,7 +1219,7 @@ def _set_names(self, names, level=None, validate=True): raise ValueError("Length of names must match length of level.") if validate and level is None and len(names) != self.nlevels: raise ValueError( - "Length of names must match number of levels in " "MultiIndex." + "Length of names must match number of levels in MultiIndex." 
) if level is None: @@ -1280,7 +1286,7 @@ def _get_level_number(self, level): count = self.names.count(level) if (count > 1) and not is_integer(level): raise ValueError( - "The name %s occurs multiple times, use a " "level number" % level + "The name %s occurs multiple times, use a level number" % level ) try: level = self.names.index(level) @@ -1399,8 +1405,6 @@ def _inferred_type_levels(self): @cache_readonly def _hashed_values(self): """ return a uint64 ndarray of my hashed values """ - from pandas.core.util.hashing import hash_tuples - return hash_tuples(self) def _hashed_indexing_key(self, key): @@ -1420,9 +1424,7 @@ def _hashed_indexing_key(self, key): Notes ----- we need to stringify if we have mixed levels - """ - from pandas.core.util.hashing import hash_tuples, hash_tuple if not isinstance(key, tuple): return hash_tuples(key) @@ -1442,9 +1444,6 @@ def f(k, stringify): @Appender(Index.duplicated.__doc__) def duplicated(self, keep="first"): - from pandas.core.sorting import get_group_index - from pandas._libs.hashtable import duplicated_int64 - shape = map(len, self.levels) ids = get_group_index(self.codes, shape, sort=False, xnull=False) @@ -1636,11 +1635,11 @@ def to_frame(self, index=True, name=None): if name is not None: if not is_list_like(name): - raise TypeError("'name' must be a list / sequence " "of column names.") + raise TypeError("'name' must be a list / sequence of column names.") if len(name) != len(self.levels): raise ValueError( - "'name' should have same length as " "number of levels on index." + "'name' should have same length as number of levels on index." ) idx_names = name else: @@ -2107,9 +2106,7 @@ def repeat(self, repeats, axis=None): ) def where(self, cond, other=None): - raise NotImplementedError( - ".where is not supported for " "MultiIndex operations" - ) + raise NotImplementedError(".where is not supported for MultiIndex operations") @deprecate_kwarg(old_arg_name="labels", new_arg_name="codes") def drop(self, codes, level=None, errors="raise"): @@ -2274,7 +2271,6 @@ def _get_codes_for_sorting(self): for sorting, where we need to disambiguate that -1 is not a valid valid """ - from pandas.core.arrays import Categorical def cats(level_codes): return np.arange( @@ -2309,8 +2305,6 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True): indexer : np.ndarray Indices of output values in original index. 
""" - from pandas.core.sorting import indexer_from_factorized - if isinstance(level, (str, int)): level = [level] level = [self._get_level_number(lev) for lev in level] @@ -2321,8 +2315,6 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True): if not len(level) == len(ascending): raise ValueError("level must have same length as ascending") - from pandas.core.sorting import lexsort_indexer - indexer = lexsort_indexer( [self.codes[lev] for lev in level], orders=ascending ) @@ -2419,14 +2411,12 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): ) if not self.is_unique: - raise ValueError( - "Reindexing only valid with uniquely valued " "Index objects" - ) + raise ValueError("Reindexing only valid with uniquely valued Index objects") if method == "pad" or method == "backfill": if tolerance is not None: raise NotImplementedError( - "tolerance not implemented yet " "for MultiIndex" + "tolerance not implemented yet for MultiIndex" ) indexer = self._engine.get_indexer(target, method, limit) elif method == "nearest": @@ -2766,7 +2756,7 @@ def maybe_mi_droplevels(indexer, levels, drop_level: bool): if isinstance(level, (tuple, list)): if len(key) != len(level): raise AssertionError( - "Key for location must have same " "length as number of levels" + "Key for location must have same length as number of levels" ) result = None for lev, k in zip(level, key): @@ -3323,7 +3313,7 @@ def astype(self, dtype, copy=True): raise NotImplementedError(msg) elif not is_object_dtype(dtype): msg = ( - "Setting {cls} dtype to anything other than object " "is not supported" + "Setting {cls} dtype to anything other than object is not supported" ).format(cls=self.__class__) raise TypeError(msg) elif copy is True: @@ -3369,7 +3359,7 @@ def insert(self, loc, item): if not isinstance(item, tuple): item = (item,) + ("",) * (self.nlevels - 1) elif len(item) != self.nlevels: - raise ValueError("Item must have length equal to number of " "levels.") + raise ValueError("Item must have length equal to number of levels.") new_levels = [] new_codes = [] diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index daf26d53aa6e2..1a1f8ae826ca7 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -99,7 +99,7 @@ def _convert_for_op(self, value): def _convert_tolerance(self, tolerance, target): tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: - raise ValueError("list-like tolerance size must match " "target index size") + raise ValueError("list-like tolerance size must match target index size") if not np.issubdtype(tolerance.dtype, np.number): if tolerance.ndim > 0: raise ValueError( @@ -255,7 +255,7 @@ def _assert_safe_casting(cls, data, subarr): """ if not issubclass(data.dtype.type, np.signedinteger): if not np.array_equal(data, subarr): - raise TypeError("Unsafe NumPy casting, you must " "explicitly cast") + raise TypeError("Unsafe NumPy casting, you must explicitly cast") def _is_compatible_with_other(self, other): return super()._is_compatible_with_other(other) or all( @@ -329,7 +329,7 @@ def _assert_safe_casting(cls, data, subarr): """ if not issubclass(data.dtype.type, np.unsignedinteger): if not np.array_equal(data, subarr): - raise TypeError("Unsafe NumPy casting, you must " "explicitly cast") + raise TypeError("Unsafe NumPy casting, you must explicitly cast") def _is_compatible_with_other(self, other): return super()._is_compatible_with_other(other) or all( diff --git 
a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 04a858c8bfbdf..19fe1eb897f19 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -805,7 +805,7 @@ def _parsed_string_to_bounds(self, reso, parsed): def _get_string_slice(self, key): if not self.is_monotonic: - raise ValueError("Partial indexing only valid for " "ordered time series") + raise ValueError("Partial indexing only valid for ordered time series") key, parsed, reso = parse_time_string(key, self.freq) grp = resolution.Resolution.get_freq_group(reso) @@ -822,7 +822,7 @@ def _get_string_slice(self, key): def _convert_tolerance(self, tolerance, target): tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance, target) if target.size != tolerance.size and tolerance.size > 1: - raise ValueError("list-like tolerance size must match " "target index size") + raise ValueError("list-like tolerance size must match target index size") return self._maybe_convert_timedelta(tolerance) def insert(self, loc, item): @@ -935,7 +935,7 @@ def item(self): """ warnings.warn( - "`item` has been deprecated and will be removed in a " "future version", + "`item` has been deprecated and will be removed in a future version", FutureWarning, stacklevel=2, ) @@ -943,10 +943,9 @@ def item(self): if len(self) == 1: return self[0] else: + # TODO: is this still necessary? # copy numpy's message here because Py26 raises an IndexError - raise ValueError( - "can only convert an array of size 1 to a " "Python scalar" - ) + raise ValueError("can only convert an array of size 1 to a Python scalar") @property def data(self): diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 413132db195f6..6f2e264f1a4d0 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -5,6 +5,7 @@ from pandas.util._decorators import Appender from pandas.core.dtypes.common import is_extension_type, is_list_like +from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ABCMultiIndex from pandas.core.dtypes.missing import notna @@ -171,8 +172,6 @@ def lreshape(data, groups, dropna=True, label=None): for target, names in zip(keys, values): to_concat = [data[col].values for col in names] - from pandas.core.dtypes.concat import concat_compat - mdata[target] = concat_compat(to_concat) pivot_cols.append(target) diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 456f99a806cc5..374de6156c807 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -3,8 +3,8 @@ import numpy as np -import pandas._libs.algos as _algos -import pandas._libs.reshape as _reshape +import pandas._libs.algos as libalgos +import pandas._libs.reshape as libreshape from pandas._libs.sparse import IntIndex from pandas.core.dtypes.cast import maybe_promote @@ -150,7 +150,7 @@ def _make_sorted_values_labels(self): comp_index, obs_ids = get_compressed_ids(to_sort, sizes) ngroups = len(obs_ids) - indexer = _algos.groupsort_indexer(comp_index, ngroups)[0] + indexer = libalgos.groupsort_indexer(comp_index, ngroups)[0] indexer = ensure_platform_int(indexer) self.sorted_values = algos.take_nd(self.values, indexer, axis=0) @@ -239,7 +239,7 @@ def get_new_values(self): sorted_values = sorted_values.astype(name, copy=False) # fill in our values & mask - f = getattr(_reshape, "unstack_{name}".format(name=name)) + f = getattr(libreshape, "unstack_{name}".format(name=name)) f( sorted_values, mask.view("u1"), diff --git a/pandas/core/reshape/tile.py 
b/pandas/core/reshape/tile.py index d1bdbdf51e9f5..949cad6073913 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -5,6 +5,7 @@ import numpy as np +from pandas._libs import Timedelta, Timestamp from pandas._libs.lib import infer_dtype from pandas.core.dtypes.common import ( @@ -26,8 +27,6 @@ Interval, IntervalIndex, Series, - Timedelta, - Timestamp, to_datetime, to_timedelta, )
well, more of them
https://api.github.com/repos/pandas-dev/pandas/pulls/27647
2019-07-29T20:47:54Z
2019-07-30T07:20:25Z
2019-07-30T07:20:25Z
2019-07-30T14:33:22Z
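The hunks in this cleanup PR collapse Python's implicit adjacent-string-literal concatenation left behind by earlier reformatting. A minimal illustration, using a message taken verbatim from the diff above:

```python
# Adjacent string literals are concatenated at compile time, so both
# lines below build the identical message; the cleanup keeps the single
# literal, which is easier to grep for.
before = "can only insert Interval objects and NA into " "an IntervalIndex"
after = "can only insert Interval objects and NA into an IntervalIndex"
assert before == after
```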
TYPING: add some type hints to core.generic
diff --git a/pandas/_typing.py b/pandas/_typing.py index 37a5d7945955d..efb5fdb5c1fdd 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -28,6 +28,7 @@ Scalar = Union[str, int, float] Axis = Union[str, int] Ordered = Optional[bool] +Level = Union[str, int] # to maintain type information across generic functions and parametrization _T = TypeVar("_T") diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6ade69fb4ca9d..05ea2c5d42be4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8,14 +8,19 @@ import re from textwrap import dedent from typing import ( + TYPE_CHECKING, + Any, Callable, Dict, FrozenSet, Hashable, + Iterable, List, Optional, Sequence, Set, + Tuple, + Type, Union, ) import warnings @@ -60,7 +65,7 @@ from pandas.core.dtypes.missing import isna, notna import pandas as pd -from pandas._typing import Dtype, FilePathOrBuffer +from pandas._typing import _T, Axis, Dtype, FilePathOrBuffer, FrameOrSeries, Level from pandas.core import missing, nanops import pandas.core.algorithms as algos from pandas.core.base import PandasObject, SelectionMixin @@ -83,6 +88,9 @@ from pandas.io.formats.printing import pprint_thing from pandas.tseries.frequencies import to_offset +if TYPE_CHECKING: + from pandas import Series, DataFrame + # goal is to be able to define the docs close to function, while still being # able to share _shared_docs = dict() # type: Dict[str, str] @@ -170,6 +178,12 @@ class NDFrame(PandasObject, SelectionMixin): _metadata = [] # type: List[str] _is_copy = None _data = None # type: BlockManager + if TYPE_CHECKING: + _AXIS_ALIASES = None # type: Dict[str, int] + _AXIS_NAMES = None # type: Dict[int, str] + _AXIS_NUMBERS = None # type: Dict[str, int] + _AXIS_REVERSED = None + _AXIS_LEN = None # type: int # ---------------------------------------------------------------------- # Constructors @@ -197,7 +211,9 @@ def __init__( object.__setattr__(self, "_data", data) object.__setattr__(self, "_item_cache", {}) - def _init_mgr(self, mgr, axes=None, dtype=None, copy=False): + def _init_mgr( + self, mgr: BlockManager, axes: Dict[str, Any], dtype=None, copy: bool = False + ) -> BlockManager: """ passed a manager and a axes dict """ for a, axe in axes.items(): if axe is not None: @@ -258,7 +274,7 @@ def _validate_dtype(self, dtype): # Construction @property - def _constructor(self): + def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]: """Used when a manipulation result has the same dimensions as the original. """ @@ -346,21 +362,27 @@ def set_axis(a, i): assert not isinstance(ns, dict) - def _construct_axes_dict(self, axes=None, **kwargs): + def _construct_axes_dict( + self, axes: Optional[Iterable[str]] = None, **kwargs + ) -> Dict[str, Index]: """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} d.update(kwargs) return d @staticmethod - def _construct_axes_dict_from(self, axes, **kwargs): + def _construct_axes_dict_from(self, axes, **kwargs) -> Dict[str, Index]: """Return an axes dictionary for the passed axes.""" d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)} d.update(kwargs) return d def _construct_axes_from_arguments( - self, args, kwargs, require_all=False, sentinel=None + self, + args, + kwargs: Dict[str, Any], + require_all: bool = False, + sentinel: Optional[object] = None, ): """Construct and returns axes if supplied in args/kwargs. 
@@ -402,7 +424,9 @@ def _construct_axes_from_arguments( return axes, kwargs @classmethod - def _from_axes(cls, data, axes, **kwargs): + def _from_axes( + cls: Type[FrameOrSeries], data: BlockManager, axes, **kwargs + ) -> FrameOrSeries: # for construction from BlockManager if isinstance(data, BlockManager): return cls(data, **kwargs) @@ -414,7 +438,7 @@ def _from_axes(cls, data, axes, **kwargs): return cls(data, **d) @classmethod - def _get_axis_number(cls, axis): + def _get_axis_number(cls, axis) -> int: axis = cls._AXIS_ALIASES.get(axis, axis) if is_integer(axis): if axis in cls._AXIS_NAMES: @@ -427,7 +451,7 @@ def _get_axis_number(cls, axis): raise ValueError("No axis named {0} for object type {1}".format(axis, cls)) @classmethod - def _get_axis_name(cls, axis): + def _get_axis_name(cls, axis) -> str: axis = cls._AXIS_ALIASES.get(axis, axis) if isinstance(axis, str): if axis in cls._AXIS_NUMBERS: @@ -439,12 +463,12 @@ def _get_axis_name(cls, axis): pass raise ValueError("No axis named {0} for object type {1}".format(axis, cls)) - def _get_axis(self, axis): + def _get_axis(self, axis: Axis) -> Index: name = self._get_axis_name(axis) return getattr(self, name) @classmethod - def _get_block_manager_axis(cls, axis): + def _get_block_manager_axis(cls, axis: Axis) -> int: """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) if cls._AXIS_REVERSED: @@ -452,7 +476,7 @@ def _get_block_manager_axis(cls, axis): return m - axis return axis - def _get_axis_resolvers(self, axis): + def _get_axis_resolvers(self, axis: str) -> Dict[str, Union["Series", MultiIndex]]: # index or columns axis_index = getattr(self, axis) d = dict() @@ -482,13 +506,15 @@ def _get_axis_resolvers(self, axis): d[axis] = dindex return d - def _get_index_resolvers(self): + def _get_index_resolvers(self) -> Dict[str, Union["Series", MultiIndex]]: d = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return d - def _get_space_character_free_column_resolvers(self): + def _get_space_character_free_column_resolvers( + self + ) -> Dict[Union[int, str], "Series"]: """Return the space character free column resolvers of a dataframe. Column names with spaces are 'cleaned up' so that they can be referred @@ -500,7 +526,7 @@ def _get_space_character_free_column_resolvers(self): return {_remove_spaces_column_name(k): v for k, v in self.items()} @property - def _info_axis(self): + def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) @property @@ -571,12 +597,12 @@ def size(self): return np.prod(self.shape) @property - def _selected_obj(self): + def _selected_obj(self: FrameOrSeries) -> FrameOrSeries: """ internal compat with SelectionMixin """ return self @property - def _obj_with_exclusions(self): + def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries: """ internal compat with SelectionMixin """ return self @@ -679,7 +705,7 @@ def set_axis(self, labels, axis=0, inplace=False): obj.set_axis(labels, axis=axis, inplace=True) return obj - def _set_axis(self, axis, labels): + def _set_axis(self, axis: int, labels) -> None: self._data.set_axis(axis, labels) self._clear_item_cache() @@ -1334,7 +1360,12 @@ class name if not inplace: return result - def _set_axis_name(self, name, axis=0, inplace=False): + def _set_axis_name( + self: FrameOrSeries, + name: Optional[Union[List[str], str]], + axis: Axis = 0, + inplace: bool = False, + ) -> Optional[FrameOrSeries]: """ Set the name(s) of the axis. 
@@ -1394,11 +1425,12 @@ def _set_axis_name(self, name, axis=0, inplace=False): renamed.set_axis(idx, axis=axis, inplace=True) if not inplace: return renamed + return None # ---------------------------------------------------------------------- # Comparison Methods - def _indexed_same(self, other): + def _indexed_same(self: FrameOrSeries, other: FrameOrSeries) -> bool: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) @@ -1492,7 +1524,7 @@ def equals(self, other): # ------------------------------------------------------------------------- # Unary Methods - def __neg__(self): + def __neg__(self: FrameOrSeries) -> FrameOrSeries: values = com.values_from_object(self) if is_bool_dtype(values): arr = operator.inv(values) @@ -1508,7 +1540,7 @@ def __neg__(self): ) return self.__array_wrap__(arr) - def __pos__(self): + def __pos__(self: FrameOrSeries) -> FrameOrSeries: values = com.values_from_object(self) if is_bool_dtype(values) or is_period_arraylike(values): arr = values @@ -1524,7 +1556,7 @@ def __pos__(self): ) return self.__array_wrap__(arr) - def __invert__(self): + def __invert__(self: FrameOrSeries) -> FrameOrSeries: try: arr = operator.inv(com.values_from_object(self)) return self.__array_wrap__(arr) @@ -1570,10 +1602,10 @@ def bool(self): self.__nonzero__() - def __abs__(self): + def __abs__(self: FrameOrSeries) -> FrameOrSeries: return self.abs() - def __round__(self, decimals=0): + def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries: return self.round(decimals) # ------------------------------------------------------------------------- @@ -1584,7 +1616,7 @@ def __round__(self, decimals=0): # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. - def _is_level_reference(self, key, axis=0): + def _is_level_reference(self, key, axis: Axis = 0) -> bool_t: """ Test whether a key is a level reference for a given axis. @@ -1614,7 +1646,7 @@ def _is_level_reference(self, key, axis=0): and not self._is_label_reference(key, axis=axis) ) - def _is_label_reference(self, key, axis=0): + def _is_label_reference(self, key, axis: Axis = 0) -> bool_t: """ Test whether a key is a label reference for a given axis. @@ -1643,7 +1675,7 @@ def _is_label_reference(self, key, axis=0): and any(key in self.axes[ax] for ax in other_axes) ) - def _is_label_or_level_reference(self, key, axis=0): + def _is_label_or_level_reference(self, key: str, axis: Axis = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. @@ -1667,7 +1699,7 @@ def _is_label_or_level_reference(self, key, axis=0): key, axis=axis ) - def _check_label_or_level_ambiguity(self, key, axis=0): + def _check_label_or_level_ambiguity(self, key, axis: Axis = 0) -> None: """ Check whether `key` is ambiguous. @@ -1716,7 +1748,7 @@ def _check_label_or_level_ambiguity(self, key, axis=0): ) raise ValueError(msg) - def _get_label_or_level_values(self, key, axis=0): + def _get_label_or_level_values(self, key, axis: Axis = 0): """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. @@ -1788,7 +1820,9 @@ def _get_label_or_level_values(self, key, axis=0): return values - def _drop_labels_or_levels(self, keys, axis=0): + def _drop_labels_or_levels( + self: FrameOrSeries, keys: Union[List[str], str], axis: Axis = 0 + ) -> FrameOrSeries: """ Drop labels and/or levels for the given `axis`. 
@@ -1919,7 +1953,7 @@ def __len__(self): """Returns length of info axis""" return len(self._info_axis) - def __contains__(self, key): + def __contains__(self, key) -> bool_t: """True if the key is in the info axis""" return key in self._info_axis @@ -1982,9 +2016,10 @@ def empty(self): def __array__(self, dtype=None): return com.values_from_object(self) - def __array_wrap__(self, result, context=None): + def __array_wrap__(self: FrameOrSeries, result, context=None) -> FrameOrSeries: d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) - return self._constructor(result, **d).__finalize__(self) + # https://github.com/python/mypy/issues/5382 + return self._constructor(result, **d).__finalize__(self) # type: ignore # ideally we would define this to avoid the getattr checks, but # is slower @@ -2017,11 +2052,11 @@ def to_dense(self): # ---------------------------------------------------------------------- # Picklability - def __getstate__(self): + def __getstate__(self) -> Dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return dict(_data=self._data, _typ=self._typ, _metadata=self._metadata, **meta) - def __setstate__(self, state): + def __setstate__(self, state) -> None: if isinstance(state, BlockManager): self._data = state @@ -2058,7 +2093,7 @@ def __setstate__(self, state): # old pickling format, for compatibility self._unpickle_matrix_compat(state) - self._item_cache = {} + self._item_cache = {} # type: Dict # ---------------------------------------------------------------------- # Rendering Methods @@ -2069,7 +2104,7 @@ def __repr__(self): prepr = "[%s]" % ",".join(map(pprint_thing, self)) return "%s(%s)" % (self.__class__.__name__, prepr) - def _repr_latex_(self): + def _repr_latex_(self) -> Optional[str]: """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). @@ -2079,7 +2114,7 @@ def _repr_latex_(self): else: return None - def _repr_data_resource_(self): + def _repr_data_resource_(self) -> Optional[Dict]: """ Not a real Jupyter special repr method, but we use the same naming convention. @@ -2090,6 +2125,7 @@ def _repr_data_resource_(self): data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict ) return payload + return None # ---------------------------------------------------------------------- # I/O Methods @@ -3244,35 +3280,37 @@ def _create_indexer(cls, name, indexer): # ---------------------------------------------------------------------- # Lookup Caching - def _set_as_cached(self, item, cacher): + def _set_as_cached(self, item, cacher) -> None: """Set the _cacher attribute on the calling object with a weakref to cacher. """ self._cacher = (item, weakref.ref(cacher)) - def _reset_cacher(self): + def _reset_cacher(self) -> None: """Reset the cacher.""" if hasattr(self, "_cacher"): del self._cacher - def _maybe_cache_changed(self, item, value): + def _maybe_cache_changed(self, item: Union[str, int], value) -> None: """The object has called back to us saying maybe it has changed. 
""" self._data.set(item, value) @property - def _is_cached(self): + def _is_cached(self) -> bool_t: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None - def _get_cacher(self): + def _get_cacher(self: FrameOrSeries) -> Optional[FrameOrSeries]: """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher - def _maybe_update_cacher(self, clear=False, verify_is_copy=True): + def _maybe_update_cacher( + self, clear: bool_t = False, verify_is_copy: bool_t = True + ) -> None: """ See if we need to update our parent cacher if clear, then clear our cache. @@ -3305,7 +3343,7 @@ def _maybe_update_cacher(self, clear=False, verify_is_copy=True): if clear: self._clear_item_cache() - def _clear_item_cache(self): + def _clear_item_cache(self) -> None: self._item_cache.clear() # ---------------------------------------------------------------------- @@ -3565,10 +3603,10 @@ class animal locomotion _xs = xs # type: Callable - def __getitem__(self, item): + def __getitem__(self, item: int): raise AbstractMethodError(self) - def _get_item_cache(self, item): + def _get_item_cache(self, item: int): """Return the cached item, item represents a label indexer.""" cache = self._item_cache res = cache.get(item) @@ -3582,7 +3620,7 @@ def _get_item_cache(self, item): res._is_copy = self._is_copy return res - def _iget_item_cache(self, item): + def _iget_item_cache(self, item: int): """Return the cached item, item represents a positional indexer.""" ax = self._info_axis if ax.is_unique: @@ -3594,7 +3632,9 @@ def _iget_item_cache(self, item): def _box_item_values(self, key, values): raise AbstractMethodError(self) - def _slice(self, slobj: slice, axis=0, kind=None): + def _slice( + self: FrameOrSeries, slobj: slice, axis: Axis = 0, kind: Optional[str] = None + ) -> FrameOrSeries: """ Construct a slice of this container. @@ -3610,11 +3650,11 @@ def _slice(self, slobj: slice, axis=0, kind=None): result._set_is_copy(self, copy=is_copy) return result - def _set_item(self, key, value): + def _set_item(self, key, value) -> None: self._data.set(key, value) self._clear_item_cache() - def _set_is_copy(self, ref=None, copy=True): + def _set_is_copy(self, ref=None, copy: bool_t = True) -> None: if not copy: self._is_copy = None else: @@ -3623,7 +3663,7 @@ def _set_is_copy(self, ref=None, copy=True): else: self._is_copy = None - def _check_is_chained_assignment_possible(self): + def _check_is_chained_assignment_possible(self) -> bool_t: """ Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. @@ -3720,7 +3760,7 @@ def _check_setitem_copy(self, stacklevel=4, t="setting", force=False): elif value == "warn": warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel) - def __delitem__(self, key): + def __delitem__(self, key) -> None: """ Delete item """ @@ -3777,7 +3817,7 @@ def get(self, key, default=None): return default @property - def _is_view(self): + def _is_view(self) -> bool_t: """Return boolean indicating if self is view of another array """ return self._data.is_view @@ -3922,7 +3962,13 @@ def drop( else: return obj - def _drop_axis(self, labels, axis, level=None, errors="raise"): + def _drop_axis( + self: FrameOrSeries, + labels, + axis: Axis, + level: Optional[Level] = None, + errors: str = "raise", + ) -> FrameOrSeries: """ Drop labels from specified axis. Used in the ``drop`` method internally. 
@@ -3939,32 +3985,32 @@ def _drop_axis(self, labels, axis, level=None, errors="raise"): """ axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) - axis = self._get_axis(axis) + ax = self._get_axis(axis) - if axis.is_unique: + if ax.is_unique: if level is not None: - if not isinstance(axis, MultiIndex): + if not isinstance(ax, MultiIndex): raise AssertionError("axis must be a MultiIndex") - new_axis = axis.drop(labels, level=level, errors=errors) + new_axis = ax.drop(labels, level=level, errors=errors) else: - new_axis = axis.drop(labels, errors=errors) + new_axis = ax.drop(labels, errors=errors) result = self.reindex(**{axis_name: new_axis}) # Case for non-unique axis else: labels = ensure_object(com.index_labels_to_array(labels)) if level is not None: - if not isinstance(axis, MultiIndex): + if not isinstance(ax, MultiIndex): raise AssertionError("axis must be a MultiIndex") - indexer = ~axis.get_level_values(level).isin(labels) + indexer = ~ax.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == "raise" and indexer.all(): raise KeyError("{} not found in axis".format(labels)) else: - indexer = ~axis.isin(labels) + indexer = ~ax.isin(labels) # Check if label doesn't exist along axis - labels_missing = (axis.get_indexer_for(labels) == -1).any() + labels_missing = (ax.get_indexer_for(labels) == -1).any() if errors == "raise" and labels_missing: raise KeyError("{} not found in axis".format(labels)) @@ -3975,7 +4021,11 @@ def _drop_axis(self, labels, axis, level=None, errors="raise"): return result - def _update_inplace(self, result, verify_is_copy=True): + def _update_inplace( + self: FrameOrSeries, + result: Union[BlockManager, FrameOrSeries], + verify_is_copy: bool_t = True, + ) -> None: """ Replace self internals with result. 
@@ -4517,7 +4567,16 @@ def reindex(self, *args, **kwargs): axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self) - def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): + def _reindex_axes( + self: FrameOrSeries, + axes: Dict[str, Any], + level: Optional[Level], + limit: Optional[int], + tolerance, + method: Optional[str], + fill_value, + copy: bool_t, + ) -> FrameOrSeries: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: @@ -4540,7 +4599,9 @@ def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy) return obj - def _needs_reindex_multi(self, axes, method, level): + def _needs_reindex_multi( + self, axes: Dict[str, Any], method: Optional[str], level: Optional[Level] + ) -> bool_t: """Check if we do need a multi reindex.""" return ( (com.count_not_none(*axes.values()) == self._AXIS_LEN) @@ -4553,8 +4614,12 @@ def _reindex_multi(self, axes, copy, fill_value): return NotImplemented def _reindex_with_indexers( - self, reindexers, fill_value=None, copy=False, allow_dups=False - ): + self: FrameOrSeries, + reindexers, + fill_value=None, + copy: bool_t = False, + allow_dups: bool_t = False, + ) -> FrameOrSeries: """allow_dups indicates an internal call here """ # reindex doing multiple operations on different axes if indicated @@ -5143,7 +5208,12 @@ def pipe(self, func, *args, **kwargs): # ---------------------------------------------------------------------- # Attribute access - def __finalize__(self, other, method=None, **kwargs): + def __finalize__( + self: FrameOrSeries, + other: FrameOrSeries, + method: Optional[str] = None, + **kwargs + ) -> FrameOrSeries: """ Propagate metadata from other to self. @@ -5179,7 +5249,7 @@ def __getattr__(self, name): return self[name] return object.__getattribute__(self, name) - def __setattr__(self, name, value): + def __setattr__(self, name: str, value) -> None: """After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ @@ -5220,7 +5290,7 @@ def __setattr__(self, name, value): ) object.__setattr__(self, name, value) - def _dir_additions(self): + def _dir_additions(self) -> Set[str]: """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, it's first level values are used. """ @@ -5234,7 +5304,7 @@ def _dir_additions(self): # ---------------------------------------------------------------------- # Consolidation of internals - def _protect_consolidate(self, f): + def _protect_consolidate(self, f: Callable[..., _T]) -> _T: """Consolidate _data -- if the blocks have changed, then clear the cache """ @@ -5244,7 +5314,7 @@ def _protect_consolidate(self, f): self._clear_item_cache() return result - def _consolidate_inplace(self): + def _consolidate_inplace(self) -> None: """Consolidate data in place and return None""" def f(): @@ -5252,7 +5322,9 @@ def f(): self._protect_consolidate(f) - def _consolidate(self, inplace=False): + def _consolidate( + self: FrameOrSeries, inplace: bool_t = False + ) -> Optional[FrameOrSeries]: """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). 
@@ -5269,27 +5341,28 @@ def _consolidate(self, inplace=False): inplace = validate_bool_kwarg(inplace, "inplace") if inplace: self._consolidate_inplace() + return None else: f = lambda: self._data.consolidate() cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self) @property - def _is_mixed_type(self): + def _is_mixed_type(self) -> bool_t: f = lambda: self._data.is_mixed_type return self._protect_consolidate(f) @property - def _is_numeric_mixed_type(self): + def _is_numeric_mixed_type(self) -> bool_t: f = lambda: self._data.is_numeric_mixed_type return self._protect_consolidate(f) @property - def _is_datelike_mixed_type(self): + def _is_datelike_mixed_type(self) -> bool_t: f = lambda: self._data.is_datelike_mixed_type return self._protect_consolidate(f) - def _check_inplace_setting(self, value): + def _check_inplace_setting(self, value) -> bool_t: """ check whether we allow in-place setting with this type of value """ if self._is_mixed_type: @@ -5309,10 +5382,10 @@ def _check_inplace_setting(self, value): return True - def _get_numeric_data(self): + def _get_numeric_data(self: FrameOrSeries) -> FrameOrSeries: return self._constructor(self._data.get_numeric_data()).__finalize__(self) - def _get_bool_data(self): + def _get_bool_data(self: FrameOrSeries) -> FrameOrSeries: return self._constructor(self._data.get_bool_data()).__finalize__(self) # ---------------------------------------------------------------------- @@ -5729,7 +5802,9 @@ def blocks(self): """ return self.as_blocks() - def _to_dict_of_blocks(self, copy=True): + def _to_dict_of_blocks( + self: FrameOrSeries, copy: bool_t = True + ) -> Dict[str, FrameOrSeries]: """ Return a dict of dtype -> Constructor Types that each is a homogeneous dtype. @@ -5998,10 +6073,10 @@ def copy(self, deep=True): data = self._data.copy(deep=deep) return self._constructor(data).__finalize__(self) - def __copy__(self, deep=True): + def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries: return self.copy(deep=deep) - def __deepcopy__(self, memo=None): + def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries: """ Parameters ---------- @@ -6013,8 +6088,13 @@ def __deepcopy__(self, memo=None): return self.copy(deep=True) def _convert( - self, datetime=False, numeric=False, timedelta=False, coerce=False, copy=True - ): + self: FrameOrSeries, + datetime: bool_t = False, + numeric: bool_t = False, + timedelta: bool_t = False, + coerce: bool_t = False, + copy: bool_t = True, + ) -> FrameOrSeries: """ Attempt to infer better dtype for object columns @@ -7378,7 +7458,12 @@ def notna(self): def notnull(self): return notna(self).__finalize__(self) - def _clip_with_scalar(self, lower, upper, inplace=False): + def _clip_with_scalar( + self: FrameOrSeries, + lower: Optional[float], + upper: Optional[float], + inplace: bool_t = False, + ) -> Optional[FrameOrSeries]: if (lower is not None and np.any(isna(lower))) or ( upper is not None and np.any(isna(upper)) ): @@ -7400,10 +7485,17 @@ def _clip_with_scalar(self, lower, upper, inplace=False): if inplace: self._update_inplace(result) + return None else: return result - def _clip_with_one_bound(self, threshold, method, axis, inplace): + def _clip_with_one_bound( + self: FrameOrSeries, + threshold, + method: Callable, + axis: Optional[Axis], + inplace: bool_t, + ) -> Optional[FrameOrSeries]: if axis is not None: axis = self._get_axis_number(axis) @@ -8808,16 +8900,16 @@ def align( def _align_frame( self, - other, - join="outer", - axis=None, - level=None, - 
copy=True, + other: "DataFrame", + join: str = "outer", + axis: Optional[int] = None, + level: Optional[Level] = None, + copy: bool_t = True, fill_value=None, - method=None, - limit=None, - fill_axis=0, - ): + method: Optional[str] = None, + limit: Optional[int] = None, + fill_axis: Axis = 0, + ) -> Tuple["NDFrame", "NDFrame"]: # defaults join_index, join_columns = None, None ilidx, iridx = None, None @@ -8868,16 +8960,16 @@ def _align_frame( def _align_series( self, - other, - join="outer", - axis=None, - level=None, - copy=True, + other: "Series", + join: str = "outer", + axis: Optional[int] = None, + level: Optional[Level] = None, + copy: bool_t = True, fill_value=None, method=None, limit=None, - fill_axis=0, - ): + fill_axis: Axis = 0, + ) -> Tuple["NDFrame", "NDFrame"]: is_series = isinstance(self, ABCSeries) @@ -8951,15 +9043,15 @@ def _align_series( return left.__finalize__(self), right.__finalize__(other) def _where( - self, + self: FrameOrSeries, cond, other=np.nan, - inplace=False, - axis=None, - level=None, - errors="raise", - try_cast=False, - ): + inplace: bool_t = False, + axis: Optional[Axis] = None, + level: Optional[Level] = None, + errors: str = "raise", + try_cast: bool_t = False, + ) -> Optional[FrameOrSeries]: """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. @@ -8975,7 +9067,10 @@ def _where( cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") - cond = self._constructor(cond, **self._construct_axes_dict()) + # https://github.com/python/mypy/issues/5382 + cond = self._constructor( # type: ignore + cond, **self._construct_axes_dict() + ) # make sure we are boolean fill_value = bool(inplace) @@ -9067,7 +9162,10 @@ def _where( # we are the same shape, so create an actual object for alignment else: - other = self._constructor(other, **self._construct_axes_dict()) + # https://github.com/python/mypy/issues/5382 + other = self._constructor( # type: ignore + other, **self._construct_axes_dict() + ) if axis is None: axis = 0 @@ -9093,6 +9191,7 @@ def _where( transpose=self._AXIS_REVERSED, ) self._update_inplace(new_data) + return None else: new_data = self._data.where( @@ -10407,7 +10506,14 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwar np.putmask(rs.values, mask, np.nan) return rs - def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs): + def _agg_by_level( + self: FrameOrSeries, + name: str, + axis: Axis = 0, + level: Level = 0, + skipna: bool_t = True, + **kwargs + ) -> FrameOrSeries: if axis is None: raise ValueError("Must specify 'axis' when aggregating by level.") grouped = self.groupby(level=level, axis=axis, sort=False) @@ -10817,7 +10923,7 @@ def transform(self, func, *args, **kwargs): Also returns None for empty %(klass)s. """ - def _find_valid_index(self, how): + def _find_valid_index(self, how: str): """ Retrieves the index of the first valid value. 
diff --git a/pandas/core/series.py b/pandas/core/series.py index 6fb39c422de93..3ae0da97984f6 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -54,6 +54,7 @@ ) import pandas as pd +from pandas._typing import Axis from pandas.core import algorithms, base, generic, nanops, ops from pandas.core.accessor import CachedAccessor from pandas.core.arrays import ExtensionArray, SparseArray @@ -1074,7 +1075,7 @@ def _ixs(self, i: int, axis: int = 0): else: return values[i] - def _slice(self, slobj: slice, axis: int = 0, kind=None): + def _slice(self, slobj: slice, axis: Axis = 0, kind=None): slobj = self.index._convert_slice_indexer(slobj, kind=kind or "getitem") return self._get_values(slobj) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 8ff4b9bda0430..ee97800f220e2 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -692,7 +692,7 @@ def _to_str_columns(self) -> List[List[str]]: if not is_list_like(self.header) and not self.header: stringified = [] - for i, c in enumerate(frame): + for i, _ in enumerate(frame): fmt_values = self._format_col(i) fmt_values = _make_fixed_width( fmt_values, @@ -723,7 +723,7 @@ def _to_str_columns(self) -> List[List[str]]: x.append("") stringified = [] - for i, c in enumerate(frame): + for i, _ in enumerate(frame): cheader = str_columns[i] header_colwidth = max( self.col_space or 0, *(self.adj.len(x) for x in cheader)
precursor to #27527
https://api.github.com/repos/pandas-dev/pandas/pulls/27646
2019-07-29T19:42:47Z
2019-08-30T15:26:41Z
null
2019-09-12T19:36:00Z
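Many of the annotations in this PR use the `def method(self: FrameOrSeries) -> FrameOrSeries` pattern so methods defined on `NDFrame` are typed as returning the caller's own subclass. A generic sketch of that pattern follows; the class and method names here are illustrative, not pandas internals:

```python
from typing import TypeVar

T = TypeVar("T", bound="Base")

class Base:
    def identity_copy(self: T) -> T:
        # Annotating `self` with a bound TypeVar makes a type checker
        # infer the caller's own subclass as the return type.
        return self

class Sub(Base):
    pass

s: Sub = Sub().identity_copy()  # type-checks: the result is Sub, not Base
```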
BUG: Add mapping for pyqt for successful package installation
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index c7f8bb70e3461..71718928ee90a 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -187,6 +187,7 @@ Sparse Build Changes ^^^^^^^^^^^^^ +- Fixed pyqt development dependency issue because of different pyqt package name in conda and PyPI (:issue:`26838`) ExtensionArray diff --git a/environment.yml b/environment.yml index 93e8302b498a0..6d2cd701c3854 100644 --- a/environment.yml +++ b/environment.yml @@ -71,7 +71,7 @@ dependencies: - lxml # pandas.read_html - openpyxl # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile - pyarrow>=0.9.0 # pandas.read_paquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather - - pyqt # pandas.read_clipbobard + - pyqt>=5.9.2 # pandas.read_clipboard - pytables>=3.4.2 # pandas.read_hdf, DataFrame.to_hdf - python-snappy # required by pyarrow - s3fs # pandas.read_csv... when using 's3://...' path diff --git a/requirements-dev.txt b/requirements-dev.txt index e49ad10bfc99d..cf11a3ee28258 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -45,7 +45,7 @@ html5lib lxml openpyxl pyarrow>=0.9.0 -pyqt +pyqt5>=5.9.2 tables>=3.4.2 python-snappy s3fs diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py index ac73859b22598..6ae10c2cb07d2 100755 --- a/scripts/generate_pip_deps_from_conda.py +++ b/scripts/generate_pip_deps_from_conda.py @@ -20,7 +20,7 @@ EXCLUDE = {"python=3"} -RENAME = {"pytables": "tables"} +RENAME = {"pytables": "tables", "pyqt": "pyqt5"} def conda_package_to_pip(package):
- [ ] closes #26838 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27645
2019-07-29T19:32:54Z
2019-08-07T21:09:55Z
2019-08-07T21:09:54Z
2019-08-07T21:10:05Z
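The fix works because `scripts/generate_pip_deps_from_conda.py` passes each conda dependency through the `RENAME` table before writing `requirements-dev.txt`. Below is a simplified sketch of that translation step, assuming the mapping added by this PR; the real `conda_package_to_pip` handles more cases than shown here:

```python
import re

EXCLUDE = {"python=3"}
RENAME = {"pytables": "tables", "pyqt": "pyqt5"}

def conda_package_to_pip(package):
    # Simplified: drop excluded entries, split the name from any version
    # pin, and translate conda package names to their PyPI equivalents.
    if package in EXCLUDE:
        return None
    name, pin = re.match(r"([A-Za-z0-9_.-]+)(.*)", package).groups()
    return RENAME.get(name, name) + pin

assert conda_package_to_pip("pyqt>=5.9.2") == "pyqt5>=5.9.2"
assert conda_package_to_pip("pytables>=3.4.2") == "tables>=3.4.2"
```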
DOC: Add Pandas-Bokeh to pandas ecosystem page
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index b76dd3e0ff8e6..b1e3d8dc8a1ad 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -72,6 +72,17 @@ the latest web technologies. Its goal is to provide elegant, concise constructio graphics in the style of Protovis/D3, while delivering high-performance interactivity over large data to thin clients. +`Pandas-Bokeh <https://github.com/PatrikHlobil/Pandas-Bokeh>`__ provides a high level API +for Bokeh that can be loaded as a native Pandas plotting backend via + +.. code:: python + + pd.set_option("plotting.backend", "pandas_bokeh") + +It is very similar to the matplotlib plotting backend, but provides interactive +web-based charts and maps. + + `seaborn <https://seaborn.pydata.org>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This PR adds a Pandas-Bokeh reference to the Pandas ecosystem page. Best, Patrik
https://api.github.com/repos/pandas-dev/pandas/pulls/27644
2019-07-29T19:07:27Z
2019-07-30T08:26:28Z
2019-07-30T08:26:28Z
2019-07-30T09:00:28Z
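As the added docs note, the backend is selected through the pandas option system. A minimal usage sketch, assuming pandas >= 0.25 and the `pandas-bokeh` package are installed:

```python
import pandas as pd

pd.set_option("plotting.backend", "pandas_bokeh")

df = pd.DataFrame({"x": range(10), "y": range(10)})
# With the backend switched, the familiar .plot() call renders an
# interactive Bokeh chart instead of a matplotlib figure.
df.plot()
```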
Backport PR #27631 on branch 0.25.x (BUG: raise when wrong level name is passed to "unstack")
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index eb60272246ebb..fa9ca98f9c8d8 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -128,7 +128,7 @@ Groupby/resample/rolling Reshaping ^^^^^^^^^ -- +- A ``KeyError`` is now raised if ``.unstack()`` is called on a :class:`Series` or :class:`DataFrame` with a flat :class:`Index` passing a name which is not the correct one (:issue:`18303`) - - diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 74519391bac2f..12923fd790972 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1546,7 +1546,11 @@ def _validate_index_level(self, level): "Too many levels:" " Index has only 1 level, not %d" % (level + 1) ) elif level != self.name: - raise KeyError("Level %s must be same as name (%s)" % (level, self.name)) + raise KeyError( + "Requested level ({}) does not match index name ({})".format( + level, self.name + ) + ) def _get_level_number(self, level): self._validate_index_level(level) diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 540a06caec220..a24900543b81a 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -12,6 +12,7 @@ ensure_platform_int, is_bool_dtype, is_extension_array_dtype, + is_integer, is_integer_dtype, is_list_like, is_object_dtype, @@ -402,6 +403,10 @@ def unstack(obj, level, fill_value=None): else: level = level[0] + # Prioritize integer interpretation (GH #21677): + if not is_integer(level) and not level == "__placeholder__": + level = obj.index._get_level_number(level) + if isinstance(obj, DataFrame): if isinstance(obj.index, MultiIndex): return _unstack_frame(obj, level, fill_value=fill_value) diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index c57b2a6964f39..a6fd980faefcd 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -1083,7 +1083,7 @@ def test_reset_index_level(self): # Missing levels - for both MultiIndex and single-level Index: for idx_lev in ["A", "B"], ["A"]: - with pytest.raises(KeyError, match="Level E "): + with pytest.raises(KeyError, match=r"(L|l)evel \(?E\)?"): df.set_index(idx_lev).reset_index(level=["A", "E"]) with pytest.raises(IndexError, match="Too many levels"): df.set_index(idx_lev).reset_index(level=[0, 1, 2]) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index e75d80bec1fdf..c40a9bce9385b 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2004,7 +2004,7 @@ def test_isin_level_kwarg_bad_label_raises(self, label, indices): msg = "'Level {} not found'" else: index = index.rename("foo") - msg = r"'Level {} must be same as name \(foo\)'" + msg = r"Requested level \({}\) does not match index name \(foo\)" with pytest.raises(KeyError, match=msg.format(label)): index.isin([], level=label) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index 0e9aa07a4c05a..ae1a21e9b3980 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -35,7 +35,8 @@ def test_droplevel(self, indices): for level in "wrong", ["wrong"]: with pytest.raises( - KeyError, match=re.escape("'Level wrong must be same as name (None)'") + KeyError, + match=r"'Requested level \(wrong\) does not match index name \(None\)'", ): indices.droplevel(level) @@ -200,7 +201,7 @@ def test_unique(self, indices): with pytest.raises(IndexError, 
match=msg): indices.unique(level=3) - msg = r"Level wrong must be same as name \({}\)".format( + msg = r"Requested level \(wrong\) does not match index name \({}\)".format( re.escape(indices.name.__repr__()) ) with pytest.raises(KeyError, match=msg): diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 63baa6af7c02a..11add8d61deeb 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -322,9 +322,9 @@ def test_reset_index_drop_errors(self): # KeyError raised for series index when passed level name is missing s = Series(range(4)) - with pytest.raises(KeyError, match="must be same as name"): + with pytest.raises(KeyError, match="does not match index name"): s.reset_index("wrong", drop=True) - with pytest.raises(KeyError, match="must be same as name"): + with pytest.raises(KeyError, match="does not match index name"): s.reset_index("wrong") # KeyError raised for series when level to be dropped is missing diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index c97c69c323b56..dc4db6e7902a8 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -524,6 +524,22 @@ def test_stack_unstack_preserve_names(self): restacked = unstacked.stack() assert restacked.index.names == self.frame.index.names + @pytest.mark.parametrize("method", ["stack", "unstack"]) + def test_stack_unstack_wrong_level_name(self, method): + # GH 18303 - wrong level name should raise + + # A DataFrame with flat axes: + df = self.frame.loc["foo"] + + with pytest.raises(KeyError, match="does not match index name"): + getattr(df, method)("mistake") + + if method == "unstack": + # Same on a Series: + s = df.iloc[:, 0] + with pytest.raises(KeyError, match="does not match index name"): + getattr(s, method)("mistake") + def test_unstack_level_name(self): result = self.frame.unstack("second") expected = self.frame.unstack(level=1)
Backport PR #27631: BUG: raise when wrong level name is passed to "unstack"
https://api.github.com/repos/pandas-dev/pandas/pulls/27640
2019-07-29T16:56:15Z
2019-07-30T11:21:48Z
2019-07-30T11:21:48Z
2019-07-30T12:24:43Z
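The new behavior is easiest to see on a Series with a flat, named index, mirroring the regression test added above:

```python
import pandas as pd

s = pd.Series([1, 2], index=pd.Index(["a", "b"], name="first"))

try:
    s.unstack("mistake")
except KeyError as err:
    # With the fix, a wrong level name on a flat index is rejected:
    # "Requested level (mistake) does not match index name (first)"
    print(err)
```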
EA: implement+test EA.view
diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst index 407aab4bb1f1b..04974f05164f8 100644 --- a/doc/source/reference/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -45,6 +45,7 @@ objects. api.extensions.ExtensionArray.argsort api.extensions.ExtensionArray.astype api.extensions.ExtensionArray.copy + api.extensions.ExtensionArray.view api.extensions.ExtensionArray.dropna api.extensions.ExtensionArray.factorize api.extensions.ExtensionArray.fillna diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index e517be4f03a16..41d84b0ae4853 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -64,6 +64,7 @@ class ExtensionArray: shift take unique + view _concat_same_type _formatter _formatting_values @@ -147,7 +148,7 @@ class ExtensionArray: If implementing NumPy's ``__array_ufunc__`` interface, pandas expects that - 1. You defer by raising ``NotImplemented`` when any Series are present + 1. You defer by returning ``NotImplemented`` when any Series are present in `inputs`. Pandas will extract the arrays and call the ufunc again. 2. You define a ``_HANDLED_TYPES`` tuple as an attribute on the class. Pandas inspect this to determine whether the ufunc is valid for the @@ -862,6 +863,27 @@ def copy(self) -> ABCExtensionArray: """ raise AbstractMethodError(self) + def view(self, dtype=None) -> Union[ABCExtensionArray, np.ndarray]: + """ + Return a view on the array. + + Parameters + ---------- + dtype : str, np.dtype, or ExtensionDtype, optional + Default None + + Returns + ------- + ExtensionArray + """ + # NB: + # - This must return a *new* object referencing the same data, not self. + # - The only case that *must* be implemented is with dtype=None, + # giving a view with the same dtype as self. + if dtype is not None: + raise NotImplementedError(dtype) + return self[:] + # ------------------------------------------------------------------------ # Printing # ------------------------------------------------------------------------ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index b16217d5d0a32..e56f623962fa3 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -517,19 +517,12 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: return self._set_dtype(dtype) return np.array(self, dtype=dtype, copy=copy) - @cache_readonly - def ndim(self) -> int: - """ - Number of dimensions of the Categorical - """ - return self._codes.ndim - @cache_readonly def size(self) -> int: """ return the len of myself """ - return len(self) + return self._codes.size @cache_readonly def itemsize(self) -> int: @@ -1764,18 +1757,10 @@ def ravel(self, order="C"): ) return np.array(self) - def view(self): - """ - Return a view of myself. - - For internal compatibility with numpy arrays. - - Returns - ------- - view : Categorical - Returns `self`! - """ - return self + def view(self, dtype=None): + if dtype is not None: + raise NotImplementedError(dtype) + return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True) def to_dense(self): """ diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 47b138a9e1604..695138ca07f77 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -554,18 +554,8 @@ def astype(self, dtype, copy=True): return np.asarray(self, dtype=dtype) def view(self, dtype=None): - """ - New view on this array with the same data. 
- - Parameters - ---------- - dtype : numpy dtype, optional - - Returns - ------- - ndarray - With the specified `dtype`. - """ + if dtype is None or dtype is self.dtype: + return type(self)(self._data, dtype=self.dtype) return self._data.view(dtype=dtype) # ------------------------------------------------------------------ diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 2b3c02bd1cade..9a1ed79a99146 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -739,18 +739,14 @@ def isna(self): return isna(self.left) @property - def nbytes(self): + def nbytes(self) -> int: return self.left.nbytes + self.right.nbytes @property - def size(self): + def size(self) -> int: # Avoid materializing self.values return self.left.size - @property - def shape(self): - return self.left.shape - def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs): """ Take elements from the IntervalArray. diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 39529177b9e35..e8397341a1a1d 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -241,11 +241,11 @@ def __setitem__(self, key, value): else: self._ndarray[key] = value - def __len__(self): + def __len__(self) -> int: return len(self._ndarray) @property - def nbytes(self): + def nbytes(self) -> int: return self._ndarray.nbytes def isna(self): diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py index 47c7c72051150..7bd57c9c6ed32 100644 --- a/pandas/core/arrays/sparse.py +++ b/pandas/core/arrays/sparse.py @@ -839,7 +839,7 @@ def fill_value(self, value): self._dtype = SparseDtype(self.dtype.subtype, value) @property - def kind(self): + def kind(self) -> str: """ The kind of sparse index for this array. One of {'integer', 'block'}. """ @@ -854,7 +854,7 @@ def _valid_sp_values(self): mask = notna(sp_vals) return sp_vals[mask] - def __len__(self): + def __len__(self) -> int: return self.sp_index.length @property @@ -868,7 +868,7 @@ def _fill_value_matches(self, fill_value): return self.fill_value == fill_value @property - def nbytes(self): + def nbytes(self) -> int: return self.sp_values.nbytes + self.sp_index.nbytes @property @@ -886,7 +886,7 @@ def density(self): return r @property - def npoints(self): + def npoints(self) -> int: """ The number of non- ``fill_value`` points. 
diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py index cc0deca765b41..9c53210b75d6b 100644 --- a/pandas/tests/extension/arrow/test_bool.py +++ b/pandas/tests/extension/arrow/test_bool.py @@ -41,6 +41,10 @@ def test_copy(self, data): # __setitem__ does not work, so we only have a smoke-test data.copy() + def test_view(self, data): + # __setitem__ does not work, so we only have a smoke-test + data.view() + class TestConstructors(BaseArrowTests, base.BaseConstructorsTests): def test_from_dtype(self, data): diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py index dee8021f5375f..a29f6deeffae6 100644 --- a/pandas/tests/extension/base/interface.py +++ b/pandas/tests/extension/base/interface.py @@ -75,3 +75,18 @@ def test_copy(self, data): data[1] = data[0] assert result[1] != result[0] + + def test_view(self, data): + # view with no dtype should return a shallow copy, *not* the same + # object + assert data[1] != data[0] + + result = data.view() + assert result is not data + assert type(result) == type(data) + + result[1] = result[0] + assert data[1] == data[0] + + # check specifically that the `dtype` kwarg is accepted + data.view(dtype=None) diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index c28ff956a33a4..a1988744d76a1 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -137,11 +137,11 @@ def __setitem__(self, key, value): value = decimal.Decimal(value) self._data[key] = value - def __len__(self): + def __len__(self) -> int: return len(self._data) @property - def nbytes(self): + def nbytes(self) -> int: n = len(self) if n: return n * sys.getsizeof(self[0]) diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index 21c4ac8f055a2..b64ddbd6ac84d 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -80,6 +80,9 @@ def __getitem__(self, item): elif isinstance(item, abc.Iterable): # fancy indexing return type(self)([self.data[i] for i in item]) + elif isinstance(item, slice) and item == slice(None): + # Make sure we get a view + return type(self)(self.data) else: # slice return type(self)(self.data[item]) @@ -103,11 +106,11 @@ def __setitem__(self, key, value): assert isinstance(v, self.dtype.type) self.data[k] = v - def __len__(self): + def __len__(self) -> int: return len(self.data) @property - def nbytes(self): + def nbytes(self) -> int: return sys.getsizeof(self.data) def isna(self): diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py index 1aab71286b4a6..4fdcf930d224f 100644 --- a/pandas/tests/extension/test_interval.py +++ b/pandas/tests/extension/test_interval.py @@ -95,7 +95,10 @@ class TestGrouping(BaseInterval, base.BaseGroupbyTests): class TestInterface(BaseInterval, base.BaseInterfaceTests): - pass + def test_view(self, data): + # __setitem__ incorrectly makes a copy (GH#27147), so we only + # have a smoke-test + data.view() class TestReduce(base.BaseNoReduceTests): diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 84d59902d2aa7..6ebe71e173ec2 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -103,6 +103,10 @@ def test_copy(self, data): # __setitem__ does not work, so we only have a smoke-test data.copy() + def test_view(self, data): + # __setitem__ does not work, so we only have a 
smoke-test + data.view() + class TestConstructors(BaseSparseTests, base.BaseConstructorsTests): pass
Broken off from #27142, plus some type annotations
https://api.github.com/repos/pandas-dev/pandas/pulls/27633
2019-07-29T02:44:22Z
2019-08-09T08:00:54Z
2019-08-09T08:00:53Z
2019-08-09T14:34:27Z
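The contract codified by the new base test `test_view` is that `view()` returns a *new* wrapper over the *same* underlying data. A sketch against a built-in extension array, assuming a pandas version that includes this change:

```python
import pandas as pd

arr = pd.array([1, 2, 3], dtype="Int64")  # IntegerArray, an ExtensionArray

result = arr.view()
assert result is not arr              # a new wrapper object ...
assert type(result) is type(arr)

result[0] = 99
assert arr[0] == 99                   # ... over the same underlying data
```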
CLN: Assorted cleanups
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index f84033e9c3c90..2d4ded9e2e6ba 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -51,7 +51,7 @@ class PandasDelegate: """ def _delegate_property_get(self, name, *args, **kwargs): - raise TypeError("You cannot access the " "property {name}".format(name=name)) + raise TypeError("You cannot access the property {name}".format(name=name)) def _delegate_property_set(self, name, value, *args, **kwargs): raise TypeError("The property {name} cannot be set".format(name=name)) @@ -271,8 +271,7 @@ def plot(self): @Appender( _doc % dict( - klass="DataFrame", - others=("register_series_accessor, " "register_index_accessor"), + klass="DataFrame", others=("register_series_accessor, register_index_accessor") ) ) def register_dataframe_accessor(name): @@ -284,8 +283,7 @@ def register_dataframe_accessor(name): @Appender( _doc % dict( - klass="Series", - others=("register_dataframe_accessor, " "register_index_accessor"), + klass="Series", others=("register_dataframe_accessor, register_index_accessor") ) ) def register_series_accessor(name): @@ -297,8 +295,7 @@ def register_series_accessor(name): @Appender( _doc % dict( - klass="Index", - others=("register_dataframe_accessor, " "register_series_accessor"), + klass="Index", others=("register_dataframe_accessor, register_series_accessor") ) ) def register_index_accessor(name): diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index ee796f9896b52..e517be4f03a16 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -14,14 +14,17 @@ from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, Substitution +from pandas.util._validators import validate_fillna_kwargs -from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.common import is_array_like, is_list_like from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna from pandas._typing import ArrayLike from pandas.core import ops +from pandas.core.algorithms import _factorize_array, unique +from pandas.core.missing import backfill_1d, pad_1d from pandas.core.sorting import nargsort _not_implemented_message = "{} does not implement {}." @@ -484,10 +487,6 @@ def fillna(self, value=None, method=None, limit=None): ------- filled : ExtensionArray with NA/NaN filled """ - from pandas.api.types import is_array_like - from pandas.util._validators import validate_fillna_kwargs - from pandas.core.missing import pad_1d, backfill_1d - value, method = validate_fillna_kwargs(value, method) mask = self.isna() @@ -584,8 +583,6 @@ def unique(self): ------- uniques : ExtensionArray """ - from pandas import unique - uniques = unique(self.astype(object)) return self._from_sequence(uniques, dtype=self.dtype) @@ -700,8 +697,6 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ABCExtensionArra # original ExtensionArray. # 2. ExtensionArray.factorize. # Complete control over factorization. 
- from pandas.core.algorithms import _factorize_array - arr, na_value = self._values_for_factorize() labels, uniques = _factorize_array( @@ -874,7 +869,7 @@ def copy(self) -> ABCExtensionArray: def __repr__(self): from pandas.io.formats.printing import format_object_summary - template = "{class_name}" "{data}\n" "Length: {length}, dtype: {dtype}" + template = "{class_name}{data}\nLength: {length}, dtype: {dtype}" # the short repr has no trailing newline, while the truncated # repr does. So we include a newline in our template, and strip # any trailing newlines from format_object_summary diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index c22f7e0429433..b16217d5d0a32 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -93,13 +93,13 @@ def f(self, other): if not self.ordered: if op in ["__lt__", "__gt__", "__le__", "__ge__"]: raise TypeError( - "Unordered Categoricals can only compare " "equality or not" + "Unordered Categoricals can only compare equality or not" ) if isinstance(other, Categorical): # Two Categoricals can only be be compared if the categories are # the same (maybe up to ordering, depending on ordered) - msg = "Categoricals can only be compared if " "'categories' are the same." + msg = "Categoricals can only be compared if 'categories' are the same." if len(self.categories) != len(other.categories): raise TypeError(msg + " Categories are different lengths") elif self.ordered and not (self.categories == other.categories).all(): @@ -109,7 +109,7 @@ def f(self, other): if not (self.ordered == other.ordered): raise TypeError( - "Categoricals can only be compared if " "'ordered' is the same" + "Categoricals can only be compared if 'ordered' is the same" ) if not self.ordered and not self.categories.equals(other.categories): # both unordered and different order @@ -387,7 +387,7 @@ def __init__( # FIXME raise NotImplementedError( - "> 1 ndim Categorical are not " "supported at this time" + "> 1 ndim Categorical are not supported at this time" ) # we're inferring from values @@ -694,7 +694,7 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None): raise ValueError(msg) if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1): - raise ValueError("codes need to be between -1 and " "len(categories)-1") + raise ValueError("codes need to be between -1 and len(categories)-1") return cls(codes, dtype=dtype, fastpath=True) @@ -1019,7 +1019,7 @@ def reorder_categories(self, new_categories, ordered=None, inplace=False): inplace = validate_bool_kwarg(inplace, "inplace") if set(self.dtype.categories) != set(new_categories): raise ValueError( - "items in new_categories are not the same as in " "old categories" + "items in new_categories are not the same as in old categories" ) return self.set_categories(new_categories, ordered=ordered, inplace=inplace) @@ -1481,7 +1481,7 @@ def put(self, *args, **kwargs): """ Replace specific elements in the Categorical with given values. 
""" - raise NotImplementedError(("'put' is not yet implemented " "for Categorical")) + raise NotImplementedError(("'put' is not yet implemented for Categorical")) def dropna(self): """ @@ -1827,7 +1827,7 @@ def fillna(self, value=None, method=None, limit=None): value = np.nan if limit is not None: raise NotImplementedError( - "specifying a limit for fillna has not " "been implemented yet" + "specifying a limit for fillna has not been implemented yet" ) codes = self._codes @@ -1963,7 +1963,7 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None): if fill_value in self.categories: fill_value = self.categories.get_loc(fill_value) else: - msg = "'fill_value' ('{}') is not in this Categorical's " "categories." + msg = "'fill_value' ('{}') is not in this Categorical's categories." raise TypeError(msg.format(fill_value)) codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value) @@ -2168,12 +2168,12 @@ def __setitem__(self, key, value): # in a 2-d case be passd (slice(None),....) if len(key) == 2: if not com.is_null_slice(key[0]): - raise AssertionError("invalid slicing for a 1-ndim " "categorical") + raise AssertionError("invalid slicing for a 1-ndim categorical") key = key[1] elif len(key) == 1: key = key[0] else: - raise AssertionError("invalid slicing for a 1-ndim " "categorical") + raise AssertionError("invalid slicing for a 1-ndim categorical") # slicing in Series or Categorical elif isinstance(key, slice): @@ -2561,9 +2561,7 @@ def __init__(self, data): @staticmethod def _validate(data): if not is_categorical_dtype(data.dtype): - raise AttributeError( - "Can only use .cat accessor with a " "'category' dtype" - ) + raise AttributeError("Can only use .cat accessor with a 'category' dtype") def _delegate_property_get(self, name): return getattr(self._parent, name) @@ -2607,7 +2605,7 @@ def name(self): # need to be updated. `name` will need to be removed from # `ok_for_cat`. warn( - "`Series.cat.name` has been deprecated. Use `Series.name` " "instead.", + "`Series.cat.name` has been deprecated. Use `Series.name` instead.", FutureWarning, stacklevel=2, ) @@ -2619,7 +2617,7 @@ def index(self): # need to be updated. `index` will need to be removed from # ok_for_cat`. warn( - "`Series.cat.index` has been deprecated. Use `Series.index` " "instead.", + "`Series.cat.index` has been deprecated. 
Use `Series.index` instead.", FutureWarning, stacklevel=2, ) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 932d96a37c04c..f86b307e5ede3 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1097,7 +1097,7 @@ def _sub_period_array(self, other): ) if len(self) != len(other): - raise ValueError("cannot subtract arrays/indices of " "unequal length") + raise ValueError("cannot subtract arrays/indices of unequal length") if self.freq != other.freq: msg = DIFFERENT_FREQ.format( cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 5b540dcce53c8..8e76ad8a375f7 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -478,7 +478,7 @@ def _generate_range( periods = dtl.validate_periods(periods) if freq is None and any(x is None for x in [periods, start, end]): - raise ValueError("Must provide freq argument if no data is " "supplied") + raise ValueError("Must provide freq argument if no data is supplied") if com.count_not_none(start, end, periods, freq) != 3: raise ValueError( @@ -496,7 +496,7 @@ def _generate_range( if start is None and end is None: if closed is not None: raise ValueError( - "Closed has to be None if not both of start" "and end are defined" + "Closed has to be None if not both of startand end are defined" ) if start is NaT or end is NaT: raise ValueError("Neither `start` nor `end` can be NaT") @@ -786,11 +786,11 @@ def _assert_tzawareness_compat(self, other): elif self.tz is None: if other_tz is not None: raise TypeError( - "Cannot compare tz-naive and tz-aware " "datetime-like objects." + "Cannot compare tz-naive and tz-aware datetime-like objects." ) elif other_tz is None: raise TypeError( - "Cannot compare tz-naive and tz-aware " "datetime-like objects" + "Cannot compare tz-naive and tz-aware datetime-like objects" ) # ----------------------------------------------------------------- @@ -833,7 +833,7 @@ def _add_offset(self, offset): except NotImplementedError: warnings.warn( - "Non-vectorized DateOffset being applied to Series " "or DatetimeIndex", + "Non-vectorized DateOffset being applied to Series or DatetimeIndex", PerformanceWarning, ) result = self.astype("O") + offset @@ -851,7 +851,7 @@ def _sub_datetimelike_scalar(self, other): if not self._has_same_tz(other): # require tz compat raise TypeError( - "Timestamp subtraction must have the same " "timezones or no timezones" + "Timestamp subtraction must have the same timezones or no timezones" ) i8 = self.asi8 @@ -957,7 +957,7 @@ def tz_convert(self, tz): if self.tz is None: # tz naive, use tz_localize raise TypeError( - "Cannot convert tz-naive timestamps, use " "tz_localize to localize" + "Cannot convert tz-naive timestamps, use tz_localize to localize" ) # No conversion since timestamps are all UTC to begin with @@ -1125,7 +1125,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise", errors=None): nonexistent = "raise" else: raise ValueError( - "The errors argument must be either 'coerce' " "or 'raise'." + "The errors argument must be either 'coerce' or 'raise'." ) nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") @@ -1274,7 +1274,7 @@ def to_period(self, freq=None): if freq is None: raise ValueError( - "You must pass a freq argument as " "current index has none." + "You must pass a freq argument as current index has none." 
) freq = get_period_alias(freq) @@ -2047,7 +2047,7 @@ def maybe_convert_dtype(data, copy): # Note: without explicitly raising here, PeriodIndex # test_setops.test_join_does_not_recur fails raise TypeError( - "Passing PeriodDtype data is invalid. " "Use `data.to_timestamp()` instead" + "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead" ) elif is_categorical_dtype(data): @@ -2177,7 +2177,7 @@ def validate_tz_from_dtype(dtype, tz): dtz = getattr(dtype, "tz", None) if dtz is not None: if tz is not None and not timezones.tz_compare(tz, dtz): - raise ValueError("cannot supply both a tz and a dtype" " with a tz") + raise ValueError("cannot supply both a tz and a dtype with a tz") tz = dtz if tz is not None and is_datetime64_dtype(dtype): @@ -2216,7 +2216,7 @@ def _infer_tz_from_endpoints(start, end, tz): inferred_tz = timezones.infer_tzinfo(start, end) except Exception: raise TypeError( - "Start and end cannot both be tz-aware with " "different timezones" + "Start and end cannot both be tz-aware with different timezones" ) inferred_tz = timezones.maybe_get_tz(inferred_tz) @@ -2224,7 +2224,7 @@ def _infer_tz_from_endpoints(start, end, tz): if tz is not None and inferred_tz is not None: if not timezones.tz_compare(inferred_tz, tz): - raise AssertionError("Inferred time zone not equal to passed " "time zone") + raise AssertionError("Inferred time zone not equal to passed time zone") elif inferred_tz is not None: tz = inferred_tz diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 2a0d2c8770063..2b3c02bd1cade 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -33,6 +33,7 @@ ) from pandas.core.dtypes.missing import isna, notna +from pandas.core.algorithms import take, value_counts from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs from pandas.core.arrays.categorical import Categorical import pandas.core.common as com @@ -206,7 +207,7 @@ def _simple_new( left = left.astype(right.dtype) if type(left) != type(right): - msg = "must not have differing left [{ltype}] and right " "[{rtype}] types" + msg = "must not have differing left [{ltype}] and right [{rtype}] types" raise ValueError( msg.format(ltype=type(left).__name__, rtype=type(right).__name__) ) @@ -458,13 +459,13 @@ def from_tuples(cls, data, closed="right", copy=False, dtype=None): lhs, rhs = d except ValueError: msg = ( - "{name}.from_tuples requires tuples of " "length 2, got {tpl}" + "{name}.from_tuples requires tuples of length 2, got {tpl}" ).format(name=name, tpl=d) raise ValueError(msg) except TypeError: - msg = ( - "{name}.from_tuples received an invalid " "item, {tpl}" - ).format(name=name, tpl=d) + msg = ("{name}.from_tuples received an invalid item, {tpl}").format( + name=name, tpl=d + ) raise TypeError(msg) left.append(lhs) right.append(rhs) @@ -590,7 +591,7 @@ def fillna(self, value=None, method=None, limit=None): filled : IntervalArray with NA/NaN filled """ if method is not None: - raise TypeError("Filling by method is not supported for " "IntervalArray.") + raise TypeError("Filling by method is not supported for IntervalArray.") if limit is not None: raise TypeError("limit is not supported for IntervalArray.") @@ -796,8 +797,6 @@ def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs): When `indices` contains negative values other than ``-1`` and `allow_fill` is True. 
""" - from pandas.core.algorithms import take - nv.validate_take(tuple(), kwargs) fill_left = fill_right = fill_value @@ -843,8 +842,6 @@ def value_counts(self, dropna=True): Series.value_counts """ # TODO: implement this is a non-naive way! - from pandas.core.algorithms import value_counts - return value_counts(np.asarray(self), dropna=dropna) # Formatting diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index b0336c46d1953..c290391278def 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -286,13 +286,13 @@ def _generate_range(cls, start, end, periods, freq, fields): if start is not None or end is not None: if field_count > 0: raise ValueError( - "Can either instantiate from fields " "or endpoints, but not both" + "Can either instantiate from fields or endpoints, but not both" ) subarr, freq = _get_ordinal_range(start, end, periods, freq) elif field_count > 0: subarr, freq = _range_from_fields(freq=freq, **fields) else: - raise ValueError("Not enough parameters to construct " "Period range") + raise ValueError("Not enough parameters to construct Period range") return subarr, freq @@ -839,7 +839,7 @@ def period_array( dtype = None if is_float_dtype(data) and len(data) > 0: - raise TypeError("PeriodIndex does not allow " "floating point in construction") + raise TypeError("PeriodIndex does not allow floating point in construction") data = ensure_object(data) @@ -875,7 +875,7 @@ def validate_dtype_freq(dtype, freq): if freq is None: freq = dtype.freq elif freq != dtype.freq: - raise IncompatibleFrequency("specified freq and dtype " "are different") + raise IncompatibleFrequency("specified freq and dtype are different") return freq diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py index 048f6c6f5c680..47c7c72051150 100644 --- a/pandas/core/arrays/sparse.py +++ b/pandas/core/arrays/sparse.py @@ -121,7 +121,7 @@ def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None: if not is_scalar(fill_value): raise ValueError( - "fill_value must be a scalar. Got {} " "instead".format(fill_value) + "fill_value must be a scalar. 
Got {} instead".format(fill_value) ) self._dtype = dtype self._fill_value = fill_value @@ -1139,7 +1139,7 @@ def _get_val_at(self, loc): def take(self, indices, allow_fill=False, fill_value=None): if is_scalar(indices): raise ValueError( - "'indices' must be an array, not a " "scalar '{}'.".format(indices) + "'indices' must be an array, not a scalar '{}'.".format(indices) ) indices = np.asarray(indices, dtype=np.int32) @@ -1176,7 +1176,7 @@ def _take_with_fill(self, indices, fill_value=None): taken.fill(fill_value) return taken else: - raise IndexError("cannot do a non-empty take from an empty " "axes.") + raise IndexError("cannot do a non-empty take from an empty axes.") sp_indexer = self.sp_index.lookup_array(indices) @@ -1226,7 +1226,7 @@ def _take_without_fill(self, indices): if (indices.max() >= n) or (indices.min() < -n): if n == 0: - raise IndexError("cannot do a non-empty take from an " "empty axes.") + raise IndexError("cannot do a non-empty take from an empty axes.") else: raise IndexError("out of bounds value in 'indices'.") diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 9d622d92e0979..dd0b9a79c6dca 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -290,7 +290,7 @@ def _generate_range(cls, start, end, periods, freq, closed=None): periods = dtl.validate_periods(periods) if freq is None and any(x is None for x in [periods, start, end]): - raise ValueError("Must provide freq argument if no data is " "supplied") + raise ValueError("Must provide freq argument if no data is supplied") if com.count_not_none(start, end, periods, freq) != 3: raise ValueError( @@ -307,7 +307,7 @@ def _generate_range(cls, start, end, periods, freq, closed=None): if start is None and end is None: if closed is not None: raise ValueError( - "Closed has to be None if not both of start" "and end are defined" + "Closed has to be None if not both of startand end are defined" ) left_closed, right_closed = dtl.validate_endpoints(closed) @@ -862,17 +862,17 @@ def to_pytimedelta(self): seconds = _field_accessor( "seconds", "seconds", - "Number of seconds (>= 0 and less than 1 day) " "for each element.", + "Number of seconds (>= 0 and less than 1 day) for each element.", ) microseconds = _field_accessor( "microseconds", "microseconds", - "Number of microseconds (>= 0 and less " "than 1 second) for each element.", + "Number of microseconds (>= 0 and less than 1 second) for each element.", ) nanoseconds = _field_accessor( "nanoseconds", "nanoseconds", - "Number of nanoseconds (>= 0 and less " "than 1 microsecond) for each element.", + "Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.", ) @property @@ -1131,7 +1131,7 @@ def _generate_regular_range(start, end, periods, offset): b = e - periods * stride else: raise ValueError( - "at least 'start' or 'end' should be specified " "if a 'period' is given." + "at least 'start' or 'end' should be specified if a 'period' is given." 
) data = np.arange(b, e, stride, dtype=np.int64) diff --git a/pandas/core/base.py b/pandas/core/base.py index ce993a513f569..cfa8d25210129 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -4,7 +4,7 @@ import builtins from collections import OrderedDict import textwrap -from typing import Optional +from typing import Dict, Optional import warnings import numpy as np @@ -37,7 +37,7 @@ from pandas.core.arrays import ExtensionArray import pandas.core.nanops as nanops -_shared_docs = dict() +_shared_docs = dict() # type: Dict[str, str] _indexops_doc_kwargs = dict( klass="IndexOpsMixin", inplace="", @@ -437,7 +437,7 @@ def _agg_1dim(name, how, subset=None): colg = self._gotitem(name, ndim=1, subset=subset) if colg.ndim != 1: raise SpecificationError( - "nested dictionary is ambiguous " "in aggregation" + "nested dictionary is ambiguous in aggregation" ) return colg.aggregate(how, _level=(_level or 0) + 1) @@ -634,9 +634,7 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): result = Series(results, index=keys, name=self.name) if is_nested_object(result): - raise ValueError( - "cannot combine transform and " "aggregation operations" - ) + raise ValueError("cannot combine transform and aggregation operations") return result def _shallow_copy(self, obj=None, obj_type=None, **kwargs): @@ -735,7 +733,7 @@ def item(self): The first element of %(klass)s. """ warnings.warn( - "`item` has been deprecated and will be removed in a " "future version", + "`item` has been deprecated and will be removed in a future version", FutureWarning, stacklevel=2, ) diff --git a/pandas/core/common.py b/pandas/core/common.py index f9a19291b8ad9..c12bfecc46518 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -443,7 +443,7 @@ def random_state(state=None): return np.random else: raise ValueError( - "random_state must be an integer, a numpy " "RandomState, or None" + "random_state must be an integer, a numpy RandomState, or None" ) diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index 456ecf4b2594f..8614230c4811f 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -333,7 +333,7 @@ def eval( " if all expressions contain an assignment" ) elif inplace: - raise ValueError("Cannot operate inplace " "if there is no assignment") + raise ValueError("Cannot operate inplace if there is no assignment") # assign if needed assigner = parsed_expr.assigner diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 772fb547567e3..e10d189bc3c6f 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -296,7 +296,7 @@ def _node_not_implemented(node_name, cls): def f(self, *args, **kwargs): raise NotImplementedError( - "{name!r} nodes are not " "implemented".format(name=node_name) + "{name!r} nodes are not implemented".format(name=node_name) ) return f @@ -433,7 +433,7 @@ def visit(self, node, **kwargs): from keyword import iskeyword if any(iskeyword(x) for x in clean.split()): - e.msg = "Python keyword not valid identifier" " in numexpr query" + e.msg = "Python keyword not valid identifier in numexpr query" raise e method = "visit_" + node.__class__.__name__ @@ -642,9 +642,7 @@ def visit_Assign(self, node, **kwargs): if len(node.targets) != 1: raise SyntaxError("can only assign a single expression") if not isinstance(node.targets[0], ast.Name): - raise SyntaxError( - "left hand side of an assignment must be a " "single name" - ) + raise SyntaxError("left hand side of an assignment must be 
a single name") if self.env.target is None: raise ValueError("cannot assign without a target object") @@ -656,7 +654,7 @@ def visit_Assign(self, node, **kwargs): self.assigner = getattr(assigner, "name", assigner) if self.assigner is None: raise SyntaxError( - "left hand side of an assignment must be a " "single resolvable name" + "left hand side of an assignment must be a single resolvable name" ) return self.visit(node.value, **kwargs) diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index ea61467080291..d9dc194d484ae 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -197,7 +197,7 @@ def _bool_arith_check( if op_str in not_allowed: raise NotImplementedError( - "operator {op!r} not implemented for " "bool dtypes".format(op=op_str) + "operator {op!r} not implemented for bool dtypes".format(op=op_str) ) return True diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 59ed7143e6cd2..870acc3cc9956 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -97,7 +97,7 @@ def _resolve_name(self): if hasattr(res, "ndim") and res.ndim > 2: raise NotImplementedError( - "N-dimensional objects, where N > 2," " are not supported with eval" + "N-dimensional objects, where N > 2, are not supported with eval" ) return res diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 8ba01670bd879..60cf35163bcf4 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -306,7 +306,7 @@ def invert(self): # self.condition = "~(%s)" % self.condition # return self raise NotImplementedError( - "cannot use an invert condition when " "passing to numexpr" + "cannot use an invert condition when passing to numexpr" ) def format(self): @@ -474,9 +474,7 @@ def _validate_where(w): """ if not (isinstance(w, (Expr, str)) or is_list_like(w)): - raise TypeError( - "where must be passed as a string, Expr, " "or list-like of Exprs" - ) + raise TypeError("where must be passed as a string, Expr, or list-like of Exprs") return w diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 33066ccef0687..5980e3d133374 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1173,9 +1173,7 @@ def from_dict(cls, data, orient="columns", dtype=None, columns=None): data, index = list(data.values()), list(data.keys()) elif orient == "columns": if columns is not None: - raise ValueError( - "cannot use columns parameter with " "orient='columns'" - ) + raise ValueError("cannot use columns parameter with orient='columns'") else: # pragma: no cover raise ValueError("only recognize index or columns for orient") @@ -1327,7 +1325,7 @@ def to_dict(self, orient="dict", into=dict): """ if not self.columns.is_unique: warnings.warn( - "DataFrame columns are not unique, some " "columns will be omitted.", + "DataFrame columns are not unique, some columns will be omitted.", UserWarning, stacklevel=2, ) @@ -1808,9 +1806,9 @@ def to_records( formats.append(dtype_mapping) else: element = "row" if i < index_len else "column" - msg = ( - "Invalid dtype {dtype} specified for " "{element} {name}" - ).format(dtype=dtype_mapping, element=element, name=name) + msg = ("Invalid dtype {dtype} specified for {element} {name}").format( + dtype=dtype_mapping, element=element, name=name + ) raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) @@ -2086,9 +2084,7 @@ def to_stata( raise 
ValueError("Only formats 114 and 117 supported.") if version == 114: if convert_strl is not None: - raise ValueError( - "strl support is only available when using " "format 117" - ) + raise ValueError("strl support is only available when using format 117") from pandas.io.stata import StataWriter as statawriter else: from pandas.io.stata import StataWriter117 as statawriter @@ -2502,7 +2498,7 @@ def _sizeof_fmt(num, size_qualifier): # returns size in human readable format for x in ["bytes", "KB", "MB", "GB", "TB"]: if num < 1024.0: - return "{num:3.1f}{size_q} " "{x}".format( + return "{num:3.1f}{size_q} {x}".format( num=num, size_q=size_qualifier, x=x ) num /= 1024.0 @@ -2887,7 +2883,7 @@ def _getitem_bool_array(self, key): # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( - "Boolean Series key will be reindexed to match " "DataFrame index.", + "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=3, ) @@ -3461,7 +3457,7 @@ def _get_info_slice(obj, indexer): selection = tuple(map(frozenset, (include, exclude))) if not any(selection): - raise ValueError("at least one of include or exclude must be " "nonempty") + raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation include, exclude = map( @@ -3654,7 +3650,7 @@ def reindexer(value): # other raise TypeError( - "incompatible index of inserted column " "with frame index" + "incompatible index of inserted column with frame index" ) return value @@ -4337,7 +4333,7 @@ def set_index( found = col in self.columns except TypeError: raise TypeError( - err_msg + " Received column of " "type {}".format(type(col)) + err_msg + " Received column of type {}".format(type(col)) ) else: if not found: @@ -5714,9 +5710,7 @@ def update( if join != "left": # pragma: no cover raise NotImplementedError("Only left join is supported") if errors not in ["ignore", "raise"]: - raise ValueError( - "The parameter errors must be either " "'ignore' or 'raise'" - ) + raise ValueError("The parameter errors must be either 'ignore' or 'raise'") if not isinstance(other, DataFrame): other = DataFrame(other) @@ -7213,7 +7207,7 @@ def _join_compat( else: if on is not None: raise ValueError( - "Joining multiple DataFrames only supported" " for joining on index" + "Joining multiple DataFrames only supported for joining on index" ) frames = [self] + list(other) @@ -7374,7 +7368,7 @@ def _series_round(s, decimals): # Dispatch to Series.round new_cols = [_series_round(v, decimals) for _, v in self.items()] else: - raise TypeError("decimals must be an integer, a dict-like or a " "Series") + raise TypeError("decimals must be an integer, a dict-like or a Series") if len(new_cols) > 0: return self._constructor( @@ -8376,11 +8370,11 @@ def isin(self, values): ) elif isinstance(values, Series): if not values.index.is_unique: - raise ValueError("cannot compute isin with " "a duplicate axis.") + raise ValueError("cannot compute isin with a duplicate axis.") return self.eq(values.reindex_like(self), axis="index") elif isinstance(values, DataFrame): if not (values.columns.is_unique and values.index.is_unique): - raise ValueError("cannot compute isin with " "a duplicate axis.") + raise ValueError("cannot compute isin with a duplicate axis.") return self.eq(values.reindex_like(self)) else: if not is_list_like(values): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index df97d34ee349a..821c35e0cce2f 
100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5,6 +5,7 @@ import json import operator import pickle +import re from textwrap import dedent from typing import Callable, Dict, FrozenSet, List, Optional, Set import warnings @@ -380,7 +381,7 @@ def _construct_axes_from_arguments( kwargs[a] = args.pop(0) except IndexError: if require_all: - raise TypeError("not enough/duplicate arguments " "specified!") + raise TypeError("not enough/duplicate arguments specified!") axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS} return axes, kwargs @@ -1297,7 +1298,7 @@ class name if non_mapper: return self._set_axis_name(mapper, axis=axis, inplace=inplace) else: - raise ValueError("Use `.rename` to alter labels " "with a mapper.") + raise ValueError("Use `.rename` to alter labels with a mapper.") else: # Use new behavior. Means that index and/or columns # is specified @@ -3869,16 +3870,14 @@ def drop( if labels is not None: if index is not None or columns is not None: - raise ValueError( - "Cannot specify both 'labels' and " "'index'/'columns'" - ) + raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes, _ = self._construct_axes_from_arguments((index, columns), {}) else: raise ValueError( - "Need to specify at least one of 'labels', " "'index' or 'columns'" + "Need to specify at least one of 'labels', 'index' or 'columns'" ) obj = self @@ -4615,8 +4614,6 @@ def filter(self, items=None, like=None, regex=None, axis=None): one two three rabbit 4 5 6 """ - import re - nkw = com.count_not_none(items, like, regex) if nkw > 1: raise TypeError( @@ -4886,7 +4883,7 @@ def sample( weights = self[weights] except KeyError: raise KeyError( - "String passed to weights not a " "valid column" + "String passed to weights not a valid column" ) else: raise ValueError( @@ -4904,14 +4901,14 @@ def sample( if len(weights) != axis_length: raise ValueError( - "Weights and axis to be sampled must be of " "same length" + "Weights and axis to be sampled must be of same length" ) if (weights == np.inf).any() or (weights == -np.inf).any(): raise ValueError("weight vector may not include `inf` values") if (weights < 0).any(): - raise ValueError("weight vector many not include negative " "values") + raise ValueError("weight vector many not include negative values") # If has nan, set to zero. weights = weights.fillna(0) @@ -4933,12 +4930,12 @@ def sample( elif n is None and frac is not None: n = int(round(frac * axis_length)) elif n is not None and frac is not None: - raise ValueError("Please enter a value for `frac` OR `n`, not " "both") + raise ValueError("Please enter a value for `frac` OR `n`, not both") # Check for negative sizes if n < 0: raise ValueError( - "A negative number of rows requested. Please " "provide positive value." + "A negative number of rows requested. Please provide positive value." 
) locs = rs.choice(axis_length, size=n, replace=replace, p=weights) @@ -5565,7 +5562,7 @@ def get_ftype_counts(self): dtype: int64 """ warnings.warn( - "get_ftype_counts is deprecated and will " "be removed in a future version", + "get_ftype_counts is deprecated and will be removed in a future version", FutureWarning, stacklevel=2, ) @@ -5686,7 +5683,7 @@ def as_blocks(self, copy=True): values : a dict of dtype -> Constructor Types """ warnings.warn( - "as_blocks is deprecated and will " "be removed in a future version", + "as_blocks is deprecated and will be removed in a future version", FutureWarning, stacklevel=2, ) @@ -6598,9 +6595,7 @@ def replace( ): inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: - raise AssertionError( - "'to_replace' must be 'None' if 'regex' is " "not a bool" - ) + raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool") self._consolidate_inplace() @@ -6698,7 +6693,7 @@ def replace( convert=convert, ) else: - raise TypeError("value argument must be scalar, dict, or " "Series") + raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing'] if is_list_like(value): @@ -6984,7 +6979,7 @@ def interpolate( if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear": raise ValueError( - "Only `method=linear` interpolation is supported " "on MultiIndexes." + "Only `method=linear` interpolation is supported on MultiIndexes." ) if _maybe_transposed_self._data.get_dtype_counts().get("object") == len( @@ -7146,9 +7141,7 @@ def asof(self, where, subset=None): 2018-02-27 09:04:30 40.0 NaN """ if isinstance(where, str): - from pandas import to_datetime - - where = to_datetime(where) + where = Timestamp(where) if not self.index.is_monotonic: raise ValueError("asof requires a sorted index") @@ -7598,7 +7591,7 @@ def clip_upper(self, threshold, axis=None, inplace=False): dtype: int64 """ warnings.warn( - "clip_upper(threshold) is deprecated, " "use clip(upper=threshold) instead", + "clip_upper(threshold) is deprecated, use clip(upper=threshold) instead", FutureWarning, stacklevel=2, ) @@ -7717,7 +7710,7 @@ def clip_lower(self, threshold, axis=None, inplace=False): 2 5 6 """ warnings.warn( - "clip_lower(threshold) is deprecated, " "use clip(lower=threshold) instead", + "clip_lower(threshold) is deprecated, use clip(lower=threshold) instead", FutureWarning, stacklevel=2, ) @@ -8720,12 +8713,10 @@ def align( fill_axis=0, broadcast_axis=None, ): - from pandas import DataFrame, Series - method = missing.clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: - if isinstance(self, Series): + if isinstance(self, ABCSeries): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim @@ -8743,7 +8734,7 @@ def align( limit=limit, fill_axis=fill_axis, ) - elif isinstance(other, Series): + elif isinstance(other, ABCSeries): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim @@ -8764,7 +8755,7 @@ def align( if axis is not None: axis = self._get_axis_number(axis) - if isinstance(other, DataFrame): + if isinstance(other, ABCDataFrame): return self._align_frame( other, join=join, @@ -8776,7 +8767,7 @@ def align( limit=limit, fill_axis=fill_axis, ) - elif isinstance(other, Series): + elif isinstance(other, ABCSeries): return self._align_series( other, join=join, @@ -8869,7 +8860,7 @@ def _align_series( # series/series compat, 
other must always be a Series if is_series: if axis: - raise ValueError("cannot align series to a series other than " "axis 0") + raise ValueError("cannot align series to a series other than axis 0") # equal if self.index.equals(other.index): @@ -8959,7 +8950,7 @@ def _where( if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: - raise ValueError("Array conditional must be same shape as " "self") + raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict()) # make sure we are boolean @@ -8999,7 +8990,7 @@ def _where( # slice me out of the other else: raise NotImplementedError( - "cannot align with a higher " "dimensional NDFrame" + "cannot align with a higher dimensional NDFrame" ) if isinstance(other, np.ndarray): @@ -9042,12 +9033,12 @@ def _where( else: raise ValueError( - "Length of replacements must equal " "series length" + "Length of replacements must equal series length" ) else: raise ValueError( - "other must be the same shape as self " "when an ndarray" + "other must be the same shape as self when an ndarray" ) # we are the same shape, so create an actual object for alignment @@ -9641,7 +9632,7 @@ def _tz_convert(ax, tz): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( - "%s is not a valid DatetimeIndex or " "PeriodIndex" % ax_name + "%s is not a valid DatetimeIndex or PeriodIndex" % ax_name ) else: ax = DatetimeIndex([], tz=tz) @@ -9805,7 +9796,7 @@ def _tz_localize(ax, tz, ambiguous, nonexistent): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( - "%s is not a valid DatetimeIndex or " "PeriodIndex" % ax_name + "%s is not a valid DatetimeIndex or PeriodIndex" % ax_name ) else: ax = DatetimeIndex([], tz=tz) @@ -10249,7 +10240,7 @@ def _check_percentile(self, q): Validate percentiles (used by describe and quantile). """ - msg = "percentiles should all be in the interval [0, 1]. " "Try {0} instead." + msg = "percentiles should all be in the interval [0, 1]. Try {0} instead." q = np.asarray(q) if q.ndim == 0: if not 0 <= q <= 1: @@ -10769,7 +10760,7 @@ def ewm( def transform(self, func, *args, **kwargs): result = self.agg(func, *args, **kwargs) if is_scalar(result) or len(result) != len(self): - raise ValueError("transforms cannot produce " "aggregated results") + raise ValueError("transforms cannot produce aggregated results") return result @@ -11669,7 +11660,7 @@ def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs if level is not None: if bool_only is not None: raise NotImplementedError( - "Option bool_only is not " "implemented with option level." + "Option bool_only is not implemented with option level." ) return self._agg_by_level(name, axis=axis, level=level, skipna=skipna) return self._reduce( diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 8f0abc91f7aef..5edf9504157c7 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -119,7 +119,7 @@ def clean_interp_method(method, **kwargs): "from_derivatives", ] if method in ("spline", "polynomial") and order is None: - raise ValueError("You must specify the order of the spline or " "polynomial.") + raise ValueError("You must specify the order of the spline or polynomial.") if method not in valid: raise ValueError( "method must be one of {valid}. 
Got '{method}' " @@ -176,7 +176,7 @@ def interpolate_1d( valid_limit_directions = ["forward", "backward", "both"] limit_direction = limit_direction.lower() if limit_direction not in valid_limit_directions: - msg = "Invalid limit_direction: expecting one of {valid!r}, " "got {invalid!r}." + msg = "Invalid limit_direction: expecting one of {valid!r}, got {invalid!r}." raise ValueError( msg.format(valid=valid_limit_directions, invalid=limit_direction) ) @@ -322,7 +322,7 @@ def _interpolate_scipy_wrapper( alt_methods["pchip"] = interpolate.pchip_interpolate except AttributeError: raise ImportError( - "Your version of Scipy does not support " "PCHIP interpolation." + "Your version of Scipy does not support PCHIP interpolation." ) elif method == "akima": alt_methods["akima"] = _akima_interpolate @@ -470,7 +470,7 @@ def interpolate_2d( ndim = values.ndim if values.ndim == 1: if axis != 0: # pragma: no cover - raise AssertionError("cannot interpolate on a ndim == 1 with " "axis != 0") + raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0") values = values.reshape(tuple((1,) + values.shape)) if fill_value is None: diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 50da5e4057210..3a5dfe6700bd2 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -1139,7 +1139,7 @@ def wrapper(self, other, axis=None): return NotImplemented elif isinstance(other, ABCSeries) and not self._indexed_same(other): - raise ValueError("Can only compare identically-labeled " "Series objects") + raise ValueError("Can only compare identically-labeled Series objects") elif is_categorical_dtype(self): # Dispatch to Categorical implementation; pd.CategoricalIndex @@ -1169,9 +1169,7 @@ def wrapper(self, other, axis=None): if op in {operator.lt, operator.le, operator.gt, operator.ge}: future = "a TypeError will be raised" else: - future = ( - "'the values will not compare equal to the " "'datetime.date'" - ) + future = "'the values will not compare equal to the 'datetime.date'" msg = "\n".join(textwrap.wrap(msg.format(future=future))) warnings.warn(msg, FutureWarning, stacklevel=2) other = Timestamp(other) @@ -1404,9 +1402,7 @@ def _align_method_FRAME(left, right, axis): """ convert rhs to meet lhs dims if input is list, tuple or np.ndarray """ def to_series(right): - msg = ( - "Unable to coerce to Series, length must be {req_len}: " "given {given_len}" - ) + msg = "Unable to coerce to Series, length must be {req_len}: given {given_len}" if axis is not None and left._get_axis_name(axis) == "index": if len(left.index) != len(right): raise ValueError( @@ -1564,7 +1560,7 @@ def f(self, other): # Another DataFrame if not self._indexed_same(other): raise ValueError( - "Can only compare identically-labeled " "DataFrame objects" + "Can only compare identically-labeled DataFrame objects" ) return dispatch_to_series(self, other, func, str_rep) diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index ca4175e4a474a..ce2d2ac41d3ec 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -290,7 +290,7 @@ def __init__( self.intersect = True else: # pragma: no cover raise ValueError( - "Only can inner (intersect) or outer (union) " "join the other axis" + "Only can inner (intersect) or outer (union) join the other axis" ) if isinstance(objs, dict): diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 187a1913c3e15..413132db195f6 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ 
-39,7 +39,7 @@ def melt( id_vars = [id_vars] elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(id_vars, list): raise ValueError( - "id_vars must be a list of tuples when columns" " are a MultiIndex" + "id_vars must be a list of tuples when columns are a MultiIndex" ) else: # Check that `id_vars` are in frame @@ -61,7 +61,7 @@ def melt( value_vars, list ): raise ValueError( - "value_vars must be a list of tuples when" " columns are a MultiIndex" + "value_vars must be a list of tuples when columns are a MultiIndex" ) else: value_vars = list(value_vars) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 5f8801619faec..fc32a8f0dd044 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1556,7 +1556,7 @@ def _validate_specification(self): # set 'by' columns if self.by is not None: if self.left_by is not None or self.right_by is not None: - raise MergeError("Can only pass by OR left_by " "and right_by") + raise MergeError("Can only pass by OR left_by and right_by") self.left_by = self.right_by = self.by if self.left_by is None and self.right_by is not None: raise MergeError("missing left_by") diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 1f519d4c0867d..0519a1159cda3 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -133,9 +133,7 @@ def __init__( num_cells = np.multiply(num_rows, num_columns, dtype=np.int32) if num_rows > 0 and num_columns > 0 and num_cells <= 0: - raise ValueError( - "Unstacked DataFrame is too big, " "causing int32 overflow" - ) + raise ValueError("Unstacked DataFrame is too big, causing int32 overflow") self._make_sorted_values_labels() self._make_selectors() @@ -176,7 +174,7 @@ def _make_selectors(self): mask.put(selector, True) if mask.sum() < len(self.index): - raise ValueError("Index contains duplicate entries, " "cannot reshape") + raise ValueError("Index contains duplicate entries, cannot reshape") self.group_index = comp_index self.mask = mask diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 0446f53345671..d1bdbdf51e9f5 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -230,7 +230,7 @@ def cut( if np.isinf(mn) or np.isinf(mx): # GH 24314 raise ValueError( - "cannot specify integer `bins` when input data " "contains infinity" + "cannot specify integer `bins` when input data contains infinity" ) elif mn == mx: # adjust end points before binning mn -= 0.001 * abs(mn) if mn != 0 else 0.001 @@ -406,7 +406,7 @@ def _bins_to_cuts( else: if len(labels) != len(bins) - 1: raise ValueError( - "Bin labels must be one fewer than " "the number of bin edges" + "Bin labels must be one fewer than the number of bin edges" ) if not is_categorical_dtype(labels): labels = Categorical(labels, categories=labels, ordered=True) diff --git a/pandas/core/series.py b/pandas/core/series.py index 5ae2bfa03923b..b445ff5f944de 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2614,9 +2614,7 @@ def dot(self, other): >>> s.dot(arr) array([24, 14]) """ - from pandas.core.frame import DataFrame - - if isinstance(other, (Series, DataFrame)): + if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") @@ -2633,7 +2631,7 @@ def dot(self, other): "Dot product shape mismatch, %s vs %s" % (lvals.shape, rvals.shape) ) - if isinstance(other, DataFrame): + if 
isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns ).__finalize__(self) diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 46e8e97c7de8a..5db31fe6664ea 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -436,7 +436,7 @@ def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False, verify=T """ if not is_list_like(values): raise TypeError( - "Only list-like objects are allowed to be passed to" "safe_sort as values" + "Only list-like objects are allowed to be passed to safe_sort as values" ) if not isinstance(values, np.ndarray) and not is_extension_array_dtype(values): diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index f2d3e0012e635..f5add426297a7 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -425,7 +425,7 @@ def sp_maker(x, index=None): elif isinstance(value, SparseArray): if len(value) != len(self.index): - raise ValueError("Length of values does not match " "length of index") + raise ValueError("Length of values does not match length of index") clean = value elif hasattr(value, "__iter__"): @@ -435,9 +435,7 @@ def sp_maker(x, index=None): clean = sp_maker(clean) else: if len(value) != len(self.index): - raise ValueError( - "Length of values does not match " "length of index" - ) + raise ValueError("Length of values does not match length of index") clean = sp_maker(value) # Scalar @@ -732,7 +730,7 @@ def _reindex_with_indexers( if method is not None or limit is not None: raise NotImplementedError( - "cannot reindex with a method or limit " "with sparse" + "cannot reindex with a method or limit with sparse" ) if fill_value is None: @@ -765,9 +763,7 @@ def _join_compat( self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False ): if on is not None: - raise NotImplementedError( - "'on' keyword parameter is not yet " "implemented" - ) + raise NotImplementedError("'on' keyword parameter is not yet implemented") return self._join_index(other, how, lsuffix, rsuffix) def _join_index(self, other, how, lsuffix, rsuffix): diff --git a/pandas/core/sparse/scipy_sparse.py b/pandas/core/sparse/scipy_sparse.py index 73638f5965119..e8d8996fdd6ad 100644 --- a/pandas/core/sparse/scipy_sparse.py +++ b/pandas/core/sparse/scipy_sparse.py @@ -99,7 +99,7 @@ def _sparse_series_to_coo(ss, row_levels=(0,), column_levels=(1,), sort_labels=F raise ValueError("to_coo requires MultiIndex with nlevels > 2") if not ss.index.is_unique: raise ValueError( - "Duplicate index entries are not allowed in to_coo " "transformation." + "Duplicate index entries are not allowed in to_coo transformation." 
) # to keep things simple, only rely on integer indexing (not labels) diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 5eb9cfbd01ede..0c417133b0538 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -590,7 +590,7 @@ def dropna(self, axis=0, inplace=False, **kwargs): dense_valid = self.to_dense().dropna() if inplace: raise NotImplementedError( - "Cannot perform inplace dropna" " operations on a SparseSeries" + "Cannot perform inplace dropna operations on a SparseSeries" ) if isna(self.fill_value): return dense_valid diff --git a/pandas/core/strings.py b/pandas/core/strings.py index ad8226e56e09f..54882d039f135 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -603,7 +603,7 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): if is_compiled_re: if (case is not None) or (flags != 0): raise ValueError( - "case and flags cannot be set" " when pat is a compiled regex" + "case and flags cannot be set when pat is a compiled regex" ) else: # not a compiled regex @@ -623,10 +623,10 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): else: if is_compiled_re: raise ValueError( - "Cannot use a compiled regex as replacement " "pattern with regex=False" + "Cannot use a compiled regex as replacement pattern with regex=False" ) if callable(repl): - raise ValueError("Cannot use a callable replacement when " "regex=False") + raise ValueError("Cannot use a callable replacement when regex=False") f = lambda x: x.replace(pat, repl, n) return _na_map(f, arr) @@ -1944,7 +1944,7 @@ def _validate(data): """ if isinstance(data, ABCMultiIndex): raise AttributeError( - "Can only use .str accessor with Index, " "not MultiIndex" + "Can only use .str accessor with Index, not MultiIndex" ) # see _libs/lib.pyx for list of inferred types @@ -1957,7 +1957,7 @@ def _validate(data): inferred_dtype = lib.infer_dtype(values, skipna=True) if inferred_dtype not in allowed_types: - raise AttributeError("Can only use .str accessor with string " "values!") + raise AttributeError("Can only use .str accessor with string values!") return inferred_dtype def __getitem__(self, key): @@ -2653,7 +2653,7 @@ def rsplit(self, pat=None, n=-1, expand=False): "side": "first", "return": "3 elements containing the string itself, followed by two " "empty strings", - "also": "rpartition : Split the string at the last occurrence of " "`sep`.", + "also": "rpartition : Split the string at the last occurrence of `sep`.", } ) @deprecate_kwarg(old_arg_name="pat", new_arg_name="sep") @@ -2669,7 +2669,7 @@ def partition(self, sep=" ", expand=True): "side": "last", "return": "3 elements containing two empty strings, followed by the " "string itself", - "also": "partition : Split the string at the first occurrence of " "`sep`.", + "also": "partition : Split the string at the first occurrence of `sep`.", } ) @deprecate_kwarg(old_arg_name="pat", new_arg_name="sep") diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 20c4b9422459c..172084e97a959 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -365,7 +365,7 @@ def _convert_listlike_datetimes( return result elif getattr(arg, "ndim", 1) > 1: raise TypeError( - "arg must be a string, datetime, list, tuple, " "1-d array, or Series" + "arg must be a string, datetime, list, tuple, 1-d array, or Series" ) # warn if passing timedelta64, raise for PeriodDtype @@ -402,9 +402,7 @@ def _convert_listlike_datetimes( orig_arg = 
ensure_object(orig_arg) result = _attempt_YYYYMMDD(orig_arg, errors=errors) except (ValueError, TypeError, tslibs.OutOfBoundsDatetime): - raise ValueError( - "cannot convert the input to " "'%Y%m%d' date format" - ) + raise ValueError("cannot convert the input to '%Y%m%d' date format") # fallback if result is None: @@ -503,7 +501,7 @@ def _adjust_to_origin(arg, origin, unit): try: arg = arg - j0 except TypeError: - raise ValueError("incompatible 'arg' type for given " "'origin'='julian'") + raise ValueError("incompatible 'arg' type for given 'origin'='julian'") # preemptively check this for a nice range j_max = Timestamp.max.to_julian_date() - j0 @@ -897,7 +895,7 @@ def coerce(values): try: values = to_datetime(values, format="%Y%m%d", errors=errors, utc=tz) except (TypeError, ValueError) as e: - raise ValueError("cannot assemble the " "datetimes: {error}".format(error=e)) + raise ValueError("cannot assemble the datetimes: {error}".format(error=e)) for u in ["h", "m", "s", "ms", "us", "ns"]: value = unit_rev.get(u) @@ -1029,7 +1027,7 @@ def _convert_listlike(arg, format): elif getattr(arg, "ndim", 1) > 1: raise TypeError( - "arg must be a string, datetime, list, tuple, " "1-d array, or Series" + "arg must be a string, datetime, list, tuple, 1-d array, or Series" ) arg = ensure_object(arg) @@ -1074,7 +1072,7 @@ def _convert_listlike(arg, format): times.append(time_object) elif errors == "raise": raise ValueError( - "Cannot convert arg {arg} to " "a time".format(arg=arg) + "Cannot convert arg {arg} to a time".format(arg=arg) ) elif errors == "ignore": return arg diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 2c594a3df27ea..cc31317980ca8 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -97,11 +97,11 @@ def to_timedelta(arg, unit="ns", box=True, errors="raise"): unit = parse_timedelta_unit(unit) if errors not in ("ignore", "raise", "coerce"): - raise ValueError("errors must be one of 'ignore', " "'raise', or 'coerce'}") + raise ValueError("errors must be one of 'ignore', 'raise', or 'coerce'}") if unit in {"Y", "y", "M"}: warnings.warn( - "M and Y units are deprecated and " "will be removed in a future version.", + "M and Y units are deprecated and will be removed in a future version.", FutureWarning, stacklevel=2, ) @@ -120,7 +120,7 @@ def to_timedelta(arg, unit="ns", box=True, errors="raise"): return _convert_listlike(arg, unit=unit, box=box, errors=errors) elif getattr(arg, "ndim", 1) > 1: raise TypeError( - "arg must be a string, timedelta, list, tuple, " "1-d array, or Series" + "arg must be a string, timedelta, list, tuple, 1-d array, or Series" ) # ...so it must be a scalar value. Return scalar. 
diff --git a/pandas/core/window.py b/pandas/core/window.py index 4721d6cfc6dda..323eba36e4678 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -120,7 +120,7 @@ def validate(self): "left", "neither", ]: - raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'") + raise ValueError("closed must be 'right', 'left', 'both' or 'neither'") def _create_blocks(self): """ @@ -232,9 +232,7 @@ def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray: try: values = ensure_float64(values) except (ValueError, TypeError): - raise TypeError( - "cannot handle this type -> {0}" "".format(values.dtype) - ) + raise TypeError("cannot handle this type -> {0}".format(values.dtype)) # Always convert inf to nan values[np.isinf(values)] = np.NaN @@ -327,9 +325,7 @@ def _center_window(self, result, window) -> np.ndarray: Center the result in the window. """ if self.axis > result.ndim - 1: - raise ValueError( - "Requested axis is larger then no. of argument " "dimensions" - ) + raise ValueError("Requested axis is larger then no. of argument dimensions") offset = _offset(window, True) if offset > 0: @@ -1734,7 +1730,7 @@ def validate(self): if not self.is_datetimelike and self.closed is not None: raise ValueError( - "closed only implemented for datetimelike " "and offset based windows" + "closed only implemented for datetimelike and offset based windows" ) def _validate_monotonic(self): @@ -1743,7 +1739,7 @@ def _validate_monotonic(self): """ if not self._on.is_monotonic: formatted = self.on or "index" - raise ValueError("{0} must be " "monotonic".format(formatted)) + raise ValueError("{0} must be monotonic".format(formatted)) def _validate_freq(self): """ @@ -2738,7 +2734,7 @@ def dataframe_from_int_dict(data, frame_template): def _get_center_of_mass(comass, span, halflife, alpha): valid_count = com.count_not_none(comass, span, halflife, alpha) if valid_count > 1: - raise ValueError("comass, span, halflife, and alpha " "are mutually exclusive") + raise ValueError("comass, span, halflife, and alpha are mutually exclusive") # Convert to center of mass; domain checks ensure 0 < alpha <= 1 if comass is not None:
Mostly post-black cleanup
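Most of the changes collapse implicit concatenations of adjacent string literals left over from pre-black manual line wrapping. A runnable before/after, modeled on the `frame.py` hunk above; hand-joining is also how the "startand end" messages in the `datetimes.py` and `timedeltas.py` hunks lost their separating space, which is exactly the failure mode the single-literal form makes easy to spot:

```python
# Before: two adjacent literals are implicitly concatenated by Python --
# easy to misread, and easy to re-join without the separating space.
msg_split = "cannot compute isin with " "a duplicate axis."

# After: a single literal with identical runtime behavior.
msg_joined = "cannot compute isin with a duplicate axis."

assert msg_split == msg_joined
```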
https://api.github.com/repos/pandas-dev/pandas/pulls/27632
2019-07-29T00:24:06Z
2019-07-29T16:52:39Z
2019-07-29T16:52:39Z
2019-07-29T16:54:35Z
BUG: raise when wrong level name is passed to "unstack"
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst
index eb60272246ebb..fa9ca98f9c8d8 100644
--- a/doc/source/whatsnew/v0.25.1.rst
+++ b/doc/source/whatsnew/v0.25.1.rst
@@ -128,7 +128,7 @@ Groupby/resample/rolling
 
 Reshaping
 ^^^^^^^^^
 
--
+- A ``KeyError`` is now raised if ``.unstack()`` is called on a :class:`Series` or :class:`DataFrame` with a flat :class:`Index` passing a name which is not the correct one (:issue:`18303`)
 -
 -
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 8042f71c2754e..745f8f3c90ea8 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1524,7 +1524,11 @@ def _validate_index_level(self, level):
                 "Too many levels:" " Index has only 1 level, not %d" % (level + 1)
             )
         elif level != self.name:
-            raise KeyError("Level %s must be same as name (%s)" % (level, self.name))
+            raise KeyError(
+                "Requested level ({}) does not match index name ({})".format(
+                    level, self.name
+                )
+            )
 
     def _get_level_number(self, level):
         self._validate_index_level(level)
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 1f519d4c0867d..f5c46429d9d49 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -12,6 +12,7 @@
     ensure_platform_int,
     is_bool_dtype,
     is_extension_array_dtype,
+    is_integer,
     is_integer_dtype,
     is_list_like,
     is_object_dtype,
@@ -402,6 +403,10 @@ def unstack(obj, level, fill_value=None):
     else:
         level = level[0]
 
+    # Prioritize integer interpretation (GH #21677):
+    if not is_integer(level) and not level == "__placeholder__":
+        level = obj.index._get_level_number(level)
+
     if isinstance(obj, DataFrame):
         if isinstance(obj.index, MultiIndex):
             return _unstack_frame(obj, level, fill_value=fill_value)
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index 6a274c8369328..00b59fd4dc087 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -1083,7 +1083,7 @@ def test_reset_index_level(self):
 
         # Missing levels - for both MultiIndex and single-level Index:
         for idx_lev in ["A", "B"], ["A"]:
-            with pytest.raises(KeyError, match="Level E "):
+            with pytest.raises(KeyError, match=r"(L|l)evel \(?E\)?"):
                 df.set_index(idx_lev).reset_index(level=["A", "E"])
             with pytest.raises(IndexError, match="Too many levels"):
                 df.set_index(idx_lev).reset_index(level=[0, 1, 2])
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index e75d80bec1fdf..c40a9bce9385b 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -2004,7 +2004,7 @@ def test_isin_level_kwarg_bad_label_raises(self, label, indices):
             msg = "'Level {} not found'"
         else:
             index = index.rename("foo")
-            msg = r"'Level {} must be same as name \(foo\)'"
+            msg = r"Requested level \({}\) does not match index name \(foo\)"
         with pytest.raises(KeyError, match=msg.format(label)):
             index.isin([], level=label)
 
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index 0e9aa07a4c05a..ae1a21e9b3980 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -35,7 +35,8 @@ def test_droplevel(self, indices):
 
         for level in "wrong", ["wrong"]:
             with pytest.raises(
-                KeyError, match=re.escape("'Level wrong must be same as name (None)'")
+                KeyError,
+                match=r"'Requested level \(wrong\) does not match index name \(None\)'",
             ):
                 indices.droplevel(level)
 
@@ -200,7 +201,7 @@ def test_unique(self, indices):
         with pytest.raises(IndexError, match=msg):
             indices.unique(level=3)
 
-        msg = r"Level wrong must be same as name \({}\)".format(
+        msg = r"Requested level \(wrong\) does not match index name \({}\)".format(
             re.escape(indices.name.__repr__())
         )
         with pytest.raises(KeyError, match=msg):
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index f58462c0f3576..0a25d6ba203cb 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -319,9 +319,9 @@ def test_reset_index_drop_errors(self):
 
         # KeyError raised for series index when passed level name is missing
         s = Series(range(4))
-        with pytest.raises(KeyError, match="must be same as name"):
+        with pytest.raises(KeyError, match="does not match index name"):
             s.reset_index("wrong", drop=True)
-        with pytest.raises(KeyError, match="must be same as name"):
+        with pytest.raises(KeyError, match="does not match index name"):
             s.reset_index("wrong")
 
         # KeyError raised for series when level to be dropped is missing
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index c97c69c323b56..dc4db6e7902a8 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -524,6 +524,22 @@ def test_stack_unstack_preserve_names(self):
         restacked = unstacked.stack()
         assert restacked.index.names == self.frame.index.names
 
+    @pytest.mark.parametrize("method", ["stack", "unstack"])
+    def test_stack_unstack_wrong_level_name(self, method):
+        # GH 18303 - wrong level name should raise
+
+        # A DataFrame with flat axes:
+        df = self.frame.loc["foo"]
+
+        with pytest.raises(KeyError, match="does not match index name"):
+            getattr(df, method)("mistake")
+
+        if method == "unstack":
+            # Same on a Series:
+            s = df.iloc[:, 0]
+            with pytest.raises(KeyError, match="does not match index name"):
+                getattr(s, method)("mistake")
+
     def test_unstack_level_name(self):
         result = self.frame.unstack("second")
         expected = self.frame.unstack(level=1)
- [x] closes #18303 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Note that I took the opportunity to make the error message a bit more understandable.
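For context, a minimal sketch of the new behavior, based on the tests added above (the frame contents are illustrative):

```python
import pandas as pd

df = pd.DataFrame({"x": [1, 2]}, index=pd.Index(["a", "b"], name="letters"))

# Passing a level name that a flat index does not have now raises
# a KeyError with the clearer message introduced in this PR:
try:
    df.unstack("mistake")
except KeyError as err:
    print(err)
# 'Requested level (mistake) does not match index name (letters)'
```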
https://api.github.com/repos/pandas-dev/pandas/pulls/27631
2019-07-28T15:16:26Z
2019-07-29T16:55:40Z
2019-07-29T16:55:40Z
2019-08-03T11:44:30Z
DOC: Validate docstring directives
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 06d45e38bfcdb..333136ddfddd9 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -263,8 +263,8 @@ fi ### DOCSTRINGS ### if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then - MSG='Validate docstrings (GL03, GL04, GL05, GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA05)' ; echo $MSG - $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL04,GL05,GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA05 + MSG='Validate docstrings (GL03, GL04, GL05, GL06, GL07, GL09, GL10, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT01, RT04, RT05, SA05)' ; echo $MSG + $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL03,GL04,GL05,GL06,GL07,GL09,GL10,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT01,RT04,RT05,SA05 RET=$(($RET + $?)) ; echo $MSG "DONE" fi diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index f3364e6725a20..35aaf10458f44 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -200,7 +200,7 @@ def contains(self, pat, case=True, na=np.nan): def mode(self, axis, numeric_only): """ - Ensure sphinx directives don't affect checks for trailing periods. + Ensure reST directives don't affect checks for leading periods. Parameters ---------- @@ -447,6 +447,27 @@ def deprecation_in_wrong_order(self): def method_wo_docstrings(self): pass + def directives_without_two_colons(self, first, second): + """ + Ensure reST directives have trailing colons. + + Parameters + ---------- + first : str + Sentence ending in period, followed by single directive w/o colons. + + .. versionchanged 0.1.2 + + second : bool + Sentence ending in period, followed by multiple directives w/o + colons. + + .. versionadded 0.1.2 + .. deprecated 0.00.0 + + """ + pass + class BadSummaries: def wrong_line(self): @@ -840,6 +861,7 @@ def test_bad_class(self, capsys): "plot", "method", "private_classes", + "directives_without_two_colons", ], ) def test_bad_generic_functions(self, capsys, func): @@ -879,6 +901,14 @@ def test_bad_generic_functions(self, capsys, func): "deprecation_in_wrong_order", ("Deprecation warning should precede extended summary",), ), + ( + "BadGenericDocStrings", + "directives_without_two_colons", + ( + "reST directives ['versionchanged', 'versionadded', " + "'deprecated'] must be followed by two colons", + ), + ), ( "BadSeeAlso", "desc_no_period", diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index 37623d32db685..bf5d861281a36 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -59,6 +59,7 @@ PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"] DIRECTIVES = ["versionadded", "versionchanged", "deprecated"] +DIRECTIVE_PATTERN = re.compile(rf"^\s*\.\. ({'|'.join(DIRECTIVES)})(?!::)", re.I | re.M) ALLOWED_SECTIONS = [ "Parameters", "Attributes", @@ -93,6 +94,7 @@ "GL07": "Sections are in the wrong order. 
Correct order is: " "{correct_sections}", "GL08": "The object does not have a docstring", "GL09": "Deprecation warning should precede extended summary", + "GL10": "reST directives {directives} must be followed by two colons", "SS01": "No summary found (a short summary in a single line should be " "present at the beginning of the docstring)", "SS02": "Summary does not start with a capital letter", @@ -478,6 +480,10 @@ def parameter_mismatches(self): def correct_parameters(self): return not bool(self.parameter_mismatches) + @property + def directives_without_two_colons(self): + return DIRECTIVE_PATTERN.findall(self.raw_doc) + def parameter_type(self, param): return self.doc_parameters[param][0] @@ -697,6 +703,10 @@ def get_validation_data(doc): if doc.deprecated and not doc.extended_summary.startswith(".. deprecated:: "): errs.append(error("GL09")) + directives_without_two_colons = doc.directives_without_two_colons + if directives_without_two_colons: + errs.append(error("GL10", directives=directives_without_two_colons)) + if not doc.summary: errs.append(error("SS01")) else:
- [X] closes #27629 - [x] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
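As a quick illustration, the new GL10 check boils down to the regex added above; a minimal standalone sketch (the sample docstring text is made up):

```python
import re

DIRECTIVES = ["versionadded", "versionchanged", "deprecated"]
# Same pattern as in the diff: a reST directive *not* followed by "::"
DIRECTIVE_PATTERN = re.compile(
    rf"^\s*\.\. ({'|'.join(DIRECTIVES)})(?!::)", re.I | re.M
)

doc = """
.. versionadded 0.1.2
.. deprecated:: 0.25.0
"""
# Only the colon-less directive is reported:
print(DIRECTIVE_PATTERN.findall(doc))  # ['versionadded']
```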
https://api.github.com/repos/pandas-dev/pandas/pulls/27630
2019-07-28T10:58:57Z
2019-08-06T20:55:16Z
2019-08-06T20:55:16Z
2019-08-07T04:34:18Z
Make interpolate_2d handle datetime64 correctly
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 4ca867b1088e7..98397e81d7bea 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1223,7 +1223,6 @@ def _interpolate_with_fill( fill_value=fill_value, dtype=self.dtype, ) - values = self._try_coerce_result(values) blocks = [self.make_block_same_class(values, ndim=self.ndim)] return self._maybe_downcast(blocks, downcast) @@ -2293,13 +2292,6 @@ def _try_coerce_args(self, other): return other - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - if isinstance(result, np.ndarray) and result.dtype.kind == "i": - # needed for _interpolate_with_ffill - result = result.view("M8[ns]") - return result - def to_native_types( self, slicer=None, na_rep=None, date_format=None, quoting=None, **kwargs ): diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 8f0abc91f7aef..19e4c3166da71 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -463,6 +463,7 @@ def interpolate_2d( Perform an actual interpolation of values, values will be make 2-d if needed fills inplace, returns the result. """ + orig_values = values transf = (lambda x: x) if axis == 0 else (lambda x: x.T) @@ -470,7 +471,7 @@ def interpolate_2d( ndim = values.ndim if values.ndim == 1: if axis != 0: # pragma: no cover - raise AssertionError("cannot interpolate on a ndim == 1 with " "axis != 0") + raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0") values = values.reshape(tuple((1,) + values.shape)) if fill_value is None: @@ -490,6 +491,10 @@ def interpolate_2d( if ndim == 1: values = values[0] + if orig_values.dtype.kind == "M": + # convert float back to datetime64 + values = values.astype(orig_values.dtype) + return values diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 929bd1725b30a..fb3d428bcf4bf 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -885,7 +885,7 @@ def test_resample_dtype_preservation(): assert result.val.dtype == np.int32 -def test_resample_dtype_coerceion(): +def test_resample_dtype_coercion(): pytest.importorskip("scipy.interpolate") diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 8f4c89ee72ae1..f1b84acf68755 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -1533,6 +1533,17 @@ def test_interp_datetime64(self, method, tz_naive_fixture): ) assert_series_equal(result, expected) + def test_interp_pad_datetime64tz_values(self): + # GH#27628 missing.interpolate_2d should handle datetimetz values + dti = pd.date_range("2015-04-05", periods=3, tz="US/Central") + ser = pd.Series(dti) + ser[1] = pd.NaT + result = ser.interpolate(method="pad") + + expected = pd.Series(dti) + expected[1] = expected[0] + tm.assert_series_equal(result, expected) + def test_interp_limit_no_nans(self): # GH 7173 s = pd.Series([1.0, 2.0, 3.0])
Broken off from #27626 because I decided that one is poorly scoped.
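The user-visible effect is easiest to see via the test added above:

```python
import pandas as pd

dti = pd.date_range("2015-04-05", periods=3, tz="US/Central")
ser = pd.Series(dti)
ser[1] = pd.NaT

# Pad-interpolation now converts the float intermediate back to
# datetime64 instead of leaving it behind:
result = ser.interpolate(method="pad")
print(result[1] == result[0])  # True -- the NaT was forward-filled
```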
https://api.github.com/repos/pandas-dev/pandas/pulls/27628
2019-07-28T01:11:51Z
2019-07-31T20:22:12Z
2019-07-31T20:22:12Z
2019-07-31T20:35:11Z
CLN: de-kludge Block.quantile
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 4ca867b1088e7..62b08fd1712c8 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1526,18 +1526,7 @@ def quantile(self, qs, interpolation="linear", axis=0): # We should always have ndim == 2 becase Series dispatches to DataFrame assert self.ndim == 2 - if self.is_datetimetz: - # TODO: cleanup this special case. - # We need to operate on i8 values for datetimetz - # but `Block.get_values()` returns an ndarray of objects - # right now. We need an API for "values to do numeric-like ops on" - values = self.values.view("M8[ns]") - - # TODO: NonConsolidatableMixin shape - # Usual shape inconsistencies for ExtensionBlocks - values = values[None, :] - else: - values = self.get_values() + values = self.get_values() is_empty = values.shape[axis] == 0 orig_scalar = not is_list_like(qs) @@ -1576,7 +1565,6 @@ def quantile(self, qs, interpolation="linear", axis=0): result = lib.item_from_zerodim(result) ndim = getattr(result, "ndim", None) or 0 - result = self._try_coerce_result(result) return make_block(result, placement=np.arange(len(result)), ndim=ndim) def _replace_coerce( @@ -2477,21 +2465,9 @@ def _try_coerce_result(self, result): result = self._holder._from_sequence( result.astype(np.int64), freq=None, dtype=self.values.dtype ) - elif result.dtype == "M8[ns]": - # otherwise we get here via quantile and already have M8[ns] - result = self._holder._simple_new( - result, freq=None, dtype=self.values.dtype - ) - elif isinstance(result, np.datetime64): - # also for post-quantile - result = self._box_func(result) return result - @property - def _box_func(self): - return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz) - def diff(self, n, axis=0): """1st discrete difference @@ -2564,6 +2540,19 @@ def equals(self, other): return False return (self.values.view("i8") == other.values.view("i8")).all() + def quantile(self, qs, interpolation="linear", axis=0): + naive = self.values.view("M8[ns]") + + # kludge for 2D block with 1D values + naive = naive.reshape(self.shape) + + blk = self.make_block(naive) + res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis) + + # ravel is kludge for 2D block with 1D values, assumes column-like + aware = self._holder(res_blk.values.ravel(), dtype=self.dtype) + return self.make_block_same_class(aware, ndim=res_blk.ndim) + class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): __slots__ = ()
Broken off from #27626 since I decided that one is poorly scoped.
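For reference, the behavior this refactor must preserve is quantile on tz-aware data; a minimal sketch (values and printed output are illustrative):

```python
import pandas as pd

dti = pd.date_range("2016-01-01", periods=4, tz="US/Pacific")
df = pd.DataFrame({"ts": dti})

# The tz-aware block now views its values as naive M8[ns], delegates
# to the base Block.quantile, and re-localizes the result:
print(df.quantile(0.5, numeric_only=False))
# ts   2016-01-02 12:00:00-08:00
# Name: 0.5, dtype: datetime64[ns, US/Pacific]
```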
https://api.github.com/repos/pandas-dev/pandas/pulls/27627
2019-07-28T01:07:25Z
2019-07-31T12:17:59Z
2019-07-31T12:17:59Z
2019-07-31T13:27:13Z
CLN: de-kludge quantile, make interpolate_with_fill understand datetime64
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 4ca867b1088e7..ed4267793a2a1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1223,7 +1223,6 @@ def _interpolate_with_fill( fill_value=fill_value, dtype=self.dtype, ) - values = self._try_coerce_result(values) blocks = [self.make_block_same_class(values, ndim=self.ndim)] return self._maybe_downcast(blocks, downcast) @@ -1526,18 +1525,7 @@ def quantile(self, qs, interpolation="linear", axis=0): # We should always have ndim == 2 becase Series dispatches to DataFrame assert self.ndim == 2 - if self.is_datetimetz: - # TODO: cleanup this special case. - # We need to operate on i8 values for datetimetz - # but `Block.get_values()` returns an ndarray of objects - # right now. We need an API for "values to do numeric-like ops on" - values = self.values.view("M8[ns]") - - # TODO: NonConsolidatableMixin shape - # Usual shape inconsistencies for ExtensionBlocks - values = values[None, :] - else: - values = self.get_values() + values = self.get_values() is_empty = values.shape[axis] == 0 orig_scalar = not is_list_like(qs) @@ -1576,7 +1564,6 @@ def quantile(self, qs, interpolation="linear", axis=0): result = lib.item_from_zerodim(result) ndim = getattr(result, "ndim", None) or 0 - result = self._try_coerce_result(result) return make_block(result, placement=np.arange(len(result)), ndim=ndim) def _replace_coerce( @@ -1710,7 +1697,6 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False) mask = _safe_reshape(mask, new_values.shape) new_values[mask] = new - new_values = self._try_coerce_result(new_values) return [self.make_block(values=new_values)] def _try_cast_result(self, result, dtype=None): @@ -2293,13 +2279,6 @@ def _try_coerce_args(self, other): return other - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - if isinstance(result, np.ndarray) and result.dtype.kind == "i": - # needed for _interpolate_with_ffill - result = result.view("M8[ns]") - return result - def to_native_types( self, slicer=None, na_rep=None, date_format=None, quoting=None, **kwargs ): @@ -2477,15 +2456,7 @@ def _try_coerce_result(self, result): result = self._holder._from_sequence( result.astype(np.int64), freq=None, dtype=self.values.dtype ) - elif result.dtype == "M8[ns]": - # otherwise we get here via quantile and already have M8[ns] - result = self._holder._simple_new( - result, freq=None, dtype=self.values.dtype - ) - elif isinstance(result, np.datetime64): - # also for post-quantile - result = self._box_func(result) return result @property @@ -2564,6 +2535,19 @@ def equals(self, other): return False return (self.values.view("i8") == other.values.view("i8")).all() + def quantile(self, qs, interpolation="linear", axis=0): + naive = self.values.view("M8[ns]") + + # kludge for 2D block with 1D values + naive = naive.reshape(self.shape) + + blk = self.make_block(naive) + res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis) + + # ravel is kludge for 2D block with 1D values, assumes column-like + aware = self._holder(res_blk.values.ravel(), dtype=self.dtype) + return self.make_block_same_class(aware, ndim=res_blk.ndim) + class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): __slots__ = () @@ -2639,10 +2623,6 @@ def _try_coerce_args(self, other): return other - def _try_coerce_result(self, result): - """ reverse of try_coerce_args / try_operate """ - return result - def should_store(self, value): return issubclass( value.dtype.type, 
np.timedelta64 diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 344d41ed26943..f8511e6445ea5 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -908,7 +908,7 @@ def fast_xs(self, loc): # Such assignment may incorrectly coerce NaT to None # result[blk.mgr_locs] = blk._slice((slice(None), loc)) for i, rl in enumerate(blk.mgr_locs): - result[rl] = blk._try_coerce_result(blk.iget((i, loc))) + result[rl] = blk.iget((i, loc)) if is_extension_array_dtype(dtype): result = dtype.construct_array_type()._from_sequence(result, dtype=dtype) diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 8f0abc91f7aef..6318bfcb83dd5 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -463,6 +463,19 @@ def interpolate_2d( Perform an actual interpolation of values, values will be make 2-d if needed fills inplace, returns the result. """ + if is_datetime64tz_dtype(values): + naive = values.view("M8[ns]") + result = interpolate_2d( + naive, + method=method, + axis=axis, + limit=limit, + fill_value=fill_value, + dtype=dtype, + ) + return type(values)._from_sequence(result, dtype=values.dtype) + + orig_values = values transf = (lambda x: x) if axis == 0 else (lambda x: x.T) @@ -470,7 +483,7 @@ def interpolate_2d( ndim = values.ndim if values.ndim == 1: if axis != 0: # pragma: no cover - raise AssertionError("cannot interpolate on a ndim == 1 with " "axis != 0") + raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0") values = values.reshape(tuple((1,) + values.shape)) if fill_value is None: @@ -490,6 +503,10 @@ def interpolate_2d( if ndim == 1: values = values[0] + if orig_values.dtype.kind == "M": + # convert float back to datetime64 + values = values.astype(orig_values.dtype) + return values
Two related-but-separate things here:
- quantile: de-kludging the base class method and putting the datetimetz-specific stuff in a subclass method. This will allow us to implement the refactor suggested in #14562. If/when DTA supports 2D, we'll be able to get rid of the subclass override kludge altogether.
- interpolate_2d: have the core.missing function understand datetime64 dtypes so the blocks don't have to implement special handling. There are only a couple other places where this is needed before we can get rid of the datetimetz-specific try_coerce_result altogether.
https://api.github.com/repos/pandas-dev/pandas/pulls/27626
2019-07-27T19:49:34Z
2019-07-28T01:24:56Z
null
2019-07-28T01:25:01Z
remove undesired values kwarg
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 4ca867b1088e7..990a24ef8d904 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -552,10 +552,10 @@ def f(m, v, i): return self.split_and_operate(None, f, False) - def astype(self, dtype, copy=False, errors="raise", values=None, **kwargs): - return self._astype(dtype, copy=copy, errors=errors, values=values, **kwargs) + def astype(self, dtype, copy=False, errors="raise", **kwargs): + return self._astype(dtype, copy=copy, errors=errors, **kwargs) - def _astype(self, dtype, copy=False, errors="raise", values=None, **kwargs): + def _astype(self, dtype, copy=False, errors="raise", **kwargs): """Coerce to the new type Parameters @@ -616,42 +616,39 @@ def _astype(self, dtype, copy=False, errors="raise", values=None, **kwargs): return self.copy() return self - if values is None: - try: - # force the copy here - if self.is_extension: - values = self.values.astype(dtype) - else: - if issubclass(dtype.type, str): - - # use native type formatting for datetime/tz/timedelta - if self.is_datelike: - values = self.to_native_types() + try: + # force the copy here + if self.is_extension: + values = self.values.astype(dtype) + else: + if issubclass(dtype.type, str): - # astype formatting - else: - values = self.get_values() + # use native type formatting for datetime/tz/timedelta + if self.is_datelike: + values = self.to_native_types() + # astype formatting else: - values = self.get_values(dtype=dtype) + values = self.get_values() - # _astype_nansafe works fine with 1-d only - vals1d = values.ravel() - values = astype_nansafe(vals1d, dtype, copy=True, **kwargs) + else: + values = self.get_values(dtype=dtype) - # TODO(extension) - # should we make this attribute? - if isinstance(values, np.ndarray): - values = values.reshape(self.shape) + # _astype_nansafe works fine with 1-d only + vals1d = values.ravel() + values = astype_nansafe(vals1d, dtype, copy=True, **kwargs) - except Exception: - # e.g. astype_nansafe can fail on object-dtype of strings - # trying to convert to float - if errors == "raise": - raise - newb = self.copy() if copy else self - else: - newb = make_block(values, placement=self.mgr_locs, ndim=self.ndim) + # TODO(extension) + # should we make this attribute? + if isinstance(values, np.ndarray): + values = values.reshape(self.shape) + + except Exception: + # e.g. astype_nansafe can fail on object-dtype of strings + # trying to convert to float + if errors == "raise": + raise + newb = self.copy() if copy else self else: newb = make_block(values, placement=self.mgr_locs, ndim=self.ndim)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/27625
2019-07-27T19:38:45Z
2019-07-31T12:20:16Z
2019-07-31T12:20:16Z
2019-07-31T13:26:32Z
BUG: cells are missing in the Excel file when exporting with xlsxwriter option constant_memory set to True (#15392)
diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index 6234bc0f7bd35..628486eafad2f 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -106,7 +106,7 @@ MultiIndex I/O ^^^ -- +- Bug in :func:`DataFrame.to_excel()` where cells are missing in the excel file when exporting excel using ``xlsxwriter`` with option ``constant_memory`` set to ``True`` ((:issue:`15392`) - - diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 012d2d9358241..4dffce5774462 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -408,6 +408,7 @@ def __init__( self.header = header self.merge_cells = merge_cells self.inf_rep = inf_rep + self._constant_memory = False @property def header_style(self): @@ -466,14 +467,13 @@ def _format_header_mi(self): coloffset = len(self.df.index[0]) - 1 if self.merge_cells: - # Format multi-index as a merged cells. - for lnum in range(len(level_lengths)): - name = columns.names[lnum] - yield ExcelCell(lnum, coloffset, name, self.header_style) - for lnum, (spans, levels, level_codes) in enumerate( zip(level_lengths, columns.levels, columns.codes) ): + # Format multi-index as a merged cells. + name = columns.names[lnum] + yield ExcelCell(lnum, coloffset, name, self.header_style) + values = levels.take(level_codes) for i in spans: if spans[i] > 1: @@ -578,23 +578,23 @@ def _format_regular_rows(self): if isinstance(self.df.index, ABCPeriodIndex): index_values = self.df.index.to_timestamp() + coloffset = 1 + body = self._generate_body(coloffset) + _, ncol = self.df.shape for idx, idxval in enumerate(index_values): yield ExcelCell(self.rowcounter + idx, 0, idxval, self.header_style) - - coloffset = 1 + for _ in range(ncol): + yield next(body) else: coloffset = 0 - - for cell in self._generate_body(coloffset): - yield cell + for cell in self._generate_body(coloffset): + yield cell def _format_hierarchical_rows(self): has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index)) if has_aliases or self.header: self.rowcounter += 1 - gcolidx = 0 - if self.index: index_labels = self.df.index.names # check for aliases @@ -616,6 +616,20 @@ def _format_hierarchical_rows(self): for cidx, name in enumerate(index_labels): yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style) + gen_non_merge_idx = ( + ExcelCell(self.rowcounter + ridx, cidx, item, self.header_style) + for ridx, row in enumerate(self.df.index) + for cidx, item in enumerate(row) + ) + gen_body = self._generate_body(self.df.index.nlevels) + nrow, ncol = self.df.shape + for _ in range(nrow): + if self._constant_memory or not self.merge_cells: + for _ in range(self.df.index.nlevels): + yield next(gen_non_merge_idx) + for _ in range(ncol): + yield next(gen_body) + if self.merge_cells: # Format hierarchical rows as merged cells. level_strs = self.df.index.format( @@ -623,6 +637,7 @@ def _format_hierarchical_rows(self): ) level_lengths = get_level_lengths(level_strs) + colidx = 0 for spans, levels, level_codes in zip( level_lengths, self.df.index.levels, self.df.index.codes ): @@ -635,35 +650,24 @@ def _format_hierarchical_rows(self): if spans[i] > 1: yield ExcelCell( self.rowcounter + i, - gcolidx, + colidx, values[i], self.header_style, self.rowcounter + i + spans[i] - 1, - gcolidx, + colidx, ) else: yield ExcelCell( self.rowcounter + i, - gcolidx, + colidx, values[i], self.header_style, ) - gcolidx += 1 - - else: - # Format hierarchical rows with non-merged values. 
- for indexcolvals in zip(*self.df.index): - for idx, indexcolval in enumerate(indexcolvals): - yield ExcelCell( - self.rowcounter + idx, - gcolidx, - indexcolval, - self.header_style, - ) - gcolidx += 1 + colidx += 1 - for cell in self._generate_body(gcolidx): - yield cell + else: + for cell in self._generate_body(0): + yield cell def _generate_body(self, coloffset): if self.styler is None: @@ -674,13 +678,16 @@ def _generate_body(self, coloffset): styles = None xlstyle = None - # Write the body of the frame data series by series. - for colidx in range(len(self.columns)): - series = self.df.iloc[:, colidx] - for i, val in enumerate(series): + # Write the body of the frame data row by row. + nrow, ncol = self.df.shape + for rowidx in range(nrow): + row = self.df.iloc[rowidx, :] + for colidx, val in enumerate(row): if styles is not None: - xlstyle = self.style_converter(";".join(styles[i, colidx])) - yield ExcelCell(self.rowcounter + i, colidx + coloffset, val, xlstyle) + xlstyle = self.style_converter(";".join(styles[rowidx, colidx])) + yield ExcelCell( + self.rowcounter + rowidx, colidx + coloffset, val, xlstyle + ) def get_formatted_cells(self): for cell in itertools.chain(self._format_header(), self._format_body()): @@ -730,6 +737,11 @@ def write( writer = ExcelWriter(_stringify_path(writer), engine=engine) need_save = True + from pandas.io.excel._xlsxwriter import _XlsxWriter + + if isinstance(writer, _XlsxWriter) and writer.book.constant_memory: + self._constant_memory = True + formatted_cells = self.get_formatted_cells() writer.write_cells( formatted_cells, diff --git a/pandas/tests/io/excel/test_xlsxwriter.py b/pandas/tests/io/excel/test_xlsxwriter.py index 4dae3db2e7abd..bad1a6a6c76a0 100644 --- a/pandas/tests/io/excel/test_xlsxwriter.py +++ b/pandas/tests/io/excel/test_xlsxwriter.py @@ -2,8 +2,8 @@ import pytest -from pandas import DataFrame -from pandas.util.testing import ensure_clean +from pandas import DataFrame, read_excel, MultiIndex +from pandas.util.testing import ensure_clean, assert_frame_equal from pandas.io.excel import ExcelWriter @@ -62,3 +62,60 @@ def test_write_append_mode_raises(ext): with ensure_clean(ext) as f: with pytest.raises(ValueError, match=msg): ExcelWriter(f, engine="xlsxwriter", mode="a") + + +def test_constant_memory_regularindex(ext): + # Test if cells are written row by row which is the requirement + # when exporting excel using xlsxwriter with constant_memory + # set True, for regular index. + # Test for issue #15392. + # Applicable to xlsxwriter only. + with ensure_clean(ext) as path: + df = DataFrame({"A": [123456, 123456], "B": [123456, 123456]}) + + with ExcelWriter( + path, engine="xlsxwriter", options=dict(constant_memory=True) + ) as writer: + df.to_excel(writer) + + read_df = read_excel(path, header=0, index_col=0) + + assert_frame_equal(df, read_df) + + +def test_constant_memory_multiindex(ext): + # Test if cells are written row by row which is the requirement + # when exporting excel using xlsxwriter with constant_memory set + # True, for MultiIndex. + # Test for issue #15392. + # Applicable to xlsxwriter only. 
+ with ensure_clean(ext) as path: + df = DataFrame({"A": [123456, 123456], "B": [123456, 123456]}) + df.index = MultiIndex.from_arrays([["a", "a"], [1, 2]]) + + with ExcelWriter( + path, engine="xlsxwriter", options=dict(constant_memory=True) + ) as writer: + df.to_excel(writer) + + read_df = read_excel(path, header=0, index_col=[0, 1]) + + assert_frame_equal(df, read_df) + + +def test_constant_memory_multiheader(ext): + # Test if cells of a header of MultiIndex are written row by row + # Test for issue #15392. + # Applicable to xlsxwriter only. + with ensure_clean(ext) as path: + df = DataFrame({"A": [123456, 123456], "B": [123456, 123456]}) + df.columns = MultiIndex.from_arrays([["a", "a"], [1, 2]]) + + with ExcelWriter( + path, engine="xlsxwriter", options=dict(constant_memory=True) + ) as writer: + df.to_excel(writer) + + read_df = read_excel(path, header=[0, 1], index_col=0) + + assert_frame_equal(df, read_df)
- [x] closes #15392 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
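To verify the fix end-to-end, the option is passed through `ExcelWriter` exactly as in the tests above (a minimal sketch; the output path is illustrative):

```python
import pandas as pd

df = pd.DataFrame({"A": [123456, 123456], "B": [123456, 123456]})

# constant_memory makes xlsxwriter flush each row as soon as the next
# one starts, so cells must be written strictly row by row:
with pd.ExcelWriter(
    "out.xlsx", engine="xlsxwriter", options=dict(constant_memory=True)
) as writer:
    df.to_excel(writer)

round_tripped = pd.read_excel("out.xlsx", header=0, index_col=0)
pd.testing.assert_frame_equal(df, round_tripped)
```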
https://api.github.com/repos/pandas-dev/pandas/pulls/27624
2019-07-27T12:15:59Z
2019-10-11T21:56:18Z
null
2019-10-11T21:56:18Z
Fix AttributeError in scripts/validate_docstrings for doc.returns[0].name
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index 37623d32db685..94b30a92dbd68 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -757,7 +757,7 @@ def get_validation_data(doc): if doc.method_returns_something: errs.append(error("RT01")) else: - if len(doc.returns) == 1 and doc.returns[0].name: + if len(doc.returns) == 1 and doc.returns[0][1]: errs.append(error("RT02")) for name_or_type, type_, desc in doc.returns: if not desc:
- [X] closes #27622 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
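A minimal sketch of why positional indexing is the safer access here (assumption: depending on the numpydoc version, return-section entries behave like plain `(name_or_type, type, desc)` tuples without a `.name` attribute, matching the `for name_or_type, type_, desc in doc.returns` loop just below the changed line):

```python
# Hypothetical entry shaped like a numpydoc return-section item:
entry = ("", "DataFrame", ["The reshaped object."])

# entry.name  -> AttributeError on a plain tuple
# entry[1]    -> works regardless; this is what the fix reads
name_or_type, type_, desc = entry
print(entry[1])  # 'DataFrame'
```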
https://api.github.com/repos/pandas-dev/pandas/pulls/27623
2019-07-27T11:06:45Z
2019-08-02T20:50:12Z
null
2023-05-11T01:19:06Z
Fix docstrings
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 1c35298fcc6b8..39529177b9e35 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -90,7 +90,7 @@ class PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin): """ A pandas ExtensionArray for NumPy data. - .. versionadded :: 0.24.0 + .. versionadded:: 0.24.0 This is mostly for internal compatibility, and is not especially useful on its own. diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py index f6aa8bbd77614..048f6c6f5c680 100644 --- a/pandas/core/arrays/sparse.py +++ b/pandas/core/arrays/sparse.py @@ -2102,7 +2102,7 @@ class SparseFrameAccessor(BaseAccessor, PandasDelegate): """ DataFrame accessor for sparse data. - .. versionadded :: 0.25.0 + .. versionadded:: 0.25.0 """ def _validate(self, data): diff --git a/pandas/core/base.py b/pandas/core/base.py index 89a3d9cfea5ab..ce993a513f569 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -727,7 +727,7 @@ def item(self): """ Return the first element of the underlying data as a python scalar. - .. deprecated 0.25.0 + .. deprecated:: 0.25.0 Returns ------- @@ -1559,7 +1559,7 @@ def factorize(self, sort=False, na_sentinel=-1): A scalar or array of insertion points with the same shape as `value`. - .. versionchanged :: 0.24.0 + .. versionchanged:: 0.24.0 If `value` is a scalar, an int is now always returned. Previously, scalar inputs returned an 1-item array for :class:`Series` and :class:`Categorical`. diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cdbe0e9d22eb4..d060ac0d193ad 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -310,11 +310,11 @@ class DataFrame(NDFrame): data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, or list-like objects - .. versionchanged :: 0.23.0 + .. versionchanged:: 0.23.0 If data is a dict, column order follows insertion-order for Python 3.6 and later. - .. versionchanged :: 0.25.0 + .. versionchanged:: 0.25.0 If data is a list of dicts, column order follows insertion-order Python 3.6 and later. @@ -3560,7 +3560,7 @@ def assign(self, **kwargs): or modified columns. All items are computed first, and then assigned in alphabetical order. - .. versionchanged :: 0.23.0 + .. versionchanged:: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. @@ -5628,7 +5628,7 @@ def update( If 'raise', will raise a ValueError if the DataFrame and `other` both contain non-NA data in the same place. - .. versionchanged :: 0.24.0 + .. versionchanged:: 0.24.0 Changed from `raise_conflict=False|True` to `errors='ignore'|'raise'`. @@ -5774,7 +5774,7 @@ def update( specified, all remaining columns will be used and the result will have hierarchically indexed columns. - .. versionchanged :: 0.23.0 + .. versionchanged:: 0.23.0 Also accept list of column names. Returns @@ -5903,7 +5903,7 @@ def pivot(self, index=None, columns=None, values=None): If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. - .. versionchanged :: 0.25.0 + .. versionchanged:: 0.25.0 Returns ------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 97a0b04146297..df97d34ee349a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -11490,7 +11490,7 @@ def _doc_parms(cls): The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. - .. 
versionadded :: 0.22.0 + .. versionadded:: 0.22.0 Added with the default being 0. This means the sum of an all-NA or empty Series is 0, and the product of an all-NA or empty diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 47cf0f26f9ca5..04a858c8bfbdf 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -931,7 +931,7 @@ def item(self): return the first element of the underlying data as a python scalar - .. deprecated 0.25.0 + .. deprecated:: 0.25.0 """ warnings.warn( diff --git a/pandas/core/series.py b/pandas/core/series.py index c7fcab56e1fe5..c83a626ce9b8d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -156,7 +156,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): data : array-like, Iterable, dict, or scalar value Contains data stored in Series. - .. versionchanged :: 0.23.0 + .. versionchanged:: 0.23.0 If data is a dict, argument order is maintained for Python 3.6 and later. @@ -370,7 +370,7 @@ def from_array( """ Construct Series from array. - .. deprecated :: 0.23.0 + .. deprecated:: 0.23.0 Use pd.Series(..) constructor instead. Returns @@ -597,7 +597,7 @@ def asobject(self): """ Return object Series which contains boxed values. - .. deprecated :: 0.23.0 + .. deprecated:: 0.23.0 Use ``astype(object)`` instead. @@ -952,7 +952,7 @@ def real(self): """ Return the real value of vector. - .. deprecated 0.25.0 + .. deprecated:: 0.25.0 """ warnings.warn( "`real` has be deprecated and will be removed in a future version", @@ -970,7 +970,7 @@ def imag(self): """ Return imag value of vector. - .. deprecated 0.25.0 + .. deprecated:: 0.25.0 """ warnings.warn( "`imag` has be deprecated and will be removed in a future version", diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 3e44a7f941a86..f2d3e0012e635 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -49,7 +49,7 @@ class SparseDataFrame(DataFrame): Parameters ---------- data : same types as can be passed to DataFrame or scipy.sparse.spmatrix - .. versionchanged :: 0.23.0 + .. versionchanged:: 0.23.0 If data is a dict, argument order is maintained for Python 3.6 and later. diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index fc51c06b149fd..9064aa3ba1260 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -55,7 +55,7 @@ class SparseSeries(Series): Parameters ---------- data : {array-like, Series, SparseSeries, dict} - .. versionchanged :: 0.23.0 + .. versionchanged:: 0.23.0 If data is a dict, argument order is maintained for Python 3.6 and later. diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 296b1eef68d7d..35a62b627823a 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -71,7 +71,7 @@ def read_feather(path, columns=None, use_threads=True): """ Load a feather-format object from the file path. - .. versionadded 0.20.0 + .. versionadded:: 0.20.0 Parameters ---------- @@ -90,16 +90,16 @@ def read_feather(path, columns=None, use_threads=True): columns : sequence, default None If not provided, all columns are read. - .. versionadded 0.24.0 + .. versionadded:: 0.24.0 nthreads : int, default 1 Number of CPU threads to use when reading to pandas.DataFrame. - .. versionadded 0.21.0 - .. deprecated 0.24.0 + .. versionadded:: 0.21.0 + .. deprecated:: 0.24.0 use_threads : bool, default True Whether to parallelize reading using multiple threads. - .. versionadded 0.24.0 + .. 
versionadded:: 0.24.0 Returns ------- diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 617f4f44ae8af..82c460300582b 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -231,7 +231,7 @@ def to_parquet( ``False``, they will not be written to the file. If ``None``, the engine's default behavior will be used. - .. versionadded 0.24.0 + .. versionadded:: 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset @@ -257,7 +257,7 @@ def read_parquet(path, engine="auto", columns=None, **kwargs): """ Load a parquet object from the file path, returning a DataFrame. - .. versionadded 0.21.0 + .. versionadded:: 0.21.0 Parameters ---------- @@ -281,7 +281,7 @@ def read_parquet(path, engine="auto", columns=None, **kwargs): columns : list, default=None If not None, only these columns will be read from the file. - .. versionadded 0.21.1 + .. versionadded:: 0.21.1 **kwargs Any additional kwargs are passed to the engine. diff --git a/pandas/io/spss.py b/pandas/io/spss.py index 983ac1c818c42..4f13349a819c3 100644 --- a/pandas/io/spss.py +++ b/pandas/io/spss.py @@ -15,7 +15,7 @@ def read_spss( """ Load an SPSS file from the file path, returning a DataFrame. - .. versionadded 0.25.0 + .. versionadded:: 0.25.0 Parameters ----------
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This fixes some docstring formatting issues: e.g. some deprecation directives do not appear in the rendered docs because the two colons are missing (`.. deprecated 0.25.0` instead of `.. deprecated:: 0.25.0`).
https://api.github.com/repos/pandas-dev/pandas/pulls/27621
2019-07-27T10:16:26Z
2019-07-28T09:54:48Z
2019-07-28T09:54:48Z
2019-07-28T09:54:48Z
DEPR: remove ix
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index 9647693d4ed6b..ae6c07107f4a0 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -321,10 +321,9 @@ class Dropna: def setup(self, how, axis): self.df = DataFrame(np.random.randn(10000, 1000)) - with warnings.catch_warnings(record=True): - self.df.ix[50:1000, 20:50] = np.nan - self.df.ix[2000:3000] = np.nan - self.df.ix[:, 60:70] = np.nan + self.df.iloc[50:1000, 20:50] = np.nan + self.df.iloc[2000:3000] = np.nan + self.df.iloc[:, 60:70] = np.nan self.df_mixed = self.df.copy() self.df_mixed["foo"] = "bar" @@ -342,10 +341,9 @@ class Count: def setup(self, axis): self.df = DataFrame(np.random.randn(10000, 1000)) - with warnings.catch_warnings(record=True): - self.df.ix[50:1000, 20:50] = np.nan - self.df.ix[2000:3000] = np.nan - self.df.ix[:, 60:70] = np.nan + self.df.iloc[50:1000, 20:50] = np.nan + self.df.iloc[2000:3000] = np.nan + self.df.iloc[:, 60:70] = np.nan self.df_mixed = self.df.copy() self.df_mixed["foo"] = "bar" diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index ac35139c1954a..c78c2fa92827e 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -67,22 +67,6 @@ def time_iloc_scalar(self, index, index_structure): def time_iloc_slice(self, index, index_structure): self.data.iloc[:800000] - def time_ix_array(self, index, index_structure): - with warnings.catch_warnings(record=True): - self.data.ix[self.array] - - def time_ix_list_like(self, index, index_structure): - with warnings.catch_warnings(record=True): - self.data.ix[[800000]] - - def time_ix_scalar(self, index, index_structure): - with warnings.catch_warnings(record=True): - self.data.ix[800000] - - def time_ix_slice(self, index, index_structure): - with warnings.catch_warnings(record=True): - self.data.ix[:800000] - def time_loc_array(self, index, index_structure): self.data.loc[self.array] @@ -148,10 +132,6 @@ def setup(self): self.bool_indexer = self.df[self.col_scalar] > 0 self.bool_obj_indexer = self.bool_indexer.astype(object) - def time_ix(self): - with warnings.catch_warnings(record=True): - self.df.ix[self.idx_scalar, self.col_scalar] - def time_loc(self): self.df.loc[self.idx_scalar, self.col_scalar] @@ -228,14 +208,6 @@ def setup(self): self.idx = IndexSlice[20000:30000, 20:30, 35:45, 30000:40000] self.mdt = self.mdt.set_index(["A", "B", "C", "D"]).sort_index() - def time_series_ix(self): - with warnings.catch_warnings(record=True): - self.s.ix[999] - - def time_frame_ix(self): - with warnings.catch_warnings(record=True): - self.df.ix[999] - def time_index_slice(self): self.mdt.loc[self.idx, :] @@ -310,10 +282,6 @@ def setup_cache(self): def time_lookup_iloc(self, s): s.iloc - def time_lookup_ix(self, s): - with warnings.catch_warnings(record=True): - s.ix - def time_lookup_loc(self, s): s.loc diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index 12ca318c815d3..9d5649c37e92f 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -49,7 +49,6 @@ public functions related to data types in pandas. api/pandas.DataFrame.blocks api/pandas.DataFrame.as_matrix - api/pandas.DataFrame.ix api/pandas.Index.asi8 api/pandas.Index.data api/pandas.Index.flags @@ -60,7 +59,6 @@ public functions related to data types in pandas. 
api/pandas.Series.asobject api/pandas.Series.blocks api/pandas.Series.from_array - api/pandas.Series.ix api/pandas.Series.imag api/pandas.Series.real diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 19fb4bdcd9536..d87053eeda691 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -533,6 +533,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more. - :meth:`DataFrame.hist` and :meth:`Series.hist` no longer allows ``figsize="default"``, specify figure size by passinig a tuple instead (:issue:`30003`) - Floordiv of integer-dtyped array by :class:`Timedelta` now raises ``TypeError`` (:issue:`21036`) - :func:`pandas.api.types.infer_dtype` argument ``skipna`` defaults to ``True`` instead of ``False`` (:issue:`24050`) +- Removed the previously deprecated :attr:`Series.ix` and :attr:`DataFrame.ix` (:issue:`26438`) - Removed the previously deprecated :meth:`Index.summary` (:issue:`18217`) - Removed the previously deprecated "fastpath" keyword from the :class:`Index` constructor (:issue:`23110`) - Removed the previously deprecated :meth:`Series.get_value`, :meth:`Series.set_value`, :meth:`DataFrame.get_value`, :meth:`DataFrame.set_value` (:issue:`17739`) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index a9269a5e0efa1..341262313ddff 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1,6 +1,4 @@ -import textwrap from typing import Tuple -import warnings import numpy as np @@ -10,10 +8,8 @@ from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( - ensure_platform_int, is_float, is_integer, - is_integer_dtype, is_iterator, is_list_like, is_numeric_dtype, @@ -34,7 +30,6 @@ def get_indexers_list(): return [ - ("ix", _IXIndexer), ("iloc", _iLocIndexer), ("loc", _LocIndexer), ("at", _AtIndexer), @@ -112,9 +107,7 @@ def __call__(self, axis=None): new_self.axis = axis return new_self - def __iter__(self): - raise NotImplementedError("ix is not iterable") - + # TODO: remove once geopandas no longer needs this def __getitem__(self, key): # Used in ix and downstream in geopandas _CoordinateIndexer if type(key) is tuple: @@ -921,9 +914,6 @@ def _getitem_lowerdim(self, tup: Tuple): if len(tup) > self.ndim: raise IndexingError("Too many indexers. handle elsewhere") - # to avoid wasted computation - # df.ix[d1:d2, 0] -> columns first (True) - # df.ix[0, ['C', 'B', A']] -> rows first (False) for i, key in enumerate(tup): if is_label_like(key) or isinstance(key, tuple): section = self._getitem_axis(key, axis=i) @@ -1004,6 +994,7 @@ def _getitem_nested_tuple(self, tup: Tuple): return obj + # TODO: remove once geopandas no longer needs __getitem__ def _getitem_axis(self, key, axis: int): if is_iterator(key): key = list(key) @@ -1292,106 +1283,6 @@ def _get_slice_axis(self, slice_obj: slice, axis: int): return self._slice(indexer, axis=axis, kind="iloc") -class _IXIndexer(_NDFrameIndexer): - """ - A primarily label-location based indexer, with integer position fallback. - - Warning: Starting in 0.20.0, the .ix indexer is deprecated, in - favor of the more strict .iloc and .loc indexers. - - ``.ix[]`` supports mixed integer and label based access. It is - primarily label based, but will fall back to integer positional - access unless the corresponding axis is of integer type. - - ``.ix`` is the most general indexer and will support any of the - inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating - point label schemes. 
``.ix`` is exceptionally useful when dealing - with mixed positional and label based hierarchical indexes. - - However, when an axis is integer based, ONLY label based access - and not positional access is supported. Thus, in such cases, it's - usually better to be explicit and use ``.iloc`` or ``.loc``. - - See more at :ref:`Advanced Indexing <advanced>`. - """ - - _ix_deprecation_warning = textwrap.dedent( - """ - .ix is deprecated. Please use - .loc for label based indexing or - .iloc for positional indexing - - See the documentation here: - http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#ix-indexer-is-deprecated""" # noqa: E501 - ) - - def __init__(self, name, obj): - warnings.warn(self._ix_deprecation_warning, FutureWarning, stacklevel=2) - super().__init__(name, obj) - - @Appender(_NDFrameIndexer._validate_key.__doc__) - def _validate_key(self, key, axis: int) -> bool: - """ - Returns - ------- - bool - """ - if isinstance(key, slice): - return True - - elif com.is_bool_indexer(key): - return True - - elif is_list_like_indexer(key): - return True - - else: - - self._convert_scalar_indexer(key, axis) - - return True - - def _convert_for_reindex(self, key, axis: int): - """ - Transform a list of keys into a new array ready to be used as axis of - the object we return (e.g. including NaNs). - - Parameters - ---------- - key : list-like - Targeted labels. - axis: int - Where the indexing is being made. - - Returns - ------- - list-like of labels. - """ - labels = self.obj._get_axis(axis) - - if com.is_bool_indexer(key): - key = check_bool_indexer(labels, key) - return labels[key] - - if isinstance(key, Index): - keyarr = labels._convert_index_indexer(key) - else: - # asarray can be unsafe, NumPy strings are weird - keyarr = com.asarray_tuplesafe(key) - - if is_integer_dtype(keyarr): - # Cast the indexer to uint64 if possible so - # that the values returned from indexing are - # also uint64. 
- keyarr = labels._convert_arr_indexer(keyarr) - - if not labels.is_integer(): - keyarr = ensure_platform_int(keyarr) - return labels.take(keyarr) - - return keyarr - - class _LocationIndexer(_NDFrameIndexer): def __getitem__(self, key): if type(key) is tuple: diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 716be92ebca3f..cd384d6fdbfad 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1,6 +1,5 @@ from datetime import date, datetime, time, timedelta import re -from warnings import catch_warnings, simplefilter import numpy as np import pytest @@ -396,10 +395,8 @@ def test_getitem_ix_mixed_integer(self): expected = df.loc[df.index[:-1]] tm.assert_frame_equal(result, expected) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = df.ix[[1, 10]] - expected = df.ix[Index([1, 10], dtype=object)] + result = df.loc[[1, 10]] + expected = df.loc[Index([1, 10])] tm.assert_frame_equal(result, expected) # 11320 @@ -419,53 +416,6 @@ def test_getitem_ix_mixed_integer(self): expected = df.iloc[:, [1]] tm.assert_frame_equal(result, expected) - def test_getitem_setitem_ix_negative_integers(self, float_frame): - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = float_frame.ix[:, -1] - tm.assert_series_equal(result, float_frame["D"]) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = float_frame.ix[:, [-1]] - tm.assert_frame_equal(result, float_frame[["D"]]) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = float_frame.ix[:, [-1, -2]] - tm.assert_frame_equal(result, float_frame[["D", "C"]]) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - float_frame.ix[:, [-1]] = 0 - assert (float_frame["D"] == 0).all() - - df = DataFrame(np.random.randn(8, 4)) - # ix does label-based indexing when having an integer index - msg = "\"None of [Int64Index([-1], dtype='int64')] are in the [index]\"" - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - with pytest.raises(KeyError, match=re.escape(msg)): - df.ix[[-1]] - - msg = "\"None of [Int64Index([-1], dtype='int64')] are in the [columns]\"" - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - with pytest.raises(KeyError, match=re.escape(msg)): - df.ix[:, [-1]] - - # #1942 - a = DataFrame(np.random.randn(20, 2), index=[chr(x + 65) for x in range(20)]) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - a.ix[-1] = a.ix[-2] - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tm.assert_series_equal(a.ix[-1], a.ix[-2], check_names=False) - assert a.ix[-1].name == "T" - assert a.ix[-2].name == "S" - def test_getattr(self, float_frame): tm.assert_series_equal(float_frame.A, float_frame["A"]) msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'" @@ -848,55 +798,6 @@ def test_delitem_corner(self, float_frame): del f["B"] assert len(f.columns) == 2 - def test_getitem_fancy_2d(self, float_frame): - f = float_frame - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tm.assert_frame_equal(f.ix[:, ["B", "A"]], f.reindex(columns=["B", "A"])) - - subidx = float_frame.index[[5, 4, 1]] - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tm.assert_frame_equal( - f.ix[subidx, ["B", "A"]], f.reindex(index=subidx, columns=["B", "A"]) - ) 
- - # slicing rows, etc. - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tm.assert_frame_equal(f.ix[5:10], f[5:10]) - tm.assert_frame_equal(f.ix[5:10, :], f[5:10]) - tm.assert_frame_equal( - f.ix[:5, ["A", "B"]], f.reindex(index=f.index[:5], columns=["A", "B"]) - ) - - # slice rows with labels, inclusive! - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - expected = f.ix[5:11] - result = f.ix[f.index[5] : f.index[10]] - tm.assert_frame_equal(expected, result) - - # slice columns - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tm.assert_frame_equal(f.ix[:, :2], f.reindex(columns=["A", "B"])) - - # get view - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - exp = f.copy() - f.ix[5:10].values[:] = 5 - exp.values[5:10] = 5 - tm.assert_frame_equal(f, exp) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - msg = "Cannot index with multidimensional key" - with pytest.raises(ValueError, match=msg): - f.ix[f > 0.5] - def test_slice_floats(self): index = [52195.504153, 52196.303147, 52198.369883] df = DataFrame(np.random.rand(3, 2), index=index) @@ -945,119 +846,6 @@ def test_getitem_setitem_integer_slice_keyerrors(self): with pytest.raises(KeyError, match=r"^3$"): df2.loc[3:11] = 0 - def test_setitem_fancy_2d(self, float_frame): - - # case 1 - frame = float_frame.copy() - expected = frame.copy() - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame.ix[:, ["B", "A"]] = 1 - expected["B"] = 1.0 - expected["A"] = 1.0 - tm.assert_frame_equal(frame, expected) - - # case 2 - frame = float_frame.copy() - frame2 = float_frame.copy() - - expected = frame.copy() - - subidx = float_frame.index[[5, 4, 1]] - values = np.random.randn(3, 2) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame.ix[subidx, ["B", "A"]] = values - frame2.ix[[5, 4, 1], ["B", "A"]] = values - - expected["B"].ix[subidx] = values[:, 0] - expected["A"].ix[subidx] = values[:, 1] - - tm.assert_frame_equal(frame, expected) - tm.assert_frame_equal(frame2, expected) - - # case 3: slicing rows, etc. - frame = float_frame.copy() - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - expected1 = float_frame.copy() - frame.ix[5:10] = 1.0 - expected1.values[5:10] = 1.0 - tm.assert_frame_equal(frame, expected1) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - expected2 = float_frame.copy() - arr = np.random.randn(5, len(frame.columns)) - frame.ix[5:10] = arr - expected2.values[5:10] = arr - tm.assert_frame_equal(frame, expected2) - - # case 4 - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame = float_frame.copy() - frame.ix[5:10, :] = 1.0 - tm.assert_frame_equal(frame, expected1) - frame.ix[5:10, :] = arr - tm.assert_frame_equal(frame, expected2) - - # case 5 - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame = float_frame.copy() - frame2 = float_frame.copy() - - expected = float_frame.copy() - values = np.random.randn(5, 2) - - frame.ix[:5, ["A", "B"]] = values - expected["A"][:5] = values[:, 0] - expected["B"][:5] = values[:, 1] - tm.assert_frame_equal(frame, expected) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame2.ix[:5, [0, 1]] = values - tm.assert_frame_equal(frame2, expected) - - # case 6: slice rows with labels, inclusive! 
- with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame = float_frame.copy() - expected = float_frame.copy() - - frame.ix[frame.index[5] : frame.index[10]] = 5.0 - expected.values[5:11] = 5 - tm.assert_frame_equal(frame, expected) - - # case 7: slice columns - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame = float_frame.copy() - frame2 = float_frame.copy() - expected = float_frame.copy() - - # slice indices - frame.ix[:, 1:3] = 4.0 - expected.values[:, 1:3] = 4.0 - tm.assert_frame_equal(frame, expected) - - # slice with labels - frame.ix[:, "B":"C"] = 4.0 - tm.assert_frame_equal(frame, expected) - - # new corner case of boolean slicing / setting - frame = DataFrame(zip([2, 3, 9, 6, 7], [np.nan] * 5), columns=["a", "b"]) - lst = [100] - lst.extend([np.nan] * 4) - expected = DataFrame(zip([100, 3, 9, 6, 7], lst), columns=["a", "b"]) - frame[frame["a"] == 2] = 100 - tm.assert_frame_equal(frame, expected) - def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame): sliced = float_string_frame.iloc[:, -3:] assert sliced["D"].dtype == np.float64 @@ -1071,194 +859,6 @@ def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame): assert (float_frame["C"] == 4).all() - def test_fancy_setitem_int_labels(self): - # integer index defers to label-based indexing - - df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2)) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tmp = df.copy() - exp = df.copy() - tmp.ix[[0, 2, 4]] = 5 - exp.values[:3] = 5 - tm.assert_frame_equal(tmp, exp) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tmp = df.copy() - exp = df.copy() - tmp.ix[6] = 5 - exp.values[3] = 5 - tm.assert_frame_equal(tmp, exp) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tmp = df.copy() - exp = df.copy() - tmp.ix[:, 2] = 5 - - # tmp correctly sets the dtype - # so match the exp way - exp[2] = 5 - tm.assert_frame_equal(tmp, exp) - - def test_fancy_getitem_int_labels(self): - df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2)) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = df.ix[[4, 2, 0], [2, 0]] - expected = df.reindex(index=[4, 2, 0], columns=[2, 0]) - tm.assert_frame_equal(result, expected) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = df.ix[[4, 2, 0]] - expected = df.reindex(index=[4, 2, 0]) - tm.assert_frame_equal(result, expected) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = df.ix[4] - expected = df.xs(4) - tm.assert_series_equal(result, expected) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = df.ix[:, 3] - expected = df[3] - tm.assert_series_equal(result, expected) - - def test_fancy_index_int_labels_exceptions(self, float_frame): - df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2)) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - - # labels that aren't contained - with pytest.raises(KeyError, match=r"\[1\] not in index"): - df.ix[[0, 1, 2], [2, 3, 4]] = 5 - - # try to set indices not contained in frame - msg = ( - r"None of \[Index\(\['foo', 'bar', 'baz'\]," - r" dtype='object'\)\] are in the \[index\]" - ) - with pytest.raises(KeyError, match=msg): - float_frame.ix[["foo", "bar", "baz"]] = 1 - msg = ( - r"None of \[Index\(\['E'\], dtype='object'\)\] are in the" - r" 
\[columns\]" - ) - with pytest.raises(KeyError, match=msg): - float_frame.ix[:, ["E"]] = 1 - - # FIXME: don't leave commented-out - # partial setting now allows this GH2578 - # pytest.raises(KeyError, float_frame.ix.__setitem__, - # (slice(None, None), 'E'), 1) - - def test_setitem_fancy_mixed_2d(self, float_string_frame): - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - float_string_frame.ix[:5, ["C", "B", "A"]] = 5 - result = float_string_frame.ix[:5, ["C", "B", "A"]] - assert (result.values == 5).all() - - float_string_frame.ix[5] = np.nan - assert isna(float_string_frame.ix[5]).all() - - float_string_frame.ix[5] = float_string_frame.ix[6] - tm.assert_series_equal( - float_string_frame.ix[5], float_string_frame.ix[6], check_names=False - ) - - # #1432 - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - df = DataFrame({1: [1.0, 2.0, 3.0], 2: [3, 4, 5]}) - assert df._is_mixed_type - - df.ix[1] = [5, 10] - - expected = DataFrame({1: [1.0, 5.0, 3.0], 2: [3, 10, 5]}) - - tm.assert_frame_equal(df, expected) - - def test_ix_align(self): - b = Series(np.random.randn(10), name=0).sort_values() - df_orig = DataFrame(np.random.randn(10, 4)) - df = df_orig.copy() - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - df.ix[:, 0] = b - tm.assert_series_equal(df.ix[:, 0].reindex(b.index), b) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - dft = df_orig.T - dft.ix[0, :] = b - tm.assert_series_equal(dft.ix[0, :].reindex(b.index), b) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - df = df_orig.copy() - df.ix[:5, 0] = b - s = df.ix[:5, 0] - tm.assert_series_equal(s, b.reindex(s.index)) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - dft = df_orig.T - dft.ix[0, :5] = b - s = dft.ix[0, :5] - tm.assert_series_equal(s, b.reindex(s.index)) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - df = df_orig.copy() - idx = [0, 1, 3, 5] - df.ix[idx, 0] = b - s = df.ix[idx, 0] - tm.assert_series_equal(s, b.reindex(s.index)) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - dft = df_orig.T - dft.ix[0, idx] = b - s = dft.ix[0, idx] - tm.assert_series_equal(s, b.reindex(s.index)) - - def test_ix_frame_align(self): - b = DataFrame(np.random.randn(3, 4)) - df_orig = DataFrame(np.random.randn(10, 4)) - df = df_orig.copy() - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - df.ix[:3] = b - out = b.ix[:3] - tm.assert_frame_equal(out, b) - - b.sort_index(inplace=True) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - df = df_orig.copy() - df.ix[[0, 1, 2]] = b - out = df.ix[[0, 1, 2]].reindex(b.index) - tm.assert_frame_equal(out, b) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - df = df_orig.copy() - df.ix[:3] = b - out = df.ix[:3] - tm.assert_frame_equal(out, b.reindex(out.index)) - def test_getitem_setitem_non_ix_labels(self): df = tm.makeTimeDataFrame() @@ -1285,6 +885,7 @@ def test_ix_multi_take(self): xp = df.reindex([0]) tm.assert_frame_equal(rs, xp) + # FIXME: dont leave commented-out """ #1321 df = DataFrame(np.random.randn(3, 2)) rs = df.loc[df.index==0, df.columns==1] @@ -1292,168 +893,6 @@ def test_ix_multi_take(self): tm.assert_frame_equal(rs, xp) """ - def test_ix_multi_take_nonint_index(self): - df = DataFrame(np.random.randn(3, 2), index=["x", "y", "z"], columns=["a", "b"]) - with 
catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - rs = df.ix[[0], [0]] - xp = df.reindex(["x"], columns=["a"]) - tm.assert_frame_equal(rs, xp) - - def test_ix_multi_take_multiindex(self): - df = DataFrame( - np.random.randn(3, 2), - index=["x", "y", "z"], - columns=[["a", "b"], ["1", "2"]], - ) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - rs = df.ix[[0], [0]] - xp = df.reindex(["x"], columns=[("a", "1")]) - tm.assert_frame_equal(rs, xp) - - def test_ix_dup(self): - idx = Index(["a", "a", "b", "c", "d", "d"]) - df = DataFrame(np.random.randn(len(idx), 3), idx) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - sub = df.ix[:"d"] - tm.assert_frame_equal(sub, df) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - sub = df.ix["a":"c"] - tm.assert_frame_equal(sub, df.ix[0:4]) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - sub = df.ix["b":"d"] - tm.assert_frame_equal(sub, df.ix[2:]) - - def test_getitem_fancy_1d(self, float_frame, float_string_frame): - f = float_frame - - # return self if no slicing...for now - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - assert f.ix[:, :] is f - - # low dimensional slice - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - xs1 = f.ix[2, ["C", "B", "A"]] - xs2 = f.xs(f.index[2]).reindex(["C", "B", "A"]) - tm.assert_series_equal(xs1, xs2) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - ts1 = f.ix[5:10, 2] - ts2 = f[f.columns[2]][5:10] - tm.assert_series_equal(ts1, ts2) - - # positional xs - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - xs1 = f.ix[0] - xs2 = f.xs(f.index[0]) - tm.assert_series_equal(xs1, xs2) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - xs1 = f.ix[f.index[5]] - xs2 = f.xs(f.index[5]) - tm.assert_series_equal(xs1, xs2) - - # single column - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tm.assert_series_equal(f.ix[:, "A"], f["A"]) - - # return view - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - exp = f.copy() - exp.values[5] = 4 - f.ix[5][:] = 4 - tm.assert_frame_equal(exp, f) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - exp.values[:, 1] = 6 - f.ix[:, 1][:] = 6 - tm.assert_frame_equal(exp, f) - - # slice of mixed-frame - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - xs = float_string_frame.ix[5] - exp = float_string_frame.xs(float_string_frame.index[5]) - tm.assert_series_equal(xs, exp) - - def test_setitem_fancy_1d(self, float_frame): - - # case 1: set cross-section for indices - frame = float_frame.copy() - expected = float_frame.copy() - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame.ix[2, ["C", "B", "A"]] = [1.0, 2.0, 3.0] - expected["C"][2] = 1.0 - expected["B"][2] = 2.0 - expected["A"][2] = 3.0 - tm.assert_frame_equal(frame, expected) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame2 = float_frame.copy() - frame2.ix[2, [3, 2, 1]] = [1.0, 2.0, 3.0] - tm.assert_frame_equal(frame, expected) - - # case 2, set a section of a column - frame = float_frame.copy() - expected = float_frame.copy() - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - vals = np.random.randn(5) - expected.values[5:10, 2] = vals - frame.ix[5:10, 2] = vals - 
tm.assert_frame_equal(frame, expected) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame2 = float_frame.copy() - frame2.ix[5:10, "B"] = vals - tm.assert_frame_equal(frame, expected) - - # case 3: full xs - frame = float_frame.copy() - expected = float_frame.copy() - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame.ix[4] = 5.0 - expected.values[4] = 5.0 - tm.assert_frame_equal(frame, expected) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame.ix[frame.index[4]] = 6.0 - expected.values[4] = 6.0 - tm.assert_frame_equal(frame, expected) - - # single column - frame = float_frame.copy() - expected = float_frame.copy() - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - frame.ix[:, "A"] = 7.0 - expected["A"] = 7.0 - tm.assert_frame_equal(frame, expected) - def test_getitem_fancy_scalar(self, float_frame): f = float_frame ix = f.loc @@ -1975,15 +1414,11 @@ def test_get_set_value_no_partial_indexing(self): with pytest.raises(KeyError, match=r"^0$"): df._get_value(0, 1) + # TODO: rename? remove? def test_single_element_ix_dont_upcast(self, float_frame): float_frame["E"] = 1 assert issubclass(float_frame["E"].dtype.type, (int, np.integer)) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = float_frame.ix[float_frame.index[5], "E"] - assert is_integer(result) - result = float_frame.loc[float_frame.index[5], "E"] assert is_integer(result) @@ -1991,18 +1426,10 @@ def test_single_element_ix_dont_upcast(self, float_frame): df = pd.DataFrame(dict(a=[1.23])) df["b"] = 666 - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = df.ix[0, "b"] - assert is_integer(result) result = df.loc[0, "b"] assert is_integer(result) expected = Series([666], [0], name="b") - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = df.ix[[0], "b"] - tm.assert_series_equal(result, expected) result = df.loc[[0], "b"] tm.assert_series_equal(result, expected) @@ -2070,45 +1497,12 @@ def test_iloc_duplicates(self): df = DataFrame(np.random.rand(3, 3), columns=list("ABC"), index=list("aab")) result = df.iloc[0] - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result2 = df.ix[0] assert isinstance(result, Series) tm.assert_almost_equal(result.values, df.values[0]) - tm.assert_series_equal(result, result2) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = df.T.iloc[:, 0] - result2 = df.T.ix[:, 0] + result = df.T.iloc[:, 0] assert isinstance(result, Series) tm.assert_almost_equal(result.values, df.values[0]) - tm.assert_series_equal(result, result2) - - # multiindex - df = DataFrame( - np.random.randn(3, 3), - columns=[["i", "i", "j"], ["A", "A", "B"]], - index=[["i", "i", "j"], ["X", "X", "Y"]], - ) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - rs = df.iloc[0] - xp = df.ix[0] - tm.assert_series_equal(rs, xp) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - rs = df.iloc[:, 0] - xp = df.T.ix[0] - tm.assert_series_equal(rs, xp) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - rs = df.iloc[:, [0]] - xp = df.ix[:, [0]] - tm.assert_frame_equal(rs, xp) # #2259 df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2]) @@ -2353,9 +1747,6 @@ def test_getitem_ix_float_duplicates(self): ) expect = df.iloc[1:] tm.assert_frame_equal(df.loc[0.2], expect) - with 
catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tm.assert_frame_equal(df.ix[0.2], expect) expect = df.iloc[1:, 0] tm.assert_series_equal(df.loc[0.2, "a"], expect) @@ -2363,9 +1754,6 @@ def test_getitem_ix_float_duplicates(self): df.index = [1, 0.2, 0.2] expect = df.iloc[1:] tm.assert_frame_equal(df.loc[0.2], expect) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tm.assert_frame_equal(df.ix[0.2], expect) expect = df.iloc[1:, 0] tm.assert_series_equal(df.loc[0.2, "a"], expect) @@ -2375,9 +1763,6 @@ def test_getitem_ix_float_duplicates(self): ) expect = df.iloc[1:-1] tm.assert_frame_equal(df.loc[0.2], expect) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tm.assert_frame_equal(df.ix[0.2], expect) expect = df.iloc[1:-1, 0] tm.assert_series_equal(df.loc[0.2, "a"], expect) @@ -2385,9 +1770,6 @@ def test_getitem_ix_float_duplicates(self): df.index = [0.1, 0.2, 2, 0.2] expect = df.iloc[[1, -1]] tm.assert_frame_equal(df.loc[0.2], expect) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - tm.assert_frame_equal(df.ix[0.2], expect) expect = df.iloc[[1, -1], 0] tm.assert_series_equal(df.loc[0.2, "a"], expect) @@ -2616,11 +1998,6 @@ def test_index_namedtuple(self): index = Index([idx1, idx2], name="composite_index", tupleize_cols=False) df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"]) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = df.ix[IndexType("foo", "bar")]["A"] - assert result == 1 - result = df.loc[IndexType("foo", "bar")]["A"] assert result == 1 diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index e5b2c83f29030..08e8dbad4e102 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -1,6 +1,6 @@ """ common utilities """ import itertools -from warnings import catch_warnings, filterwarnings +from warnings import catch_warnings import numpy as np @@ -136,21 +136,18 @@ def get_result(self, obj, method, key, axis): return xp - def get_value(self, f, i, values=False): + def get_value(self, name, f, i, values=False): """ return the value for the location i """ # check against values if values: return f.values[i] - # this is equiv of f[col][row]..... - # v = f - # for a in reversed(i): - # v = v.__getitem__(a) - # return v - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - return f.ix[i] + elif name == "iat": + return f.iloc[i] + else: + assert name == "at" + return f.loc[i] def check_values(self, f, func, values=False): @@ -183,16 +180,11 @@ def _eq(axis, obj, key1, key2): try: rs = getattr(obj, method1).__getitem__(_axify(obj, key1, axis)) - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - try: - xp = self.get_result( - obj=obj, method=method2, key=key2, axis=axis - ) - except (KeyError, IndexError): - # TODO: why is this allowed? - result = "no comp" - return + try: + xp = self.get_result(obj=obj, method=method2, key=key2, axis=axis) + except (KeyError, IndexError): + # TODO: why is this allowed? 
+ return if is_scalar(rs) and is_scalar(xp): assert rs == xp diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py index f279b5517c3f6..ee0f160b33cf1 100644 --- a/pandas/tests/indexing/multiindex/test_slice.py +++ b/pandas/tests/indexing/multiindex/test_slice.py @@ -1,5 +1,3 @@ -from warnings import catch_warnings - import numpy as np import pytest @@ -12,7 +10,6 @@ import pandas.util.testing as tm -@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning") class TestMultiIndexSlicers: def test_per_axis_per_level_getitem(self): @@ -675,8 +672,6 @@ def test_multiindex_label_slicing_with_negative_step(self): def assert_slices_equivalent(l_slc, i_slc): tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc]) tm.assert_series_equal(s[l_slc], s.iloc[i_slc]) - with catch_warnings(record=True): - tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc]) assert_slices_equivalent(SLC[::-1], SLC[::-1]) diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index 6e26d407ab0ec..760bb655534b2 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -361,13 +361,12 @@ def check(result, expected): result4 = df["A"].iloc[2] check(result4, expected) - @pytest.mark.filterwarnings("ignore::FutureWarning") def test_cache_updating(self): # GH 4939, make sure to update the cache on setitem df = tm.makeDataFrame() df["A"] # cache series - df.ix["Hello Friend"] = df.ix[0] + df.loc["Hello Friend"] = df.iloc[0] assert "Hello Friend" in df["A"].index assert "Hello Friend" in df["B"].index diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 0a3b513ff0167..d004441690c8c 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -132,9 +132,8 @@ def test_scalar_non_numeric(self): elif s.index.inferred_type in ["datetime64", "timedelta64", "period"]: # these should prob work - # and are inconsisten between series/dataframe ATM - # for idxr in [lambda x: x.ix, - # lambda x: x]: + # and are inconsistent between series/dataframe ATM + # for idxr in [lambda x: x]: # s2 = s.copy() # # with pytest.raises(TypeError): diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index f9bded5b266f1..2f27757d6a754 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -1,6 +1,6 @@ """ test positional based indexing with iloc """ -from warnings import catch_warnings, filterwarnings, simplefilter +from warnings import catch_warnings, simplefilter import numpy as np import pytest @@ -135,26 +135,22 @@ def test_iloc_non_integer_raises(self, index, columns, index_vals, column_vals): df.iloc[index_vals, column_vals] def test_iloc_getitem_int(self): - # integer - self.check_result("iloc", 2, "ix", {0: 4, 1: 6, 2: 8}, typs=["ints", "uints"]) self.check_result( "iloc", 2, - "indexer", + "iloc", 2, typs=["labels", "mixed", "ts", "floats", "empty"], fails=IndexError, ) def test_iloc_getitem_neg_int(self): - # neg integer - self.check_result("iloc", -1, "ix", {0: 6, 1: 9, 2: 12}, typs=["ints", "uints"]) self.check_result( "iloc", -1, - "indexer", + "iloc", -1, typs=["labels", "mixed", "ts", "floats", "empty"], fails=IndexError, @@ -187,51 +183,17 @@ def test_iloc_array_not_mutating_negative_indices(self): tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy) def test_iloc_getitem_list_int(self): - - # list of ints 
self.check_result( "iloc", [0, 1, 2], - "ix", - {0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]}, - typs=["ints", "uints"], - ) - self.check_result( - "iloc", [2], "ix", {0: [4], 1: [6], 2: [8]}, typs=["ints", "uints"], - ) - self.check_result( "iloc", [0, 1, 2], - "indexer", - [0, 1, 2], typs=["labels", "mixed", "ts", "floats", "empty"], fails=IndexError, ) # array of ints (GH5006), make sure that a single indexer is returning # the correct type - self.check_result( - "iloc", - np.array([0, 1, 2]), - "ix", - {0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]}, - typs=["ints", "uints"], - ) - self.check_result( - "iloc", - np.array([2]), - "ix", - {0: [4], 1: [6], 2: [8]}, - typs=["ints", "uints"], - ) - self.check_result( - "iloc", - np.array([0, 1, 2]), - "indexer", - [0, 1, 2], - typs=["labels", "mixed", "ts", "floats", "empty"], - fails=IndexError, - ) def test_iloc_getitem_neg_int_can_reach_first_index(self): # GH10547 and GH10779 @@ -261,15 +223,6 @@ def test_iloc_getitem_neg_int_can_reach_first_index(self): tm.assert_series_equal(result, expected) def test_iloc_getitem_dups(self): - - self.check_result( - "iloc", - [0, 1, 1, 3], - "ix", - {0: [0, 2, 2, 6], 1: [0, 3, 3, 9]}, - typs=["ints", "uints"], - ) - # GH 6766 df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}]) df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}]) @@ -284,30 +237,12 @@ def test_iloc_getitem_dups(self): tm.assert_series_equal(result, expected) def test_iloc_getitem_array(self): - - # array like - s = Series(index=range(1, 4), dtype=object) - self.check_result( - "iloc", - s.index, - "ix", - {0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]}, - typs=["ints", "uints"], - ) + # TODO: test something here? + pass def test_iloc_getitem_bool(self): - - # boolean indexers - b = [True, False, True, False] - self.check_result("iloc", b, "ix", b, typs=["ints", "uints"]) - self.check_result( - "iloc", - b, - "ix", - b, - typs=["labels", "mixed", "ts", "floats", "empty"], - fails=IndexError, - ) + # TODO: test something here? + pass @pytest.mark.parametrize("index", [[True, False], [True, False, True, False]]) def test_iloc_getitem_bool_diff_len(self, index): @@ -320,23 +255,8 @@ def test_iloc_getitem_bool_diff_len(self, index): _ = s.iloc[index] def test_iloc_getitem_slice(self): - - # slices - self.check_result( - "iloc", - slice(1, 3), - "ix", - {0: [2, 4], 1: [3, 6], 2: [4, 8]}, - typs=["ints", "uints"], - ) - self.check_result( - "iloc", - slice(1, 3), - "indexer", - slice(1, 3), - typs=["labels", "mixed", "ts", "floats", "empty"], - fails=IndexError, - ) + # TODO: test something here? 
+ pass def test_iloc_getitem_slice_dups(self): @@ -441,69 +361,53 @@ def test_iloc_setitem_dups(self): df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True) tm.assert_frame_equal(df, expected) + # TODO: GH#27620 this test used to compare iloc against ix; check if this + # is redundant with another test comparing iloc against loc def test_iloc_getitem_frame(self): df = DataFrame( np.random.randn(10, 4), index=range(0, 20, 2), columns=range(0, 8, 2) ) result = df.iloc[2] - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - exp = df.ix[4] + exp = df.loc[4] tm.assert_series_equal(result, exp) result = df.iloc[2, 2] - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - exp = df.ix[4, 4] + exp = df.loc[4, 4] assert result == exp # slice result = df.iloc[4:8] - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - expected = df.ix[8:14] + expected = df.loc[8:14] tm.assert_frame_equal(result, expected) result = df.iloc[:, 2:3] - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - expected = df.ix[:, 4:5] + expected = df.loc[:, 4:5] tm.assert_frame_equal(result, expected) # list of integers result = df.iloc[[0, 1, 3]] - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - expected = df.ix[[0, 2, 6]] + expected = df.loc[[0, 2, 6]] tm.assert_frame_equal(result, expected) result = df.iloc[[0, 1, 3], [0, 1]] - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - expected = df.ix[[0, 2, 6], [0, 2]] + expected = df.loc[[0, 2, 6], [0, 2]] tm.assert_frame_equal(result, expected) # neg indices result = df.iloc[[-1, 1, 3], [-1, 1]] - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - expected = df.ix[[18, 2, 6], [6, 2]] + expected = df.loc[[18, 2, 6], [6, 2]] tm.assert_frame_equal(result, expected) # dups indices result = df.iloc[[-1, -1, 1, 3], [-1, 1]] - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - expected = df.ix[[18, 18, 2, 6], [6, 2]] + expected = df.loc[[18, 18, 2, 6], [6, 2]] tm.assert_frame_equal(result, expected) # with index-like s = Series(index=range(1, 5), dtype=object) result = df.iloc[s.index] - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - expected = df.ix[[2, 4, 6, 8]] + expected = df.loc[[2, 4, 6, 8]] tm.assert_frame_equal(result, expected) def test_iloc_getitem_labelled_frame(self): diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py deleted file mode 100644 index a46cd65162f4e..0000000000000 --- a/pandas/tests/indexing/test_ix.py +++ /dev/null @@ -1,354 +0,0 @@ -""" test indexing with ix """ - -from warnings import catch_warnings - -import numpy as np -import pytest - -from pandas.core.dtypes.common import is_scalar - -import pandas as pd -from pandas import DataFrame, Series, option_context -import pandas.util.testing as tm - - -def test_ix_deprecation(): - # GH 15114 - - df = DataFrame({"A": [1, 2, 3]}) - with tm.assert_produces_warning(FutureWarning, check_stacklevel=True): - df.ix[1, "A"] - - -@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning") -class TestIX: - def test_ix_loc_setitem_consistency(self): - - # GH 5771 - # loc with slice and series - s = Series(0, index=[4, 5, 6]) - s.loc[4:5] += 1 - expected = Series([1, 1, 0], index=[4, 5, 6]) - tm.assert_series_equal(s, expected) - - # GH 5928 - # 
chained indexing assignment - df = DataFrame({"a": [0, 1, 2]}) - expected = df.copy() - with catch_warnings(record=True): - expected.ix[[0, 1, 2], "a"] = -expected.ix[[0, 1, 2], "a"] - - with catch_warnings(record=True): - df["a"].ix[[0, 1, 2]] = -df["a"].ix[[0, 1, 2]] - tm.assert_frame_equal(df, expected) - - df = DataFrame({"a": [0, 1, 2], "b": [0, 1, 2]}) - with catch_warnings(record=True): - df["a"].ix[[0, 1, 2]] = -df["a"].ix[[0, 1, 2]].astype("float64") + 0.5 - expected = DataFrame({"a": [0.5, -0.5, -1.5], "b": [0, 1, 2]}) - tm.assert_frame_equal(df, expected) - - # GH 8607 - # ix setitem consistency - df = DataFrame( - { - "delta": [1174, 904, 161], - "elapsed": [7673, 9277, 1470], - "timestamp": [1413840976, 1413842580, 1413760580], - } - ) - expected = DataFrame( - { - "delta": [1174, 904, 161], - "elapsed": [7673, 9277, 1470], - "timestamp": pd.to_datetime( - [1413840976, 1413842580, 1413760580], unit="s" - ), - } - ) - - df2 = df.copy() - df2["timestamp"] = pd.to_datetime(df["timestamp"], unit="s") - tm.assert_frame_equal(df2, expected) - - df2 = df.copy() - df2.loc[:, "timestamp"] = pd.to_datetime(df["timestamp"], unit="s") - tm.assert_frame_equal(df2, expected) - - df2 = df.copy() - with catch_warnings(record=True): - df2.ix[:, 2] = pd.to_datetime(df["timestamp"], unit="s") - tm.assert_frame_equal(df2, expected) - - def test_ix_loc_consistency(self): - - # GH 8613 - # some edge cases where ix/loc should return the same - # this is not an exhaustive case - - def compare(result, expected): - if is_scalar(expected): - assert result == expected - else: - assert expected.equals(result) - - # failure cases for .loc, but these work for .ix - df = DataFrame(np.random.randn(5, 4), columns=list("ABCD")) - for key in [ - slice(1, 3), - tuple([slice(0, 2), slice(0, 2)]), - tuple([slice(0, 2), df.columns[0:2]]), - ]: - - for index in [ - tm.makeStringIndex, - tm.makeUnicodeIndex, - tm.makeDateIndex, - tm.makePeriodIndex, - tm.makeTimedeltaIndex, - ]: - df.index = index(len(df.index)) - with catch_warnings(record=True): - df.ix[key] - - msg = ( - r"cannot do slice indexing" - r" on {klass} with these indexers \[(0|1)\] of" - r" {kind}".format(klass=type(df.index), kind=str(int)) - ) - with pytest.raises(TypeError, match=msg): - df.loc[key] - - df = DataFrame( - np.random.randn(5, 4), - columns=list("ABCD"), - index=pd.date_range("2012-01-01", periods=5), - ) - - for key in [ - "2012-01-03", - "2012-01-31", - slice("2012-01-03", "2012-01-03"), - slice("2012-01-03", "2012-01-04"), - slice("2012-01-03", "2012-01-06", 2), - slice("2012-01-03", "2012-01-31"), - tuple([[True, True, True, False, True]]), - ]: - - # getitem - - # if the expected raises, then compare the exceptions - try: - with catch_warnings(record=True): - expected = df.ix[key] - except KeyError: - with pytest.raises(KeyError, match=r"^'2012-01-31'$"): - df.loc[key] - continue - - result = df.loc[key] - compare(result, expected) - - # setitem - df1 = df.copy() - df2 = df.copy() - - with catch_warnings(record=True): - df1.ix[key] = 10 - df2.loc[key] = 10 - compare(df2, df1) - - # edge cases - s = Series([1, 2, 3, 4], index=list("abde")) - - result1 = s["a":"c"] - with catch_warnings(record=True): - result2 = s.ix["a":"c"] - result3 = s.loc["a":"c"] - tm.assert_series_equal(result1, result2) - tm.assert_series_equal(result1, result3) - - # now work rather than raising KeyError - s = Series(range(5), [-2, -1, 1, 2, 3]) - - with catch_warnings(record=True): - result1 = s.ix[-10:3] - result2 = s.loc[-10:3] - 
tm.assert_series_equal(result1, result2) - - with catch_warnings(record=True): - result1 = s.ix[0:3] - result2 = s.loc[0:3] - tm.assert_series_equal(result1, result2) - - def test_ix_weird_slicing(self): - # http://stackoverflow.com/q/17056560/1240268 - df = DataFrame({"one": [1, 2, 3, np.nan, np.nan], "two": [1, 2, 3, 4, 5]}) - df.loc[df["one"] > 1, "two"] = -df["two"] - - expected = DataFrame( - { - "one": {0: 1.0, 1: 2.0, 2: 3.0, 3: np.nan, 4: np.nan}, - "two": {0: 1, 1: -2, 2: -3, 3: 4, 4: 5}, - } - ) - tm.assert_frame_equal(df, expected) - - def test_ix_assign_column_mixed(self, float_frame): - # GH #1142 - df = float_frame - df["foo"] = "bar" - - orig = df.loc[:, "B"].copy() - df.loc[:, "B"] = df.loc[:, "B"] + 1 - tm.assert_series_equal(df.B, orig + 1) - - # GH 3668, mixed frame with series value - df = DataFrame({"x": np.arange(10), "y": np.arange(10, 20), "z": "bar"}) - expected = df.copy() - - for i in range(5): - indexer = i * 2 - v = 1000 + i * 200 - expected.loc[indexer, "y"] = v - assert expected.loc[indexer, "y"] == v - - df.loc[df.x % 2 == 0, "y"] = df.loc[df.x % 2 == 0, "y"] * 100 - tm.assert_frame_equal(df, expected) - - # GH 4508, making sure consistency of assignments - df = DataFrame({"a": [1, 2, 3], "b": [0, 1, 2]}) - df.loc[[0, 2], "b"] = [100, -100] - expected = DataFrame({"a": [1, 2, 3], "b": [100, 1, -100]}) - tm.assert_frame_equal(df, expected) - - df = DataFrame({"a": list(range(4))}) - df["b"] = np.nan - df.loc[[1, 3], "b"] = [100, -100] - expected = DataFrame({"a": [0, 1, 2, 3], "b": [np.nan, 100, np.nan, -100]}) - tm.assert_frame_equal(df, expected) - - # ok, but chained assignments are dangerous - # if we turn off chained assignment it will work - with option_context("chained_assignment", None): - df = DataFrame({"a": list(range(4))}) - df["b"] = np.nan - df["b"].loc[[1, 3]] = [100, -100] - tm.assert_frame_equal(df, expected) - - def test_ix_get_set_consistency(self): - - # GH 4544 - # ix/loc get/set not consistent when - # a mixed int/string index - df = DataFrame( - np.arange(16).reshape((4, 4)), - columns=["a", "b", 8, "c"], - index=["e", 7, "f", "g"], - ) - - with catch_warnings(record=True): - assert df.ix["e", 8] == 2 - assert df.loc["e", 8] == 2 - - with catch_warnings(record=True): - df.ix["e", 8] = 42 - assert df.ix["e", 8] == 42 - assert df.loc["e", 8] == 42 - - df.loc["e", 8] = 45 - with catch_warnings(record=True): - assert df.ix["e", 8] == 45 - assert df.loc["e", 8] == 45 - - def test_ix_slicing_strings(self): - # see gh-3836 - data = { - "Classification": ["SA EQUITY CFD", "bbb", "SA EQUITY", "SA SSF", "aaa"], - "Random": [1, 2, 3, 4, 5], - "X": ["correct", "wrong", "correct", "correct", "wrong"], - } - df = DataFrame(data) - x = df[~df.Classification.isin(["SA EQUITY CFD", "SA EQUITY", "SA SSF"])] - with catch_warnings(record=True): - df.ix[x.index, "X"] = df["Classification"] - - expected = DataFrame( - { - "Classification": { - 0: "SA EQUITY CFD", - 1: "bbb", - 2: "SA EQUITY", - 3: "SA SSF", - 4: "aaa", - }, - "Random": {0: 1, 1: 2, 2: 3, 3: 4, 4: 5}, - "X": {0: "correct", 1: "bbb", 2: "correct", 3: "correct", 4: "aaa"}, - } - ) # bug was 4: 'bbb' - - tm.assert_frame_equal(df, expected) - - def test_ix_setitem_out_of_bounds_axis_0(self): - df = DataFrame( - np.random.randn(2, 5), - index=["row{i}".format(i=i) for i in range(2)], - columns=["col{i}".format(i=i) for i in range(5)], - ) - with catch_warnings(record=True): - msg = "cannot set by positional indexing with enlargement" - with pytest.raises(ValueError, match=msg): - df.ix[2, 0] = 100 
- - def test_ix_setitem_out_of_bounds_axis_1(self): - df = DataFrame( - np.random.randn(5, 2), - index=["row{i}".format(i=i) for i in range(5)], - columns=["col{i}".format(i=i) for i in range(2)], - ) - with catch_warnings(record=True): - msg = "cannot set by positional indexing with enlargement" - with pytest.raises(ValueError, match=msg): - df.ix[0, 2] = 100 - - def test_ix_empty_list_indexer_is_ok(self): - with catch_warnings(record=True): - - df = tm.makeCustomDataframe(5, 2) - # vertical empty - tm.assert_frame_equal( - df.ix[:, []], - df.iloc[:, :0], - check_index_type=True, - check_column_type=True, - ) - # horizontal empty - tm.assert_frame_equal( - df.ix[[], :], - df.iloc[:0, :], - check_index_type=True, - check_column_type=True, - ) - # horizontal empty - tm.assert_frame_equal( - df.ix[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True - ) - - def test_ix_duplicate_returns_series(self): - df = DataFrame( - np.random.randn(3, 3), index=[0.1, 0.2, 0.2], columns=list("abc") - ) - with catch_warnings(record=True): - r = df.ix[0.2, "a"] - e = df.loc[0.2, "a"] - tm.assert_series_equal(r, e) - - def test_ix_intervalindex(self): - # https://github.com/pandas-dev/pandas/issues/27865 - df = DataFrame( - np.random.randn(5, 2), - index=pd.IntervalIndex.from_breaks([-np.inf, 0, 1, 2, 3, np.inf]), - ) - result = df.ix[0:2, 0] - expected = df.iloc[0:2, 0] - tm.assert_series_equal(result, expected) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index e5e899bfb7f0d..6f20ec649b200 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1,7 +1,6 @@ """ test label based indexing with loc """ from io import StringIO import re -from warnings import catch_warnings, filterwarnings import numpy as np import pytest @@ -96,18 +95,12 @@ def test_loc_setitem_slice(self): def test_loc_getitem_int(self): # int label - self.check_result("loc", 2, "ix", 2, typs=["ints", "uints"], axes=0) - self.check_result("loc", 3, "ix", 3, typs=["ints", "uints"], axes=1) - self.check_result("loc", 2, "ix", 2, typs=["label"], fails=KeyError) + self.check_result("loc", 2, "loc", 2, typs=["label"], fails=KeyError) def test_loc_getitem_label(self): # label - self.check_result("loc", "c", "ix", "c", typs=["labels"], axes=0) - self.check_result("loc", "null", "ix", "null", typs=["mixed"], axes=0) - self.check_result("loc", 8, "ix", 8, typs=["mixed"], axes=0) - self.check_result("loc", Timestamp("20130102"), "ix", 1, typs=["ts"], axes=0) - self.check_result("loc", "c", "ix", "c", typs=["empty"], fails=KeyError) + self.check_result("loc", "c", "loc", "c", typs=["empty"], fails=KeyError) def test_loc_getitem_label_out_of_range(self): @@ -115,49 +108,28 @@ def test_loc_getitem_label_out_of_range(self): self.check_result( "loc", "f", - "ix", + "loc", "f", typs=["ints", "uints", "labels", "mixed", "ts"], fails=KeyError, ) self.check_result("loc", "f", "ix", "f", typs=["floats"], fails=KeyError) + self.check_result("loc", "f", "loc", "f", typs=["floats"], fails=KeyError) self.check_result( - "loc", 20, "ix", 20, typs=["ints", "uints", "mixed"], fails=KeyError, + "loc", 20, "loc", 20, typs=["ints", "uints", "mixed"], fails=KeyError, ) - self.check_result("loc", 20, "ix", 20, typs=["labels"], fails=TypeError) - self.check_result("loc", 20, "ix", 20, typs=["ts"], axes=0, fails=TypeError) - self.check_result("loc", 20, "ix", 20, typs=["floats"], axes=0, fails=KeyError) + self.check_result("loc", 20, "loc", 20, typs=["labels"], fails=TypeError) + 
self.check_result("loc", 20, "loc", 20, typs=["ts"], axes=0, fails=TypeError) + self.check_result("loc", 20, "loc", 20, typs=["floats"], axes=0, fails=KeyError) def test_loc_getitem_label_list(self): - + # TODO: test something here? # list of labels - self.check_result( - "loc", [0, 2, 4], "ix", [0, 2, 4], typs=["ints", "uints"], axes=0, - ) - self.check_result( - "loc", [3, 6, 9], "ix", [3, 6, 9], typs=["ints", "uints"], axes=1, - ) - self.check_result( - "loc", ["a", "b", "d"], "ix", ["a", "b", "d"], typs=["labels"], axes=0, - ) - self.check_result( - "loc", ["A", "B", "C"], "ix", ["A", "B", "C"], typs=["labels"], axes=1, - ) - self.check_result( - "loc", [2, 8, "null"], "ix", [2, 8, "null"], typs=["mixed"], axes=0, - ) - self.check_result( - "loc", - [Timestamp("20130102"), Timestamp("20130103")], - "ix", - [Timestamp("20130102"), Timestamp("20130103")], - typs=["ts"], - axes=0, - ) + pass def test_loc_getitem_label_list_with_missing(self): self.check_result( - "loc", [0, 1, 2], "indexer", [0, 1, 2], typs=["empty"], fails=KeyError, + "loc", [0, 1, 2], "loc", [0, 1, 2], typs=["empty"], fails=KeyError, ) self.check_result( "loc", @@ -206,7 +178,7 @@ def test_loc_getitem_label_list_fails(self): self.check_result( "loc", [20, 30, 40], - "ix", + "loc", [20, 30, 40], typs=["ints", "uints"], axes=1, @@ -214,35 +186,15 @@ def test_loc_getitem_label_list_fails(self): ) def test_loc_getitem_label_array_like(self): + # TODO: test something? # array like - self.check_result( - "loc", - Series(index=[0, 2, 4], dtype=object).index, - "ix", - [0, 2, 4], - typs=["ints", "uints"], - axes=0, - ) - self.check_result( - "loc", - Series(index=[3, 6, 9], dtype=object).index, - "ix", - [3, 6, 9], - typs=["ints", "uints"], - axes=1, - ) + pass def test_loc_getitem_bool(self): # boolean indexers b = [True, False, True, False] - self.check_result( - "loc", - b, - "ix", - b, - typs=["ints", "uints", "labels", "mixed", "ts", "floats"], - ) - self.check_result("loc", b, "ix", b, typs=["empty"], fails=IndexError) + + self.check_result("loc", b, "loc", b, typs=["empty"], fails=IndexError) @pytest.mark.parametrize("index", [[True, False], [True, False, True, False]]) def test_loc_getitem_bool_diff_len(self, index): @@ -255,14 +207,8 @@ def test_loc_getitem_bool_diff_len(self, index): _ = s.loc[index] def test_loc_getitem_int_slice(self): - - # ok - self.check_result( - "loc", slice(2, 4), "ix", [2, 4], typs=["ints", "uints"], axes=0, - ) - self.check_result( - "loc", slice(3, 6), "ix", [3, 6], typs=["ints", "uints"], axes=1, - ) + # TODO: test something here? 
+ pass def test_loc_to_fail(self): @@ -356,55 +302,34 @@ def test_loc_getitem_list_with_fail(self): def test_loc_getitem_label_slice(self): # label slices (with ints) + + # real label slices + + # GH 14316 + self.check_result( "loc", slice(1, 3), - "ix", + "loc", slice(1, 3), typs=["labels", "mixed", "empty", "ts", "floats"], fails=TypeError, ) - # real label slices - self.check_result( - "loc", slice("a", "c"), "ix", slice("a", "c"), typs=["labels"], axes=0, - ) - self.check_result( - "loc", slice("A", "C"), "ix", slice("A", "C"), typs=["labels"], axes=1, - ) - self.check_result( "loc", slice("20130102", "20130104"), - "ix", - slice("20130102", "20130104"), - typs=["ts"], - axes=0, - ) - self.check_result( "loc", slice("20130102", "20130104"), - "ix", - slice("20130102", "20130104"), typs=["ts"], axes=1, fails=TypeError, ) - # GH 14316 - self.check_result( - "loc", - slice("20130104", "20130102"), - "indexer", - [0, 1, 2], - typs=["ts_rev"], - axes=0, - ) - self.check_result( "loc", slice(2, 8), - "ix", + "loc", slice(2, 8), typs=["mixed"], axes=0, @@ -413,7 +338,7 @@ def test_loc_getitem_label_slice(self): self.check_result( "loc", slice(2, 8), - "ix", + "loc", slice(2, 8), typs=["mixed"], axes=1, @@ -423,7 +348,7 @@ def test_loc_getitem_label_slice(self): self.check_result( "loc", slice(2, 4, 2), - "ix", + "loc", slice(2, 4, 2), typs=["mixed"], axes=0, @@ -898,11 +823,6 @@ def test_loc_name(self): result = df.iloc[[0, 1]].index.name assert result == "index_name" - with catch_warnings(record=True): - filterwarnings("ignore", "\\n.ix", FutureWarning) - result = df.ix[[0, 1]].index.name - assert result == "index_name" - result = df.loc[[0, 1]].index.name assert result == "index_name" diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index 3adc206335e6f..15c65be37e0d9 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -1,11 +1,9 @@ """ test setting *parts* of objects both positionally and label based -TOD: these should be split among the indexer tests +TODO: these should be split among the indexer tests """ -from warnings import catch_warnings - import numpy as np import pytest @@ -15,7 +13,6 @@ class TestPartialSetting: - @pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning") def test_partial_setting(self): # GH2578, allow ix and friends to partially set @@ -87,32 +84,28 @@ def test_partial_setting(self): # single dtype frame, overwrite expected = DataFrame(dict({"A": [0, 2, 4], "B": [0, 2, 4]})) df = df_orig.copy() - with catch_warnings(record=True): - df.ix[:, "B"] = df.ix[:, "A"] + df.loc[:, "B"] = df.loc[:, "A"] tm.assert_frame_equal(df, expected) # mixed dtype frame, overwrite expected = DataFrame(dict({"A": [0, 2, 4], "B": Series([0, 2, 4])})) df = df_orig.copy() df["B"] = df["B"].astype(np.float64) - with catch_warnings(record=True): - df.ix[:, "B"] = df.ix[:, "A"] + df.loc[:, "B"] = df.loc[:, "A"] tm.assert_frame_equal(df, expected) # single dtype frame, partial setting expected = df_orig.copy() expected["C"] = df["A"] df = df_orig.copy() - with catch_warnings(record=True): - df.ix[:, "C"] = df.ix[:, "A"] + df.loc[:, "C"] = df.loc[:, "A"] tm.assert_frame_equal(df, expected) # mixed frame, partial setting expected = df_orig.copy() expected["C"] = df["A"] df = df_orig.copy() - with catch_warnings(record=True): - df.ix[:, "C"] = df.ix[:, "A"] + df.loc[:, "C"] = df.loc[:, "A"] tm.assert_frame_equal(df, expected) # GH 8473 @@ -328,7 +321,6 @@ def test_series_partial_set_with_name(self): result = 
ser.iloc[[1, 1, 0, 0]] tm.assert_series_equal(result, expected, check_index_type=True) - @pytest.mark.filterwarnings("ignore:\\n.ix") def test_partial_set_invalid(self): # GH 4940 @@ -339,26 +331,15 @@ def test_partial_set_invalid(self): # don't allow not string inserts with pytest.raises(TypeError): - with catch_warnings(record=True): - df.loc[100.0, :] = df.ix[0] - - with pytest.raises(TypeError): - with catch_warnings(record=True): - df.loc[100, :] = df.ix[0] + df.loc[100.0, :] = df.iloc[0] with pytest.raises(TypeError): - with catch_warnings(record=True): - df.ix[100.0, :] = df.ix[0] - - with pytest.raises(ValueError): - with catch_warnings(record=True): - df.ix[100, :] = df.ix[0] + df.loc[100, :] = df.iloc[0] # allow object conversion here df = orig.copy() - with catch_warnings(record=True): - df.loc["a", :] = df.ix[0] - exp = orig.append(Series(df.ix[0], name="a")) + df.loc["a", :] = df.iloc[0] + exp = orig.append(Series(df.iloc[0], name="a")) tm.assert_frame_equal(df, exp) tm.assert_index_equal(df.index, Index(orig.index.tolist() + ["a"])) assert df.index.dtype == "object" diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py index b41b90cd9afd1..ddaea5b597d6d 100644 --- a/pandas/tests/indexing/test_scalar.py +++ b/pandas/tests/indexing/test_scalar.py @@ -16,7 +16,7 @@ def _check(f, func, values=False): indicies = self.generate_indices(f, values) for i in indicies: result = getattr(f, func)[i] - expected = self.get_value(f, i, values) + expected = self.get_value(func, f, i, values) tm.assert_almost_equal(result, expected) for kind in self._kinds: @@ -44,7 +44,7 @@ def _check(f, func, values=False): indicies = self.generate_indices(f, values) for i in indicies: getattr(f, func)[i] = 1 - expected = self.get_value(f, i, values) + expected = self.get_value(func, f, i, values) tm.assert_almost_equal(expected, 1) for kind in self._kinds: diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 204cdee2d9e1f..ae16d0fa651d2 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -2,7 +2,6 @@ from io import StringIO import itertools from itertools import product -from warnings import catch_warnings, simplefilter import numpy as np from numpy.random import randn @@ -209,11 +208,6 @@ def test_reindex(self): reindexed = self.frame.loc[[("foo", "one"), ("bar", "one")]] tm.assert_frame_equal(reindexed, expected) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - reindexed = self.frame.ix[[("foo", "one"), ("bar", "one")]] - tm.assert_frame_equal(reindexed, expected) - def test_reindex_preserve_levels(self): new_index = self.ymd.index[::10] chunk = self.ymd.reindex(new_index) @@ -222,11 +216,6 @@ def test_reindex_preserve_levels(self): chunk = self.ymd.loc[new_index] assert chunk.index is new_index - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - chunk = self.ymd.ix[new_index] - assert chunk.index is new_index - ymdT = self.ymd.T chunk = ymdT.reindex(columns=new_index) assert chunk.columns is new_index
- [x] Needs release note
- [ ] Needs geopandas compat
- [x] Need to remove ix asvs
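For orientation, a minimal sketch (not taken from the PR itself; `df`, `result`, `exp`, and `df2` are illustrative names) of the mechanical replacement this PR applies across the test suite: label-based `.ix` uses become `.loc`, positional ones become `.iloc`, mirroring hunks such as `test_iloc_getitem_frame` and `test_cache_updating` in the diff above.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(
    np.random.randn(10, 4), index=range(0, 20, 2), columns=range(0, 8, 2)
)

# positional access: was df.ix[2]; .iloc is the explicit replacement
result = df.iloc[2]
# label access: was df.ix[4]; .loc is the explicit replacement
exp = df.loc[4]
assert result.equals(exp)  # the row at position 2 carries label 4

# mixed label/positional chains are split out explicitly:
df2 = pd.DataFrame({"A": [1.0, 2.0, 3.0]}, index=["a", "b", "c"])
df2.loc["d"] = df2.iloc[0]  # was: df2.ix["d"] = df2.ix[0]
```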
https://api.github.com/repos/pandas-dev/pandas/pulls/27620
2019-07-27T01:45:51Z
2019-12-12T12:15:52Z
2019-12-12T12:15:52Z
2019-12-12T12:16:17Z
Continue simplifying indexing code
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cdbe0e9d22eb4..9fd956c40c1f0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2836,11 +2836,13 @@ def __getitem__(self, key): # Do we have a slicer (on rows)? indexer = convert_to_index_sliceable(self, key) if indexer is not None: + # either we have a slice or we have a string that can be converted + # to a slice for partial-string date indexing return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): - return self._getitem_frame(key) + return self.where(key) # Do we have a (boolean) 1d indexer? if com.is_bool_indexer(key): @@ -2938,11 +2940,6 @@ def _getitem_multilevel(self, key): else: return self._get_item_cache(key) - def _getitem_frame(self, key): - if key.values.size and not is_bool_dtype(key.values): - raise ValueError("Must pass DataFrame with boolean values only") - return self.where(key) - def _get_value(self, index, col, takeable: bool = False): """ Quickly retrieve single value at passed column and index. @@ -2986,6 +2983,8 @@ def __setitem__(self, key, value): # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: + # either we have a slice or we have a string that can be converted + # to a slice for partial-string date indexing return self._setitem_slice(indexer, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index a1a8619fab892..df89dbe6db6dc 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -117,6 +117,7 @@ def __iter__(self): raise NotImplementedError("ix is not iterable") def __getitem__(self, key): + # Used in ix and downstream in geopandas _CoordinateIndexer if type(key) is tuple: # Note: we check the type exactly instead of with isinstance # because NamedTuple is checked separately. @@ -181,7 +182,7 @@ def _get_setitem_indexer(self, key): pass if isinstance(key, range): - return self._convert_range(key, is_setter=True) + return list(key) axis = self.axis or 0 try: @@ -258,10 +259,6 @@ def _convert_tuple(self, key): keyidx.append(idx) return tuple(keyidx) - def _convert_range(self, key: range, is_setter: bool = False): - """ convert a range argument """ - return list(key) - def _convert_scalar_indexer(self, key, axis: int): # if we are accessing via lowered dim, use the last dim ax = self.obj._get_axis(min(axis, self.ndim - 1)) diff --git a/pandas/core/series.py b/pandas/core/series.py index c7fcab56e1fe5..f840b6ce649b8 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1131,8 +1131,7 @@ def __getitem__(self, key): def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): - indexer = self.index._convert_slice_indexer(key, kind="getitem") - return self._get_values(indexer) + return self._slice(key) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " @@ -1148,7 +1147,6 @@ def _get_with(self, key): return self._get_values(key) raise - # pragma: no cover if not isinstance(key, (list, np.ndarray, Series, Index)): key = list(key) @@ -1165,19 +1163,18 @@ def _get_with(self, key): elif key_type == "boolean": return self._get_values(key) - try: - # handle the dup indexing case (GH 4246) - if isinstance(key, (list, tuple)): - return self.loc[key] - - return self.reindex(key) - except Exception: - # [slice(0, 5, None)] will break if you convert to ndarray, - # e.g. 
as requested by np.median - # hack - if isinstance(key[0], slice): + if isinstance(key, (list, tuple)): + # TODO: de-dup with tuple case handled above? + # handle the dup indexing case GH#4246 + if len(key) == 1 and isinstance(key[0], slice): + # [slice(0, 5, None)] will break if you convert to ndarray, + # e.g. as requested by np.median + # FIXME: hack return self._get_values(key) - raise + + return self.loc[key] + + return self.reindex(key) def _get_values_tuple(self, key): # mpl hackaround @@ -1220,33 +1217,28 @@ def _get_value(self, label, takeable: bool = False): def __setitem__(self, key, value): key = com.apply_if_callable(key, self) + cacher_needs_updating = self._check_is_chained_assignment_possible() - def setitem(key, value): - try: - self._set_with_engine(key, value) - return - except com.SettingWithCopyError: - raise - except (KeyError, ValueError): - values = self._values - if is_integer(key) and not self.index.inferred_type == "integer": - - values[key] = value - return - elif key is Ellipsis: - self[:] = value - return - + try: + self._set_with_engine(key, value) + except com.SettingWithCopyError: + raise + except (KeyError, ValueError): + values = self._values + if is_integer(key) and not self.index.inferred_type == "integer": + values[key] = value + elif key is Ellipsis: + self[:] = value + else: self.loc[key] = value - return - except TypeError as e: - if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): - raise ValueError("Can only tuple-index with a MultiIndex") + except TypeError as e: + if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): + raise ValueError("Can only tuple-index with a MultiIndex") - # python 3 type errors should be raised - if _is_unorderable_exception(e): - raise IndexError(key) + # python 3 type errors should be raised + if _is_unorderable_exception(e): + raise IndexError(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) @@ -1258,9 +1250,6 @@ def setitem(key, value): self._set_with(key, value) - # do the setitem - cacher_needs_updating = self._check_is_chained_assignment_possible() - setitem(key, value) if cacher_needs_updating: self._maybe_update_cacher() @@ -1282,6 +1271,14 @@ def _set_with(self, key, value): if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) + + elif is_scalar(key) and not is_integer(key) and key not in self.index: + # GH#12862 adding an new key to the Series + # Note: have to exclude integers because that is ambiguously + # position-based + self.loc[key] = value + return + else: if isinstance(key, tuple): try: @@ -1289,13 +1286,6 @@ def _set_with(self, key, value): except Exception: pass - if is_scalar(key) and not is_integer(key) and key not in self.index: - # GH#12862 adding an new key to the Series - # Note: have to exclude integers because that is ambiguously - # position-based - self.loc[key] = value - return - if is_scalar(key): key = [key] elif not isinstance(key, (list, Series, np.ndarray)): @@ -1306,6 +1296,7 @@ def _set_with(self, key, value): if isinstance(key, Index): key_type = key.inferred_type + key = key._values else: key_type = lib.infer_dtype(key, skipna=False) @@ -1320,10 +1311,7 @@ def _set_with(self, key, value): self._set_labels(key, value) def _set_labels(self, key, value): - if isinstance(key, Index): - key = key.values - else: - key = com.asarray_tuplesafe(key) + key = com.asarray_tuplesafe(key) indexer = self.index.get_indexer(key) mask = indexer == -1 if 
mask.any(): diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index fc51c06b149fd..d81cab77f09f5 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -324,7 +324,7 @@ def _ixs(self, i: int, axis: int = 0): Parameters ---------- i : int - axis: int + axis : int default 0, ignored Returns diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 814a99701b703..ae14563e5952a 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -269,7 +269,7 @@ def test_getitem_boolean( subframe_obj = datetime_frame[indexer_obj] assert_frame_equal(subframe_obj, subframe) - with pytest.raises(ValueError, match="boolean values only"): + with pytest.raises(ValueError, match="Boolean array expected"): datetime_frame[datetime_frame] # test that Series work
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
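As a quick illustration of the `DataFrame.__getitem__` simplification above — a sketch, assuming a pandas build that contains this change; `df` and `mask` are made-up names — indexing with a boolean DataFrame now routes straight through `DataFrame.where`, and a non-boolean DataFrame key fails downstream with the `where` message ("Boolean array expected …") rather than the removed `_getitem_frame` message ("Must pass DataFrame with boolean values only"), as the updated test in `test_getitem_boolean` shows.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(4, 3), columns=list("ABC"))
mask = df > 0

# df[mask] is now literally df.where(mask): NaN wherever the mask is False
assert df[mask].equals(df.where(mask))

# a non-boolean DataFrame key is rejected inside where()
try:
    df[df]
except ValueError as err:
    assert "Boolean array expected" in str(err)
```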
https://api.github.com/repos/pandas-dev/pandas/pulls/27619
2019-07-27T01:39:24Z
2019-07-27T14:09:37Z
2019-07-27T14:09:37Z
2019-07-27T15:11:12Z
Remove Encoding of values in char** For Labels
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 4decc99087a9e..8e25857e5ad69 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -159,6 +159,7 @@ I/O ^^^ - :meth:`read_csv` now accepts binary mode file buffers when using the Python csv engine (:issue:`23779`) +- Bug in :meth:`DataFrame.to_json` where using a Tuple as a column or index value and using ``orient="columns"`` or ``orient="index"`` would produce invalid JSON (:issue:`20500`) - Plotting diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h index 0470fef450dde..ee6e7081bf00e 100644 --- a/pandas/_libs/src/ujson/lib/ultrajson.h +++ b/pandas/_libs/src/ujson/lib/ultrajson.h @@ -307,11 +307,4 @@ EXPORTFUNCTION JSOBJ JSON_DecodeObject(JSONObjectDecoder *dec, const char *buffer, size_t cbBuffer); EXPORTFUNCTION void encode(JSOBJ, JSONObjectEncoder *, const char *, size_t); -#define Buffer_Reserve(__enc, __len) \ - if ((size_t)((__enc)->end - (__enc)->offset) < (size_t)(__len)) { \ - Buffer_Realloc((__enc), (__len)); \ - } - -void Buffer_Realloc(JSONObjectEncoder *enc, size_t cbNeeded); - #endif // PANDAS__LIBS_SRC_UJSON_LIB_ULTRAJSON_H_ diff --git a/pandas/_libs/src/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/ujson/lib/ultrajsonenc.c index 2d6c823a45515..d5b379bee585b 100644 --- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c +++ b/pandas/_libs/src/ujson/lib/ultrajsonenc.c @@ -714,6 +714,12 @@ int Buffer_EscapeStringValidated(JSOBJ obj, JSONObjectEncoder *enc, } } +#define Buffer_Reserve(__enc, __len) \ + if ( (size_t) ((__enc)->end - (__enc)->offset) < (size_t) (__len)) \ + { \ + Buffer_Realloc((__enc), (__len));\ + } \ + #define Buffer_AppendCharUnchecked(__enc, __chr) *((__enc)->offset++) = __chr; FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC strreverse(char *begin, diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 926440218b5d9..de336fb3aa1dc 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -48,13 +48,13 @@ Numeric decoder derived from from TCL library #include <../../../tslibs/src/datetime/np_datetime_strings.h> #include "datetime.h" -#define NPY_JSON_BUFSIZE 32768 - static PyTypeObject *type_decimal; static PyTypeObject *cls_dataframe; static PyTypeObject *cls_series; static PyTypeObject *cls_index; static PyTypeObject *cls_nat; +PyObject *cls_timestamp; +PyObject *cls_timedelta; npy_int64 get_nat(void) { return NPY_MIN_INT64; } @@ -166,6 +166,8 @@ void *initObjToJSON(void) cls_index = (PyTypeObject *)PyObject_GetAttrString(mod_pandas, "Index"); cls_series = (PyTypeObject *)PyObject_GetAttrString(mod_pandas, "Series"); + cls_timestamp = PyObject_GetAttrString(mod_pandas, "Timestamp"); + cls_timedelta = PyObject_GetAttrString(mod_pandas, "Timedelta"); Py_DECREF(mod_pandas); } @@ -787,30 +789,23 @@ JSOBJ NpyArr_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { return GET_TC(tc)->itemValue; } -static void NpyArr_getLabel(JSOBJ obj, JSONTypeContext *tc, size_t *outLen, - npy_intp idx, char **labels) { - JSONObjectEncoder *enc = (JSONObjectEncoder *)tc->encoder; - PRINTMARK(); - *outLen = strlen(labels[idx]); - Buffer_Reserve(enc, *outLen); - memcpy(enc->offset, labels[idx], sizeof(char) * (*outLen)); - enc->offset += *outLen; - *outLen = 0; -} - char *NpyArr_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { NpyArrContext *npyarr = GET_TC(tc)->npyarr; npy_intp idx; PRINTMARK(); + char *cStr; if (GET_TC(tc)->iterNext 
== NpyArr_iterNextItem) { idx = npyarr->index[npyarr->stridedim] - 1; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels); + cStr = npyarr->columnLabels[idx]; } else { idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels); + cStr = npyarr->rowLabels[idx]; } - return NULL; + + *outLen = strlen(cStr); + + return cStr; } //============================================================================= @@ -852,19 +847,22 @@ char *PdBlock_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; NpyArrContext *npyarr = blkCtxt->npyCtxts[0]; npy_intp idx; + char *cStr; PRINTMARK(); if (GET_TC(tc)->iterNext == PdBlock_iterNextItem) { idx = blkCtxt->colIdx - 1; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels); + cStr = npyarr->columnLabels[idx]; } else { idx = GET_TC(tc)->iterNext != PdBlock_iterNext ? npyarr->index[npyarr->stridedim - npyarr->inc] - 1 : npyarr->index[npyarr->stridedim]; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels); + cStr = npyarr->rowLabels[idx]; } - return NULL; + + *outLen = strlen(cStr); + return cStr; } char *PdBlock_iterGetName_Transpose(JSOBJ obj, JSONTypeContext *tc, @@ -872,16 +870,19 @@ char *PdBlock_iterGetName_Transpose(JSOBJ obj, JSONTypeContext *tc, PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; NpyArrContext *npyarr = blkCtxt->npyCtxts[blkCtxt->colIdx]; npy_intp idx; + char *cStr; PRINTMARK(); if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) { idx = npyarr->index[npyarr->stridedim] - 1; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels); + cStr = npyarr->columnLabels[idx]; } else { idx = blkCtxt->colIdx; - NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels); + cStr = npyarr->rowLabels[idx]; } - return NULL; + + *outLen = strlen(cStr); + return cStr; } int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) { @@ -1578,16 +1579,30 @@ void NpyArr_freeLabels(char **labels, npy_intp len) { } } -char **NpyArr_encodeLabels(PyArrayObject *labels, JSONObjectEncoder *enc, +/* + * Function: NpyArr_encodeLabels + * ----------------------------- + * + * Builds an array of "encoded" labels. + * + * labels: PyArrayObject pointer for labels to be "encoded" + * num : number of labels + * + * "encode" is quoted above because we aren't really doing encoding + * For historical reasons this function would actually encode the entire + * array into a separate buffer with a separate call to JSON_Encode + * and would leave it to complex pointer manipulation from there to + * unpack values as needed. To make things simpler and more idiomatic + * this has instead just stringified any input save for datetime values, + * which may need to be represented in various formats. + */ +char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc, npy_intp num) { // NOTE this function steals a reference to labels. 
- PyObjectEncoder *pyenc = (PyObjectEncoder *)enc; PyObject *item = NULL; - npy_intp i, stride, len, need_quotes; + npy_intp i, stride, len; char **ret; - char *dataptr, *cLabel, *origend, *origst, *origoffset; - char labelBuffer[NPY_JSON_BUFSIZE]; - PyArray_GetItemFunc *getitem; + char *dataptr, *cLabel; int type_num; PRINTMARK(); @@ -1614,68 +1629,136 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, JSONObjectEncoder *enc, ret[i] = NULL; } - origst = enc->start; - origend = enc->end; - origoffset = enc->offset; - stride = PyArray_STRIDE(labels, 0); dataptr = PyArray_DATA(labels); - getitem = (PyArray_GetItemFunc *)PyArray_DESCR(labels)->f->getitem; type_num = PyArray_TYPE(labels); for (i = 0; i < num; i++) { - if (PyTypeNum_ISDATETIME(type_num) || PyTypeNum_ISNUMBER(type_num)) - { - item = (PyObject *)labels; - pyenc->npyType = type_num; - pyenc->npyValue = dataptr; - } else { - item = getitem(dataptr, labels); - if (!item) { - NpyArr_freeLabels(ret, num); - ret = 0; - break; - } - } - - cLabel = JSON_EncodeObject(item, enc, labelBuffer, NPY_JSON_BUFSIZE); - - if (item != (PyObject *)labels) { - Py_DECREF(item); - } - - if (PyErr_Occurred() || enc->errorMsg) { + item = PyArray_GETITEM(labels, dataptr); + if (!item) { + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + // TODO: for any matches on type_num (date and timedeltas) should use a + // vectorized solution to convert to epoch or iso formats + if (enc->datetimeIso && (type_num == NPY_TIMEDELTA || PyDelta_Check(item))) { + PyObject *td = PyObject_CallFunction(cls_timedelta, "(O)", item); + if (td == NULL) { + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + PyObject *iso = PyObject_CallMethod(td, "isoformat", NULL); + Py_DECREF(td); + if (iso == NULL) { + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + cLabel = (char *)PyUnicode_AsUTF8(iso); + Py_DECREF(iso); + len = strlen(cLabel); + } + else if (PyTypeNum_ISDATETIME(type_num) || + PyDateTime_Check(item) || PyDate_Check(item)) { + PyObject *ts = PyObject_CallFunction(cls_timestamp, "(O)", item); + if (ts == NULL) { + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + if (enc->datetimeIso) { + PyObject *iso = PyObject_CallMethod(ts, "isoformat", NULL); + Py_DECREF(ts); + if (iso == NULL) { + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + cLabel = (char *)PyUnicode_AsUTF8(iso); + Py_DECREF(iso); + len = strlen(cLabel); + } else { + npy_int64 value; + // TODO: refactor to not duplicate what goes on in beginTypeContext + if (PyObject_HasAttrString(ts, "value")) { + PRINTMARK(); + value = get_long_attr(ts, "value"); + } else { + PRINTMARK(); + value = + total_seconds(ts) * 1000000000LL; // nanoseconds per second + } + Py_DECREF(ts); + + switch (enc->datetimeUnit) { + case NPY_FR_ns: + break; + case NPY_FR_us: + value /= 1000LL; + break; + case NPY_FR_ms: + value /= 1000000LL; + break; + case NPY_FR_s: + value /= 1000000000LL; + break; + default: + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + char buf[21] = {0}; // 21 chars for 2**63 as string + cLabel = buf; + sprintf(buf, "%" NPY_INT64_FMT, value); + len = strlen(cLabel); + } + } else { // Fallack to string representation + PyObject *str = PyObject_Str(item); + if (str == NULL) { + Py_DECREF(item); + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } + + cLabel = (char *)PyUnicode_AsUTF8(str); + Py_DECREF(str); + len = strlen(cLabel); + } + + Py_DECREF(item); + // Add 1 to include NULL 
terminator + ret[i] = PyObject_Malloc(len + 1); + memcpy(ret[i], cLabel, len + 1); + + if (PyErr_Occurred()) { NpyArr_freeLabels(ret, num); ret = 0; break; } - need_quotes = ((*cLabel) != '"'); - len = enc->offset - cLabel + 1 + 2 * need_quotes; - ret[i] = PyObject_Malloc(sizeof(char) * len); - if (!ret[i]) { PyErr_NoMemory(); ret = 0; break; } - if (need_quotes) { - ret[i][0] = '"'; - memcpy(ret[i] + 1, cLabel, sizeof(char) * (len - 4)); - ret[i][len - 3] = '"'; - } else { - memcpy(ret[i], cLabel, sizeof(char) * (len - 2)); - } - ret[i][len - 2] = ':'; - ret[i][len - 1] = '\0'; dataptr += stride; } - enc->start = origst; - enc->end = origend; - enc->offset = origoffset; - Py_DECREF(labels); return ret; } @@ -1972,7 +2055,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } pc->columnLabelsLen = PyArray_DIM(pc->newObj, 0); pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, - (JSONObjectEncoder *)enc, + enc, pc->columnLabelsLen); if (!pc->columnLabels) { goto INVALID; @@ -2075,7 +2158,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } pc->columnLabelsLen = PyObject_Size(tmpObj); pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, - (JSONObjectEncoder *)enc, + enc, pc->columnLabelsLen); Py_DECREF(tmpObj); if (!pc->columnLabels) { @@ -2098,7 +2181,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { pc->rowLabelsLen = PyObject_Size(tmpObj); pc->rowLabels = NpyArr_encodeLabels((PyArrayObject *)values, - (JSONObjectEncoder *)enc, pc->rowLabelsLen); + enc, pc->rowLabelsLen); Py_DECREF(tmpObj); tmpObj = (enc->outputFormat == INDEX ? PyObject_GetAttrString(obj, "columns") @@ -2117,7 +2200,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } pc->columnLabelsLen = PyObject_Size(tmpObj); pc->columnLabels = NpyArr_encodeLabels((PyArrayObject *)values, - (JSONObjectEncoder *)enc, + enc, pc->columnLabelsLen); Py_DECREF(tmpObj); if (!pc->columnLabels) { @@ -2429,7 +2512,6 @@ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) { PRINTMARK(); ret = JSON_EncodeObject(oinput, encoder, buffer, sizeof(buffer)); PRINTMARK(); - if (PyErr_Occurred()) { PRINTMARK(); return NULL; diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 9c687f036aa68..9842a706f43d7 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1012,60 +1012,70 @@ def test_convert_dates_infer(self): result = read_json(dumps(data))[["id", infer_word]] assert_frame_equal(result, expected) - def test_date_format_frame(self): + @pytest.mark.parametrize( + "date,date_unit", + [ + ("20130101 20:43:42.123", None), + ("20130101 20:43:42", "s"), + ("20130101 20:43:42.123", "ms"), + ("20130101 20:43:42.123456", "us"), + ("20130101 20:43:42.123456789", "ns"), + ], + ) + def test_date_format_frame(self, date, date_unit): df = self.tsframe.copy() - def test_w_date(date, date_unit=None): - df["date"] = Timestamp(date) - df.iloc[1, df.columns.get_loc("date")] = pd.NaT - df.iloc[5, df.columns.get_loc("date")] = pd.NaT - if date_unit: - json = df.to_json(date_format="iso", date_unit=date_unit) - else: - json = df.to_json(date_format="iso") - result = read_json(json) - expected = df.copy() - expected.index = expected.index.tz_localize("UTC") - expected["date"] = expected["date"].dt.tz_localize("UTC") - assert_frame_equal(result, expected) - - test_w_date("20130101 20:43:42.123") - test_w_date("20130101 20:43:42", date_unit="s") - test_w_date("20130101 20:43:42.123", 
date_unit="ms") - test_w_date("20130101 20:43:42.123456", date_unit="us") - test_w_date("20130101 20:43:42.123456789", date_unit="ns") + df["date"] = Timestamp(date) + df.iloc[1, df.columns.get_loc("date")] = pd.NaT + df.iloc[5, df.columns.get_loc("date")] = pd.NaT + if date_unit: + json = df.to_json(date_format="iso", date_unit=date_unit) + else: + json = df.to_json(date_format="iso") + result = read_json(json) + expected = df.copy() + # expected.index = expected.index.tz_localize("UTC") + expected["date"] = expected["date"].dt.tz_localize("UTC") + assert_frame_equal(result, expected) + def test_date_format_frame_raises(self): + df = self.tsframe.copy() msg = "Invalid value 'foo' for option 'date_unit'" with pytest.raises(ValueError, match=msg): df.to_json(date_format="iso", date_unit="foo") - def test_date_format_series(self): - def test_w_date(date, date_unit=None): - ts = Series(Timestamp(date), index=self.ts.index) - ts.iloc[1] = pd.NaT - ts.iloc[5] = pd.NaT - if date_unit: - json = ts.to_json(date_format="iso", date_unit=date_unit) - else: - json = ts.to_json(date_format="iso") - result = read_json(json, typ="series") - expected = ts.copy() - expected.index = expected.index.tz_localize("UTC") - expected = expected.dt.tz_localize("UTC") - assert_series_equal(result, expected) - - test_w_date("20130101 20:43:42.123") - test_w_date("20130101 20:43:42", date_unit="s") - test_w_date("20130101 20:43:42.123", date_unit="ms") - test_w_date("20130101 20:43:42.123456", date_unit="us") - test_w_date("20130101 20:43:42.123456789", date_unit="ns") + @pytest.mark.parametrize( + "date,date_unit", + [ + ("20130101 20:43:42.123", None), + ("20130101 20:43:42", "s"), + ("20130101 20:43:42.123", "ms"), + ("20130101 20:43:42.123456", "us"), + ("20130101 20:43:42.123456789", "ns"), + ], + ) + def test_date_format_series(self, date, date_unit): + ts = Series(Timestamp(date), index=self.ts.index) + ts.iloc[1] = pd.NaT + ts.iloc[5] = pd.NaT + if date_unit: + json = ts.to_json(date_format="iso", date_unit=date_unit) + else: + json = ts.to_json(date_format="iso") + result = read_json(json, typ="series") + expected = ts.copy() + # expected.index = expected.index.tz_localize("UTC") + expected = expected.dt.tz_localize("UTC") + assert_series_equal(result, expected) + def test_date_format_series_raises(self): ts = Series(Timestamp("20130101 20:43:42.123"), index=self.ts.index) msg = "Invalid value 'foo' for option 'date_unit'" with pytest.raises(ValueError, match=msg): ts.to_json(date_format="iso", date_unit="foo") - def test_date_unit(self): + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + def test_date_unit(self, unit): df = self.tsframe.copy() df["date"] = Timestamp("20130101 20:43:42") dl = df.columns.get_loc("date") @@ -1073,16 +1083,15 @@ def test_date_unit(self): df.iloc[2, dl] = Timestamp("21460101 20:43:42") df.iloc[4, dl] = pd.NaT - for unit in ("s", "ms", "us", "ns"): - json = df.to_json(date_format="epoch", date_unit=unit) + json = df.to_json(date_format="epoch", date_unit=unit) - # force date unit - result = read_json(json, date_unit=unit) - assert_frame_equal(result, df) + # force date unit + result = read_json(json, date_unit=unit) + assert_frame_equal(result, df) - # detect date unit - result = read_json(json, date_unit=None) - assert_frame_equal(result, df) + # detect date unit + result = read_json(json, date_unit=None) + assert_frame_equal(result, df) def test_weird_nested_json(self): # this used to core dump the parser @@ -1611,3 +1620,30 @@ def 
test_read_timezone_information(self): ) expected = Series([88], index=DatetimeIndex(["2019-01-01 11:00:00"], tz="UTC")) assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")] + ) + def test_timedelta_as_label(self, date_format, key): + df = pd.DataFrame([[1]], columns=[pd.Timedelta("1D")]) + expected = '{{"{key}":{{"0":1}}}}'.format(key=key) + result = df.to_json(date_format=date_format) + + assert result == expected + + @pytest.mark.parametrize( + "orient,expected", + [ + ("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"), + ("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"), + # TODO: the below have separate encoding procedures + # They produce JSON but not in a consistent manner + pytest.param("split", "", marks=pytest.mark.skip), + pytest.param("table", "", marks=pytest.mark.skip), + ], + ) + def test_tuple_labels(self, orient, expected): + # GH 20500 + df = pd.DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")]) + result = df.to_json(orient=orient) + assert result == expected
- [X] closes #20500

In reviewing this module, there is a shared function for object keys and values which encodes objects into a separate buffer and subsequently indexes off of that. Instead of encoding values into a buffer, I've updated that function to return a ``char **`` pointing to string representations of the labels (i.e. the index / columns). This is arguably a precursor to:

1. #19486 to disentangle datetime functions from JSON
2. #12004 to add indent support (I tried this previously by vendoring ujson updates, but it didn't work because of this limitation)
3. #27164 because the various formats may need column / index labels at different points in time, so encoding up front makes them very difficult to reuse

The only downside I haven't been able to figure out is how to deal with date formatting. Right now all labels are written as epochs. I'm sure there is a way to handle this, but I wasn't clear on the best way to convert arbitrary input (i.e. object or datetime dtypes) into ISO format element-wise where applicable.

cc @jbrockmendel in case you have insight on that
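For reference, a minimal sketch of the user-facing behavior this fixes, with outputs taken from the ``test_tuple_labels`` and ``test_timedelta_as_label`` expectations added above:

```python
import pandas as pd

# Tuple index/column labels previously produced invalid JSON with
# orient="index" / orient="columns"; they are now stringified first.
df = pd.DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")])
print(df.to_json(orient="index"))    # {"('a', 'b')":{"('c', 'd')":1}}
print(df.to_json(orient="columns"))  # {"('c', 'd')":{"('a', 'b')":1}}

# Timedelta labels are also encoded according to date_format:
td = pd.DataFrame([[1]], columns=[pd.Timedelta("1D")])
print(td.to_json(date_format="iso"))    # {"P1DT0H0M0S":{"0":1}}
print(td.to_json(date_format="epoch"))  # {"86400000":{"0":1}}
```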
https://api.github.com/repos/pandas-dev/pandas/pulls/27618
2019-07-26T23:35:30Z
2019-08-23T22:38:19Z
2019-08-23T22:38:18Z
2019-08-23T22:38:22Z
DEPR: Deprecate NDFrame.filter
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 8755abe642068..79027a69bb81a 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -581,6 +581,7 @@ Deprecations it is recommended to use ``json_normalize`` as :func:`pandas.json_normalize` instead (:issue:`27586`). - :meth:`DataFrame.to_stata`, :meth:`DataFrame.to_feather`, and :meth:`DataFrame.to_parquet` argument "fname" is deprecated, use "path" instead (:issue:`23574`) - The deprecated internal attributes ``_start``, ``_stop`` and ``_step`` of :class:`RangeIndex` now raise a ``FutureWarning`` instead of a ``DeprecationWarning`` (:issue:`26581`) +- :meth:`DataFrame.filter` and :meth:`Series.filter` are deprecated. (:issue:`26642`) .. _whatsnew_1000.prior_deprecations: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b896721469f1f..a8bea0c32b878 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4557,7 +4557,11 @@ def filter( """ Subset the dataframe rows or columns according to the specified index labels. - Note that this routine does not filter a dataframe on its + .. deprecated:: 1.0 + Use .loc instead, e.g. for regular expressions + use .loc(regex=True)[:, "^col_"] + + Note that this method does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters @@ -4612,6 +4616,13 @@ def filter( one two three rabbit 4 5 6 """ + warnings.warn( + "DataFrame/Series.filter is deprecated " + "and will be removed in a future version", + FutureWarning, + stacklevel=2, + ) + nkw = com.count_not_none(items, like, regex) if nkw > 1: raise TypeError( diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index d6ef3a7600abb..407393074c24f 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -806,136 +806,9 @@ def test_align_series_combinations(self): tm.assert_series_equal(res1, exp2) tm.assert_frame_equal(res2, exp1) - def test_filter(self, float_frame, float_string_frame): - # Items - filtered = float_frame.filter(["A", "B", "E"]) - assert len(filtered.columns) == 2 - assert "E" not in filtered - - filtered = float_frame.filter(["A", "B", "E"], axis="columns") - assert len(filtered.columns) == 2 - assert "E" not in filtered - - # Other axis - idx = float_frame.index[0:4] - filtered = float_frame.filter(idx, axis="index") - expected = float_frame.reindex(index=idx) - tm.assert_frame_equal(filtered, expected) - - # like - fcopy = float_frame.copy() - fcopy["AA"] = 1 - - filtered = fcopy.filter(like="A") - assert len(filtered.columns) == 2 - assert "AA" in filtered - - # like with ints in column names - df = DataFrame(0.0, index=[0, 1, 2], columns=[0, 1, "_A", "_B"]) - filtered = df.filter(like="_") - assert len(filtered.columns) == 2 - - # regex with ints in column names - # from PR #10384 - df = DataFrame(0.0, index=[0, 1, 2], columns=["A1", 1, "B", 2, "C"]) - expected = DataFrame( - 0.0, index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object) - ) - filtered = df.filter(regex="^[0-9]+$") - tm.assert_frame_equal(filtered, expected) - - expected = DataFrame(0.0, index=[0, 1, 2], columns=[0, "0", 1, "1"]) - # shouldn't remove anything - filtered = expected.filter(regex="^[0-9]+$") - tm.assert_frame_equal(filtered, expected) - - # pass in None - with pytest.raises(TypeError, match="Must pass"): - float_frame.filter() - with pytest.raises(TypeError, match="Must pass"): - float_frame.filter(items=None) - 
with pytest.raises(TypeError, match="Must pass"): - float_frame.filter(axis=1) - - # test mutually exclusive arguments - with pytest.raises(TypeError, match="mutually exclusive"): - float_frame.filter(items=["one", "three"], regex="e$", like="bbi") - with pytest.raises(TypeError, match="mutually exclusive"): - float_frame.filter(items=["one", "three"], regex="e$", axis=1) - with pytest.raises(TypeError, match="mutually exclusive"): - float_frame.filter(items=["one", "three"], regex="e$") - with pytest.raises(TypeError, match="mutually exclusive"): - float_frame.filter(items=["one", "three"], like="bbi", axis=0) - with pytest.raises(TypeError, match="mutually exclusive"): - float_frame.filter(items=["one", "three"], like="bbi") - - # objects - filtered = float_string_frame.filter(like="foo") - assert "foo" in filtered - - # unicode columns, won't ascii-encode - df = float_frame.rename(columns={"B": "\u2202"}) - filtered = df.filter(like="C") - assert "C" in filtered - - def test_filter_regex_search(self, float_frame): - fcopy = float_frame.copy() - fcopy["AA"] = 1 - - # regex - filtered = fcopy.filter(regex="[A]+") - assert len(filtered.columns) == 2 - assert "AA" in filtered - - # doesn't have to be at beginning - df = DataFrame( - {"aBBa": [1, 2], "BBaBB": [1, 2], "aCCa": [1, 2], "aCCaBB": [1, 2]} - ) - - result = df.filter(regex="BB") - exp = df[[x for x in df.columns if "BB" in x]] - tm.assert_frame_equal(result, exp) - - @pytest.mark.parametrize( - "name,expected", - [ - ("a", DataFrame({"a": [1, 2]})), - ("a", DataFrame({"a": [1, 2]})), - ("あ", DataFrame({"あ": [3, 4]})), - ], - ) - def test_filter_unicode(self, name, expected): - # GH13101 - df = DataFrame({"a": [1, 2], "あ": [3, 4]}) - - tm.assert_frame_equal(df.filter(like=name), expected) - tm.assert_frame_equal(df.filter(regex=name), expected) - - @pytest.mark.parametrize("name", ["a", "a"]) - def test_filter_bytestring(self, name): - # GH13101 - df = DataFrame({b"a": [1, 2], b"b": [3, 4]}) - expected = DataFrame({b"a": [1, 2]}) - - tm.assert_frame_equal(df.filter(like=name), expected) - tm.assert_frame_equal(df.filter(regex=name), expected) - - def test_filter_corner(self): - empty = DataFrame() - - result = empty.filter([]) - tm.assert_frame_equal(result, empty) - - result = empty.filter(like="foo") - tm.assert_frame_equal(result, empty) - - def test_filter_regex_non_string(self): - # GH#5798 trying to filter on non-string columns should drop, - # not raise - df = pd.DataFrame(np.random.random((3, 2)), columns=["STRING", 123]) - result = df.filter(regex="STRING") - expected = df[["STRING"]] - tm.assert_frame_equal(result, expected) + def test_filter_deprecated(self, float_frame): + with tm.assert_produces_warning(FutureWarning): + float_frame.filter(["A", "B", "E"]) def test_take(self, float_frame): # homogeneous
- [ ] xref #26642
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

I propose to deprecate ``(DataFrame|Series).filter``. The method's functionality is already available through ``.loc``, except for regex matching, for which I've made a proposal to add regex support to ``.loc`` in #27363. A sketch of the equivalent ``.loc`` spellings follows below.
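As an illustration of the suggested migration (a sketch only -- the ``.loc(regex=True)`` syntax mentioned in the new docstring is just the #27363 proposal, so the regex case below uses ``str.contains`` instead):

```python
import pandas as pd

df = pd.DataFrame({"one": [1], "two": [2], "three": [3]})

# df.filter(items=["one", "three"]) becomes plain label selection:
df.loc[:, ["one", "three"]]

# df.filter(like="o") and df.filter(regex="e$") become boolean masks;
# both use substring / re.search semantics, which str.contains mirrors:
df.loc[:, df.columns.str.contains("o", regex=False)]  # like="o"
df.loc[:, df.columns.str.contains("e$")]              # regex="e$"
```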
https://api.github.com/repos/pandas-dev/pandas/pulls/27617
2019-07-26T22:43:26Z
2020-02-02T01:19:16Z
null
2020-02-02T01:19:17Z
Move json_normalize to pd namespace
diff --git a/doc/redirects.csv b/doc/redirects.csv index 587a5e9f65b38..0a71f037d23c3 100644 --- a/doc/redirects.csv +++ b/doc/redirects.csv @@ -777,7 +777,7 @@ generated/pandas.io.formats.style.Styler.to_excel,../reference/api/pandas.io.for generated/pandas.io.formats.style.Styler.use,../reference/api/pandas.io.formats.style.Styler.use generated/pandas.io.formats.style.Styler.where,../reference/api/pandas.io.formats.style.Styler.where generated/pandas.io.json.build_table_schema,../reference/api/pandas.io.json.build_table_schema -generated/pandas.io.json.json_normalize,../reference/api/pandas.io.json.json_normalize +generated/pandas.io.json.json_normalize,../reference/api/pandas.json_normalize generated/pandas.io.stata.StataReader.data_label,../reference/api/pandas.io.stata.StataReader.data_label generated/pandas.io.stata.StataReader.value_labels,../reference/api/pandas.io.stata.StataReader.value_labels generated/pandas.io.stata.StataReader.variable_labels,../reference/api/pandas.io.stata.StataReader.variable_labels diff --git a/doc/source/reference/io.rst b/doc/source/reference/io.rst index 50168dec928ab..0037d4a4410c3 100644 --- a/doc/source/reference/io.rst +++ b/doc/source/reference/io.rst @@ -50,13 +50,13 @@ JSON :toctree: api/ read_json + json_normalize .. currentmodule:: pandas.io.json .. autosummary:: :toctree: api/ - json_normalize build_table_schema .. currentmodule:: pandas diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 52e16c15fc481..ae0f02312e1df 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -2136,27 +2136,26 @@ into a flat table. .. ipython:: python - from pandas.io.json import json_normalize data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}}, {'name': {'given': 'Mose', 'family': 'Regner'}}, {'id': 2, 'name': 'Faye Raker'}] - json_normalize(data) + pd.json_normalize(data) .. ipython:: python data = [{'state': 'Florida', 'shortname': 'FL', 'info': {'governor': 'Rick Scott'}, - 'counties': [{'name': 'Dade', 'population': 12345}, - {'name': 'Broward', 'population': 40000}, - {'name': 'Palm Beach', 'population': 60000}]}, + 'county': [{'name': 'Dade', 'population': 12345}, + {'name': 'Broward', 'population': 40000}, + {'name': 'Palm Beach', 'population': 60000}]}, {'state': 'Ohio', 'shortname': 'OH', 'info': {'governor': 'John Kasich'}, - 'counties': [{'name': 'Summit', 'population': 1234}, - {'name': 'Cuyahoga', 'population': 1337}]}] + 'county': [{'name': 'Summit', 'population': 1234}, + {'name': 'Cuyahoga', 'population': 1337}]}] - json_normalize(data, 'counties', ['state', 'shortname', ['info', 'governor']]) + pd.json_normalize(data, 'county', ['state', 'shortname', ['info', 'governor']]) The max_level parameter provides more control over which level to end normalization. With max_level=1 the following snippet normalizes until 1st nesting level of the provided dict. @@ -2169,7 +2168,7 @@ With max_level=1 the following snippet normalizes until 1st nesting level of the 'Name': 'Name001'}}, 'Image': {'a': 'b'} }] - json_normalize(data, max_level=1) + pd.json_normalize(data, max_level=1) .. _io.jsonl: diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index be137eaabd40a..b6b91983b8267 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -170,7 +170,7 @@ which level to end normalization (:issue:`23843`): The repr now looks like this: -.. ipython:: python +.. 
code-block:: ipython from pandas.io.json import json_normalize data = [{ diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst old mode 100755 new mode 100644 index 7554a2fc0b1c2..6ad6b5129ef5a --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -498,6 +498,9 @@ Deprecations - The parameter ``numeric_only`` of :meth:`Categorical.min` and :meth:`Categorical.max` is deprecated and replaced with ``skipna`` (:issue:`25303`) - The parameter ``label`` in :func:`lreshape` has been deprecated and will be removed in a future version (:issue:`29742`) - ``pandas.core.index`` has been deprecated and will be removed in a future version, the public classes are available in the top-level namespace (:issue:`19711`) +- :func:`pandas.json_normalize` is now exposed in the top-level namespace. + Usage of ``json_normalize`` as ``pandas.io.json.json_normalize`` is now deprecated and + it is recommended to use ``json_normalize`` as :func:`pandas.json_normalize` instead (:issue:`27586`). - .. _whatsnew_1000.prior_deprecations: diff --git a/pandas/__init__.py b/pandas/__init__.py index ec367c62de9db..30b7e5bafe1df 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -175,6 +175,8 @@ read_spss, ) +from pandas.io.json import _json_normalize as json_normalize + from pandas.util._tester import test import pandas.testing import pandas.arrays diff --git a/pandas/io/json/__init__.py b/pandas/io/json/__init__.py index 2382d993df96b..48febb086c302 100644 --- a/pandas/io/json/__init__.py +++ b/pandas/io/json/__init__.py @@ -1,5 +1,5 @@ from pandas.io.json._json import dumps, loads, read_json, to_json -from pandas.io.json._normalize import json_normalize +from pandas.io.json._normalize import _json_normalize, json_normalize from pandas.io.json._table_schema import build_table_schema __all__ = [ @@ -7,6 +7,7 @@ "loads", "read_json", "to_json", + "_json_normalize", "json_normalize", "build_table_schema", ] diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index df513d4d37d71..3c9c906939e8f 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -8,6 +8,7 @@ import numpy as np from pandas._libs.writers import convert_json_to_lines +from pandas.util._decorators import deprecate from pandas import DataFrame @@ -108,7 +109,7 @@ def nested_to_record( return new_ds -def json_normalize( +def _json_normalize( data: Union[Dict, List[Dict]], record_path: Optional[Union[str, List]] = None, meta: Optional[Union[str, List]] = None, @@ -332,3 +333,8 @@ def _recursive_extract(data, path, seen_meta, level=0): ) result[k] = np.array(v, dtype=object).repeat(lengths) return result + + +json_normalize = deprecate( + "pandas.io.json.json_normalize", _json_normalize, "1.0.0", "pandas.json_normalize" +) diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index b832440aca99c..900ba878e4c0a 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -170,6 +170,9 @@ class TestPDApi(Base): "read_spss", ] + # top-level json funcs + funcs_json = ["json_normalize"] + # top-level to_* funcs funcs_to = ["to_datetime", "to_numeric", "to_pickle", "to_timedelta"] @@ -209,6 +212,7 @@ def test_api(self): + self.funcs + self.funcs_option + self.funcs_read + + self.funcs_json + self.funcs_to + self.deprecated_funcs_in_future + self.deprecated_funcs diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index c71c52bce87b8..038dd2df4d632 100644 --- 
a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -3,10 +3,9 @@ import numpy as np import pytest -from pandas import DataFrame, Index +from pandas import DataFrame, Index, json_normalize import pandas.util.testing as tm -from pandas.io.json import json_normalize from pandas.io.json._normalize import nested_to_record @@ -698,3 +697,10 @@ def test_with_large_max_level(self): ] output = nested_to_record(input_data, max_level=max_level) assert output == expected + + def test_deprecated_import(self): + with tm.assert_produces_warning(FutureWarning): + from pandas.io.json import json_normalize + + recs = [{"a": 1, "b": 2, "c": 3}, {"a": 4, "b": 5, "c": 6}] + json_normalize(recs)
- Added a whatsnew entry
- Imported ``pandas.io.json.json_normalize`` in ``__init__.py``

- [x] closes #27586
- [x] whatsnew entry

cc: @WillAyd
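A short sketch of the resulting usage, matching the doc and test changes above:

```python
import pandas as pd

data = [{"id": 1, "name": {"first": "Coleen", "last": "Volk"}},
        {"name": {"given": "Mose", "family": "Regner"}},
        {"id": 2, "name": "Faye Raker"}]

# New top-level spelling:
pd.json_normalize(data)

# Old spelling still imports, but calling it now emits a FutureWarning:
from pandas.io.json import json_normalize
json_normalize(data)
```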
https://api.github.com/repos/pandas-dev/pandas/pulls/27615
2019-07-26T17:12:09Z
2019-12-18T19:53:18Z
2019-12-18T19:53:17Z
2019-12-18T19:55:50Z
CLN: Fix comment in interval.py
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 7f1aad3ba3261..2a0d2c8770063 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -1206,7 +1206,7 @@ def maybe_convert_platform_interval(values): """ if isinstance(values, (list, tuple)) and len(values) == 0: # GH 19016 - # empty lists/tuples get object dtype by default, but this is not + # empty lists/tuples get object dtype by default, but this is # prohibited for IntervalArray, so coerce to integer instead return np.array([], dtype=np.int64) elif is_categorical_dtype(values):
https://api.github.com/repos/pandas-dev/pandas/pulls/27612
2019-07-26T15:12:28Z
2019-07-26T20:58:56Z
2019-07-26T20:58:56Z
2019-07-26T21:02:41Z