title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
CLN: re-wrap docstrings in pandas\compat\numpy\function.py
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index 938f57f504b04..c074b06042e26 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -1,20 +1,18 @@ """ -For compatibility with numpy libraries, pandas functions or -methods have to accept '*args' and '**kwargs' parameters to -accommodate numpy arguments that are not actually used or -respected in the pandas implementation. - -To ensure that users do not abuse these parameters, validation -is performed in 'validators.py' to make sure that any extra -parameters passed correspond ONLY to those in the numpy signature. -Part of that validation includes whether or not the user attempted -to pass in non-default values for these extraneous parameters. As we -want to discourage users from relying on these parameters when calling -the pandas implementation, we want them only to pass in the default values -for these parameters. - -This module provides a set of commonly used default arguments for functions -and methods that are spread throughout the codebase. This module will make it +For compatibility with numpy libraries, pandas functions or methods have to +accept '*args' and '**kwargs' parameters to accommodate numpy arguments that +are not actually used or respected in the pandas implementation. + +To ensure that users do not abuse these parameters, validation is performed in +'validators.py' to make sure that any extra parameters passed correspond ONLY +to those in the numpy signature. Part of that validation includes whether or +not the user attempted to pass in non-default values for these extraneous +parameters. As we want to discourage users from relying on these parameters +when calling the pandas implementation, we want them only to pass in the +default values for these parameters. + +This module provides a set of commonly used default arguments for functions and +methods that are spread throughout the codebase. 
This module will make it easier to adjust to future upstream changes in the analogous numpy signatures. """ from distutils.version import LooseVersion @@ -92,11 +90,10 @@ def process_skipna(skipna, args): def validate_argmin_with_skipna(skipna, args, kwargs): """ - If 'Series.argmin' is called via the 'numpy' library, - the third parameter in its signature is 'out', which - takes either an ndarray or 'None', so check if the - 'skipna' parameter is either an instance of ndarray or - is None, since 'skipna' itself should be a boolean + If 'Series.argmin' is called via the 'numpy' library, the third parameter + in its signature is 'out', which takes either an ndarray or 'None', so + check if the 'skipna' parameter is either an instance of ndarray or is + None, since 'skipna' itself should be a boolean """ skipna, args = process_skipna(skipna, args) validate_argmin(args, kwargs) @@ -105,11 +102,10 @@ def validate_argmin_with_skipna(skipna, args, kwargs): def validate_argmax_with_skipna(skipna, args, kwargs): """ - If 'Series.argmax' is called via the 'numpy' library, - the third parameter in its signature is 'out', which - takes either an ndarray or 'None', so check if the - 'skipna' parameter is either an instance of ndarray or - is None, since 'skipna' itself should be a boolean + If 'Series.argmax' is called via the 'numpy' library, the third parameter + in its signature is 'out', which takes either an ndarray or 'None', so + check if the 'skipna' parameter is either an instance of ndarray or is + None, since 'skipna' itself should be a boolean """ skipna, args = process_skipna(skipna, args) validate_argmax(args, kwargs) @@ -130,8 +126,8 @@ def validate_argmax_with_skipna(skipna, args, kwargs): ARGSORT_DEFAULTS, fname="argsort", max_fname_arg_count=0, method="both" ) -# two different signatures of argsort, this second validation -# for when the `kind` param is supported +# two different signatures of argsort, this second validation for when the +# `kind` param is 
supported ARGSORT_DEFAULTS_KIND: Dict[str, Optional[int]] = {} ARGSORT_DEFAULTS_KIND["axis"] = -1 ARGSORT_DEFAULTS_KIND["order"] = None @@ -142,11 +138,10 @@ def validate_argmax_with_skipna(skipna, args, kwargs): def validate_argsort_with_ascending(ascending, args, kwargs): """ - If 'Categorical.argsort' is called via the 'numpy' library, the - first parameter in its signature is 'axis', which takes either - an integer or 'None', so check if the 'ascending' parameter has - either integer type or is None, since 'ascending' itself should - be a boolean + If 'Categorical.argsort' is called via the 'numpy' library, the first + parameter in its signature is 'axis', which takes either an integer or + 'None', so check if the 'ascending' parameter has either integer type or is + None, since 'ascending' itself should be a boolean """ if is_integer(ascending) or ascending is None: args = (ascending,) + args @@ -164,10 +159,10 @@ def validate_argsort_with_ascending(ascending, args, kwargs): def validate_clip_with_axis(axis, args, kwargs): """ - If 'NDFrame.clip' is called via the numpy library, the third - parameter in its signature is 'out', which can takes an ndarray, - so check if the 'axis' parameter is an instance of ndarray, since - 'axis' itself should either be an integer or None + If 'NDFrame.clip' is called via the numpy library, the third parameter in + its signature is 'out', which can takes an ndarray, so check if the 'axis' + parameter is an instance of ndarray, since 'axis' itself should either be + an integer or None """ if isinstance(axis, ndarray): args = (axis,) + args @@ -190,10 +185,9 @@ def validate_clip_with_axis(axis, args, kwargs): def validate_cum_func_with_skipna(skipna, args, kwargs, name): """ - If this function is called via the 'numpy' library, the third - parameter in its signature is 'dtype', which takes either a - 'numpy' dtype or 'None', so check if the 'skipna' parameter is - a boolean or not + If this function is called via the 'numpy' 
library, the third parameter in + its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so + check if the 'skipna' parameter is a boolean or not """ if not is_bool(skipna): args = (skipna,) + args @@ -294,10 +288,9 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name): def validate_take_with_convert(convert, args, kwargs): """ - If this function is called via the 'numpy' library, the third - parameter in its signature is 'axis', which takes either an - ndarray or 'None', so check if the 'convert' parameter is either - an instance of ndarray or is None + If this function is called via the 'numpy' library, the third parameter in + its signature is 'axis', which takes either an ndarray or 'None', so check + if the 'convert' parameter is either an instance of ndarray or is None """ if isinstance(convert, ndarray) or convert is None: args = (convert,) + args @@ -360,10 +353,9 @@ def validate_expanding_func(name, args, kwargs) -> None: def validate_groupby_func(name, args, kwargs, allowed=None) -> None: """ - 'args' and 'kwargs' should be empty, except for allowed - kwargs because all of - their necessary parameters are explicitly listed in - the function signature + 'args' and 'kwargs' should be empty, except for allowed kwargs because all + of their necessary parameters are explicitly listed in the function + signature """ if allowed is None: allowed = [] @@ -382,9 +374,8 @@ def validate_groupby_func(name, args, kwargs, allowed=None) -> None: def validate_resampler_func(method: str, args, kwargs) -> None: """ - 'args' and 'kwargs' should be empty because all of - their necessary parameters are explicitly listed in - the function signature + 'args' and 'kwargs' should be empty because all of their necessary + parameters are explicitly listed in the function signature """ if len(args) + len(kwargs) > 0: if method in RESAMPLER_NUMPY_OPS: @@ -398,8 +389,8 @@ def validate_resampler_func(method: str, args, kwargs) -> None: def 
validate_minmax_axis(axis: Optional[int]) -> None: """ - Ensure that the axis argument passed to min, max, argmin, or argmax is - zero or None, as otherwise it will be incorrectly ignored. + Ensure that the axis argument passed to min, max, argmin, or argmax is zero + or None, as otherwise it will be incorrectly ignored. Parameters ----------
https://api.github.com/repos/pandas-dev/pandas/pulls/36979
2020-10-08T11:31:06Z
2020-10-10T17:32:46Z
2020-10-10T17:32:46Z
2020-10-10T19:43:44Z
CI Upgrade isort in pre-commit
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 33afe8d443457..0cb8983ec68e3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,10 +21,12 @@ repos: - file args: [--append-config=flake8/cython-template.cfg] - repo: https://github.com/PyCQA/isort - rev: 5.2.2 + rev: 5.6.0 hooks: - id: isort exclude: ^pandas/__init__\.py$|^pandas/core/api\.py$ + files: '.pxd$|.py$' + types: [file] - repo: https://github.com/asottile/pyupgrade rev: v2.7.2 hooks: diff --git a/pandas/_libs/hashtable.pxd b/pandas/_libs/hashtable.pxd index 2650bea921b3f..75c273b35ee7d 100644 --- a/pandas/_libs/hashtable.pxd +++ b/pandas/_libs/hashtable.pxd @@ -1,7 +1,15 @@ +from numpy cimport intp_t, ndarray + from pandas._libs.khash cimport ( - kh_int64_t, kh_uint64_t, kh_float64_t, kh_pymap_t, kh_str_t, uint64_t, - int64_t, float64_t) -from numpy cimport ndarray, intp_t + float64_t, + int64_t, + kh_float64_t, + kh_int64_t, + kh_pymap_t, + kh_str_t, + kh_uint64_t, + uint64_t, +) # prototypes for sharing diff --git a/pandas/_libs/khash.pxd b/pandas/_libs/khash.pxd index b5fe73df5d9be..1bb3a158b4b1a 100644 --- a/pandas/_libs/khash.pxd +++ b/pandas/_libs/khash.pxd @@ -1,5 +1,6 @@ from cpython.object cimport PyObject -from numpy cimport int64_t, uint64_t, int32_t, uint32_t, float64_t +from numpy cimport float64_t, int32_t, int64_t, uint32_t, uint64_t + cdef extern from "khash_python.h": ctypedef uint32_t khint_t diff --git a/pandas/_libs/missing.pxd b/pandas/_libs/missing.pxd index 090c5c5173280..e02b84381b62c 100644 --- a/pandas/_libs/missing.pxd +++ b/pandas/_libs/missing.pxd @@ -1,5 +1,6 @@ from numpy cimport ndarray, uint8_t + cpdef bint checknull(object val) cpdef bint checknull_old(object val) cpdef ndarray[uint8_t] isnaobj(ndarray arr) diff --git a/pandas/_libs/tslibs/ccalendar.pxd b/pandas/_libs/tslibs/ccalendar.pxd index 4eb5188b8a04b..388fd0c62b937 100644 --- a/pandas/_libs/tslibs/ccalendar.pxd +++ b/pandas/_libs/tslibs/ccalendar.pxd @@ -1,6 +1,5 @@ from 
cython cimport Py_ssize_t - -from numpy cimport int64_t, int32_t +from numpy cimport int32_t, int64_t ctypedef (int32_t, int32_t, int32_t) iso_calendar_t diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 56f5481b7e781..c80be79a12d90 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -1,6 +1,5 @@ from cpython.datetime cimport datetime, tzinfo - -from numpy cimport int64_t, int32_t, ndarray +from numpy cimport int32_t, int64_t, ndarray from pandas._libs.tslibs.np_datetime cimport npy_datetimestruct diff --git a/pandas/_libs/tslibs/nattype.pxd b/pandas/_libs/tslibs/nattype.pxd index 3f7240654d7e8..d38f4518f9bf0 100644 --- a/pandas/_libs/tslibs/nattype.pxd +++ b/pandas/_libs/tslibs/nattype.pxd @@ -1,6 +1,7 @@ from cpython.datetime cimport datetime - from numpy cimport int64_t + + cdef int64_t NPY_NAT cdef bint _nat_scalar_rules[6] diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd index eebdcb3ace507..b2524c6bc6c0d 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -1,6 +1,6 @@ from cpython.datetime cimport date, datetime +from numpy cimport int32_t, int64_t -from numpy cimport int64_t, int32_t cdef extern from "numpy/ndarrayobject.h": ctypedef int64_t npy_timedelta diff --git a/pandas/_libs/tslibs/offsets.pxd b/pandas/_libs/tslibs/offsets.pxd index 9a9244db4a565..215c3f849281f 100644 --- a/pandas/_libs/tslibs/offsets.pxd +++ b/pandas/_libs/tslibs/offsets.pxd @@ -1,5 +1,6 @@ from numpy cimport int64_t + cpdef to_offset(object obj) cdef bint is_offset_object(object obj) cdef bint is_tick_object(object obj) diff --git a/pandas/_libs/tslibs/period.pxd b/pandas/_libs/tslibs/period.pxd index 9c0342e239a89..46c6e52cb9156 100644 --- a/pandas/_libs/tslibs/period.pxd +++ b/pandas/_libs/tslibs/period.pxd @@ -2,5 +2,6 @@ from numpy cimport int64_t from .np_datetime cimport npy_datetimestruct + cdef bint 
is_period_object(object obj) cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd index 4142861e9ad38..fed1f2d326819 100644 --- a/pandas/_libs/tslibs/timedeltas.pxd +++ b/pandas/_libs/tslibs/timedeltas.pxd @@ -1,6 +1,7 @@ from cpython.datetime cimport timedelta from numpy cimport int64_t + # Exposed for tslib, not intended for outside use. cpdef int64_t delta_to_nanoseconds(delta) except? -1 cdef convert_to_timedelta64(object ts, str unit) diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd index 6fb7b1ea8f520..45aae3581fe79 100644 --- a/pandas/_libs/tslibs/timestamps.pxd +++ b/pandas/_libs/tslibs/timestamps.pxd @@ -1,5 +1,4 @@ from cpython.datetime cimport datetime, tzinfo - from numpy cimport int64_t from pandas._libs.tslibs.base cimport ABCTimestamp diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd index 136710003d32a..753c881ed505c 100644 --- a/pandas/_libs/tslibs/timezones.pxd +++ b/pandas/_libs/tslibs/timezones.pxd @@ -1,5 +1,6 @@ from cpython.datetime cimport datetime, timedelta, tzinfo + cdef tzinfo utc_pytz cpdef bint is_utc(tzinfo tz) diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd index 1f79a1ea7b6d1..16d801f69df05 100644 --- a/pandas/_libs/tslibs/util.pxd +++ b/pandas/_libs/tslibs/util.pxd @@ -1,6 +1,7 @@ from cpython.object cimport PyTypeObject + cdef extern from *: """ PyObject* char_to_string(const char* data) { @@ -26,7 +27,8 @@ cdef extern from "Python.h": const char* PyUnicode_AsUTF8AndSize(object obj, Py_ssize_t* length) except NULL -from numpy cimport int64_t, float64_t +from numpy cimport float64_t, int64_t + cdef extern from "numpy/arrayobject.h": PyTypeObject PyFloatingArrType_Type diff --git a/pandas/_libs/util.pxd b/pandas/_libs/util.pxd index 5f234910deede..bd1e21b0d8665 100644 --- a/pandas/_libs/util.pxd +++ b/pandas/_libs/util.pxd @@ -1,8 +1,9 
@@ -from pandas._libs.tslibs.util cimport * - cimport numpy as cnp from numpy cimport ndarray +from pandas._libs.tslibs.util cimport * + + cdef extern from "numpy/ndarraytypes.h": void PyArray_CLEARFLAGS(ndarray arr, int flags) nogil
- xref #36879 ```console (base) root@ada0229371a4:/home/pandas-tazminia# pre-commit autoupdate && pre-commit run --all Updating https://github.com/python/black ... [INFO] Initializing environment for https://github.com/python/black. already up to date. Updating https://gitlab.com/pycqa/flake8 ... [INFO] Initializing environment for https://gitlab.com/pycqa/flake8. already up to date. Updating https://github.com/PyCQA/isort ... [INFO] Initializing environment for https://github.com/PyCQA/isort. updating 5.2.2 -> 5.6.0. Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade. already up to date. Updating https://github.com/pre-commit/pygrep-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pygrep-hooks. already up to date. Updating https://github.com/asottile/yesqa ... [INFO] Initializing environment for https://github.com/asottile/yesqa. already up to date. Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks. already up to date. [INFO] Initializing environment for https://gitlab.com/pycqa/flake8:flake8-comprehensions>=3.1.0. [INFO] Installing environment for https://github.com/python/black. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://gitlab.com/pycqa/flake8. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://gitlab.com/pycqa/flake8. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/PyCQA/isort. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/asottile/pyupgrade. [INFO] Once installed this environment will be reused. 
[INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/asottile/yesqa. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... black....................................................................Passed flake8...................................................................Passed flake8-pyx...............................................................Passed flake8-pxd...............................................................Passed isort....................................................................Passed pyupgrade................................................................Passed rst ``code`` is two backticks............................................Passed Generate pip dependency from conda.......................................Passed Strip unnecessary `# noqa`s..............................................Passed Fix End of Files.........................................................Passed ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36978
2020-10-08T11:03:24Z
2020-10-08T17:42:46Z
2020-10-08T17:42:46Z
2020-10-08T17:43:10Z
DOC: improve description of the example which dataframe has two rows
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 29c63b030cdd5..22f58b769a938 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8971,8 +8971,8 @@ def mode(self, axis=0, numeric_only=False, dropna=True) -> DataFrame: ostrich bird 2 NaN By default, missing values are not considered, and the mode of wings - are both 0 and 2. The second row of species and legs contains ``NaN``, - because they have only one mode, but the DataFrame has two rows. + are both 0 and 2. Because the resulting DataFrame has two rows, + the second row of ``species`` and ``legs`` contains ``NaN``. >>> df.mode() species legs wings
improve description of the example which dataframe has two rows - [ ] closes #36970 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36971
2020-10-08T06:17:46Z
2020-10-11T19:38:46Z
2020-10-11T19:38:46Z
2020-10-11T19:39:17Z
DOC: make rename docs consistent
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 29c63b030cdd5..80e9ec5076610 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4323,7 +4323,7 @@ def rename( Traceback (most recent call last): KeyError: ['C'] not found in axis - Using axis-style parameters + Using axis-style parameters: >>> df.rename(str.lower, axis='columns') a b
Other doc text describing code end in a colon. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36969
2020-10-08T04:46:42Z
2020-10-08T22:08:27Z
2020-10-08T22:08:27Z
2020-10-08T22:08:33Z
TYP: consistent return types blocks
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 09f276be7d64a..2ab5ae6e22092 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -491,11 +491,11 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"] return extend_blocks([b.downcast(downcast) for b in blocks]) - def downcast(self, dtypes=None): + def downcast(self, dtypes=None) -> List["Block"]: """ try to downcast each item to the dict of dtypes if present """ # turn it off completely if dtypes is False: - return self + return [self] values = self.values @@ -506,11 +506,11 @@ def downcast(self, dtypes=None): dtypes = "infer" nv = maybe_downcast_to_dtype(values, dtypes) - return self.make_block(nv) + return [self.make_block(nv)] # ndim > 1 if dtypes is None: - return self + return [self] if not (dtypes == "infer" or isinstance(dtypes, dict)): raise ValueError( @@ -639,13 +639,13 @@ def convert( numeric: bool = True, timedelta: bool = True, coerce: bool = False, - ): + ) -> List["Block"]: """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we are not an ObjectBlock here! """ - return self.copy() if copy else self + return [self.copy()] if copy else [self] def _can_hold_element(self, element: Any) -> bool: """ require the same dtype as ourselves """ @@ -788,7 +788,9 @@ def replace( convert=convert, ) if convert: - blocks = [b.convert(numeric=False, copy=not inplace) for b in blocks] + blocks = extend_blocks( + [b.convert(numeric=False, copy=not inplace) for b in blocks] + ) return blocks def _replace_single(self, *args, **kwargs): @@ -2461,12 +2463,10 @@ def convert( numeric: bool = True, timedelta: bool = True, coerce: bool = False, - ): + ) -> List["Block"]: """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we ARE an ObjectBlock!!!!! - - can return multiple blocks! 
""" # operate column-by-column def f(mask, val, idx): @@ -2639,8 +2639,10 @@ def re_replacer(s): # convert block = self.make_block(new_values) if convert: - block = block.convert(numeric=False) - return block + nbs = block.convert(numeric=False) + else: + nbs = [block] + return nbs def _replace_coerce( self, to_replace, value, inplace=True, regex=False, convert=False, mask=None @@ -2669,7 +2671,7 @@ def _replace_coerce( A new block if there is anything to replace or the original block. """ if mask.any(): - block = super()._replace_coerce( + nbs = super()._replace_coerce( to_replace=to_replace, value=value, inplace=inplace, @@ -2678,11 +2680,11 @@ def _replace_coerce( mask=mask, ) if convert: - block = [b.convert(numeric=False, copy=True) for b in block] - return block + nbs = extend_blocks([b.convert(numeric=False, copy=True) for b in nbs]) + return nbs if convert: - return [self.convert(numeric=False, copy=True)] - return self + return self.convert(numeric=False, copy=True) + return [self] class CategoricalBlock(ExtensionBlock):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36967
2020-10-08T03:39:26Z
2020-10-08T21:12:31Z
2020-10-08T21:12:31Z
2020-10-08T21:21:14Z
CLN: remove unnecessary BoolBlock.replace
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 09f276be7d64a..050789edbe79d 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1171,8 +1171,8 @@ def interpolate( inplace = validate_bool_kwarg(inplace, "inplace") - # Only FloatBlocks will contain NaNs. timedelta subclasses IntBlock - if (self.is_bool or self.is_integer) and not self.is_timedelta: + if not self._can_hold_na: + # If there are no NAs, then interpolate is a no-op return self if inplace else self.copy() # a fill na type method @@ -2425,15 +2425,6 @@ def _can_hold_element(self, element: Any) -> bool: return issubclass(tipo.type, np.bool_) return isinstance(element, (bool, np.bool_)) - def replace(self, to_replace, value, inplace=False, regex=False, convert=True): - inplace = validate_bool_kwarg(inplace, "inplace") - to_replace_values = np.atleast_1d(to_replace) - if not np.can_cast(to_replace_values, bool): - return self - return super().replace( - to_replace, value, inplace=inplace, regex=regex, convert=convert - ) - class ObjectBlock(Block): __slots__ = ()
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry The fastpath it implements is equivalent to the can_hold_element fastpath in Block.replace
https://api.github.com/repos/pandas-dev/pandas/pulls/36966
2020-10-08T01:54:45Z
2020-10-08T21:12:52Z
2020-10-08T21:12:52Z
2020-10-08T21:23:19Z
CLN: share to_native_types
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 09f276be7d64a..7332d01f15993 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2070,7 +2070,7 @@ def _can_hold_element(self, element: Any) -> bool: return is_integer(element) or (is_float(element) and element.is_integer()) -class DatetimeLikeBlockMixin: +class DatetimeLikeBlockMixin(Block): """Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock.""" @property @@ -2108,8 +2108,15 @@ def shift(self, periods, axis=0, fill_value=None): new_values = values.shift(periods, fill_value=fill_value, axis=axis) return self.make_block_same_class(new_values) + def to_native_types(self, na_rep="NaT", **kwargs): + """ convert to our native types format """ + arr = self.array_values() + + result = arr._format_native_types(na_rep=na_rep, **kwargs) + return self.make_block(result) -class DatetimeBlock(DatetimeLikeBlockMixin, Block): + +class DatetimeBlock(DatetimeLikeBlockMixin): __slots__ = () is_datetime = True @@ -2187,15 +2194,6 @@ def _can_hold_element(self, element: Any) -> bool: return is_valid_nat_for_dtype(element, self.dtype) - def to_native_types(self, na_rep="NaT", date_format=None, **kwargs): - """ convert to our native types format """ - dta = self.array_values() - - result = dta._format_native_types( - na_rep=na_rep, date_format=date_format, **kwargs - ) - return self.make_block(result) - def set(self, locs, values): """ See Block.set.__doc__ @@ -2407,12 +2405,6 @@ def fillna(self, value, **kwargs): ) return super().fillna(value, **kwargs) - def to_native_types(self, na_rep="NaT", **kwargs): - """ convert to our native types format """ - tda = self.array_values() - res = tda._format_native_types(na_rep, **kwargs) - return self.make_block(res) - class BoolBlock(NumericBlock): __slots__ = ()
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36965
2020-10-08T01:16:28Z
2020-10-10T17:31:49Z
2020-10-10T17:31:49Z
2020-10-10T18:09:42Z
REF/TYP: use OpsMixin for logical methods
diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py index 1fba022f2a1de..185e9197e01fe 100644 --- a/pandas/core/arraylike.py +++ b/pandas/core/arraylike.py @@ -8,6 +8,7 @@ from pandas.errors import AbstractMethodError +from pandas.core.ops import roperator from pandas.core.ops.common import unpack_zerodim_and_defer @@ -41,3 +42,33 @@ def __gt__(self, other): @unpack_zerodim_and_defer("__ge__") def __ge__(self, other): return self._cmp_method(other, operator.ge) + + # ------------------------------------------------------------- + # Logical Methods + + def _logical_method(self, other, op): + raise AbstractMethodError(self) + + @unpack_zerodim_and_defer("__and__") + def __and__(self, other): + return self._logical_method(other, operator.and_) + + @unpack_zerodim_and_defer("__rand__") + def __rand__(self, other): + return self._logical_method(other, roperator.rand_) + + @unpack_zerodim_and_defer("__or__") + def __or__(self, other): + return self._logical_method(other, operator.or_) + + @unpack_zerodim_and_defer("__ror__") + def __ror__(self, other): + return self._logical_method(other, roperator.ror_) + + @unpack_zerodim_and_defer("__xor__") + def __xor__(self, other): + return self._logical_method(other, operator.xor) + + @unpack_zerodim_and_defer("__rxor__") + def __rxor__(self, other): + return self._logical_method(other, roperator.rxor) diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 84319b69d9a35..ae21f13ea3f49 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -280,7 +280,7 @@ def dispatch_to_series(left, right, func, axis: Optional[int] = None): # Series -def _align_method_SERIES(left: "Series", right, align_asobject: bool = False): +def align_method_SERIES(left: "Series", right, align_asobject: bool = False): """ align lhs and rhs Series """ # ToDo: Different from align_method_FRAME, list, tuple and ndarray # are not coerced here @@ -311,7 +311,7 @@ def arith_method_SERIES(cls, op, special): 
@unpack_zerodim_and_defer(op_name) def wrapper(left, right): res_name = get_op_result_name(left, right) - left, right = _align_method_SERIES(left, right) + left, right = align_method_SERIES(left, right) lvalues = extract_array(left, extract_numpy=True) rvalues = extract_array(right, extract_numpy=True) @@ -323,29 +323,6 @@ def wrapper(left, right): return wrapper -def bool_method_SERIES(cls, op, special): - """ - Wrapper function for Series arithmetic operations, to avoid - code duplication. - """ - assert special # non-special uses flex_method_SERIES - op_name = _get_op_name(op, special) - - @unpack_zerodim_and_defer(op_name) - def wrapper(self, other): - res_name = get_op_result_name(self, other) - self, other = _align_method_SERIES(self, other, align_asobject=True) - - lvalues = extract_array(self, extract_numpy=True) - rvalues = extract_array(other, extract_numpy=True) - - res_values = logical_op(lvalues, rvalues, op) - return self._construct_result(res_values, name=res_name) - - wrapper.__name__ = op_name - return wrapper - - def flex_method_SERIES(cls, op, special): assert not special # "special" also means "not flex" name = _get_op_name(op, special) diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py index 2b117d5e22186..70fd814423c7f 100644 --- a/pandas/core/ops/methods.py +++ b/pandas/core/ops/methods.py @@ -46,7 +46,6 @@ def _get_method_wrappers(cls): from pandas.core.ops import ( arith_method_FRAME, arith_method_SERIES, - bool_method_SERIES, comp_method_FRAME, flex_comp_method_FRAME, flex_method_SERIES, @@ -58,7 +57,7 @@ def _get_method_wrappers(cls): comp_flex = flex_method_SERIES arith_special = arith_method_SERIES comp_special = None - bool_special = bool_method_SERIES + bool_special = None elif issubclass(cls, ABCDataFrame): arith_flex = arith_method_FRAME comp_flex = flex_comp_method_FRAME @@ -118,13 +117,23 @@ def f(self, other): ) ) - new_methods.update( - dict( - __iand__=_wrap_inplace_method(new_methods["__and__"]), - 
__ior__=_wrap_inplace_method(new_methods["__or__"]), - __ixor__=_wrap_inplace_method(new_methods["__xor__"]), + if bool_method is None: + # Series gets bool_method via OpsMixin + new_methods.update( + dict( + __iand__=_wrap_inplace_method(cls.__and__), + __ior__=_wrap_inplace_method(cls.__or__), + __ixor__=_wrap_inplace_method(cls.__xor__), + ) + ) + else: + new_methods.update( + dict( + __iand__=_wrap_inplace_method(new_methods["__and__"]), + __ior__=_wrap_inplace_method(new_methods["__or__"]), + __ixor__=_wrap_inplace_method(new_methods["__xor__"]), + ) ) - ) _add_methods(cls, new_methods=new_methods) diff --git a/pandas/core/series.py b/pandas/core/series.py index 5cc163807fac6..0852f1b650ae6 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4978,6 +4978,16 @@ def _cmp_method(self, other, op): return self._construct_result(res_values, name=res_name) + def _logical_method(self, other, op): + res_name = ops.get_op_result_name(self, other) + self, other = ops.align_method_SERIES(self, other, align_asobject=True) + + lvalues = extract_array(self, extract_numpy=True) + rvalues = extract_array(other, extract_numpy=True) + + res_values = ops.logical_op(lvalues, rvalues, op) + return self._construct_result(res_values, name=res_name) + Series._add_numeric_operations() diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py index 5e930b7b22f30..04ce705690cf3 100644 --- a/pandas/tests/extension/arrow/arrays.py +++ b/pandas/tests/extension/arrow/arrays.py @@ -21,6 +21,7 @@ register_extension_dtype, take, ) +from pandas.core.arraylike import OpsMixin @register_extension_dtype @@ -67,7 +68,7 @@ def construct_array_type(cls) -> Type["ArrowStringArray"]: return ArrowStringArray -class ArrowExtensionArray(ExtensionArray): +class ArrowExtensionArray(OpsMixin, ExtensionArray): _data: pa.ChunkedArray @classmethod @@ -109,7 +110,7 @@ def astype(self, dtype, copy=True): def dtype(self): return self._dtype - def _boolean_op(self, 
other, op): + def _logical_method(self, other, op): if not isinstance(other, type(self)): raise NotImplementedError() @@ -122,13 +123,7 @@ def __eq__(self, other): if not isinstance(other, type(self)): return False - return self._boolean_op(other, operator.eq) - - def __and__(self, other): - return self._boolean_op(other, operator.and_) - - def __or__(self, other): - return self._boolean_op(other, operator.or_) + return self._logical_method(other, operator.eq) @property def nbytes(self) -> int:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36964
2020-10-08T00:00:58Z
2020-10-08T11:14:05Z
2020-10-08T11:14:05Z
2020-10-08T15:21:59Z
BUG/API: tighter checks on DTI/TDI.equals
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index f85e3e716bbf9..0853664c766bb 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -23,6 +23,7 @@ from pandas.core.dtypes.common import ( DT64NS_DTYPE, TD64NS_DTYPE, + is_categorical_dtype, is_dtype_equal, is_float_dtype, is_integer_dtype, @@ -940,6 +941,9 @@ def sequence_to_td64ns(data, copy=False, unit=None, errors="raise"): data = data._data elif isinstance(data, IntegerArray): data = data.to_numpy("int64", na_value=tslibs.iNaT) + elif is_categorical_dtype(data.dtype): + data = data.categories.take(data.codes, fill_value=NaT)._values + copy = False # Convert whatever we have into timedelta64[ns] dtype if is_object_dtype(data.dtype) or is_string_dtype(data.dtype): diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 5821ff0aca3c2..5baa103a25d51 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -2,7 +2,7 @@ Base and utility classes for tseries type pandas objects. """ from datetime import datetime, tzinfo -from typing import Any, List, Optional, TypeVar, Union, cast +from typing import TYPE_CHECKING, Any, List, Optional, TypeVar, Union, cast import numpy as np @@ -16,6 +16,7 @@ from pandas.core.dtypes.common import ( ensure_int64, is_bool_dtype, + is_categorical_dtype, is_dtype_equal, is_integer, is_list_like, @@ -41,6 +42,9 @@ from pandas.core.ops import get_op_result_name from pandas.core.tools.timedeltas import to_timedelta +if TYPE_CHECKING: + from pandas import CategoricalIndex + _index_doc_kwargs = dict(ibase._index_doc_kwargs) _T = TypeVar("_T", bound="DatetimeIndexOpsMixin") @@ -137,14 +141,31 @@ def equals(self, other: object) -> bool: elif other.dtype.kind in ["f", "i", "u", "c"]: return False elif not isinstance(other, type(self)): - try: - other = type(self)(other) - except (ValueError, TypeError, OverflowError): - # e.g. 
- # ValueError -> cannot parse str entry, or OutOfBoundsDatetime - # TypeError -> trying to convert IntervalIndex to DatetimeIndex - # OverflowError -> Index([very_large_timedeltas]) - return False + inferrable = [ + "timedelta", + "timedelta64", + "datetime", + "datetime64", + "date", + "period", + ] + + should_try = False + if other.dtype == object: + should_try = other.inferred_type in inferrable + elif is_categorical_dtype(other.dtype): + other = cast("CategoricalIndex", other) + should_try = other.categories.inferred_type in inferrable + + if should_try: + try: + other = type(self)(other) + except (ValueError, TypeError, OverflowError): + # e.g. + # ValueError -> cannot parse str entry, or OutOfBoundsDatetime + # TypeError -> trying to convert IntervalIndex to DatetimeIndex + # OverflowError -> Index([very_large_timedeltas]) + return False if not is_dtype_equal(self.dtype, other.dtype): # have different timezone diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py index df857cce05bbb..be8ca61f1a730 100644 --- a/pandas/tests/indexes/datetimelike.py +++ b/pandas/tests/indexes/datetimelike.py @@ -116,6 +116,20 @@ def test_not_equals_numeric(self): assert not index.equals(pd.Index(index.asi8.astype("u8"))) assert not index.equals(pd.Index(index.asi8).astype("f8")) + def test_equals(self): + index = self.create_index() + + assert index.equals(index.astype(object)) + assert index.equals(pd.CategoricalIndex(index)) + assert index.equals(pd.CategoricalIndex(index.astype(object))) + + def test_not_equals_strings(self): + index = self.create_index() + + other = pd.Index([str(x) for x in index], dtype=object) + assert not index.equals(other) + assert not index.equals(pd.CategoricalIndex(other)) + def test_where_cast_str(self): index = self.create_index() diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py index 41e4e220c999c..09344bb5054f6 100644 --- 
a/pandas/tests/indexes/timedeltas/test_constructors.py +++ b/pandas/tests/indexes/timedeltas/test_constructors.py @@ -238,3 +238,15 @@ def test_explicit_none_freq(self): result = TimedeltaIndex(tdi._data, freq=None) assert result.freq is None + + def test_from_categorical(self): + tdi = timedelta_range(1, periods=5) + + cat = pd.Categorical(tdi) + + result = TimedeltaIndex(cat) + tm.assert_index_equal(result, tdi) + + ci = pd.CategoricalIndex(tdi) + result = TimedeltaIndex(ci) + tm.assert_index_equal(result, tdi)
- [x] closes #33531 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Also fixes `TimedeltaIndex(Categorical(tdi))` (which DatetimeIndex already gets right)
https://api.github.com/repos/pandas-dev/pandas/pulls/36962
2020-10-07T23:21:20Z
2020-10-08T21:15:41Z
2020-10-08T21:15:41Z
2020-10-08T21:27:59Z
TYP: use OpsMixin for DecimalArray
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 2895f33d5c887..3d1ebb01d632f 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -7,10 +7,11 @@ import numpy as np from pandas.core.dtypes.base import ExtensionDtype -from pandas.core.dtypes.common import is_dtype_equal, pandas_dtype +from pandas.core.dtypes.common import is_dtype_equal, is_list_like, pandas_dtype import pandas as pd from pandas.api.extensions import no_default, register_extension_dtype +from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin from pandas.core.indexers import check_array_indexer @@ -44,7 +45,7 @@ def _is_numeric(self) -> bool: return True -class DecimalArray(ExtensionArray, ExtensionScalarOpsMixin): +class DecimalArray(OpsMixin, ExtensionScalarOpsMixin, ExtensionArray): __array_priority__ = 1000 def __init__(self, values, dtype=None, copy=False, context=None): @@ -197,6 +198,25 @@ def _reduce(self, name: str, skipna: bool = True, **kwargs): ) from err return op(axis=0) + def _cmp_method(self, other, op): + # For use with OpsMixin + def convert_values(param): + if isinstance(param, ExtensionArray) or is_list_like(param): + ovalues = param + else: + # Assume it's an object + ovalues = [param] * len(self) + return ovalues + + lvalues = self + rvalues = convert_values(other) + + # If the operator is not defined for the underlying objects, + # a TypeError should be raised + res = [op(a, b) for (a, b) in zip(lvalues, rvalues)] + + return np.asarray(res, dtype=bool) + def to_decimal(values, context=None): return DecimalArray([decimal.Decimal(x) for x in values], context=context) @@ -207,4 +227,3 @@ def make_data(): DecimalArray._add_arithmetic_ops() -DecimalArray._add_comparison_ops()
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36961
2020-10-07T22:51:43Z
2020-10-10T17:31:16Z
2020-10-10T17:31:16Z
2020-10-10T18:11:11Z
ENH: typo in citing page
diff --git a/web/pandas/about/citing.md b/web/pandas/about/citing.md index 25d2c86061daa..e2821dbc19a4e 100644 --- a/web/pandas/about/citing.md +++ b/web/pandas/about/citing.md @@ -6,7 +6,7 @@ If you use _pandas_ for a scientific publication, we would appreciate citations following paper: - [pandas on Zenodo](https://zenodo.org/record/3715232#.XoqFyC2ZOL8), - Please find us on Zenodo and replace with the citation for the version you are using. You cna replace the full author + Please find us on Zenodo and replace with the citation for the version you are using. You can replace the full author list from there with "The pandas development team" like in the example below. @software{reback2020pandas,
Small typo in `citing.md`
https://api.github.com/repos/pandas-dev/pandas/pulls/36960
2020-10-07T22:25:21Z
2020-10-07T23:24:16Z
2020-10-07T23:24:16Z
2020-10-07T23:34:11Z
TYP: core.missing, ops.docstrings, internals.ops, internals.managers, io.html
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index a7a9a77bab3bc..0ec281bb0fdce 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1845,7 +1845,7 @@ def _consolidate(blocks): gkey = lambda x: x._consolidate_key grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) - new_blocks = [] + new_blocks: List[Block] = [] for (_can_consolidate, dtype), group_blocks in grouper: merged_blocks = _merge_blocks( list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate diff --git a/pandas/core/missing.py b/pandas/core/missing.py index f3229b2876e5d..f2ec04c1fc05d 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -717,8 +717,8 @@ def inner(invalid, limit): # just use forwards return f_idx else: - b_idx = list(inner(invalid[::-1], bw_limit)) - b_idx = set(N - 1 - np.asarray(b_idx)) + b_idx_inv = list(inner(invalid[::-1], bw_limit)) + b_idx = set(N - 1 - np.asarray(b_idx_inv)) if fw_limit == 0: return b_idx diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py index e3a68ad328d55..839bdbfb2444a 100644 --- a/pandas/core/ops/docstrings.py +++ b/pandas/core/ops/docstrings.py @@ -4,7 +4,7 @@ from typing import Dict, Optional -def _make_flex_doc(op_name, typ): +def _make_flex_doc(op_name, typ: str): """ Make the appropriate substitutions for the given operation and class-typ into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring @@ -22,10 +22,12 @@ def _make_flex_doc(op_name, typ): op_name = op_name.replace("__", "") op_desc = _op_descriptions[op_name] + op_desc_op = op_desc["op"] + assert op_desc_op is not None # for mypy if op_name.startswith("r"): - equiv = "other " + op_desc["op"] + " " + typ + equiv = "other " + op_desc_op + " " + typ else: - equiv = typ + " " + op_desc["op"] + " other" + equiv = typ + " " + op_desc_op + " other" if typ == "series": base_doc = _flex_doc_SERIES @@ -39,8 +41,9 @@ def _make_flex_doc(op_name, typ): 
equiv=equiv, series_returns=op_desc["series_returns"], ) - if op_desc["series_examples"]: - doc = doc_no_examples + op_desc["series_examples"] + ser_example = op_desc["series_examples"] + if ser_example: + doc = doc_no_examples + ser_example else: doc = doc_no_examples elif typ == "dataframe": diff --git a/pandas/io/html.py b/pandas/io/html.py index 9a91b16e52723..1534e42d8fb5a 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -8,7 +8,7 @@ import numbers import os import re -from typing import Dict, List, Optional, Pattern, Sequence, Union +from typing import Dict, List, Optional, Pattern, Sequence, Tuple, Union from pandas._typing import FilePathOrBuffer from pandas.compat._optional import import_optional_dependency @@ -435,7 +435,7 @@ def _expand_colspan_rowspan(self, rows): to subsequent cells. """ all_texts = [] # list of rows, each a list of str - remainder = [] # list of (index, text, nrows) + remainder: List[Tuple[int, str, int]] = [] # list of (index, text, nrows) for tr in rows: texts = [] # the output for this row @@ -910,6 +910,7 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs): else: break else: + assert retained is not None # for mypy raise retained ret = [] diff --git a/setup.cfg b/setup.cfg index ee28646d722f2..6c65695f6bc4a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -202,18 +202,6 @@ check_untyped_defs=False [mypy-pandas.core.internals.construction] check_untyped_defs=False -[mypy-pandas.core.internals.managers] -check_untyped_defs=False - -[mypy-pandas.core.internals.ops] -check_untyped_defs=False - -[mypy-pandas.core.missing] -check_untyped_defs=False - -[mypy-pandas.core.ops.docstrings] -check_untyped_defs=False - [mypy-pandas.core.resample] check_untyped_defs=False @@ -253,9 +241,6 @@ check_untyped_defs=False [mypy-pandas.io.formats.style] check_untyped_defs=False -[mypy-pandas.io.html] -check_untyped_defs=False - [mypy-pandas.io.json._json] check_untyped_defs=False
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36959
2020-10-07T20:57:55Z
2020-10-08T08:56:45Z
2020-10-08T08:56:45Z
2020-10-08T15:21:06Z
DOC: Add GCS as supported filesystem for read_parquet
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 55256c928aad9..97ec0ed1f7fdc 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -321,7 +321,7 @@ def read_parquet(path, engine: str = "auto", columns=None, **kwargs): ---------- path : str, path object or file-like object Any valid string path is acceptable. The string could be a URL. Valid - URL schemes include http, ftp, s3, and file. For file URLs, a host is + URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.parquet``. A file URL can also be a path to a directory that contains multiple
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36958
2020-10-07T20:51:41Z
2020-10-10T22:56:37Z
2020-10-10T22:56:37Z
2020-10-10T22:56:41Z
Implement DataFrame.__array_ufunc__
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ad5af5df710ba..9f7aff0a30bd3 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -238,6 +238,8 @@ Other enhancements - :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with datetimelike dtypes will now try to cast string arguments (listlike and scalar) to the matching datetimelike type (:issue:`36346`) - - Added methods :meth:`IntegerArray.prod`, :meth:`IntegerArray.min`, and :meth:`IntegerArray.max` (:issue:`33790`) +- Calling a NumPy ufunc on a ``DataFrame`` with extension types now preserves the extension types when possible (:issue:`23743`). +- Calling a binary-input NumPy ufunc on multiple ``DataFrame`` objects now aligns, matching the behavior of binary operations and ufuncs on ``Series`` (:issue:`23743`). - Where possible :meth:`RangeIndex.difference` and :meth:`RangeIndex.symmetric_difference` will return :class:`RangeIndex` instead of :class:`Int64Index` (:issue:`36564`) - :meth:`DataFrame.to_parquet` now supports :class:`MultiIndex` for columns in parquet format (:issue:`34777`) - Added :meth:`Rolling.sem()` and :meth:`Expanding.sem()` to compute the standard error of mean (:issue:`26476`). @@ -470,6 +472,7 @@ Deprecations - The default value of ``regex`` for :meth:`Series.str.replace` will change from ``True`` to ``False`` in a future release. In addition, single character regular expressions will *not* be treated as literal strings when ``regex=True`` is set. (:issue:`24804`) - Deprecated automatic alignment on comparison operations between :class:`DataFrame` and :class:`Series`, do ``frame, ser = frame.align(ser, axis=1, copy=False)`` before e.g. 
``frame == ser`` (:issue:`28759`) - :meth:`Rolling.count` with ``min_periods=None`` will default to the size of the window in a future version (:issue:`31302`) +- Using "outer" ufuncs on DataFrames to return 4d ndarray is now deprecated. Convert to an ndarray first (:issue:`23743`) - Deprecated slice-indexing on timezone-aware :class:`DatetimeIndex` with naive ``datetime`` objects, to match scalar indexing behavior (:issue:`36148`) - :meth:`Index.ravel` returning a ``np.ndarray`` is deprecated, in the future this will return a view on the same index (:issue:`19956`) - Deprecate use of strings denoting units with 'M', 'Y' or 'y' in :func:`~pandas.to_timedelta` (:issue:`36666`) @@ -750,6 +753,7 @@ Other - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`) - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`) +- Fixed metadata propagation in :meth:`Series.abs` and ufuncs called on Series and DataFrames (:issue:`28283`) - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly casting from ``PeriodDtype`` to object dtype (:issue:`34871`) - Fixed bug in metadata propagation incorrectly copying DataFrame columns as metadata when the column name overlaps with the metadata name (:issue:`37037`) - Fixed metadata propagation in the :class:`Series.dt`, :class:`Series.str` accessors, :class:`DataFrame.duplicated`, :class:`DataFrame.stack`, :class:`DataFrame.unstack`, :class:`DataFrame.pivot`, :class:`DataFrame.append`, :class:`DataFrame.diff`, :class:`DataFrame.applymap` and :class:`DataFrame.update` methods (:issue:`28283`) (:issue:`37381`) diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py index da366c9abf0a4..6b28f8f135769 100644 --- a/pandas/core/arraylike.py +++ b/pandas/core/arraylike.py @@ -5,8 +5,15 @@ ExtensionArray """ import 
operator +from typing import Any, Callable +import warnings -from pandas.core.ops import roperator +import numpy as np + +from pandas._libs import lib + +from pandas.core.construction import extract_array +from pandas.core.ops import maybe_dispatch_ufunc_to_dunder_op, roperator from pandas.core.ops.common import unpack_zerodim_and_defer @@ -140,3 +147,138 @@ def __pow__(self, other): @unpack_zerodim_and_defer("__rpow__") def __rpow__(self, other): return self._arith_method(other, roperator.rpow) + + +def array_ufunc(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any): + """ + Compatibility with numpy ufuncs. + + See also + -------- + numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__ + """ + from pandas.core.generic import NDFrame + from pandas.core.internals import BlockManager + + cls = type(self) + + # for binary ops, use our custom dunder methods + result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs) + if result is not NotImplemented: + return result + + # Determine if we should defer. + no_defer = (np.ndarray.__array_ufunc__, cls.__array_ufunc__) + + for item in inputs: + higher_priority = ( + hasattr(item, "__array_priority__") + and item.__array_priority__ > self.__array_priority__ + ) + has_array_ufunc = ( + hasattr(item, "__array_ufunc__") + and type(item).__array_ufunc__ not in no_defer + and not isinstance(item, self._HANDLED_TYPES) + ) + if higher_priority or has_array_ufunc: + return NotImplemented + + # align all the inputs. + types = tuple(type(x) for x in inputs) + alignable = [x for x, t in zip(inputs, types) if issubclass(t, NDFrame)] + + if len(alignable) > 1: + # This triggers alignment. + # At the moment, there aren't any ufuncs with more than two inputs + # so this ends up just being x1.index | x2.index, but we write + # it to handle *args. + + if len(set(types)) > 1: + # We currently don't handle ufunc(DataFrame, Series) + # well. 
Previously this raised an internal ValueError. We might + # support it someday, so raise a NotImplementedError. + raise NotImplementedError( + "Cannot apply ufunc {} to mixed DataFrame and Series " + "inputs.".format(ufunc) + ) + axes = self.axes + for obj in alignable[1:]: + # this relies on the fact that we aren't handling mixed + # series / frame ufuncs. + for i, (ax1, ax2) in enumerate(zip(axes, obj.axes)): + axes[i] = ax1.union(ax2) + + reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes)) + inputs = tuple( + x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x + for x, t in zip(inputs, types) + ) + else: + reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes)) + + if self.ndim == 1: + names = [getattr(x, "name") for x in inputs if hasattr(x, "name")] + name = names[0] if len(set(names)) == 1 else None + reconstruct_kwargs = {"name": name} + else: + reconstruct_kwargs = {} + + def reconstruct(result): + if lib.is_scalar(result): + return result + if result.ndim != self.ndim: + if method == "outer": + if self.ndim == 2: + # we already deprecated for Series + msg = ( + "outer method for ufunc {} is not implemented on " + "pandas objects. Returning an ndarray, but in the " + "future this will raise a 'NotImplementedError'. " + "Consider explicitly converting the DataFrame " + "to an array with '.to_numpy()' first." + ) + warnings.warn(msg.format(ufunc), FutureWarning, stacklevel=4) + return result + raise NotImplementedError + return result + if isinstance(result, BlockManager): + # we went through BlockManager.apply + result = self._constructor(result, **reconstruct_kwargs, copy=False) + else: + # we converted an array, lost our axes + result = self._constructor( + result, **reconstruct_axes, **reconstruct_kwargs, copy=False + ) + # TODO: When we support multiple values in __finalize__, this + # should pass alignable to `__fianlize__` instead of self. + # Then `np.add(a, b)` would consider attrs from both a and b + # when a and b are NDFrames. 
+ if len(alignable) == 1: + result = result.__finalize__(self) + return result + + if self.ndim > 1 and ( + len(inputs) > 1 or ufunc.nout > 1 # type: ignore[attr-defined] + ): + # Just give up on preserving types in the complex case. + # In theory we could preserve them for them. + # * nout>1 is doable if BlockManager.apply took nout and + # returned a Tuple[BlockManager]. + # * len(inputs) > 1 is doable when we know that we have + # aligned blocks / dtypes. + inputs = tuple(np.asarray(x) for x in inputs) + result = getattr(ufunc, method)(*inputs) + elif self.ndim == 1: + # ufunc(series, ...) + inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs) + result = getattr(ufunc, method)(*inputs, **kwargs) + else: + # ufunc(dataframe) + mgr = inputs[0]._mgr + result = mgr.apply(getattr(ufunc, method)) + + if ufunc.nout > 1: # type: ignore[attr-defined] + result = tuple(reconstruct(x) for x in result) + else: + result = reconstruct(result) + return result diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5b87c4ea8b9cc..9c70f3557e339 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -434,6 +434,7 @@ class DataFrame(NDFrame, OpsMixin): _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" + _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) @property def _constructor(self) -> Type[DataFrame]: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7e8012d76fe1b..e866314f00639 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -87,7 +87,7 @@ from pandas.core.dtypes.missing import isna, notna import pandas as pd -from pandas.core import indexing, missing, nanops +from pandas.core import arraylike, indexing, missing, nanops import pandas.core.algorithms as algos from pandas.core.base import PandasObject, SelectionMixin import pandas.core.common as com @@ -1927,6 +1927,11 @@ def __array_wrap__( self, method="__array_wrap__" ) + def __array_ufunc__( + self, ufunc: 
Callable, method: str, *inputs: Any, **kwargs: Any + ): + return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) + # ideally we would define this to avoid the getattr checks, but # is slower # @property diff --git a/pandas/core/series.py b/pandas/core/series.py index 4c3ad38c8a922..1e4c0e07de403 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -176,6 +176,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): """ _typ = "series" + _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Label _metadata: List[str] = ["name"] @@ -683,81 +684,6 @@ def view(self, dtype=None) -> "Series": # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) - def __array_ufunc__( - self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any - ): - # TODO: handle DataFrame - cls = type(self) - - # for binary ops, use our custom dunder methods - result = ops.maybe_dispatch_ufunc_to_dunder_op( - self, ufunc, method, *inputs, **kwargs - ) - if result is not NotImplemented: - return result - - # Determine if we should defer. - no_defer = (np.ndarray.__array_ufunc__, cls.__array_ufunc__) - - for item in inputs: - higher_priority = ( - hasattr(item, "__array_priority__") - and item.__array_priority__ > self.__array_priority__ - ) - has_array_ufunc = ( - hasattr(item, "__array_ufunc__") - and type(item).__array_ufunc__ not in no_defer - and not isinstance(item, self._HANDLED_TYPES) - ) - if higher_priority or has_array_ufunc: - return NotImplemented - - # align all the inputs. - names = [getattr(x, "name") for x in inputs if hasattr(x, "name")] - types = tuple(type(x) for x in inputs) - # TODO: dataframe - alignable = [x for x, t in zip(inputs, types) if issubclass(t, Series)] - - if len(alignable) > 1: - # This triggers alignment. - # At the moment, there aren't any ufuncs with more than two inputs - # so this ends up just being x1.index | x2.index, but we write - # it to handle *args. 
- index = alignable[0].index - for s in alignable[1:]: - index = index.union(s.index) - inputs = tuple( - x.reindex(index) if issubclass(t, Series) else x - for x, t in zip(inputs, types) - ) - else: - index = self.index - - inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs) - result = getattr(ufunc, method)(*inputs, **kwargs) - - name = names[0] if len(set(names)) == 1 else None - - def construct_return(result): - if lib.is_scalar(result): - return result - elif result.ndim > 1: - # e.g. np.subtract.outer - if method == "outer": - # GH#27198 - raise NotImplementedError - return result - return self._constructor(result, index=index, name=name, copy=False) - - if type(result) is tuple: - # multiple return values - return tuple(construct_return(x) for x in result) - elif method == "at": - # no return value - return None - else: - return construct_return(result) - def __array__(self, dtype=None) -> np.ndarray: """ Return the values as a NumPy array. diff --git a/pandas/tests/frame/test_ufunc.py b/pandas/tests/frame/test_ufunc.py new file mode 100644 index 0000000000000..7bc9aa29af3b4 --- /dev/null +++ b/pandas/tests/frame/test_ufunc.py @@ -0,0 +1,111 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + +dtypes = [ + "int64", + "Int64", + dict(A="int64", B="Int64"), +] + + +@pytest.mark.parametrize("dtype", dtypes) +def test_unary_unary(dtype): + # unary input, unary output + values = np.array([[-1, -1], [1, 1]], dtype="int64") + df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) + result = np.positive(df) + expected = pd.DataFrame( + np.positive(values), index=df.index, columns=df.columns + ).astype(dtype) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", dtypes) +def test_unary_binary(dtype): + # unary input, binary output + if pd.api.types.is_extension_array_dtype(dtype) or isinstance(dtype, dict): + pytest.xfail(reason="Extension / mixed with 
multiple outuputs not implemented.") + + values = np.array([[-1, -1], [1, 1]], dtype="int64") + df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) + result_pandas = np.modf(df) + assert isinstance(result_pandas, tuple) + assert len(result_pandas) == 2 + expected_numpy = np.modf(values) + + for result, b in zip(result_pandas, expected_numpy): + expected = pd.DataFrame(b, index=df.index, columns=df.columns) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", dtypes) +def test_binary_input_dispatch_binop(dtype): + # binop ufuncs are dispatched to our dunder methods. + values = np.array([[-1, -1], [1, 1]], dtype="int64") + df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) + result = np.add(df, df) + expected = pd.DataFrame( + np.add(values, values), index=df.index, columns=df.columns + ).astype(dtype) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype_a", dtypes) +@pytest.mark.parametrize("dtype_b", dtypes) +def test_binary_input_aligns_columns(dtype_a, dtype_b): + if ( + pd.api.types.is_extension_array_dtype(dtype_a) + or isinstance(dtype_a, dict) + or pd.api.types.is_extension_array_dtype(dtype_b) + or isinstance(dtype_b, dict) + ): + pytest.xfail(reason="Extension / mixed with multiple inputs not implemented.") + + df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}).astype(dtype_a) + + if isinstance(dtype_a, dict) and isinstance(dtype_b, dict): + dtype_b["C"] = dtype_b.pop("B") + + df2 = pd.DataFrame({"A": [1, 2], "C": [3, 4]}).astype(dtype_b) + result = np.heaviside(df1, df2) + expected = np.heaviside( + np.array([[1, 3, np.nan], [2, 4, np.nan]]), + np.array([[1, np.nan, 3], [2, np.nan, 4]]), + ) + expected = pd.DataFrame(expected, index=[0, 1], columns=["A", "B", "C"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", dtypes) +def test_binary_input_aligns_index(dtype): + if 
pd.api.types.is_extension_array_dtype(dtype) or isinstance(dtype, dict): + pytest.xfail(reason="Extension / mixed with multiple inputs not implemented.") + df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).astype(dtype) + df2 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "c"]).astype(dtype) + result = np.heaviside(df1, df2) + expected = np.heaviside( + np.array([[1, 3], [3, 4], [np.nan, np.nan]]), + np.array([[1, 3], [np.nan, np.nan], [3, 4]]), + ) + # TODO(FloatArray): this will be Float64Dtype. + expected = pd.DataFrame(expected, index=["a", "b", "c"], columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + +def test_binary_frame_series_raises(): + # We don't currently implement + df = pd.DataFrame({"A": [1, 2]}) + with pytest.raises(NotImplementedError, match="logaddexp"): + np.logaddexp(df, df["A"]) + + with pytest.raises(NotImplementedError, match="logaddexp"): + np.logaddexp(df["A"], df) + + +def test_frame_outer_deprecated(): + df = pd.DataFrame({"A": [1, 2]}) + with tm.assert_produces_warning(FutureWarning): + np.subtract.outer(df, df) diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py index 3f7bebd86e983..300f4cd72573a 100644 --- a/pandas/tests/generic/test_duplicate_labels.py +++ b/pandas/tests/generic/test_duplicate_labels.py @@ -37,8 +37,8 @@ def test_construction_ok(self, cls, data): operator.methodcaller("add", 1), operator.methodcaller("rename", str.upper), operator.methodcaller("rename", "name"), - pytest.param(operator.methodcaller("abs"), marks=not_implemented), - # TODO: test np.abs + operator.methodcaller("abs"), + np.abs, ], ) def test_preserved_series(self, func): diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py index ecd70bb415334..4974d3fff1df4 100644 --- a/pandas/tests/generic/test_finalize.py +++ b/pandas/tests/generic/test_finalize.py @@ -302,7 +302,7 @@ (pd.DataFrame, frame_data, operator.inv), (pd.Series, 
[1], operator.inv), (pd.DataFrame, frame_data, abs), - pytest.param((pd.Series, [1], abs), marks=not_implemented_mark), + (pd.Series, [1], abs), pytest.param((pd.DataFrame, frame_data, round), marks=not_implemented_mark), (pd.Series, [1], round), (pd.DataFrame, frame_data, operator.methodcaller("take", [0, 0])),
For some cases, this will preserve extension types of arrays by calling the ufunc blockwise. ```python In [1]: import pandas as pd; import numpy as np In [2]: df = pd.DataFrame({"A": pd.array([0, 1], dtype="Sparse")}) In [3]: np.sin(df).dtypes Out[3]: A Sparse[float64, nan] dtype: object ``` Implementation-wise, this was done by moving `Series.__array_ufunc__` to `NDFrame` and making it generic for Series / DataFrame. The DataFrame implementation goes through `BlockManager.apply(ufunc)`. We don't currently handle the multi-input case well for dataframes (aside from ufuncs that are implemented as dunder ops like `np.add`). For these, we fall back to the old implementation of converting to an ndarray and wrapping the result. This loses extension types. We also don't currently handle multi-output ufuncs (like `np.modf`). This would require a `BlockManager.apply` that returns a `Tuple[BlockManager]`, `nout` per input block. Maybe someday, but that's low priority. closes https://github.com/pandas-dev/pandas/issues/23743
https://api.github.com/repos/pandas-dev/pandas/pulls/36955
2020-10-07T18:36:37Z
2020-11-25T20:52:40Z
2020-11-25T20:52:40Z
2020-11-30T14:38:18Z
TYP/REF: use _cmp_method in EAs
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index dd750bce7842e..4dd117e407961 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -23,6 +23,7 @@ from pandas.core.dtypes.missing import isna from pandas.core import ops +from pandas.core.arraylike import OpsMixin from .masked import BaseMaskedArray, BaseMaskedDtype @@ -202,7 +203,7 @@ def coerce_to_array( return values, mask -class BooleanArray(BaseMaskedArray): +class BooleanArray(OpsMixin, BaseMaskedArray): """ Array of boolean (True/False) data with missing values. @@ -603,52 +604,44 @@ def logical_method(self, other): name = f"__{op.__name__}__" return set_function_name(logical_method, name, cls) - @classmethod - def _create_comparison_method(cls, op): - @ops.unpack_zerodim_and_defer(op.__name__) - def cmp_method(self, other): - from pandas.arrays import FloatingArray, IntegerArray + def _cmp_method(self, other, op): + from pandas.arrays import FloatingArray, IntegerArray - if isinstance(other, (IntegerArray, FloatingArray)): - return NotImplemented + if isinstance(other, (IntegerArray, FloatingArray)): + return NotImplemented - mask = None + mask = None - if isinstance(other, BooleanArray): - other, mask = other._data, other._mask + if isinstance(other, BooleanArray): + other, mask = other._data, other._mask - elif is_list_like(other): - other = np.asarray(other) - if other.ndim > 1: - raise NotImplementedError( - "can only perform ops with 1-d structures" - ) - if len(self) != len(other): - raise ValueError("Lengths must match to compare") + elif is_list_like(other): + other = np.asarray(other) + if other.ndim > 1: + raise NotImplementedError("can only perform ops with 1-d structures") + if len(self) != len(other): + raise ValueError("Lengths must match to compare") - if other is libmissing.NA: - # numpy does not handle pd.NA well as "other" scalar (it returns - # a scalar False instead of an array) - result = np.zeros_like(self._data) - mask = 
np.ones_like(self._data) - else: - # numpy will show a DeprecationWarning on invalid elementwise - # comparisons, this will raise in the future - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", "elementwise", FutureWarning) - with np.errstate(all="ignore"): - result = op(self._data, other) - - # nans propagate - if mask is None: - mask = self._mask.copy() - else: - mask = self._mask | mask + if other is libmissing.NA: + # numpy does not handle pd.NA well as "other" scalar (it returns + # a scalar False instead of an array) + result = np.zeros_like(self._data) + mask = np.ones_like(self._data) + else: + # numpy will show a DeprecationWarning on invalid elementwise + # comparisons, this will raise in the future + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "elementwise", FutureWarning) + with np.errstate(all="ignore"): + result = op(self._data, other) - return BooleanArray(result, mask, copy=False) + # nans propagate + if mask is None: + mask = self._mask.copy() + else: + mask = self._mask | mask - name = f"__{op.__name__}" - return set_function_name(cmp_method, name, cls) + return BooleanArray(result, mask, copy=False) def _reduce(self, name: str, skipna: bool = True, **kwargs): @@ -741,5 +734,4 @@ def boolean_arithmetic_method(self, other): BooleanArray._add_logical_ops() -BooleanArray._add_comparison_ops() BooleanArray._add_arithmetic_ops() diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py index bbb5467d42d53..aa272f13b045c 100644 --- a/pandas/core/arrays/floating.py +++ b/pandas/core/arrays/floating.py @@ -26,6 +26,7 @@ from pandas.core.dtypes.missing import isna from pandas.core import ops +from pandas.core.arraylike import OpsMixin from pandas.core.ops import invalid_comparison from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.tools.numeric import to_numeric @@ -201,7 +202,7 @@ def coerce_to_array( return values, mask -class FloatingArray(BaseMaskedArray): +class 
FloatingArray(OpsMixin, BaseMaskedArray): """ Array of floating (optional missing) values. @@ -398,58 +399,48 @@ def astype(self, dtype, copy: bool = True) -> ArrayLike: def _values_for_argsort(self) -> np.ndarray: return self._data - @classmethod - def _create_comparison_method(cls, op): - op_name = op.__name__ + def _cmp_method(self, other, op): + from pandas.arrays import BooleanArray, IntegerArray - @unpack_zerodim_and_defer(op.__name__) - def cmp_method(self, other): - from pandas.arrays import BooleanArray, IntegerArray + mask = None - mask = None + if isinstance(other, (BooleanArray, IntegerArray, FloatingArray)): + other, mask = other._data, other._mask - if isinstance(other, (BooleanArray, IntegerArray, FloatingArray)): - other, mask = other._data, other._mask + elif is_list_like(other): + other = np.asarray(other) + if other.ndim > 1: + raise NotImplementedError("can only perform ops with 1-d structures") - elif is_list_like(other): - other = np.asarray(other) - if other.ndim > 1: - raise NotImplementedError( - "can only perform ops with 1-d structures" - ) + if other is libmissing.NA: + # numpy does not handle pd.NA well as "other" scalar (it returns + # a scalar False instead of an array) + # This may be fixed by NA.__array_ufunc__. Revisit this check + # once that's implemented. + result = np.zeros(self._data.shape, dtype="bool") + mask = np.ones(self._data.shape, dtype="bool") + else: + with warnings.catch_warnings(): + # numpy may show a FutureWarning: + # elementwise comparison failed; returning scalar instead, + # but in the future will perform elementwise comparison + # before returning NotImplemented. We fall back to the correct + # behavior today, so that should be fine to ignore. 
+ warnings.filterwarnings("ignore", "elementwise", FutureWarning) + with np.errstate(all="ignore"): + method = getattr(self._data, f"__{op.__name__}__") + result = method(other) - if other is libmissing.NA: - # numpy does not handle pd.NA well as "other" scalar (it returns - # a scalar False instead of an array) - # This may be fixed by NA.__array_ufunc__. Revisit this check - # once that's implemented. - result = np.zeros(self._data.shape, dtype="bool") - mask = np.ones(self._data.shape, dtype="bool") - else: - with warnings.catch_warnings(): - # numpy may show a FutureWarning: - # elementwise comparison failed; returning scalar instead, - # but in the future will perform elementwise comparison - # before returning NotImplemented. We fall back to the correct - # behavior today, so that should be fine to ignore. - warnings.filterwarnings("ignore", "elementwise", FutureWarning) - with np.errstate(all="ignore"): - method = getattr(self._data, f"__{op_name}__") - result = method(other) - - if result is NotImplemented: - result = invalid_comparison(self._data, other, op) - - # nans propagate - if mask is None: - mask = self._mask.copy() - else: - mask = self._mask | mask + if result is NotImplemented: + result = invalid_comparison(self._data, other, op) - return BooleanArray(result, mask) + # nans propagate + if mask is None: + mask = self._mask.copy() + else: + mask = self._mask | mask - name = f"__{op.__name__}__" - return set_function_name(cmp_method, name, cls) + return BooleanArray(result, mask) def sum(self, skipna=True, min_count=0, **kwargs): nv.validate_sum((), kwargs) @@ -565,7 +556,6 @@ def floating_arithmetic_method(self, other): FloatingArray._add_arithmetic_ops() -FloatingArray._add_comparison_ops() _dtype_docstring = """ diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 258a946536c2b..856b4bcbda048 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -26,6 +26,7 @@ from 
pandas.core.dtypes.missing import isna from pandas.core import ops +from pandas.core.arraylike import OpsMixin from pandas.core.ops import invalid_comparison from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.tools.numeric import to_numeric @@ -265,7 +266,7 @@ def coerce_to_array( return values, mask -class IntegerArray(BaseMaskedArray): +class IntegerArray(OpsMixin, BaseMaskedArray): """ Array of integer (optional missing) values. @@ -493,60 +494,50 @@ def _values_for_argsort(self) -> np.ndarray: data[self._mask] = data.min() - 1 return data - @classmethod - def _create_comparison_method(cls, op): - op_name = op.__name__ + def _cmp_method(self, other, op): + from pandas.core.arrays import BaseMaskedArray, BooleanArray - @unpack_zerodim_and_defer(op.__name__) - def cmp_method(self, other): - from pandas.core.arrays import BaseMaskedArray, BooleanArray + mask = None - mask = None + if isinstance(other, BaseMaskedArray): + other, mask = other._data, other._mask - if isinstance(other, BaseMaskedArray): - other, mask = other._data, other._mask - - elif is_list_like(other): - other = np.asarray(other) - if other.ndim > 1: - raise NotImplementedError( - "can only perform ops with 1-d structures" - ) - if len(self) != len(other): - raise ValueError("Lengths must match to compare") + elif is_list_like(other): + other = np.asarray(other) + if other.ndim > 1: + raise NotImplementedError("can only perform ops with 1-d structures") + if len(self) != len(other): + raise ValueError("Lengths must match to compare") + + if other is libmissing.NA: + # numpy does not handle pd.NA well as "other" scalar (it returns + # a scalar False instead of an array) + # This may be fixed by NA.__array_ufunc__. Revisit this check + # once that's implemented. 
+ result = np.zeros(self._data.shape, dtype="bool") + mask = np.ones(self._data.shape, dtype="bool") + else: + with warnings.catch_warnings(): + # numpy may show a FutureWarning: + # elementwise comparison failed; returning scalar instead, + # but in the future will perform elementwise comparison + # before returning NotImplemented. We fall back to the correct + # behavior today, so that should be fine to ignore. + warnings.filterwarnings("ignore", "elementwise", FutureWarning) + with np.errstate(all="ignore"): + method = getattr(self._data, f"__{op.__name__}__") + result = method(other) - if other is libmissing.NA: - # numpy does not handle pd.NA well as "other" scalar (it returns - # a scalar False instead of an array) - # This may be fixed by NA.__array_ufunc__. Revisit this check - # once that's implemented. - result = np.zeros(self._data.shape, dtype="bool") - mask = np.ones(self._data.shape, dtype="bool") - else: - with warnings.catch_warnings(): - # numpy may show a FutureWarning: - # elementwise comparison failed; returning scalar instead, - # but in the future will perform elementwise comparison - # before returning NotImplemented. We fall back to the correct - # behavior today, so that should be fine to ignore. 
- warnings.filterwarnings("ignore", "elementwise", FutureWarning) - with np.errstate(all="ignore"): - method = getattr(self._data, f"__{op_name}__") - result = method(other) - - if result is NotImplemented: - result = invalid_comparison(self._data, other, op) - - # nans propagate - if mask is None: - mask = self._mask.copy() - else: - mask = self._mask | mask + if result is NotImplemented: + result = invalid_comparison(self._data, other, op) - return BooleanArray(result, mask) + # nans propagate + if mask is None: + mask = self._mask.copy() + else: + mask = self._mask | mask - name = f"__{op.__name__}__" - return set_function_name(cmp_method, name, cls) + return BooleanArray(result, mask) def sum(self, skipna=True, min_count=0, **kwargs): nv.validate_sum((), kwargs) @@ -669,7 +660,6 @@ def integer_arithmetic_method(self, other): IntegerArray._add_arithmetic_ops() -IntegerArray._add_comparison_ops() _dtype_docstring = """ diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index c56cccf2e4a93..b5103fb7f9d5d 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -14,6 +14,7 @@ from pandas import compat from pandas.core import nanops, ops from pandas.core.array_algos import masked_reductions +from pandas.core.arraylike import OpsMixin from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.base import ExtensionOpsMixin from pandas.core.strings.object_array import ObjectStringArrayMixin @@ -115,6 +116,7 @@ def itemsize(self) -> int: class PandasArray( + OpsMixin, NDArrayBackedExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin, @@ -370,31 +372,32 @@ def to_numpy( def __invert__(self): return type(self)(~self._ndarray) - @classmethod - def _create_arithmetic_method(cls, op): + def _cmp_method(self, other, op): + if isinstance(other, PandasArray): + other = other._ndarray pd_op = ops.get_array_op(op) + result = pd_op(self._ndarray, other) - @ops.unpack_zerodim_and_defer(op.__name__) - 
def arithmetic_method(self, other): - if isinstance(other, cls): - other = other._ndarray - - result = pd_op(self._ndarray, other) + if op is divmod or op is ops.rdivmod: + a, b = result + if isinstance(a, np.ndarray): + # for e.g. op vs TimedeltaArray, we may already + # have an ExtensionArray, in which case we do not wrap + return self._wrap_ndarray_result(a), self._wrap_ndarray_result(b) + return a, b - if op is divmod or op is ops.rdivmod: - a, b = result - if isinstance(a, np.ndarray): - # for e.g. op vs TimedeltaArray, we may already - # have an ExtensionArray, in which case we do not wrap - return self._wrap_ndarray_result(a), self._wrap_ndarray_result(b) - return a, b + if isinstance(result, np.ndarray): + # for e.g. multiplication vs TimedeltaArray, we may already + # have an ExtensionArray, in which case we do not wrap + return self._wrap_ndarray_result(result) + return result - if isinstance(result, np.ndarray): - # for e.g. multiplication vs TimedeltaArray, we may already - # have an ExtensionArray, in which case we do not wrap - return self._wrap_ndarray_result(result) - return result + @classmethod + def _create_arithmetic_method(cls, op): + @ops.unpack_zerodim_and_defer(op.__name__) + def arithmetic_method(self, other): + return self._cmp_method(other, op) return compat.set_function_name(arithmetic_method, f"__{op.__name__}__", cls) @@ -415,4 +418,3 @@ def _wrap_ndarray_result(self, result: np.ndarray): PandasArray._add_arithmetic_ops() -PandasArray._add_comparison_ops() diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 9ea34d4680748..553ba25270943 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -314,43 +314,46 @@ def memory_usage(self, deep: bool = False) -> int: return result + lib.memory_usage_of_objects(self._ndarray) return result - # Override parent because we have different return types. 
- @classmethod - def _create_arithmetic_method(cls, op): - # Note: this handles both arithmetic and comparison methods. + def _cmp_method(self, other, op): + from pandas.arrays import BooleanArray - @ops.unpack_zerodim_and_defer(op.__name__) - def method(self, other): - from pandas.arrays import BooleanArray + if isinstance(other, StringArray): + other = other._ndarray + + mask = isna(self) | isna(other) + valid = ~mask - assert op.__name__ in ops.ARITHMETIC_BINOPS | ops.COMPARISON_BINOPS + if not lib.is_scalar(other): + if len(other) != len(self): + # prevent improper broadcasting when other is 2D + raise ValueError( + f"Lengths of operands do not match: {len(self)} != {len(other)}" + ) - if isinstance(other, cls): - other = other._ndarray + other = np.asarray(other) + other = other[valid] - mask = isna(self) | isna(other) - valid = ~mask + if op.__name__ in ops.ARITHMETIC_BINOPS: + result = np.empty_like(self._ndarray, dtype="object") + result[mask] = StringDtype.na_value + result[valid] = op(self._ndarray[valid], other) + return StringArray(result) + else: + # logical + result = np.zeros(len(self._ndarray), dtype="bool") + result[valid] = op(self._ndarray[valid], other) + return BooleanArray(result, mask) - if not lib.is_scalar(other): - if len(other) != len(self): - # prevent improper broadcasting when other is 2D - raise ValueError( - f"Lengths of operands do not match: {len(self)} != {len(other)}" - ) + # Override parent because we have different return types. + @classmethod + def _create_arithmetic_method(cls, op): + # Note: this handles both arithmetic and comparison methods. 
- other = np.asarray(other) - other = other[valid] + assert op.__name__ in ops.ARITHMETIC_BINOPS | ops.COMPARISON_BINOPS - if op.__name__ in ops.ARITHMETIC_BINOPS: - result = np.empty_like(self._ndarray, dtype="object") - result[mask] = StringDtype.na_value - result[valid] = op(self._ndarray[valid], other) - return StringArray(result) - else: - # logical - result = np.zeros(len(self._ndarray), dtype="bool") - result[valid] = op(self._ndarray[valid], other) - return BooleanArray(result, mask) + @ops.unpack_zerodim_and_defer(op.__name__) + def method(self, other): + return self._cmp_method(other, op) return compat.set_function_name(method, f"__{op.__name__}__", cls) @@ -362,7 +365,6 @@ def _add_arithmetic_ops(cls): cls.__mul__ = cls._create_arithmetic_method(operator.mul) cls.__rmul__ = cls._create_arithmetic_method(ops.rmul) - _create_comparison_method = _create_arithmetic_method # ------------------------------------------------------------------------ # String methods interface _str_na_value = StringDtype.na_value @@ -418,4 +420,3 @@ def _str_map(self, f, na_value=None, dtype=None): StringArray._add_arithmetic_ops() -StringArray._add_comparison_ops()
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36954
2020-10-07T18:14:48Z
2020-10-07T22:23:29Z
2020-10-07T22:23:29Z
2020-10-07T22:37:11Z
CLN: dont special-case should_store, CategoricalBlock
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 09f276be7d64a..a3b3d1aed7b13 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -103,7 +103,6 @@ class Block(PandasObject): is_timedelta = False is_bool = False is_object = False - is_categorical = False is_extension = False _can_hold_na = False _can_consolidate = True @@ -183,6 +182,10 @@ def is_view(self) -> bool: """ return a boolean if I am possibly a view """ return self.values.base is not None + @property + def is_categorical(self) -> bool: + return self._holder is Categorical + @property def is_datelike(self) -> bool: """ return True if I am a non-datelike """ @@ -1652,12 +1655,6 @@ def iget(self, col): raise IndexError(f"{self} only contains one item") return self.values - def should_store(self, value: ArrayLike) -> bool: - """ - Can we set the given array-like value inplace? - """ - return isinstance(value, self._holder) - def set(self, locs, values): assert locs.tolist() == [0] self.values = values @@ -2048,9 +2045,6 @@ def _can_hold_element(self, element: Any) -> bool: element, (float, int, complex, np.float_, np.int_) ) and not isinstance(element, (bool, np.bool_)) - def should_store(self, value: ArrayLike) -> bool: - return issubclass(value.dtype.type, np.complexfloating) - class IntBlock(NumericBlock): __slots__ = () @@ -2216,7 +2210,6 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): _can_hold_element = DatetimeBlock._can_hold_element to_native_types = DatetimeBlock.to_native_types fill_value = np.datetime64("NaT", "ns") - should_store = Block.should_store array_values = ExtensionBlock.array_values @property @@ -2687,20 +2680,6 @@ def _replace_coerce( class CategoricalBlock(ExtensionBlock): __slots__ = () - is_categorical = True - _can_hold_na = True - - should_store = Block.should_store - - def __init__(self, values, placement, ndim=None): - # coerce to categorical if we can - values = extract_array(values) - assert 
isinstance(values, Categorical), type(values) - super().__init__(values, placement=placement, ndim=ndim) - - @property - def _holder(self): - return Categorical def replace( self,
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Moving towards avoiding special-casing pandas-internal EA subclasses.
https://api.github.com/repos/pandas-dev/pandas/pulls/36952
2020-10-07T17:55:18Z
2020-10-08T21:13:42Z
2020-10-08T21:13:42Z
2020-10-08T21:22:21Z
REGR: Allow positional arguments in DataFrame.agg
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index f9127ee8d13e7..3ad8d981be2c9 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression where attempting to mutate a :class:`DateOffset` object would no longer raise an ``AttributeError`` (:issue:`36940`) +- Fixed regression where :meth:`DataFrame.agg` would fail with :exc:`TypeError` when passed positional arguments to be passed on to the aggregation function (:issue:`36948`). - Fixed regression in :class:`RollingGroupby` with ``sort=False`` not being respected (:issue:`36889`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 80e9ec5076610..607f927bbc332 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7406,7 +7406,7 @@ def aggregate(self, func=None, axis=0, *args, **kwargs): result = None try: - result, how = self._aggregate(func, axis=axis, *args, **kwargs) + result, how = self._aggregate(func, axis, *args, **kwargs) except TypeError as err: exc = TypeError( "DataFrame constructor called with " diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py index 5c6a47c57970b..598da9c52731e 100644 --- a/pandas/tests/frame/apply/test_frame_apply.py +++ b/pandas/tests/frame/apply/test_frame_apply.py @@ -1463,6 +1463,34 @@ def test_agg_cython_table_raises(self, df, func, expected, axis): with pytest.raises(expected, match=msg): df.agg(func, axis=axis) + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.parametrize( + "args, kwargs", + [ + ((1, 2, 3), {}), + ((8, 7, 15), {}), + ((1, 2), {}), + ((1,), {"b": 2}), + ((), {"a": 1, "b": 2}), + ((), {"a": 2, "b": 1}), + ((), {"a": 1, "b": 2, "c": 3}), + ], + ) + def test_agg_args_kwargs(self, axis, args, kwargs): + def f(x, a, b, c=3): + return x.sum() + (a 
+ b) / c + + df = pd.DataFrame([[1, 2], [3, 4]]) + + if axis == 0: + expected = pd.Series([5.0, 7.0]) + else: + expected = pd.Series([4.0, 8.0]) + + result = df.agg(f, axis, *args, **kwargs) + + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("num_cols", [2, 3, 5]) def test_frequency_is_original(self, num_cols): # GH 22150
Although `DataFrame.agg` is documented as accepting `func, axis, *args, **kwargs` with `*args` and `**kwargs` passed to `func`, passing positional arguments raises a TypeError. The reason for this is that the internal call to `self._aggregate` uses a keyword argument (axis) before passing *args and `**kwargs`, and as such the first positional argument is always interpreted as a second specification of `axis`, which raises TypeError. Prior to commit 433c9007781080658553fbef1a4d0c2813b404c0, TypeErrors were being suppressed, falling back to `self.apply`, which in v1.1.0 turned into an error. This fixes issue GH-36948. - [x] closes #36948 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36950
2020-10-07T17:53:27Z
2020-10-09T13:32:56Z
2020-10-09T13:32:56Z
2020-10-09T13:33:42Z
REGR: Make DateOffset immutable
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index e63912ebc8fee..d0d03021629c6 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -14,7 +14,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ -- +- Fixed regression where attempting to mutate a :class:`DateOffset` object would no longer raise an ``AttributeError`` (:issue:`36940`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index a78de3eace98c..101e86bb37912 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1212,9 +1212,8 @@ class DateOffset(RelativeDeltaOffset, metaclass=OffsetMeta): >>> ts + DateOffset(months=2) Timestamp('2017-03-01 09:10:11') """ - - pass - + def __setattr__(self, name, value): + raise AttributeError("DateOffset objects are immutable.") # -------------------------------------------------------------------- diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 3a0a292d360d4..35fef0637dc76 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -4424,3 +4424,20 @@ def test_week_add_invalid(): other = Day() with pytest.raises(TypeError, match="Cannot add"): offset + other + + +@pytest.mark.parametrize( + "attribute", + [ + "hours", + "days", + "weeks", + "months", + "years", + ], +) +def test_dateoffset_immutable(attribute): + offset = DateOffset(**{attribute: 0}) + msg = "DateOffset objects are immutable" + with pytest.raises(AttributeError, match=msg): + setattr(offset, attribute, 5)
- [x] closes #36940 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I'm assuming this is a regression and not behavior we want?
https://api.github.com/repos/pandas-dev/pandas/pulls/36946
2020-10-07T17:11:21Z
2020-10-08T21:10:10Z
2020-10-08T21:10:09Z
2020-10-08T22:22:49Z
use-python-language-in-pip-to-conda
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 33afe8d443457..4fcb8b41fedd6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,10 +39,11 @@ repos: - id: pip_to_conda name: Generate pip dependency from conda description: This hook checks if the conda environment.yml and requirements-dev.txt are equal - language: system + language: python entry: python -m scripts.generate_pip_deps_from_conda files: ^(environment.yml|requirements-dev.txt)$ pass_filenames: false + additional_dependencies: [pyyaml] - repo: https://github.com/asottile/yesqa rev: v1.2.2 hooks:
xref #36531 This is to avoid problems where a dev's own system's Python points to Python2
https://api.github.com/repos/pandas-dev/pandas/pulls/36945
2020-10-07T16:28:49Z
2020-10-09T11:06:58Z
2020-10-09T11:06:58Z
2020-10-09T11:07:01Z
REF/TYP: define methods non-dynamically for SparseArray
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index d4ec641794fc2..5a66bf522215a 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -40,6 +40,7 @@ from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna import pandas.core.algorithms as algos +from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin from pandas.core.arrays.sparse.dtype import SparseDtype from pandas.core.base import PandasObject @@ -195,7 +196,7 @@ def _wrap_result(name, data, sparse_index, fill_value, dtype=None): ) -class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin): +class SparseArray(OpsMixin, PandasObject, ExtensionArray, ExtensionOpsMixin): """ An ExtensionArray for storing sparse data. @@ -762,8 +763,6 @@ def value_counts(self, dropna=True): # -------- def __getitem__(self, key): - # avoid mypy issues when importing at the top-level - from pandas.core.indexing import check_bool_indexer if isinstance(key, tuple): if len(key) > 1: @@ -796,7 +795,6 @@ def __getitem__(self, key): key = check_array_indexer(self, key) if com.is_bool_indexer(key): - key = check_bool_indexer(self, key) return self.take(np.arange(len(key), dtype=np.int32)[key]) elif hasattr(key, "__len__"): @@ -1390,17 +1388,6 @@ def __abs__(self): # Ops # ------------------------------------------------------------------------ - @classmethod - def _create_unary_method(cls, op) -> Callable[["SparseArray"], "SparseArray"]: - def sparse_unary_method(self) -> "SparseArray": - fill_value = op(np.array(self.fill_value)).item() - values = op(self.sp_values) - dtype = SparseDtype(values.dtype, fill_value) - return cls._simple_new(values, self.sp_index, dtype) - - name = f"__{op.__name__}__" - return compat.set_function_name(sparse_unary_method, name, cls) - @classmethod def _create_arithmetic_method(cls, op): op_name = op.__name__ @@ -1444,56 +1431,48 @@ def 
sparse_arithmetic_method(self, other): name = f"__{op.__name__}__" return compat.set_function_name(sparse_arithmetic_method, name, cls) - @classmethod - def _create_comparison_method(cls, op): - op_name = op.__name__ - if op_name in {"and_", "or_"}: - op_name = op_name[:-1] + def _cmp_method(self, other, op) -> "SparseArray": + if not is_scalar(other) and not isinstance(other, type(self)): + # convert list-like to ndarray + other = np.asarray(other) - @unpack_zerodim_and_defer(op_name) - def cmp_method(self, other): - - if not is_scalar(other) and not isinstance(other, type(self)): - # convert list-like to ndarray - other = np.asarray(other) + if isinstance(other, np.ndarray): + # TODO: make this more flexible than just ndarray... + if len(self) != len(other): + raise AssertionError(f"length mismatch: {len(self)} vs. {len(other)}") + other = SparseArray(other, fill_value=self.fill_value) - if isinstance(other, np.ndarray): - # TODO: make this more flexible than just ndarray... - if len(self) != len(other): - raise AssertionError( - f"length mismatch: {len(self)} vs. 
{len(other)}" - ) - other = SparseArray(other, fill_value=self.fill_value) + if isinstance(other, SparseArray): + op_name = op.__name__.strip("_") + return _sparse_array_op(self, other, op, op_name) + else: + with np.errstate(all="ignore"): + fill_value = op(self.fill_value, other) + result = op(self.sp_values, other) + + return type(self)( + result, + sparse_index=self.sp_index, + fill_value=fill_value, + dtype=np.bool_, + ) - if isinstance(other, SparseArray): - return _sparse_array_op(self, other, op, op_name) - else: - with np.errstate(all="ignore"): - fill_value = op(self.fill_value, other) - result = op(self.sp_values, other) + _logical_method = _cmp_method - return type(self)( - result, - sparse_index=self.sp_index, - fill_value=fill_value, - dtype=np.bool_, - ) + def _unary_method(self, op) -> "SparseArray": + fill_value = op(np.array(self.fill_value)).item() + values = op(self.sp_values) + dtype = SparseDtype(values.dtype, fill_value) + return type(self)._simple_new(values, self.sp_index, dtype) - name = f"__{op.__name__}__" - return compat.set_function_name(cmp_method, name, cls) + def __pos__(self) -> "SparseArray": + return self._unary_method(operator.pos) - @classmethod - def _add_unary_ops(cls): - cls.__pos__ = cls._create_unary_method(operator.pos) - cls.__neg__ = cls._create_unary_method(operator.neg) - cls.__invert__ = cls._create_unary_method(operator.invert) + def __neg__(self) -> "SparseArray": + return self._unary_method(operator.neg) - @classmethod - def _add_comparison_ops(cls): - cls.__and__ = cls._create_comparison_method(operator.and_) - cls.__or__ = cls._create_comparison_method(operator.or_) - cls.__xor__ = cls._create_arithmetic_method(operator.xor) - super()._add_comparison_ops() + def __invert__(self) -> "SparseArray": + return self._unary_method(operator.invert) # ---------- # Formatting @@ -1511,8 +1490,6 @@ def _formatter(self, boxed=False): SparseArray._add_arithmetic_ops() -SparseArray._add_comparison_ops() 
-SparseArray._add_unary_ops() def make_sparse(arr: np.ndarray, kind="block", fill_value=None, dtype=None): diff --git a/setup.cfg b/setup.cfg index 8d3d79789a252..554c8b30641f0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -142,9 +142,6 @@ check_untyped_defs=False [mypy-pandas.core.arrays.datetimelike] check_untyped_defs=False -[mypy-pandas.core.arrays.sparse.array] -check_untyped_defs=False - [mypy-pandas.core.arrays.string_] check_untyped_defs=False
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36943
2020-10-07T16:21:39Z
2020-10-10T17:39:29Z
2020-10-10T17:39:29Z
2020-10-10T18:10:33Z
DOC: Update dependency for to_markdown documentation
diff --git a/pandas/core/series.py b/pandas/core/series.py index 2b972d33d7cdd..db274f37f5889 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1471,6 +1471,10 @@ def to_markdown( str {klass} in Markdown-friendly format. + Notes + ----- + Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. + Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
Adding to the documentation that tabulate is required for to_markdown to be executed - [x] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36938
2020-10-07T08:49:02Z
2020-10-17T10:21:30Z
2020-10-17T10:21:30Z
2020-10-17T10:21:31Z
BUG: GH36928 Allow dict_keys to be used as column names by read_csv
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index d0d03021629c6..843a161bf7c6a 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -14,6 +14,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ +- Fixed regression in :func:`read_csv` raising a ``ValueError`` when ``names`` was of type ``dict_keys`` (:issue:`36928`) - Fixed regression where attempting to mutate a :class:`DateOffset` object would no longer raise an ``AttributeError`` (:issue:`36940`) .. --------------------------------------------------------------------------- diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index ede4fdc5e1d8b..63c3f9899d915 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -420,7 +420,9 @@ def _validate_names(names): if names is not None: if len(names) != len(set(names)): raise ValueError("Duplicate names are not allowed.") - if not is_list_like(names, allow_sets=False): + if not ( + is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView) + ): raise ValueError("Names should be an ordered collection.") diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index a6a9e5c5610f2..edf1d780ef107 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -2241,3 +2241,15 @@ def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter): with pytest.raises(ValueError, match=msg): parser.read_table(f, delim_whitespace=True, delimiter=delimiter) + + +def test_dict_keys_as_names(all_parsers): + # GH: 36928 + data = "1,2" + + keys = {"a": int, "b": int}.keys() + parser = all_parsers + + result = parser.read_csv(StringIO(data), names=keys) + expected = DataFrame({"a": [1], "b": [2]}) + tm.assert_frame_equal(result, expected)
- [x] closes #36928 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36937
2020-10-07T08:14:21Z
2020-10-10T17:45:01Z
2020-10-10T17:45:01Z
2020-10-12T14:08:39Z
TYP: clean unreachable code and duplicate test #27396
diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index db7347bb863a5..b085704e8b06f 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -358,63 +358,33 @@ def _to_uni(x): N = 100 chunksize = 1000 - - for ncols in [4]: - base = int((chunksize // ncols or 1) or 1) - for nrows in [ - 2, - 10, - N - 1, - N, - N + 1, - N + 2, - 2 * N - 2, - 2 * N - 1, - 2 * N, - 2 * N + 1, - 2 * N + 2, - base - 1, - base, - base + 1, - ]: - _do_test( - tm.makeCustomDataframe( - nrows, ncols, r_idx_type="dt", c_idx_type="s" - ), - "dt", - "s", - ) - - for ncols in [4]: - base = int((chunksize // ncols or 1) or 1) - for nrows in [ - 2, - 10, - N - 1, - N, - N + 1, - N + 2, - 2 * N - 2, - 2 * N - 1, - 2 * N, - 2 * N + 1, - 2 * N + 2, - base - 1, - base, - base + 1, - ]: - _do_test( - tm.makeCustomDataframe( - nrows, ncols, r_idx_type="dt", c_idx_type="s" - ), - "dt", - "s", - ) - pass + ncols = 4 + base = chunksize // ncols + for nrows in [ + 2, + 10, + N - 1, + N, + N + 1, + N + 2, + 2 * N - 2, + 2 * N - 1, + 2 * N, + 2 * N + 1, + 2 * N + 2, + base - 1, + base, + base + 1, + ]: + _do_test( + tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"), + "dt", + "s", + ) for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]: for ncols in [1, 2, 3, 4]: - base = int((chunksize // ncols or 1) or 1) + base = chunksize // ncols for nrows in [ 2, 10, @@ -440,7 +410,7 @@ def _to_uni(x): ) for ncols in [1, 2, 3, 4]: - base = int((chunksize // ncols or 1) or 1) + base = chunksize // ncols for nrows in [ 10, N - 2,
xref #27396 ``` pandas/tests/frame/test_to_csv.py:363: error: Right operand of 'or' is never evaluated [unreachable] pandas/tests/frame/test_to_csv.py:389: error: Right operand of 'or' is never evaluated [unreachable] pandas/tests/frame/test_to_csv.py:417: error: Right operand of 'or' is never evaluated [unreachable] pandas/tests/frame/test_to_csv.py:443: error: Right operand of 'or' is never evaluated [unreachable] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36935
2020-10-07T05:32:49Z
2020-10-10T17:44:01Z
2020-10-10T17:44:00Z
2022-11-18T02:21:10Z
TST: RollingGroupby.count with closed specified
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ae4d5ea692066..0ab95dd260a9c 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -442,6 +442,7 @@ Groupby/resample/rolling - Bug in :meth:`Rolling.sum()` returned wrong values when dtypes where mixed between float and integer and axis was equal to one (:issue:`20649`, :issue:`35596`) - Bug in :meth:`Rolling.count` returned ``np.nan`` with :class:`pandas.api.indexers.FixedForwardWindowIndexer` as window, ``min_periods=0`` and only missing values in window (:issue:`35579`) - Bug where :class:`pandas.core.window.Rolling` produces incorrect window sizes when using a ``PeriodIndex`` (:issue:`34225`) +- Bug in :meth:`RollingGroupby.count` where a ``ValueError`` was raised when specifying the ``closed`` parameter (:issue:`35869`) Reshaping ^^^^^^^^^ diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index d69ee72a00aee..63bf731e95096 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -472,3 +472,35 @@ def test_groupby_rolling_no_sort(self): index=pd.MultiIndex.from_tuples([(2, 0), (1, 1)], names=["foo", None]), ) tm.assert_frame_equal(result, expected) + + def test_groupby_rolling_count_closed_on(self): + # GH 35869 + df = pd.DataFrame( + { + "column1": range(6), + "column2": range(6), + "group": 3 * ["A", "B"], + "date": pd.date_range(end="20190101", periods=6), + } + ) + result = ( + df.groupby("group") + .rolling("3d", on="date", closed="left")["column1"] + .count() + ) + expected = pd.Series( + [np.nan, 1.0, 1.0, np.nan, 1.0, 1.0], + name="column1", + index=pd.MultiIndex.from_tuples( + [ + ("A", pd.Timestamp("2018-12-27")), + ("A", pd.Timestamp("2018-12-29")), + ("A", pd.Timestamp("2018-12-31")), + ("B", pd.Timestamp("2018-12-28")), + ("B", pd.Timestamp("2018-12-30")), + ("B", pd.Timestamp("2019-01-01")), + ], + names=["group", "date"], + ), + ) + tm.assert_series_equal(result, 
expected)
- [x] closes #35869 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Closed by #31302
https://api.github.com/repos/pandas-dev/pandas/pulls/36934
2020-10-07T04:51:34Z
2020-10-09T20:02:09Z
2020-10-09T20:02:08Z
2020-10-09T20:05:17Z
TST/CLN: roll_sum/mean/var/skew/kurt: simplification for non-monotonic indices
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index b50eaf800533a..2c315ca13e563 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -58,7 +58,7 @@ cdef: cdef inline int int_max(int a, int b): return a if a >= b else b cdef inline int int_min(int a, int b): return a if a <= b else b -cdef bint is_monotonic_start_end_bounds( +cdef bint is_monotonic_increasing_start_end_bounds( ndarray[int64_t, ndim=1] start, ndarray[int64_t, ndim=1] end ): return is_monotonic(start, False)[0] and is_monotonic(end, False)[0] @@ -143,9 +143,11 @@ def roll_sum(ndarray[float64_t] values, ndarray[int64_t] start, int64_t s, e int64_t nobs = 0, i, j, N = len(values) ndarray[float64_t] output - bint is_monotonic_bounds + bint is_monotonic_increasing_bounds - is_monotonic_bounds = is_monotonic_start_end_bounds(start, end) + is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds( + start, end + ) output = np.empty(N, dtype=float) with nogil: @@ -154,7 +156,7 @@ def roll_sum(ndarray[float64_t] values, ndarray[int64_t] start, s = start[i] e = end[i] - if i == 0 or not is_monotonic_bounds: + if i == 0 or not is_monotonic_increasing_bounds: # setup @@ -173,9 +175,10 @@ def roll_sum(ndarray[float64_t] values, ndarray[int64_t] start, output[i] = calc_sum(minp, nobs, sum_x) - if not is_monotonic_bounds: - for j in range(s, e): - remove_sum(values[j], &nobs, &sum_x, &compensation_remove) + if not is_monotonic_increasing_bounds: + nobs = 0 + sum_x = 0.0 + compensation_remove = 0.0 return output @@ -244,9 +247,11 @@ def roll_mean(ndarray[float64_t] values, ndarray[int64_t] start, int64_t s, e Py_ssize_t nobs = 0, i, j, neg_ct = 0, N = len(values) ndarray[float64_t] output - bint is_monotonic_bounds + bint is_monotonic_increasing_bounds - is_monotonic_bounds = is_monotonic_start_end_bounds(start, end) + is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds( + start, end + ) 
output = np.empty(N, dtype=float) with nogil: @@ -255,7 +260,7 @@ def roll_mean(ndarray[float64_t] values, ndarray[int64_t] start, s = start[i] e = end[i] - if i == 0 or not is_monotonic_bounds: + if i == 0 or not is_monotonic_increasing_bounds: # setup for j in range(s, e): @@ -276,10 +281,11 @@ def roll_mean(ndarray[float64_t] values, ndarray[int64_t] start, output[i] = calc_mean(minp, nobs, neg_ct, sum_x) - if not is_monotonic_bounds: - for j in range(s, e): - val = values[j] - remove_mean(val, &nobs, &sum_x, &neg_ct, &compensation_remove) + if not is_monotonic_increasing_bounds: + nobs = 0 + neg_ct = 0 + sum_x = 0.0 + compensation_remove = 0.0 return output # ---------------------------------------------------------------------- @@ -367,10 +373,12 @@ def roll_var(ndarray[float64_t] values, ndarray[int64_t] start, int64_t s, e Py_ssize_t i, j, N = len(values) ndarray[float64_t] output - bint is_monotonic_bounds + bint is_monotonic_increasing_bounds minp = max(minp, 1) - is_monotonic_bounds = is_monotonic_start_end_bounds(start, end) + is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds( + start, end + ) output = np.empty(N, dtype=float) with nogil: @@ -382,7 +390,7 @@ def roll_var(ndarray[float64_t] values, ndarray[int64_t] start, # Over the first window, observations can only be added # never removed - if i == 0 or not is_monotonic_bounds: + if i == 0 or not is_monotonic_increasing_bounds: for j in range(s, e): add_var(values[j], &nobs, &mean_x, &ssqdm_x, &compensation_add) @@ -403,10 +411,11 @@ def roll_var(ndarray[float64_t] values, ndarray[int64_t] start, output[i] = calc_var(minp, ddof, nobs, ssqdm_x) - if not is_monotonic_bounds: - for j in range(s, e): - remove_var(values[j], &nobs, &mean_x, &ssqdm_x, - &compensation_remove) + if not is_monotonic_increasing_bounds: + nobs = 0.0 + mean_x = 0.0 + ssqdm_x = 0.0 + compensation_remove = 0.0 return output @@ -486,10 +495,12 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] 
start, int64_t nobs = 0, i, j, N = len(values) int64_t s, e ndarray[float64_t] output - bint is_monotonic_bounds + bint is_monotonic_increasing_bounds minp = max(minp, 3) - is_monotonic_bounds = is_monotonic_start_end_bounds(start, end) + is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds( + start, end + ) output = np.empty(N, dtype=float) with nogil: @@ -501,7 +512,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start, # Over the first window, observations can only be added # never removed - if i == 0 or not is_monotonic_bounds: + if i == 0 or not is_monotonic_increasing_bounds: for j in range(s, e): val = values[j] @@ -524,10 +535,11 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start, output[i] = calc_skew(minp, nobs, x, xx, xxx) - if not is_monotonic_bounds: - for j in range(s, e): - val = values[j] - remove_skew(val, &nobs, &x, &xx, &xxx) + if not is_monotonic_increasing_bounds: + nobs = 0 + x = 0.0 + xx = 0.0 + xxx = 0.0 return output @@ -611,10 +623,12 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start, float64_t x = 0, xx = 0, xxx = 0, xxxx = 0 int64_t nobs = 0, i, j, s, e, N = len(values) ndarray[float64_t] output - bint is_monotonic_bounds + bint is_monotonic_increasing_bounds minp = max(minp, 4) - is_monotonic_bounds = is_monotonic_start_end_bounds(start, end) + is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds( + start, end + ) output = np.empty(N, dtype=float) with nogil: @@ -626,7 +640,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start, # Over the first window, observations can only be added # never removed - if i == 0 or not is_monotonic_bounds: + if i == 0 or not is_monotonic_increasing_bounds: for j in range(s, e): add_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx) @@ -646,9 +660,12 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start, output[i] = calc_kurt(minp, nobs, x, xx, xxx, xxxx) - if not is_monotonic_bounds: - for j in 
range(s, e): - remove_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx) + if not is_monotonic_increasing_bounds: + nobs = 0 + x = 0.0 + xx = 0.0 + xxx = 0.0 + xxxx = 0.0 return output diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 9bba6d084f9c9..e919812be9fce 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -917,3 +917,98 @@ def test_rolling_var_numerical_issues(func, third_value, values): result = getattr(ds.rolling(2), func)() expected = Series([np.nan] + values) tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["var", "sum", "mean", "skew", "kurt", "min", "max"]) +def test_rolling_decreasing_indices(method): + """ + Make sure that decreasing indices give the same results as increasing indices. + + GH 36933 + """ + df = DataFrame({"values": np.arange(-15, 10) ** 2}) + df_reverse = DataFrame({"values": df["values"][::-1]}, index=df.index[::-1]) + + increasing = getattr(df.rolling(window=5), method)() + decreasing = getattr(df_reverse.rolling(window=5), method)() + + assert np.abs(decreasing.values[::-1][:-4] - increasing.values[4:]).max() < 1e-12 + + +@pytest.mark.parametrize( + "method,expected", + [ + ( + "var", + [ + float("nan"), + 43.0, + float("nan"), + 136.333333, + 43.5, + 94.966667, + 182.0, + 318.0, + ], + ), + ("mean", [float("nan"), 7.5, float("nan"), 21.5, 6.0, 9.166667, 13.0, 17.5]), + ("sum", [float("nan"), 30.0, float("nan"), 86.0, 30.0, 55.0, 91.0, 140.0]), + ( + "skew", + [ + float("nan"), + 0.709296, + float("nan"), + 0.407073, + 0.984656, + 0.919184, + 0.874674, + 0.842418, + ], + ), + ( + "kurt", + [ + float("nan"), + -0.5916711736073559, + float("nan"), + -1.0028993131317954, + -0.06103844629409494, + -0.254143227116194, + -0.37362637362637585, + -0.45439658241367054, + ], + ), + ], +) +def test_rolling_non_monotonic(method, expected): + """ + Make sure the (rare) branch of non-monotonic indices is covered by a test. 
+ + output from 1.1.3 is assumed to be the expected output. Output of sum/mean has + manually been verified. + + GH 36933. + """ + # Based on an example found in computation.rst + use_expanding = [True, False, True, False, True, True, True, True] + df = DataFrame({"values": np.arange(len(use_expanding)) ** 2}) + + class CustomIndexer(pd.api.indexers.BaseIndexer): + def get_window_bounds(self, num_values, min_periods, center, closed): + start = np.empty(num_values, dtype=np.int64) + end = np.empty(num_values, dtype=np.int64) + for i in range(num_values): + if self.use_expanding[i]: + start[i] = 0 + end[i] = i + 1 + else: + start[i] = i + end[i] = i + self.window_size + return start, end + + indexer = CustomIndexer(window_size=4, use_expanding=use_expanding) + + result = getattr(df.rolling(indexer), method)() + expected = DataFrame({"values": expected}) + tm.assert_frame_equal(result, expected)
- [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` The removed for-loop doesn't seem to be necessary (I hope this code is tested by an existing test). I feel like I'm missing an obvious reason why these for-loops are needed: looking at the code I don't think we need them and the tests also pass.
https://api.github.com/repos/pandas-dev/pandas/pulls/36933
2020-10-07T04:49:54Z
2020-10-25T22:09:38Z
2020-10-25T22:09:37Z
2020-10-25T22:09:55Z
REF/TYP: arrays.datetimelike
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 6285f142b2391..d6417bfe3f8e8 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1,6 +1,17 @@ from datetime import datetime, timedelta import operator -from typing import Any, Callable, Optional, Sequence, Tuple, Type, TypeVar, Union +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, + cast, +) import warnings import numpy as np @@ -60,6 +71,9 @@ from pandas.tseries import frequencies +if TYPE_CHECKING: + from pandas.core.arrays import DatetimeArray, TimedeltaArray + DTScalarOrNaT = Union[DatetimeLikeScalar, NaTType] @@ -75,6 +89,9 @@ class InvalidComparison(Exception): class AttributesMixin: _data: np.ndarray + def __init__(self, data, dtype=None, freq=None, copy=False): + raise AbstractMethodError(self) + @classmethod def _simple_new( cls, values: np.ndarray, freq: Optional[BaseOffset] = None, dtype=None @@ -168,220 +185,6 @@ def _check_compatible_with( raise AbstractMethodError(self) -class DatelikeOps: - """ - Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex. - """ - - @Substitution( - URL="https://docs.python.org/3/library/datetime.html" - "#strftime-and-strptime-behavior" - ) - def strftime(self, date_format): - """ - Convert to Index using specified date_format. - - Return an Index of formatted strings specified by date_format, which - supports the same string format as the python standard library. Details - of the string format can be found in `python string format - doc <%(URL)s>`__. - - Parameters - ---------- - date_format : str - Date format string (e.g. "%%Y-%%m-%%d"). - - Returns - ------- - ndarray - NumPy ndarray of formatted strings. - - See Also - -------- - to_datetime : Convert the given argument to datetime. - DatetimeIndex.normalize : Return DatetimeIndex with times to midnight. 
- DatetimeIndex.round : Round the DatetimeIndex to the specified freq. - DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq. - - Examples - -------- - >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"), - ... periods=3, freq='s') - >>> rng.strftime('%%B %%d, %%Y, %%r') - Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM', - 'March 10, 2018, 09:00:02 AM'], - dtype='object') - """ - result = self._format_native_types(date_format=date_format, na_rep=np.nan) - return result.astype(object) - - -class TimelikeOps: - """ - Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex. - """ - - _round_doc = """ - Perform {op} operation on the data to the specified `freq`. - - Parameters - ---------- - freq : str or Offset - The frequency level to {op} the index to. Must be a fixed - frequency like 'S' (second) not 'ME' (month end). See - :ref:`frequency aliases <timeseries.offset_aliases>` for - a list of possible `freq` values. - ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' - Only relevant for DatetimeIndex: - - - 'infer' will attempt to infer fall dst-transition hours based on - order - - bool-ndarray where True signifies a DST time, False designates - a non-DST time (note that this flag is only applicable for - ambiguous times) - - 'NaT' will return NaT where there are ambiguous times - - 'raise' will raise an AmbiguousTimeError if there are ambiguous - times. - - .. versionadded:: 0.24.0 - - nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \ -default 'raise' - A nonexistent time does not exist in a particular timezone - where clocks moved forward due to DST. 
- - - 'shift_forward' will shift the nonexistent time forward to the - closest existing time - - 'shift_backward' will shift the nonexistent time backward to the - closest existing time - - 'NaT' will return NaT where there are nonexistent times - - timedelta objects will shift nonexistent times by the timedelta - - 'raise' will raise an NonExistentTimeError if there are - nonexistent times. - - .. versionadded:: 0.24.0 - - Returns - ------- - DatetimeIndex, TimedeltaIndex, or Series - Index of the same type for a DatetimeIndex or TimedeltaIndex, - or a Series with the same index for a Series. - - Raises - ------ - ValueError if the `freq` cannot be converted. - - Examples - -------- - **DatetimeIndex** - - >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min') - >>> rng - DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00', - '2018-01-01 12:01:00'], - dtype='datetime64[ns]', freq='T') - """ - - _round_example = """>>> rng.round('H') - DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', - '2018-01-01 12:00:00'], - dtype='datetime64[ns]', freq=None) - - **Series** - - >>> pd.Series(rng).dt.round("H") - 0 2018-01-01 12:00:00 - 1 2018-01-01 12:00:00 - 2 2018-01-01 12:00:00 - dtype: datetime64[ns] - """ - - _floor_example = """>>> rng.floor('H') - DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00', - '2018-01-01 12:00:00'], - dtype='datetime64[ns]', freq=None) - - **Series** - - >>> pd.Series(rng).dt.floor("H") - 0 2018-01-01 11:00:00 - 1 2018-01-01 12:00:00 - 2 2018-01-01 12:00:00 - dtype: datetime64[ns] - """ - - _ceil_example = """>>> rng.ceil('H') - DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', - '2018-01-01 13:00:00'], - dtype='datetime64[ns]', freq=None) - - **Series** - - >>> pd.Series(rng).dt.ceil("H") - 0 2018-01-01 12:00:00 - 1 2018-01-01 12:00:00 - 2 2018-01-01 13:00:00 - dtype: datetime64[ns] - """ - - def _round(self, freq, mode, ambiguous, nonexistent): - # round the local times - if 
is_datetime64tz_dtype(self.dtype): - # operate on naive timestamps, then convert back to aware - naive = self.tz_localize(None) - result = naive._round(freq, mode, ambiguous, nonexistent) - aware = result.tz_localize( - self.tz, ambiguous=ambiguous, nonexistent=nonexistent - ) - return aware - - values = self.view("i8") - result = round_nsint64(values, mode, freq) - result = self._maybe_mask_results(result, fill_value=NaT) - return self._simple_new(result, dtype=self.dtype) - - @Appender((_round_doc + _round_example).format(op="round")) - def round(self, freq, ambiguous="raise", nonexistent="raise"): - return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent) - - @Appender((_round_doc + _floor_example).format(op="floor")) - def floor(self, freq, ambiguous="raise", nonexistent="raise"): - return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent) - - @Appender((_round_doc + _ceil_example).format(op="ceil")) - def ceil(self, freq, ambiguous="raise", nonexistent="raise"): - return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) - - def _with_freq(self, freq): - """ - Helper to get a view on the same data, with a new freq. - - Parameters - ---------- - freq : DateOffset, None, or "infer" - - Returns - ------- - Same type as self - """ - # GH#29843 - if freq is None: - # Always valid - pass - elif len(self) == 0 and isinstance(freq, BaseOffset): - # Always valid. In the TimedeltaArray case, we assume this - # is a Tick offset. 
- pass - else: - # As an internal method, we can ensure this assertion always holds - assert freq == "infer" - freq = to_offset(self.inferred_freq) - - arr = self.view() - arr._freq = freq - return arr - - DatetimeLikeArrayT = TypeVar("DatetimeLikeArrayT", bound="DatetimeLikeArrayMixin") @@ -705,7 +508,7 @@ def _validate_shift_value(self, fill_value): # only warn if we're not going to raise if self._scalar_type is Period and lib.is_integer(fill_value): # kludge for #31971 since Period(integer) tries to cast to str - new_fill = Period._from_ordinal(fill_value, freq=self.dtype.freq) + new_fill = Period._from_ordinal(fill_value, freq=self.freq) else: new_fill = self._scalar_type(fill_value) @@ -1033,6 +836,10 @@ def _validate_frequency(cls, index, freq, **kwargs): f"does not conform to passed frequency {freq.freqstr}" ) from e + @classmethod + def _generate_range(cls, start, end, periods, freq, *args, **kwargs): + raise AbstractMethodError(cls) + # monotonicity/uniqueness properties are called via frequencies.infer_freq, # see GH#23789 @@ -1417,6 +1224,7 @@ def __rsub__(self, other): # TODO: Can we simplify/generalize these cases at all? raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}") elif is_timedelta64_dtype(self.dtype): + self = cast("TimedeltaArray", self) return (-self) + other # We get here with e.g. datetime objects @@ -1567,6 +1375,224 @@ def median(self, axis: Optional[int] = None, skipna: bool = True, *args, **kwarg return self._from_backing_data(result.astype("i8")) +class DatelikeOps(DatetimeLikeArrayMixin): + """ + Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex. + """ + + @Substitution( + URL="https://docs.python.org/3/library/datetime.html" + "#strftime-and-strptime-behavior" + ) + def strftime(self, date_format): + """ + Convert to Index using specified date_format. + + Return an Index of formatted strings specified by date_format, which + supports the same string format as the python standard library. 
Details + of the string format can be found in `python string format + doc <%(URL)s>`__. + + Parameters + ---------- + date_format : str + Date format string (e.g. "%%Y-%%m-%%d"). + + Returns + ------- + ndarray + NumPy ndarray of formatted strings. + + See Also + -------- + to_datetime : Convert the given argument to datetime. + DatetimeIndex.normalize : Return DatetimeIndex with times to midnight. + DatetimeIndex.round : Round the DatetimeIndex to the specified freq. + DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq. + + Examples + -------- + >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"), + ... periods=3, freq='s') + >>> rng.strftime('%%B %%d, %%Y, %%r') + Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM', + 'March 10, 2018, 09:00:02 AM'], + dtype='object') + """ + result = self._format_native_types(date_format=date_format, na_rep=np.nan) + return result.astype(object) + + +_round_doc = """ + Perform {op} operation on the data to the specified `freq`. + + Parameters + ---------- + freq : str or Offset + The frequency level to {op} the index to. Must be a fixed + frequency like 'S' (second) not 'ME' (month end). See + :ref:`frequency aliases <timeseries.offset_aliases>` for + a list of possible `freq` values. + ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' + Only relevant for DatetimeIndex: + + - 'infer' will attempt to infer fall dst-transition hours based on + order + - bool-ndarray where True signifies a DST time, False designates + a non-DST time (note that this flag is only applicable for + ambiguous times) + - 'NaT' will return NaT where there are ambiguous times + - 'raise' will raise an AmbiguousTimeError if there are ambiguous + times. + + .. versionadded:: 0.24.0 + + nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise' + A nonexistent time does not exist in a particular timezone + where clocks moved forward due to DST. 
+ + - 'shift_forward' will shift the nonexistent time forward to the + closest existing time + - 'shift_backward' will shift the nonexistent time backward to the + closest existing time + - 'NaT' will return NaT where there are nonexistent times + - timedelta objects will shift nonexistent times by the timedelta + - 'raise' will raise an NonExistentTimeError if there are + nonexistent times. + + .. versionadded:: 0.24.0 + + Returns + ------- + DatetimeIndex, TimedeltaIndex, or Series + Index of the same type for a DatetimeIndex or TimedeltaIndex, + or a Series with the same index for a Series. + + Raises + ------ + ValueError if the `freq` cannot be converted. + + Examples + -------- + **DatetimeIndex** + + >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min') + >>> rng + DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00', + '2018-01-01 12:01:00'], + dtype='datetime64[ns]', freq='T') + """ + +_round_example = """>>> rng.round('H') + DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', + '2018-01-01 12:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.round("H") + 0 2018-01-01 12:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 12:00:00 + dtype: datetime64[ns] + """ + +_floor_example = """>>> rng.floor('H') + DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00', + '2018-01-01 12:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.floor("H") + 0 2018-01-01 11:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 12:00:00 + dtype: datetime64[ns] + """ + +_ceil_example = """>>> rng.ceil('H') + DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', + '2018-01-01 13:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.ceil("H") + 0 2018-01-01 12:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 13:00:00 + dtype: datetime64[ns] + """ + + +class TimelikeOps(DatetimeLikeArrayMixin): + """ + Common ops for TimedeltaIndex/DatetimeIndex, but not 
PeriodIndex. + """ + + def _round(self, freq, mode, ambiguous, nonexistent): + # round the local times + if is_datetime64tz_dtype(self.dtype): + # operate on naive timestamps, then convert back to aware + self = cast("DatetimeArray", self) + naive = self.tz_localize(None) + result = naive._round(freq, mode, ambiguous, nonexistent) + aware = result.tz_localize( + self.tz, ambiguous=ambiguous, nonexistent=nonexistent + ) + return aware + + values = self.view("i8") + result = round_nsint64(values, mode, freq) + result = self._maybe_mask_results(result, fill_value=NaT) + return self._simple_new(result, dtype=self.dtype) + + @Appender((_round_doc + _round_example).format(op="round")) + def round(self, freq, ambiguous="raise", nonexistent="raise"): + return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent) + + @Appender((_round_doc + _floor_example).format(op="floor")) + def floor(self, freq, ambiguous="raise", nonexistent="raise"): + return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent) + + @Appender((_round_doc + _ceil_example).format(op="ceil")) + def ceil(self, freq, ambiguous="raise", nonexistent="raise"): + return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) + + # -------------------------------------------------------------- + # Frequency Methods + + def _with_freq(self, freq): + """ + Helper to get a view on the same data, with a new freq. + + Parameters + ---------- + freq : DateOffset, None, or "infer" + + Returns + ------- + Same type as self + """ + # GH#29843 + if freq is None: + # Always valid + pass + elif len(self) == 0 and isinstance(freq, BaseOffset): + # Always valid. In the TimedeltaArray case, we assume this + # is a Tick offset. 
+ pass + else: + # As an internal method, we can ensure this assertion always holds + assert freq == "infer" + freq = to_offset(self.inferred_freq) + + arr = self.view() + arr._freq = freq + return arr + + # ------------------------------------------------------------------- # Shared Constructor Helpers diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 1e879e32bed5f..fb8604a8c87ba 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -113,7 +113,7 @@ def f(self): return property(f) -class DatetimeArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps): +class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps): """ Pandas ExtensionArray for tz-naive or tz-aware datetime data. diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index ed45b4da7279e..bf2b3a0a1c9ba 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -63,7 +63,7 @@ def f(self): return property(f) -class PeriodArray(PeriodMixin, dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps): +class PeriodArray(PeriodMixin, dtl.DatelikeOps): """ Pandas ExtensionArray for storing Period data. diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 0853664c766bb..6ddb3f1b7aa53 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -63,7 +63,7 @@ def f(self) -> np.ndarray: return property(f) -class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps): +class TimedeltaArray(dtl.TimelikeOps): """ Pandas ExtensionArray for timedelta data. @@ -262,9 +262,7 @@ def _from_sequence_not_strict( return result @classmethod - def _generate_range( - cls, start, end, periods, freq, closed=None - ) -> "TimedeltaArray": + def _generate_range(cls, start, end, periods, freq, closed=None): periods = dtl.validate_periods(periods) if freq is None and any(x is None for x in [periods, start, end]):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry @simonjayhawkins this is still giving me two mypy complaints locally: ``` pandas/core/arrays/datetimelike.py:549: error: Too many arguments for "object" [call-arg] pandas/core/arrays/datetimelike.py:556: error: Too many arguments for "object" [call-arg] ``` These lines are both calls to `self._scalar_type(foo)` _scalar_type is annotated as `Type[DatetimeLikeScalar]`, which is a TypeVar for Period/Timestamp/Timedelta. Is there a non-ignoring solution here?
https://api.github.com/repos/pandas-dev/pandas/pulls/36932
2020-10-07T04:04:38Z
2020-10-10T22:25:10Z
2020-10-10T22:25:10Z
2020-10-10T22:45:23Z
TYP: define RangeIndex methods non-dynamically
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index f0b0773aeb47b..28a5243531c58 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -9,7 +9,6 @@ from pandas._libs import index as libindex from pandas._libs.lib import no_default from pandas._typing import Label -import pandas.compat as compat from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, cache_readonly, doc @@ -811,83 +810,95 @@ def any(self) -> bool: # -------------------------------------------------------------------- - @classmethod - def _add_numeric_methods_binary(cls): - """ add in numeric methods, specialized to RangeIndex """ - - def _make_evaluate_binop(op, step=False): - """ - Parameters - ---------- - op : callable that accepts 2 params - perform the binary op - step : callable, optional, default to False - op to apply to the step parm if not None - if False, use the existing step - """ - - @unpack_zerodim_and_defer(op.__name__) - def _evaluate_numeric_binop(self, other): - if isinstance(other, ABCTimedeltaIndex): - # Defer to TimedeltaIndex implementation - return NotImplemented - elif isinstance(other, (timedelta, np.timedelta64)): - # GH#19333 is_integer evaluated True on timedelta64, - # so we need to catch these explicitly - return op(self._int64index, other) - elif is_timedelta64_dtype(other): - # Must be an np.ndarray; GH#22390 - return op(self._int64index, other) - - other = extract_array(other, extract_numpy=True) - attrs = self._get_attributes_dict() - - left, right = self, other + def _arith_method(self, other, op, step=False): + """ + Parameters + ---------- + other : Any + op : callable that accepts 2 params + perform the binary op + step : callable, optional, default to False + op to apply to the step parm if not None + if False, use the existing step + """ + + if isinstance(other, ABCTimedeltaIndex): + # Defer to TimedeltaIndex implementation + return NotImplemented + elif 
isinstance(other, (timedelta, np.timedelta64)): + # GH#19333 is_integer evaluated True on timedelta64, + # so we need to catch these explicitly + return op(self._int64index, other) + elif is_timedelta64_dtype(other): + # Must be an np.ndarray; GH#22390 + return op(self._int64index, other) + + other = extract_array(other, extract_numpy=True) + attrs = self._get_attributes_dict() + + left, right = self, other - try: - # apply if we have an override - if step: - with np.errstate(all="ignore"): - rstep = step(left.step, right) + try: + # apply if we have an override + if step: + with np.errstate(all="ignore"): + rstep = step(left.step, right) + + # we don't have a representable op + # so return a base index + if not is_integer(rstep) or not rstep: + raise ValueError + + else: + rstep = left.step + + with np.errstate(all="ignore"): + rstart = op(left.start, right) + rstop = op(left.stop, right) + + result = type(self)(rstart, rstop, rstep, **attrs) - # we don't have a representable op - # so return a base index - if not is_integer(rstep) or not rstep: - raise ValueError + # for compat with numpy / Int64Index + # even if we can represent as a RangeIndex, return + # as a Float64Index if we have float-like descriptors + if not all(is_integer(x) for x in [rstart, rstop, rstep]): + result = result.astype("float64") - else: - rstep = left.step + return result - with np.errstate(all="ignore"): - rstart = op(left.start, right) - rstop = op(left.stop, right) + except (ValueError, TypeError, ZeroDivisionError): + # Defer to Int64Index implementation + return op(self._int64index, other) + # TODO: Do attrs get handled reliably? 
- result = type(self)(rstart, rstop, rstep, **attrs) + @unpack_zerodim_and_defer("__add__") + def __add__(self, other): + return self._arith_method(other, operator.add) - # for compat with numpy / Int64Index - # even if we can represent as a RangeIndex, return - # as a Float64Index if we have float-like descriptors - if not all(is_integer(x) for x in [rstart, rstop, rstep]): - result = result.astype("float64") + @unpack_zerodim_and_defer("__radd__") + def __radd__(self, other): + return self._arith_method(other, ops.radd) - return result + @unpack_zerodim_and_defer("__sub__") + def __sub__(self, other): + return self._arith_method(other, operator.sub) - except (ValueError, TypeError, ZeroDivisionError): - # Defer to Int64Index implementation - return op(self._int64index, other) - # TODO: Do attrs get handled reliably? + @unpack_zerodim_and_defer("__rsub__") + def __rsub__(self, other): + return self._arith_method(other, ops.rsub) - name = f"__{op.__name__}__" - return compat.set_function_name(_evaluate_numeric_binop, name, cls) + @unpack_zerodim_and_defer("__mul__") + def __mul__(self, other): + return self._arith_method(other, operator.mul, step=operator.mul) - cls.__add__ = _make_evaluate_binop(operator.add) - cls.__radd__ = _make_evaluate_binop(ops.radd) - cls.__sub__ = _make_evaluate_binop(operator.sub) - cls.__rsub__ = _make_evaluate_binop(ops.rsub) - cls.__mul__ = _make_evaluate_binop(operator.mul, step=operator.mul) - cls.__rmul__ = _make_evaluate_binop(ops.rmul, step=ops.rmul) - cls.__truediv__ = _make_evaluate_binop(operator.truediv, step=operator.truediv) - cls.__rtruediv__ = _make_evaluate_binop(ops.rtruediv, step=ops.rtruediv) + @unpack_zerodim_and_defer("__rmul__") + def __rmul__(self, other): + return self._arith_method(other, ops.rmul, step=ops.rmul) + @unpack_zerodim_and_defer("__truediv__") + def __truediv__(self, other): + return self._arith_method(other, operator.truediv, step=operator.truediv) -RangeIndex._add_numeric_methods() + 
@unpack_zerodim_and_defer("__rtruediv__") + def __rtruediv__(self, other): + return self._arith_method(other, ops.rtruediv, step=ops.rtruediv) diff --git a/setup.cfg b/setup.cfg index 8ec10e7db5a5c..b74e0c2cf6920 100644 --- a/setup.cfg +++ b/setup.cfg @@ -196,9 +196,6 @@ check_untyped_defs=False [mypy-pandas.core.indexes.multi] check_untyped_defs=False -[mypy-pandas.core.indexes.range] -check_untyped_defs=False - [mypy-pandas.core.internals.blocks] check_untyped_defs=False
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36931
2020-10-07T00:46:21Z
2020-10-08T14:34:03Z
2020-10-08T14:34:03Z
2020-10-08T15:13:50Z
TYP/REF: define comparison methods non-dynamically
diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py new file mode 100644 index 0000000000000..1fba022f2a1de --- /dev/null +++ b/pandas/core/arraylike.py @@ -0,0 +1,43 @@ +""" +Methods that can be shared by many array-like classes or subclasses: + Series + Index + ExtensionArray +""" +import operator + +from pandas.errors import AbstractMethodError + +from pandas.core.ops.common import unpack_zerodim_and_defer + + +class OpsMixin: + # ------------------------------------------------------------- + # Comparisons + + def _cmp_method(self, other, op): + raise AbstractMethodError(self) + + @unpack_zerodim_and_defer("__eq__") + def __eq__(self, other): + return self._cmp_method(other, operator.eq) + + @unpack_zerodim_and_defer("__ne__") + def __ne__(self, other): + return self._cmp_method(other, operator.ne) + + @unpack_zerodim_and_defer("__lt__") + def __lt__(self, other): + return self._cmp_method(other, operator.lt) + + @unpack_zerodim_and_defer("__le__") + def __le__(self, other): + return self._cmp_method(other, operator.le) + + @unpack_zerodim_and_defer("__gt__") + def __gt__(self, other): + return self._cmp_method(other, operator.gt) + + @unpack_zerodim_and_defer("__ge__") + def __ge__(self, other): + return self._cmp_method(other, operator.ge) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 1ab941eb7322d..b4f902129d010 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -24,7 +24,6 @@ round_nsint64, ) from pandas._typing import DatetimeLikeScalar, DtypeObj -from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning from pandas.util._decorators import Appender, Substitution, cache_readonly @@ -51,8 +50,8 @@ from pandas.core import nanops, ops from pandas.core.algorithms import checked_add_with_arr, unique1d, value_counts +from pandas.core.arraylike 
import OpsMixin from pandas.core.arrays._mixins import NDArrayBackedExtensionArray -from pandas.core.arrays.base import ExtensionOpsMixin import pandas.core.common as com from pandas.core.construction import array, extract_array from pandas.core.indexers import check_array_indexer, check_setitem_lengths @@ -73,46 +72,6 @@ class InvalidComparison(Exception): pass -def _datetimelike_array_cmp(cls, op): - """ - Wrap comparison operations to convert Timestamp/Timedelta/Period-like to - boxed scalars/arrays. - """ - opname = f"__{op.__name__}__" - nat_result = opname == "__ne__" - - @unpack_zerodim_and_defer(opname) - def wrapper(self, other): - if self.ndim > 1 and getattr(other, "shape", None) == self.shape: - # TODO: handle 2D-like listlikes - return op(self.ravel(), other.ravel()).reshape(self.shape) - - try: - other = self._validate_comparison_value(other, opname) - except InvalidComparison: - return invalid_comparison(self, other, op) - - dtype = getattr(other, "dtype", None) - if is_object_dtype(dtype): - # We have to use comp_method_OBJECT_ARRAY instead of numpy - # comparison otherwise it would fail to raise when - # comparing tz-aware and tz-naive - with np.errstate(all="ignore"): - result = ops.comp_method_OBJECT_ARRAY(op, self.astype(object), other) - return result - - other_i8 = self._unbox(other) - result = op(self.asi8, other_i8) - - o_mask = isna(other) - if self._hasnans | np.any(o_mask): - result[self._isnan | o_mask] = nat_result - - return result - - return set_function_name(wrapper, opname, cls) - - class AttributesMixin: _data: np.ndarray @@ -426,9 +385,7 @@ def _with_freq(self, freq): DatetimeLikeArrayT = TypeVar("DatetimeLikeArrayT", bound="DatetimeLikeArrayMixin") -class DatetimeLikeArrayMixin( - ExtensionOpsMixin, AttributesMixin, NDArrayBackedExtensionArray -): +class DatetimeLikeArrayMixin(OpsMixin, AttributesMixin, NDArrayBackedExtensionArray): """ Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray @@ -1093,7 +1050,35 @@ 
def _is_unique(self): # ------------------------------------------------------------------ # Arithmetic Methods - _create_comparison_method = classmethod(_datetimelike_array_cmp) + + def _cmp_method(self, other, op): + if self.ndim > 1 and getattr(other, "shape", None) == self.shape: + # TODO: handle 2D-like listlikes + return op(self.ravel(), other.ravel()).reshape(self.shape) + + try: + other = self._validate_comparison_value(other, f"__{op.__name__}__") + except InvalidComparison: + return invalid_comparison(self, other, op) + + dtype = getattr(other, "dtype", None) + if is_object_dtype(dtype): + # We have to use comp_method_OBJECT_ARRAY instead of numpy + # comparison otherwise it would fail to raise when + # comparing tz-aware and tz-naive + with np.errstate(all="ignore"): + result = ops.comp_method_OBJECT_ARRAY(op, self.astype(object), other) + return result + + other_i8 = self._unbox(other) + result = op(self.asi8, other_i8) + + o_mask = isna(other) + if self._hasnans | np.any(o_mask): + nat_result = op is operator.ne + result[self._isnan | o_mask] = nat_result + + return result # pow is invalid for all three subclasses; TimedeltaArray will override # the multiplication and division ops @@ -1560,8 +1545,6 @@ def mean(self, skipna=True): return self._box_func(result) -DatetimeLikeArrayMixin._add_comparison_ops() - # ------------------------------------------------------------------- # Shared Constructor Helpers diff --git a/pandas/core/base.py b/pandas/core/base.py index 9e6f93b656af8..e80bceb785165 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -30,6 +30,7 @@ from pandas.core import algorithms, common as com from pandas.core.accessor import DirNamesMixin from pandas.core.algorithms import duplicated, unique1d, value_counts +from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ExtensionArray from pandas.core.construction import create_series_with_explicit_dtype import pandas.core.nanops as nanops @@ -587,7 +588,7 @@ def 
_is_builtin_func(self, arg): return self._builtin_table.get(arg, arg) -class IndexOpsMixin: +class IndexOpsMixin(OpsMixin): """ Common ops mixin to support a unified interface / docs for Series / Index """ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f0fb34dadb257..21bb2a8ec40ab 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1764,7 +1764,7 @@ def _drop_labels_or_levels(self, keys, axis: int = 0): # ---------------------------------------------------------------------- # Iteration - def __hash__(self): + def __hash__(self) -> int: raise TypeError( f"{repr(type(self).__name__)} objects are mutable, " f"thus they cannot be hashed" diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 4967e13a9855a..963ec3090d747 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -119,41 +119,6 @@ str_t = str -def _make_comparison_op(op, cls): - def cmp_method(self, other): - if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)): - if other.ndim > 0 and len(self) != len(other): - raise ValueError("Lengths must match to compare") - - if is_object_dtype(self.dtype) and isinstance(other, ABCCategorical): - left = type(other)(self._values, dtype=other.dtype) - return op(left, other) - elif is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): - # e.g. 
PeriodArray - with np.errstate(all="ignore"): - result = op(self._values, other) - - elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): - # don't pass MultiIndex - with np.errstate(all="ignore"): - result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) - - elif is_interval_dtype(self.dtype): - with np.errstate(all="ignore"): - result = op(self._values, np.asarray(other)) - - else: - with np.errstate(all="ignore"): - result = ops.comparison_op(self._values, np.asarray(other), op) - - if is_bool_dtype(result): - return result - return ops.invalid_comparison(self, other, op) - - name = f"__{op.__name__}__" - return set_function_name(cmp_method, name, cls) - - def _make_arithmetic_op(op, cls): def index_arithmetic_method(self, other): if isinstance(other, (ABCSeries, ABCDataFrame, ABCTimedeltaIndex)): @@ -5395,17 +5360,38 @@ def drop(self, labels, errors: str_t = "raise"): # -------------------------------------------------------------------- # Generated Arithmetic, Comparison, and Unary Methods - @classmethod - def _add_comparison_methods(cls): + def _cmp_method(self, other, op): """ - Add in comparison methods. + Wrapper used to dispatch comparison operations. """ - cls.__eq__ = _make_comparison_op(operator.eq, cls) - cls.__ne__ = _make_comparison_op(operator.ne, cls) - cls.__lt__ = _make_comparison_op(operator.lt, cls) - cls.__gt__ = _make_comparison_op(operator.gt, cls) - cls.__le__ = _make_comparison_op(operator.le, cls) - cls.__ge__ = _make_comparison_op(operator.ge, cls) + if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)): + if other.ndim > 0 and len(self) != len(other): + raise ValueError("Lengths must match to compare") + + if is_object_dtype(self.dtype) and isinstance(other, ABCCategorical): + left = type(other)(self._values, dtype=other.dtype) + return op(left, other) + elif is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): + # e.g. 
PeriodArray + with np.errstate(all="ignore"): + result = op(self._values, other) + + elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): + # don't pass MultiIndex + with np.errstate(all="ignore"): + result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) + + elif is_interval_dtype(self.dtype): + with np.errstate(all="ignore"): + result = op(self._values, np.asarray(other)) + + else: + with np.errstate(all="ignore"): + result = ops.comparison_op(self._values, np.asarray(other), op) + + if is_bool_dtype(result): + return result + return ops.invalid_comparison(self, other, op) @classmethod def _add_numeric_methods_binary(cls): @@ -5589,7 +5575,6 @@ def shape(self): Index._add_numeric_methods() Index._add_logical_methods() -Index._add_comparison_methods() def ensure_index_from_sequences(sequences, names=None): diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 36e3a0e37c1ae..dc8a5519595a7 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -20,13 +20,13 @@ from pandas.core import algorithms from pandas.core.construction import extract_array -from pandas.core.ops.array_ops import ( +from pandas.core.ops.array_ops import ( # noqa:F401 arithmetic_op, + comp_method_OBJECT_ARRAY, comparison_op, get_array_op, logical_op, ) -from pandas.core.ops.array_ops import comp_method_OBJECT_ARRAY # noqa:F401 from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.docstrings import ( _arith_doc_FRAME, @@ -324,33 +324,6 @@ def wrapper(left, right): return wrapper -def comp_method_SERIES(cls, op, special): - """ - Wrapper function for Series arithmetic operations, to avoid - code duplication. 
- """ - assert special # non-special uses flex_method_SERIES - op_name = _get_op_name(op, special) - - @unpack_zerodim_and_defer(op_name) - def wrapper(self, other): - - res_name = get_op_result_name(self, other) - - if isinstance(other, ABCSeries) and not self._indexed_same(other): - raise ValueError("Can only compare identically-labeled Series objects") - - lvalues = extract_array(self, extract_numpy=True) - rvalues = extract_array(other, extract_numpy=True) - - res_values = comparison_op(lvalues, rvalues, op) - - return self._construct_result(res_values, name=res_name) - - wrapper.__name__ = op_name - return wrapper - - def bool_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py index 852157e52d5fe..2b117d5e22186 100644 --- a/pandas/core/ops/methods.py +++ b/pandas/core/ops/methods.py @@ -48,7 +48,6 @@ def _get_method_wrappers(cls): arith_method_SERIES, bool_method_SERIES, comp_method_FRAME, - comp_method_SERIES, flex_comp_method_FRAME, flex_method_SERIES, ) @@ -58,7 +57,7 @@ def _get_method_wrappers(cls): arith_flex = flex_method_SERIES comp_flex = flex_method_SERIES arith_special = arith_method_SERIES - comp_special = comp_method_SERIES + comp_special = None bool_special = bool_method_SERIES elif issubclass(cls, ABCDataFrame): arith_flex = arith_method_FRAME @@ -189,16 +188,18 @@ def _create_methods(cls, arith_method, comp_method, bool_method, special): new_methods["divmod"] = arith_method(cls, divmod, special) new_methods["rdivmod"] = arith_method(cls, rdivmod, special) - new_methods.update( - dict( - eq=comp_method(cls, operator.eq, special), - ne=comp_method(cls, operator.ne, special), - lt=comp_method(cls, operator.lt, special), - gt=comp_method(cls, operator.gt, special), - le=comp_method(cls, operator.le, special), - ge=comp_method(cls, operator.ge, special), + if comp_method is not None: + # Series already has this pinned + 
new_methods.update( + dict( + eq=comp_method(cls, operator.eq, special), + ne=comp_method(cls, operator.ne, special), + lt=comp_method(cls, operator.lt, special), + gt=comp_method(cls, operator.gt, special), + le=comp_method(cls, operator.le, special), + ge=comp_method(cls, operator.ge, special), + ) ) - ) if bool_method: new_methods.update( diff --git a/pandas/core/series.py b/pandas/core/series.py index 2b972d33d7cdd..5cc163807fac6 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -191,6 +191,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): hasnans = property( base.IndexOpsMixin.hasnans.func, doc=base.IndexOpsMixin.hasnans.__doc__ ) + __hash__ = generic.NDFrame.__hash__ _mgr: SingleBlockManager div: Callable[["Series", Any], "Series"] rdiv: Callable[["Series", Any], "Series"] @@ -4961,6 +4962,22 @@ def to_period(self, freq=None, copy=True) -> "Series": # Add plotting methods to Series hist = pandas.plotting.hist_series + # ---------------------------------------------------------------------- + # Template-Based Arithmetic/Comparison Methods + + def _cmp_method(self, other, op): + res_name = ops.get_op_result_name(self, other) + + if isinstance(other, Series) and not self._indexed_same(other): + raise ValueError("Can only compare identically-labeled Series objects") + + lvalues = extract_array(self, extract_numpy=True) + rvalues = extract_array(other, extract_numpy=True) + + res_values = ops.comparison_op(lvalues, rvalues, op) + + return self._construct_result(res_values, name=res_name) + Series._add_numeric_operations()
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry xref #31160, cc @simonjayhawkins @WillAyd This implements OpsMixin which is more mypy-friendly than the status quo, and uses it in Series, Index, and DatetimeLikeArrayMixin. There are a handful more places where it can be used in an upcoming pass. Will do the same for arithmetic/boolean ops if there is consensus that this is worth pursuing.
https://api.github.com/repos/pandas-dev/pandas/pulls/36930
2020-10-06T23:52:37Z
2020-10-07T15:10:11Z
2020-10-07T15:10:11Z
2020-10-07T15:16:20Z
TYP: define Index.any, Index.all non-dynamically
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 99d9568926df4..c3d21acbe9e4d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2,7 +2,6 @@ from datetime import datetime from itertools import zip_longest import operator -from textwrap import dedent from typing import ( TYPE_CHECKING, Any, @@ -30,7 +29,7 @@ from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import DuplicateLabelError, InvalidIndexError -from pandas.util._decorators import Appender, Substitution, cache_readonly, doc +from pandas.util._decorators import Appender, cache_readonly, doc from pandas.core.dtypes import concat as _concat from pandas.core.dtypes.cast import ( @@ -61,6 +60,7 @@ is_signed_integer_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype, + needs_i8_conversion, pandas_dtype, validate_all_hashable, ) @@ -5447,28 +5447,61 @@ def _add_numeric_methods(cls): cls._add_numeric_methods_unary() cls._add_numeric_methods_binary() - @classmethod - def _add_logical_methods(cls): - """ - Add in logical methods. + def any(self, *args, **kwargs): """ - _doc = """ - %(desc)s + Return whether any element is Truthy. Parameters ---------- *args - These parameters will be passed to numpy.%(outname)s. + These parameters will be passed to numpy.any. **kwargs - These parameters will be passed to numpy.%(outname)s. + These parameters will be passed to numpy.any. Returns ------- - %(outname)s : bool or array_like (if axis is specified) - A single element array_like may be converted to bool.""" + any : bool or array_like (if axis is specified) + A single element array_like may be converted to bool. - _index_shared_docs["index_all"] = dedent( - """ + See Also + -------- + Index.all : Return whether all elements are True. + Series.all : Return whether all elements are True. 
+ + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity + evaluate to True because these are not equal to zero. + + Examples + -------- + >>> index = pd.Index([0, 1, 2]) + >>> index.any() + True + + >>> index = pd.Index([0, 0, 0]) + >>> index.any() + False + """ + # FIXME: docstr inaccurate, args/kwargs not passed + self._maybe_disable_logical_methods("any") + return np.any(self.values) + + def all(self): + """ + Return whether all elements are Truthy. + + Parameters + ---------- + *args + These parameters will be passed to numpy.all. + **kwargs + These parameters will be passed to numpy.all. + + Returns + ------- + all : bool or array_like (if axis is specified) + A single element array_like may be converted to bool. See Also -------- @@ -5507,65 +5540,24 @@ def _add_logical_methods(cls): >>> pd.Index([0, 0, 0]).any() False """ - ) - - _index_shared_docs["index_any"] = dedent( - """ - - See Also - -------- - Index.all : Return whether all elements are True. - Series.all : Return whether all elements are True. + # FIXME: docstr inaccurate, args/kwargs not passed - Notes - ----- - Not a Number (NaN), positive infinity and negative infinity - evaluate to True because these are not equal to zero. 
+ self._maybe_disable_logical_methods("all") + return np.all(self.values) - Examples - -------- - >>> index = pd.Index([0, 1, 2]) - >>> index.any() - True - - >>> index = pd.Index([0, 0, 0]) - >>> index.any() - False + def _maybe_disable_logical_methods(self, opname: str_t): """ - ) - - def _make_logical_function(name: str_t, desc: str_t, f): - @Substitution(outname=name, desc=desc) - @Appender(_index_shared_docs["index_" + name]) - @Appender(_doc) - def logical_func(self, *args, **kwargs): - result = f(self.values) - if ( - isinstance(result, (np.ndarray, ABCSeries, Index)) - and result.ndim == 0 - ): - # return NumPy type - return result.dtype.type(result.item()) - else: # pragma: no cover - return result - - logical_func.__name__ = name - return logical_func - - cls.all = _make_logical_function( - "all", "Return whether all elements are True.", np.all - ) - cls.any = _make_logical_function( - "any", "Return whether any element is True.", np.any - ) - - @classmethod - def _add_logical_methods_disabled(cls): + raise if this Index subclass does not support any or all. """ - Add in logical methods to disable. 
- """ - cls.all = make_invalid_op("all") - cls.any = make_invalid_op("any") + if ( + isinstance(self, ABCMultiIndex) + or needs_i8_conversion(self.dtype) + or is_interval_dtype(self.dtype) + or is_categorical_dtype(self.dtype) + or is_float_dtype(self.dtype) + ): + # This call will raise + make_invalid_op(opname)(self) @property def shape(self): @@ -5579,7 +5571,6 @@ def shape(self): Index._add_numeric_methods() -Index._add_logical_methods() def ensure_index_from_sequences(sequences, names=None): diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index d3167189dbcc6..8038bc6bf1c72 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -721,6 +721,3 @@ def _wrap_joined_index( name = get_op_result_name(self, other) cat = self._data._from_backing_data(joined) return type(self)._simple_new(cat, name=name) - - -CategoricalIndex._add_logical_methods_disabled() diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 67b71ce63a6e3..8255a4f6875a6 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -871,9 +871,6 @@ def indexer_between_time( return mask.nonzero()[0] -DatetimeIndex._add_logical_methods_disabled() - - def date_range( start=None, end=None, diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index efb8a3e850b1a..93117fbc22752 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1120,9 +1120,6 @@ def __ge__(self, other): return Index.__ge__(self, other) -IntervalIndex._add_logical_methods_disabled() - - def _is_valid_endpoint(endpoint) -> bool: """ Helper for interval_range to check if start/end are valid types. 
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 7e2bad3e4bf93..0604f70316cfb 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3723,7 +3723,6 @@ def _add_numeric_methods_disabled(cls): MultiIndex._add_numeric_methods_disabled() MultiIndex._add_numeric_methods_add_sub_disabled() -MultiIndex._add_logical_methods_disabled() def sparsify_labels(label_list, start: int = 0, sentinel=""): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 34bbaca06cc08..60a206a5344de 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -276,7 +276,6 @@ def _can_union_without_object_cast(self, other) -> bool: Int64Index._add_numeric_methods() -Int64Index._add_logical_methods() _uint64_descr_args = dict( klass="UInt64Index", ltype="unsigned integer", dtype="uint64", extra="" @@ -323,7 +322,6 @@ def _can_union_without_object_cast(self, other) -> bool: UInt64Index._add_numeric_methods() -UInt64Index._add_logical_methods() _float64_descr_args = dict( klass="Float64Index", dtype="float64", ltype="float", extra="" @@ -430,4 +428,3 @@ def _can_union_without_object_cast(self, other) -> bool: Float64Index._add_numeric_methods() -Float64Index._add_logical_methods_disabled() diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index adf7a75b33b38..ce2839ab9a8e1 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -719,9 +719,6 @@ def memory_usage(self, deep: bool = False) -> int: return result -PeriodIndex._add_logical_methods_disabled() - - def period_range( start=None, end=None, periods=None, freq=None, name=None ) -> PeriodIndex: diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 90b713e8f09a9..14146503afd8d 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -804,10 +804,10 @@ def __floordiv__(self, other): # 
-------------------------------------------------------------------- # Reductions - def all(self) -> bool: + def all(self, *args, **kwargs) -> bool: return 0 not in self._range - def any(self) -> bool: + def any(self, *args, **kwargs) -> bool: return any(self._range) # -------------------------------------------------------------------- diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 2a7c624b430ed..302fead8c8b0c 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -261,9 +261,6 @@ def inferred_type(self) -> str: return "timedelta64" -TimedeltaIndex._add_logical_methods_disabled() - - def timedelta_range( start=None, end=None, periods=None, freq=None, name=None, closed=None ) -> TimedeltaIndex:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry xref #31160, cc @simonjayhawkins This unearths 2.5 issues: 1) Index.any/all says that we pass *args/**kwargs to np.any/all, but we don't 1b) the RangeIndex methods don't have args/kwargs at all 2) the subclasses that disable any/all don't totally make sense. In particular it seems like Float64Index should support it and CategoricalIndex should support it iff `self.categories` supports it
https://api.github.com/repos/pandas-dev/pandas/pulls/36929
2020-10-06T23:45:24Z
2020-10-08T11:52:58Z
2020-10-08T11:52:58Z
2020-10-08T15:26:28Z
BUG: Fix duplicates in intersection of multiindexes
diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst index 29b0e99a3a356..6b6a75a2d5fab 100644 --- a/doc/source/whatsnew/v1.1.5.rst +++ b/doc/source/whatsnew/v1.1.5.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in indexing on a :class:`Series` with ``CategoricalDtype`` after unpickling (:issue:`37631`) - Fixed regression in ``df.groupby(..).rolling(..)`` with the resulting :class:`MultiIndex` when grouping by a label that is in the index (:issue:`37641`) - Fixed regression in :meth:`DataFrame.fillna` not filling ``NaN`` after other operations such as :meth:`DataFrame.pivot` (:issue:`36495`). +- Fixed regression in :meth:`MultiIndex.intersection` returning duplicates when at least one of the indexes had duplicates (:issue:`36915`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c49f3f9457161..f746f7a8adbca 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2822,7 +2822,7 @@ def intersection(self, other, sort=False): self._assert_can_do_setop(other) other = ensure_index(other) - if self.equals(other): + if self.equals(other) and not self.has_duplicates: return self._get_reconciled_name_object(other) if not is_dtype_equal(self.dtype, other.dtype): @@ -2847,7 +2847,7 @@ def _intersection(self, other, sort=False): except TypeError: pass else: - return result + return algos.unique1d(result) try: indexer = Index(rvals).get_indexer(lvals) @@ -2858,11 +2858,14 @@ def _intersection(self, other, sort=False): indexer = algos.unique1d(Index(rvals).get_indexer_non_unique(lvals)[0]) indexer = indexer[indexer != -1] - result = other.take(indexer)._values + result = other.take(indexer).unique()._values if sort is None: result = algos.safe_sort(result) + # Intersection has to be unique + assert algos.unique(result).shape == result.shape + return result def difference(self, other, sort=None): diff --git 
a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 9b4b459d9a122..0df8bb437dc9a 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3601,6 +3601,8 @@ def intersection(self, other, sort=False): other, result_names = self._convert_can_do_setop(other) if self.equals(other): + if self.has_duplicates: + return self.unique().rename(result_names) return self.rename(result_names) if not is_object_dtype(other.dtype): @@ -3619,10 +3621,12 @@ def intersection(self, other, sort=False): uniq_tuples = None # flag whether _inner_indexer was successful if self.is_monotonic and other.is_monotonic: try: - uniq_tuples = self._inner_indexer(lvals, rvals)[0] - sort = False # uniq_tuples is already sorted + inner_tuples = self._inner_indexer(lvals, rvals)[0] + sort = False # inner_tuples is already sorted except TypeError: pass + else: + uniq_tuples = algos.unique(inner_tuples) if uniq_tuples is None: other_uniq = set(rvals) diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 2b159c607b0a0..d8b5dba424cbf 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -311,7 +311,10 @@ def should_reindex_frame_op( # TODO: any other cases we should handle here? cols = left.columns.intersection(right.columns) - if len(cols) and not (cols.equals(left.columns) and cols.equals(right.columns)): + # Intersection is always unique so we have to check the unique columns + left_uniques = left.columns.unique() + right_uniques = right.columns.unique() + if len(cols) and not (cols.equals(left_uniques) and cols.equals(right_uniques)): # TODO: is there a shortcut available when len(cols) == 0? 
return True diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 3b755c40721fb..9bb1add309407 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1271,7 +1271,9 @@ def _validate_specification(self): raise MergeError("Must pass left_on or left_index=True") else: # use the common columns - common_cols = self.left.columns.intersection(self.right.columns) + left_cols = self.left.columns + right_cols = self.right.columns + common_cols = left_cols.intersection(right_cols) if len(common_cols) == 0: raise MergeError( "No common columns to perform merge on. " @@ -1280,7 +1282,10 @@ def _validate_specification(self): f"left_index={self.left_index}, " f"right_index={self.right_index}" ) - if not common_cols.is_unique: + if ( + not left_cols.join(common_cols, how="inner").is_unique + or not right_cols.join(common_cols, how="inner").is_unique + ): raise MergeError(f"Data columns not unique: {repr(common_cols)}") self.left_on = self.right_on = common_cols elif self.on is not None: diff --git a/pandas/tests/indexes/base_class/test_setops.py b/pandas/tests/indexes/base_class/test_setops.py index 6413b110dff2e..ddcb3c5b87ebc 100644 --- a/pandas/tests/indexes/base_class/test_setops.py +++ b/pandas/tests/indexes/base_class/test_setops.py @@ -141,7 +141,7 @@ def test_intersection_str_dates(self, sort): @pytest.mark.parametrize( "index2,expected_arr", - [(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B", "A"])], + [(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B"])], ) def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort): # non-monotonic non-unique diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py index 4ac9a27069a3f..2ac57f1befd57 100644 --- a/pandas/tests/indexes/multi/test_setops.py +++ b/pandas/tests/indexes/multi/test_setops.py @@ -378,3 +378,26 @@ def test_setops_disallow_true(method): with pytest.raises(ValueError, 
match="The 'sort' keyword only takes"): getattr(idx1, method)(idx2, sort=True) + + +@pytest.mark.parametrize( + ("tuples", "exp_tuples"), + [ + ([("val1", "test1")], [("val1", "test1")]), + ([("val1", "test1"), ("val1", "test1")], [("val1", "test1")]), + ( + [("val2", "test2"), ("val1", "test1")], + [("val2", "test2"), ("val1", "test1")], + ), + ], +) +def test_intersect_with_duplicates(tuples, exp_tuples): + # GH#36915 + left = MultiIndex.from_tuples(tuples, names=["first", "second"]) + right = MultiIndex.from_tuples( + [("val1", "test1"), ("val1", "test1"), ("val2", "test2")], + names=["first", "second"], + ) + result = left.intersection(right) + expected = MultiIndex.from_tuples(exp_tuples, names=["first", "second"]) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py index 0973cef7cfdc1..2675c4569a8e9 100644 --- a/pandas/tests/indexes/test_setops.py +++ b/pandas/tests/indexes/test_setops.py @@ -120,6 +120,16 @@ def test_dunder_inplace_setops_deprecated(index): index ^= index +@pytest.mark.parametrize("values", [[1, 2, 2, 3], [3, 3]]) +def test_intersection_duplicates(values): + # GH#31326 + a = pd.Index(values) + b = pd.Index([3, 3]) + result = a.intersection(b) + expected = pd.Index([3]) + tm.assert_index_equal(result, expected) + + class TestSetOps: # Set operation tests shared by all indexes in the `index` fixture @pytest.mark.parametrize("case", [0.5, "xxx"]) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index f44909b61ff7a..40ba62a27aa68 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -753,7 +753,7 @@ def test_overlapping_columns_error_message(self): # #2649, #10639 df2.columns = ["key1", "foo", "foo"] - msg = r"Data columns not unique: Index\(\['foo', 'foo'\], dtype='object'\)" + msg = r"Data columns not unique: Index\(\['foo'\], dtype='object'\)" with 
pytest.raises(MergeError, match=msg): merge(df, df2)
- [x] closes #36915 - [x] xref #31326 (closes the intersection part) - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Seems like this was not introduced on purpose. Probably introduced in #31312
https://api.github.com/repos/pandas-dev/pandas/pulls/36927
2020-10-06T20:08:06Z
2020-11-29T17:21:53Z
2020-11-29T17:21:53Z
2020-12-02T12:21:57Z
CLN: Clean groupby tests
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 6783fc5b66433..087b4f64307e6 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -368,6 +368,7 @@ def test_attr_wrapper(ts): # get attribute result = grouped.dtype expected = grouped.agg(lambda x: x.dtype) + tm.assert_series_equal(result, expected) # make sure raises error msg = "'SeriesGroupBy' object has no attribute 'foo'" @@ -1503,7 +1504,7 @@ def test_groupby_reindex_inside_function(): ind = date_range(start="2012/1/1", freq="5min", periods=periods) df = DataFrame({"high": np.arange(periods), "low": np.arange(periods)}, index=ind) - def agg_before(hour, func, fix=False): + def agg_before(func, fix=False): """ Run an aggregate func on the subset of data. """ @@ -1518,13 +1519,9 @@ def _func(data): return _func - def afunc(data): - d = data.select(lambda x: x.hour < 11).dropna() - return np.max(d) - grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day)) - closure_bad = grouped.agg({"high": agg_before(11, np.max)}) - closure_good = grouped.agg({"high": agg_before(11, np.max, True)}) + closure_bad = grouped.agg({"high": agg_before(np.max)}) + closure_good = grouped.agg({"high": agg_before(np.max, True)}) tm.assert_frame_equal(closure_bad, closure_good) diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py index cd6c17955c18d..29a8f883f0ff5 100644 --- a/pandas/tests/groupby/test_groupby_dropna.py +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -199,7 +199,6 @@ def test_slice_groupby_then_transform(dropna, df_expected, s_expected): res = gb_slice.transform(len) tm.assert_frame_equal(res, df_expected) - gb_slice = gb["B"] res = gb["B"].transform(len) tm.assert_series_equal(res, s_expected)
Found a few unused parts while moving tests for another PR
https://api.github.com/repos/pandas-dev/pandas/pulls/36925
2020-10-06T19:09:07Z
2020-10-10T22:53:36Z
2020-10-10T22:53:36Z
2020-10-10T23:31:40Z
TYP: check_untyped_defs core.indexes.base
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 539f5515a2f8b..df7c27c1fc48c 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -14,6 +14,7 @@ Tuple, TypeVar, Union, + cast, ) import warnings @@ -102,7 +103,7 @@ ) if TYPE_CHECKING: - from pandas import RangeIndex, Series + from pandas import MultiIndex, RangeIndex, Series __all__ = ["Index"] @@ -1575,6 +1576,7 @@ def droplevel(self, level=0): "levels: at least one level must be left." ) # The two checks above guarantee that here self is a MultiIndex + self = cast("MultiIndex", self) new_levels = list(self.levels) new_codes = list(self.codes) @@ -3735,6 +3737,8 @@ def _get_leaf_sorter(labels): left, right = right, left how = {"right": "left", "left": "right"}.get(how, how) + assert isinstance(left, MultiIndex) + level = left._get_level_number(level) old_level = left.levels[level] @@ -4780,7 +4784,7 @@ def get_indexer_for(self, target, **kwargs): """ if self._index_as_unique: return self.get_indexer(target, **kwargs) - indexer, _ = self.get_indexer_non_unique(target, **kwargs) + indexer, _ = self.get_indexer_non_unique(target) return indexer @property @@ -5409,24 +5413,24 @@ def _add_numeric_methods_binary(cls): """ Add in numeric methods. 
""" - cls.__add__ = _make_arithmetic_op(operator.add, cls) - cls.__radd__ = _make_arithmetic_op(ops.radd, cls) - cls.__sub__ = _make_arithmetic_op(operator.sub, cls) - cls.__rsub__ = _make_arithmetic_op(ops.rsub, cls) - cls.__rpow__ = _make_arithmetic_op(ops.rpow, cls) - cls.__pow__ = _make_arithmetic_op(operator.pow, cls) - - cls.__truediv__ = _make_arithmetic_op(operator.truediv, cls) - cls.__rtruediv__ = _make_arithmetic_op(ops.rtruediv, cls) - - cls.__mod__ = _make_arithmetic_op(operator.mod, cls) - cls.__rmod__ = _make_arithmetic_op(ops.rmod, cls) - cls.__floordiv__ = _make_arithmetic_op(operator.floordiv, cls) - cls.__rfloordiv__ = _make_arithmetic_op(ops.rfloordiv, cls) - cls.__divmod__ = _make_arithmetic_op(divmod, cls) - cls.__rdivmod__ = _make_arithmetic_op(ops.rdivmod, cls) - cls.__mul__ = _make_arithmetic_op(operator.mul, cls) - cls.__rmul__ = _make_arithmetic_op(ops.rmul, cls) + setattr(cls, "__add__", _make_arithmetic_op(operator.add, cls)) + setattr(cls, "__radd__", _make_arithmetic_op(ops.radd, cls)) + setattr(cls, "__sub__", _make_arithmetic_op(operator.sub, cls)) + setattr(cls, "__rsub__", _make_arithmetic_op(ops.rsub, cls)) + setattr(cls, "__rpow__", _make_arithmetic_op(ops.rpow, cls)) + setattr(cls, "__pow__", _make_arithmetic_op(operator.pow, cls)) + + setattr(cls, "__truediv__", _make_arithmetic_op(operator.truediv, cls)) + setattr(cls, "__rtruediv__", _make_arithmetic_op(ops.rtruediv, cls)) + + setattr(cls, "__mod__", _make_arithmetic_op(operator.mod, cls)) + setattr(cls, "__rmod__", _make_arithmetic_op(ops.rmod, cls)) + setattr(cls, "__floordiv__", _make_arithmetic_op(operator.floordiv, cls)) + setattr(cls, "__rfloordiv__", _make_arithmetic_op(ops.rfloordiv, cls)) + setattr(cls, "__divmod__", _make_arithmetic_op(divmod, cls)) + setattr(cls, "__rdivmod__", _make_arithmetic_op(ops.rdivmod, cls)) + setattr(cls, "__mul__", _make_arithmetic_op(operator.mul, cls)) + setattr(cls, "__rmul__", _make_arithmetic_op(ops.rmul, cls)) @classmethod def 
_add_numeric_methods_unary(cls): @@ -5443,10 +5447,10 @@ def _evaluate_numeric_unary(self): _evaluate_numeric_unary.__name__ = opstr return _evaluate_numeric_unary - cls.__neg__ = _make_evaluate_unary(operator.neg, "__neg__") - cls.__pos__ = _make_evaluate_unary(operator.pos, "__pos__") - cls.__abs__ = _make_evaluate_unary(np.abs, "__abs__") - cls.__inv__ = _make_evaluate_unary(lambda x: -x, "__inv__") + setattr(cls, "__neg__", _make_evaluate_unary(operator.neg, "__neg__")) + setattr(cls, "__pos__", _make_evaluate_unary(operator.pos, "__pos__")) + setattr(cls, "__abs__", _make_evaluate_unary(np.abs, "__abs__")) + setattr(cls, "__inv__", _make_evaluate_unary(lambda x: -x, "__inv__")) @classmethod def _add_numeric_methods(cls): diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index 0d2ca83f1012e..72b07000146b2 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -12,6 +12,7 @@ Mapping, Optional, Sequence, + Sized, Tuple, TypeVar, Union, @@ -503,7 +504,7 @@ def _justify( def format_object_attrs( - obj: Sequence, include_dtype: bool = True + obj: Sized, include_dtype: bool = True ) -> List[Tuple[str, Union[str, int]]]: """ Return a list of tuples of the (attr, formatted_value) @@ -512,7 +513,7 @@ def format_object_attrs( Parameters ---------- obj : object - must be iterable + Must be sized. 
include_dtype : bool If False, dtype won't be in the returned list @@ -523,16 +524,16 @@ def format_object_attrs( """ attrs: List[Tuple[str, Union[str, int]]] = [] if hasattr(obj, "dtype") and include_dtype: - # error: "Sequence[Any]" has no attribute "dtype" + # error: "Sized" has no attribute "dtype" attrs.append(("dtype", f"'{obj.dtype}'")) # type: ignore[attr-defined] if getattr(obj, "name", None) is not None: - # error: "Sequence[Any]" has no attribute "name" + # error: "Sized" has no attribute "name" attrs.append(("name", default_pprint(obj.name))) # type: ignore[attr-defined] - # error: "Sequence[Any]" has no attribute "names" + # error: "Sized" has no attribute "names" elif getattr(obj, "names", None) is not None and any( obj.names # type: ignore[attr-defined] ): - # error: "Sequence[Any]" has no attribute "names" + # error: "Sized" has no attribute "names" attrs.append(("names", default_pprint(obj.names))) # type: ignore[attr-defined] max_seq_items = get_option("display.max_seq_items") or len(obj) if len(obj) > max_seq_items: diff --git a/setup.cfg b/setup.cfg index 8d3d79789a252..b5dce84031516 100644 --- a/setup.cfg +++ b/setup.cfg @@ -172,9 +172,6 @@ check_untyped_defs=False [mypy-pandas.core.groupby.grouper] check_untyped_defs=False -[mypy-pandas.core.indexes.base] -check_untyped_defs=False - [mypy-pandas.core.indexes.category] check_untyped_defs=False
pandas\core\indexes\base.py:975: error: Argument 1 to "format_object_attrs" has incompatible type "Index"; expected "Sequence[Any]" [arg-type] pandas\core\indexes\base.py:1612: error: "Index" has no attribute "levels"; maybe "nlevels"? [attr-defined] pandas\core\indexes\base.py:1613: error: "Index" has no attribute "codes" [attr-defined] pandas\core\indexes\base.py:3763: error: "Index" has no attribute "levels"; maybe "nlevels"? [attr-defined] pandas\core\indexes\base.py:3779: error: "Index" has no attribute "codes" [attr-defined] pandas\core\indexes\base.py:3787: error: "Index" has no attribute "codes" [attr-defined] pandas\core\indexes\base.py:3790: error: "Index" has no attribute "codes" [attr-defined] pandas\core\indexes\base.py:3793: error: "Index" has no attribute "levels"; maybe "nlevels"? [attr-defined] pandas\core\indexes\base.py:3837: error: "Index" has no attribute "codes" [attr-defined] pandas\core\indexes\base.py:3840: error: "Index" has no attribute "codes" [attr-defined] pandas\core\indexes\base.py:4807: error: Too many arguments for "get_indexer_non_unique" of "Index" [call-arg] pandas\core\indexes\base.py:5403: error: Cannot assign to a method [assignment] pandas\core\indexes\base.py:5404: error: Cannot assign to a method [assignment] pandas\core\indexes\base.py:5405: error: Unsupported left operand type for < ("Type[Index]") [operator] pandas\core\indexes\base.py:5406: error: Unsupported left operand type for > ("Type[Index]") [operator] pandas\core\indexes\base.py:5407: error: Unsupported left operand type for <= ("Type[Index]") [operator] pandas\core\indexes\base.py:5408: error: Unsupported left operand type for >= ("Type[Index]") [operator] pandas\core\indexes\base.py:5415: error: Unsupported left operand type for + ("Type[Index]") [operator] pandas\core\indexes\base.py:5416: error: "Type[Index]" has no attribute "__radd__" [attr-defined] pandas\core\indexes\base.py:5417: error: Unsupported left operand type for - ("Type[Index]") [operator] 
pandas\core\indexes\base.py:5418: error: "Type[Index]" has no attribute "__rsub__" [attr-defined] pandas\core\indexes\base.py:5419: error: "Type[Index]" has no attribute "__rpow__" [attr-defined] pandas\core\indexes\base.py:5420: error: Unsupported left operand type for ** ("Type[Index]") [operator] pandas\core\indexes\base.py:5422: error: Unsupported left operand type for / ("Type[Index]") [operator] pandas\core\indexes\base.py:5423: error: "Type[Index]" has no attribute "__rtruediv__" [attr-defined] pandas\core\indexes\base.py:5425: error: Unsupported left operand type for % ("Type[Index]") [operator] pandas\core\indexes\base.py:5426: error: "Type[Index]" has no attribute "__rmod__" [attr-defined] pandas\core\indexes\base.py:5427: error: Unsupported left operand type for // ("Type[Index]") [operator] pandas\core\indexes\base.py:5428: error: "Type[Index]" has no attribute "__rfloordiv__" [attr-defined] pandas\core\indexes\base.py:5429: error: Unsupported left operand type for divmod ("Type[Index]") [operator] pandas\core\indexes\base.py:5430: error: "Type[Index]" has no attribute "__rdivmod__" [attr-defined] pandas\core\indexes\base.py:5431: error: Unsupported left operand type for * ("Type[Index]") [operator] pandas\core\indexes\base.py:5432: error: "Type[Index]" has no attribute "__rmul__" [attr-defined] pandas\core\indexes\base.py:5449: error: Unsupported operand type for unary - ("Type[Index]") [operator] pandas\core\indexes\base.py:5450: error: Unsupported operand type for unary + ("Type[Index]") [operator] pandas\core\indexes\base.py:5451: error: "Type[Index]" has no attribute "__abs__" [attr-defined] pandas\core\indexes\base.py:5452: error: "Type[Index]" has no attribute "__inv__" [attr-defined] pandas\core\indexes\base.py:5564: error: "Type[Index]" has no attribute "all" [attr-defined] pandas\core\indexes\base.py:5567: error: "Type[Index]" has no attribute "any" [attr-defined] pandas\core\indexes\base.py:5576: error: "Type[Index]" has no attribute "all" 
[attr-defined] pandas\core\indexes\base.py:5577: error: "Type[Index]" has no attribute "any" [attr-defined]
https://api.github.com/repos/pandas-dev/pandas/pulls/36924
2020-10-06T19:05:51Z
2020-10-10T15:43:15Z
2020-10-10T15:43:14Z
2020-10-10T16:05:47Z
DOC: add newline to fix code block format in `pandas.DataFrame.to_sql`
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 04e1fc91c5fd4..f0fb34dadb257 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2690,6 +2690,7 @@ def to_sql( [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] An `sqlalchemy.engine.Connection` can also be passed to to `con`: + >>> with engine.begin() as connection: ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) ... df1.to_sql('users', con=connection, if_exists='append')
- [x] closes #36922 - [x] tests added / passed n/a (didn't add any new tests/change code) - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry not sure if i need to add a whatsnew entry to 1.1.4 or 1.2.0... would love to get a hacktoberfest tag if your org supports this. #36837
https://api.github.com/repos/pandas-dev/pandas/pulls/36923
2020-10-06T18:02:41Z
2020-10-06T23:04:35Z
2020-10-06T23:04:34Z
2020-10-07T02:48:19Z
TYP: check_untyped_defs core.groupby.ops
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index d16ffbca16a3d..1c18ef891b8c5 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -7,7 +7,7 @@ """ import collections -from typing import Dict, Hashable, List, Optional, Sequence, Tuple, Type +from typing import Dict, Generic, Hashable, List, Optional, Sequence, Tuple, Type import numpy as np @@ -866,7 +866,7 @@ def _is_indexed_like(obj, axes, axis: int) -> bool: # Splitting / application -class DataSplitter: +class DataSplitter(Generic[FrameOrSeries]): def __init__(self, data: FrameOrSeries, labels, ngroups: int, axis: int = 0): self.data = data self.labels = ensure_int64(labels) diff --git a/setup.cfg b/setup.cfg index 8ec10e7db5a5c..ee28646d722f2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -172,9 +172,6 @@ check_untyped_defs=False [mypy-pandas.core.groupby.grouper] check_untyped_defs=False -[mypy-pandas.core.groupby.ops] -check_untyped_defs=False - [mypy-pandas.core.indexes.base] check_untyped_defs=False
pandas\core\groupby\ops.py:889: error: Need type annotation for 'sdata' [var-annotated]
https://api.github.com/repos/pandas-dev/pandas/pulls/36921
2020-10-06T17:47:20Z
2020-10-07T14:56:43Z
2020-10-07T14:56:43Z
2020-10-07T15:45:08Z
TYP: check_untyped_defs core.computation.pytables
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index d876c655421ef..dd622ed724e8f 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -42,7 +42,10 @@ class Term(ops.Term): env: PyTablesScope def __new__(cls, name, env, side=None, encoding=None): - klass = Constant if not isinstance(name, str) else cls + if isinstance(name, str): + klass = cls + else: + klass = Constant return object.__new__(klass) def __init__(self, name, env: PyTablesScope, side=None, encoding=None): @@ -83,6 +86,7 @@ class BinOp(ops.BinOp): op: str queryables: Dict[str, Any] + condition: Optional[str] def __init__(self, op: str, lhs, rhs, queryables: Dict[str, Any], encoding): super().__init__(op, lhs, rhs) @@ -184,10 +188,8 @@ def convert_value(self, v) -> "TermValue": def stringify(value): if self.encoding is not None: - encoder = partial(pprint_thing_encoded, encoding=self.encoding) - else: - encoder = pprint_thing - return encoder(value) + return pprint_thing_encoded(value, encoding=self.encoding) + return pprint_thing(value) kind = ensure_decoded(self.kind) meta = ensure_decoded(self.meta) @@ -257,9 +259,11 @@ def __repr__(self) -> str: def invert(self): """ invert the filter """ if self.filter is not None: - f = list(self.filter) - f[1] = self.generate_filter_op(invert=True) - self.filter = tuple(f) + self.filter = ( + self.filter[0], + self.generate_filter_op(invert=True), + self.filter[2], + ) return self def format(self): diff --git a/setup.cfg b/setup.cfg index e125eea226b10..1ddfd900fd1b7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -160,9 +160,6 @@ check_untyped_defs=False [mypy-pandas.core.computation.ops] check_untyped_defs=False -[mypy-pandas.core.computation.pytables] -check_untyped_defs=False - [mypy-pandas.core.computation.scope] check_untyped_defs=False
pandas\core\computation\pytables.py:46: error: Argument 1 to "__new__" of "object" has incompatible type "object"; expected "Type[object]" [arg-type] pandas\core\computation\pytables.py:189: error: Incompatible types in assignment (expression has type "Callable[[Any, int, Union[Mapping[str, str], Iterable[str], None], bool, bool, Optional[int]], str]", variable has type "partial[bytes]") [assignment] pandas\core\computation\pytables.py:262: error: Incompatible types in assignment (expression has type "Tuple[Any, ...]", variable has type "Optional[Tuple[Any, Any, Index]]") [assignment] pandas\core\computation\pytables.py:351: error: Incompatible types in assignment (expression has type "str", variable has type "None") [assignment] pandas\core\computation\pytables.py:357: error: Incompatible types in assignment (expression has type "str", variable has type "None") [assignment] pandas\core\computation\pytables.py:364: error: Incompatible types in assignment (expression has type "str", variable has type "None") [assignment]
https://api.github.com/repos/pandas-dev/pandas/pulls/36920
2020-10-06T17:23:38Z
2020-10-06T18:19:38Z
2020-10-06T18:19:38Z
2020-10-06T19:23:53Z
TYP: check_untyped_defs core.computation.expressions
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 5bfd2e93a9247..23cf3019df461 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -248,6 +248,7 @@ def where(cond, a, b, use_numexpr=True): use_numexpr : bool, default True Whether to try to use numexpr. """ + assert _where is not None return _where(cond, a, b) if use_numexpr else _where_standard(cond, a, b) diff --git a/setup.cfg b/setup.cfg index e125eea226b10..742bff79a3238 100644 --- a/setup.cfg +++ b/setup.cfg @@ -154,9 +154,6 @@ check_untyped_defs=False [mypy-pandas.core.computation.expr] check_untyped_defs=False -[mypy-pandas.core.computation.expressions] -check_untyped_defs=False - [mypy-pandas.core.computation.ops] check_untyped_defs=False
pandas\core\computation\expressions.py:251: error: "None" not callable [misc]
https://api.github.com/repos/pandas-dev/pandas/pulls/36917
2020-10-06T15:12:29Z
2020-10-06T18:03:14Z
2020-10-06T18:03:13Z
2020-10-06T19:25:07Z
BUG: Fixed IntegerArray.__array_ufunc__ with nout
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 6a5b4b3b9ff16..2b0aea12bea42 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -367,7 +367,7 @@ ExtensionArray - Fixed Bug where :class:`DataFrame` column set to scalar extension type via a dict instantion was considered an object type rather than the extension type (:issue:`35965`) - Fixed bug where ``astype()`` with equal dtype and ``copy=False`` would return a new object (:issue:`284881`) -- +- Fixed bug when applying a NumPy ufunc with multiple outputs to a :class:`pandas.arrays.IntegerArray` returning None (:issue:`36913`) Other diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 94af013d6df2c..05c3a0517078a 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -427,7 +427,7 @@ def reconstruct(x): result = getattr(ufunc, method)(*inputs2, **kwargs) if isinstance(result, tuple): - tuple(reconstruct(x) for x in result) + return tuple(reconstruct(x) for x in result) else: return reconstruct(result) diff --git a/pandas/tests/arrays/integer/test_function.py b/pandas/tests/arrays/integer/test_function.py index a81434339fdae..8d7f620327f31 100644 --- a/pandas/tests/arrays/integer/test_function.py +++ b/pandas/tests/arrays/integer/test_function.py @@ -64,6 +64,20 @@ def test_ufuncs_binary_int(ufunc): tm.assert_extension_array_equal(result, expected) +def test_ufunc_binary_output(): + a = integer_array([1, 2, np.nan]) + result = np.modf(a) + expected = np.modf(a.to_numpy(na_value=np.nan, dtype="float")) + + assert isinstance(result, tuple) + assert len(result) == 2 + + for x, y in zip(result, expected): + # TODO(FloatArray): This will return an extension array. + # y = integer_array(y) + tm.assert_numpy_array_equal(x, y) + + @pytest.mark.parametrize("values", [[0, 1], [0, None]]) def test_ufunc_reduce_raises(values): a = integer_array(values)
We forgot to return.
https://api.github.com/repos/pandas-dev/pandas/pulls/36913
2020-10-06T11:52:35Z
2020-10-06T18:41:36Z
2020-10-06T18:41:36Z
2020-10-06T18:42:14Z
BUG: RollingGroupby not respecting sort=False
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index d0d03021629c6..f9127ee8d13e7 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression where attempting to mutate a :class:`DateOffset` object would no longer raise an ``AttributeError`` (:issue:`36940`) +- Fixed regression in :class:`RollingGroupby` with ``sort=False`` not being respected (:issue:`36889`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index a509acb3604e1..9f0d953a2cc71 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -563,8 +563,13 @@ def indices(self): if isinstance(self.grouper, ops.BaseGrouper): return self.grouper.indices - values = Categorical(self.grouper) - return values._reverse_indexer() + # Return a dictionary of {group label: [indices belonging to the group label]} + # respecting whether sort was specified + codes, uniques = algorithms.factorize(self.grouper, sort=self.sort) + return { + category: np.flatnonzero(codes == i) + for i, category in enumerate(Index(uniques)) + } @property def codes(self) -> np.ndarray: diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index f0e8b39464a9f..d69ee72a00aee 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -457,3 +457,18 @@ def test_groupby_rolling_string_index(self): columns=["index", "group", "eventTime", "count_to_date"], ).set_index(["group", "index"]) tm.assert_frame_equal(result, expected) + + def test_groupby_rolling_no_sort(self): + # GH 36889 + result = ( + pd.DataFrame({"foo": [2, 1], "bar": [2, 1]}) + .groupby("foo", sort=False) + .rolling(1) + .min() + ) + expected = pd.DataFrame( + np.array([[2.0, 2.0], [1.0, 1.0]]), + columns=["foo", "bar"], + 
index=pd.MultiIndex.from_tuples([(2, 0), (1, 1)], names=["foo", None]), + ) + tm.assert_frame_equal(result, expected)
- [x] closes #36889 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36911
2020-10-06T06:14:54Z
2020-10-09T00:46:16Z
2020-10-09T00:46:15Z
2020-10-10T09:54:00Z
BUG: IntervalArray.__eq__ not deferring to Series
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 74534bc371094..f77a55b95d567 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -368,6 +368,7 @@ Numeric - Bug in :class:`IntegerArray` multiplication with ``timedelta`` and ``np.timedelta64`` objects (:issue:`36870`) - Bug in :meth:`DataFrame.diff` with ``datetime64`` dtypes including ``NaT`` values failing to fill ``NaT`` results correctly (:issue:`32441`) - Bug in :class:`DataFrame` arithmetic ops incorrectly accepting keyword arguments (:issue:`36843`) +- Bug in :class:`IntervalArray` comparisons with :class:`Series` not returning :class:`Series` (:issue:`36908`) Conversion ^^^^^^^^^^ diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index d943fe3df88c5..09488b9576212 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -48,6 +48,7 @@ from pandas.core.construction import array, extract_array from pandas.core.indexers import check_array_indexer from pandas.core.indexes.base import ensure_index +from pandas.core.ops import unpack_zerodim_and_defer if TYPE_CHECKING: from pandas import Index @@ -519,6 +520,7 @@ def __setitem__(self, key, value): self._left[key] = value_left self._right[key] = value_right + @unpack_zerodim_and_defer("__eq__") def __eq__(self, other): # ensure pandas array for list-like and eliminate non-interval scalars if is_list_like(other): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b3f5fb6f0291a..7d3c2c2297d5d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -27,7 +27,6 @@ from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import tz_compare from pandas._typing import AnyArrayLike, Dtype, DtypeObj, Label -from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import DuplicateLabelError, InvalidIndexError from 
pandas.util._decorators import Appender, cache_readonly, doc @@ -68,7 +67,6 @@ from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCCategorical, - ABCDataFrame, ABCDatetimeIndex, ABCMultiIndex, ABCPandasArray, @@ -122,22 +120,6 @@ str_t = str -def _make_arithmetic_op(op, cls): - def index_arithmetic_method(self, other): - if isinstance(other, (ABCSeries, ABCDataFrame, ABCTimedeltaIndex)): - return NotImplemented - - from pandas import Series - - result = op(Series(self), other) - if isinstance(result, tuple): - return (Index(result[0]), Index(result[1])) - return Index(result) - - name = f"__{op.__name__}__" - return set_function_name(index_arithmetic_method, name, cls) - - _o_dtype = np.dtype(object) _Identity = object @@ -5380,7 +5362,7 @@ def _cmp_method(self, other, op): Wrapper used to dispatch comparison operations. """ if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)): - if other.ndim > 0 and len(self) != len(other): + if len(self) != len(other): raise ValueError("Lengths must match to compare") if is_object_dtype(self.dtype) and isinstance(other, ABCCategorical): diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py index 72ef7ea6bf8ca..03cc4fe2bdcb5 100644 --- a/pandas/tests/arithmetic/test_interval.py +++ b/pandas/tests/arithmetic/test_interval.py @@ -103,7 +103,10 @@ def elementwise_comparison(self, op, array, other): Helper that performs elementwise comparisons between `array` and `other` """ other = other if is_list_like(other) else [other] * len(array) - return np.array([op(x, y) for x, y in zip(array, other)]) + expected = np.array([op(x, y) for x, y in zip(array, other)]) + if isinstance(other, Series): + return Series(expected, index=other.index) + return expected def test_compare_scalar_interval(self, op, array): # matches first interval @@ -161,19 +164,19 @@ def test_compare_list_like_interval(self, op, array, interval_constructor): other = 
interval_constructor(array.left, array.right) result = op(array, other) expected = self.elementwise_comparison(op, array, other) - tm.assert_numpy_array_equal(result, expected) + tm.assert_equal(result, expected) # different endpoints other = interval_constructor(array.left[::-1], array.right[::-1]) result = op(array, other) expected = self.elementwise_comparison(op, array, other) - tm.assert_numpy_array_equal(result, expected) + tm.assert_equal(result, expected) # all nan endpoints other = interval_constructor([np.nan] * 4, [np.nan] * 4) result = op(array, other) expected = self.elementwise_comparison(op, array, other) - tm.assert_numpy_array_equal(result, expected) + tm.assert_equal(result, expected) def test_compare_list_like_interval_mixed_closed( self, op, interval_constructor, closed, other_closed @@ -183,7 +186,7 @@ def test_compare_list_like_interval_mixed_closed( result = op(array, other) expected = self.elementwise_comparison(op, array, other) - tm.assert_numpy_array_equal(result, expected) + tm.assert_equal(result, expected) @pytest.mark.parametrize( "other",
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36908
2020-10-06T02:29:29Z
2020-10-13T16:27:55Z
2020-10-13T16:27:55Z
2020-10-13T16:28:09Z
CLN: remove inplace kwarg from NDFrame._consolidate
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 04e1fc91c5fd4..8ccce9e5e451b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5447,27 +5447,18 @@ def f(): self._protect_consolidate(f) - def _consolidate(self, inplace: bool_t = False): + def _consolidate(self): """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). - Parameters - ---------- - inplace : bool, default False - If False return new object, otherwise modify existing object. - Returns ------- consolidated : same type as caller """ - inplace = validate_bool_kwarg(inplace, "inplace") - if inplace: - self._consolidate_inplace() - else: - f = lambda: self._mgr.consolidate() - cons_data = self._protect_consolidate(f) - return self._constructor(cons_data).__finalize__(self) + f = lambda: self._mgr.consolidate() + cons_data = self._protect_consolidate(f) + return self._constructor(cons_data).__finalize__(self) @property def _is_mixed_type(self) -> bool_t: diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index a07c7b49ac55b..5a54e4e05dbad 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -360,7 +360,7 @@ def __init__( raise TypeError(msg) # consolidate - obj._consolidate(inplace=True) + obj._consolidate_inplace() ndims.add(obj.ndim) # get the sample diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 1e404c572dd51..f5d2bd27762ef 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -64,7 +64,7 @@ def test_consolidate(self, float_frame): float_frame["F"] = 8.0 assert len(float_frame._mgr.blocks) == 3 - return_value = float_frame._consolidate(inplace=True) + return_value = float_frame._consolidate_inplace() assert return_value is None assert len(float_frame._mgr.blocks) == 1 diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py 
index 31501f20db453..ad0d1face53cf 100644 --- a/pandas/tests/generic/test_frame.py +++ b/pandas/tests/generic/test_frame.py @@ -189,9 +189,6 @@ def test_validate_bool_args(self, value): with pytest.raises(ValueError, match=msg): super(DataFrame, df).drop("a", axis=1, inplace=value) - with pytest.raises(ValueError, match=msg): - super(DataFrame, df)._consolidate(inplace=value) - with pytest.raises(ValueError, match=msg): super(DataFrame, df).fillna(value=0, inplace=value)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36906
2020-10-06T00:45:57Z
2020-10-12T14:55:03Z
2020-10-12T14:55:03Z
2020-10-12T15:23:22Z
CLN: value -> key
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 413430942575d..94c6c5aed9c0d 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -544,10 +544,10 @@ def __iter__(self): def __len__(self) -> int: return len(self._left) - def __getitem__(self, value): - value = check_array_indexer(self, value) - left = self._left[value] - right = self._right[value] + def __getitem__(self, key): + key = check_array_indexer(self, key) + left = self._left[key] + right = self._right[key] if not isinstance(left, (np.ndarray, ExtensionArray)): # scalar
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36905
2020-10-06T00:45:29Z
2020-10-06T03:08:08Z
2020-10-06T03:08:08Z
2020-10-06T15:08:57Z
TYP: io.json._json, util._decorators
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index ef684469dffbb..3cf3c2e7fd91d 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -3,7 +3,7 @@ from io import BytesIO, StringIO from itertools import islice import os -from typing import IO, Any, Callable, List, Optional, Type +from typing import IO, Any, Callable, List, Optional, Tuple, Type import numpy as np @@ -111,6 +111,8 @@ def to_json( class Writer: + _default_orient: str + def __init__( self, obj, @@ -126,8 +128,7 @@ def __init__( self.obj = obj if orient is None: - # error: "Writer" has no attribute "_default_orient" - orient = self._default_orient # type: ignore[attr-defined] + orient = self._default_orient self.orient = orient self.date_format = date_format @@ -777,8 +778,8 @@ def read(self): obj = self._get_object_parser(lines_json) else: data = ensure_str(self.data) - data = data.split("\n") - obj = self._get_object_parser(self._combine_lines(data)) + data_lines = data.split("\n") + obj = self._get_object_parser(self._combine_lines(data_lines)) else: obj = self._get_object_parser(self.data) self.close() @@ -848,6 +849,8 @@ def __next__(self): class Parser: + _split_keys: Tuple[str, ...] 
+ _default_orient: str _STAMP_UNITS = ("s", "ms", "us", "ns") _MIN_STAMPS = { @@ -873,6 +876,7 @@ def __init__( if orient is None: orient = self._default_orient + self.orient = orient self.dtype = dtype @@ -902,8 +906,8 @@ def check_keys_split(self, decoded): """ bad_keys = set(decoded.keys()).difference(set(self._split_keys)) if bad_keys: - bad_keys = ", ".join(bad_keys) - raise ValueError(f"JSON data had unexpected key(s): {bad_keys}") + bad_keys_joined = ", ".join(bad_keys) + raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}") def parse(self): @@ -922,14 +926,22 @@ def parse(self): self._try_convert_types() return self.obj + def _parse_numpy(self): + raise AbstractMethodError(self) + + def _parse_no_numpy(self): + raise AbstractMethodError(self) + def _convert_axes(self): """ Try to convert axes. """ - for axis_name in self.obj._AXIS_ORDERS: + obj = self.obj + assert obj is not None # for mypy + for axis_name in obj._AXIS_ORDERS: new_axis, result = self._try_convert_data( name=axis_name, - data=self.obj._get_axis(axis_name), + data=obj._get_axis(axis_name), use_dtypes=False, convert_dates=True, ) @@ -1083,7 +1095,11 @@ def _parse_numpy(self): self.check_keys_split(decoded) self.obj = create_series_with_explicit_dtype(**decoded) elif self.orient in ["columns", "index"]: - self.obj = create_series_with_explicit_dtype(*data, dtype_if_empty=object) + # error: "create_series_with_explicit_dtype" + # gets multiple values for keyword argument "dtype_if_empty + self.obj = create_series_with_explicit_dtype( + *data, dtype_if_empty=object + ) # type:ignore[misc] else: self.obj = create_series_with_explicit_dtype(data, dtype_if_empty=object) @@ -1175,9 +1191,12 @@ def _process_converter(self, f, filt=None): if filt is None: filt = lambda col, c: True + obj = self.obj + assert obj is not None # for mypy + needs_new_obj = False new_obj = dict() - for i, (col, c) in enumerate(self.obj.items()): + for i, (col, c) in enumerate(obj.items()): if filt(col, c): 
new_data, result = f(col, c) if result: @@ -1188,9 +1207,9 @@ def _process_converter(self, f, filt=None): if needs_new_obj: # possibly handle dup columns - new_obj = DataFrame(new_obj, index=self.obj.index) - new_obj.columns = self.obj.columns - self.obj = new_obj + new_frame = DataFrame(new_obj, index=obj.index) + new_frame.columns = obj.columns + self.obj = new_frame def _try_convert_types(self): if self.obj is None: diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 3db7c38eced65..27c7b931b7136 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -2,7 +2,7 @@ import datetime as pydt from datetime import datetime, timedelta, tzinfo import functools -from typing import Any, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple from dateutil.relativedelta import relativedelta import matplotlib.dates as dates @@ -1002,7 +1002,7 @@ def __init__( self.format = None self.freq = freq self.locs: List[Any] = [] # unused, for matplotlib compat - self.formatdict = None + self.formatdict: Optional[Dict[Any, Any]] = None self.isminor = minor_locator self.isdynamic = dynamic_mode self.offset = 0 diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index f81bca7e85156..d002e8a4ebd43 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -278,6 +278,9 @@ def decorate(func): allow_args = allowed_args else: spec = inspect.getfullargspec(func) + + # We must have some defaults if we are deprecating default-less + assert spec.defaults is not None # for mypy allow_args = spec.args[: -len(spec.defaults)] @wraps(func) diff --git a/setup.cfg b/setup.cfg index 8d3d79789a252..cd20249728062 100644 --- a/setup.cfg +++ b/setup.cfg @@ -238,9 +238,6 @@ check_untyped_defs=False [mypy-pandas.io.formats.style] check_untyped_defs=False -[mypy-pandas.io.json._json] -check_untyped_defs=False - [mypy-pandas.io.parsers] check_untyped_defs=False 
@@ -264,6 +261,3 @@ check_untyped_defs=False [mypy-pandas.plotting._misc] check_untyped_defs=False - -[mypy-pandas.util._decorators] -check_untyped_defs=False
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36903
2020-10-05T23:22:21Z
2020-10-10T10:38:36Z
2020-10-10T10:38:36Z
2020-10-10T14:42:36Z
CLN: standardize fixture usage in datetimelike array tests
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 3f5ab5baa7d69..c50927eb66a52 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -17,7 +17,12 @@ # TODO: more freq variants @pytest.fixture(params=["D", "B", "W", "M", "Q", "Y"]) -def period_index(request): +def freqstr(request): + return request.param + + +@pytest.fixture +def period_index(freqstr): """ A fixture to provide PeriodIndex objects with different frequencies. @@ -25,14 +30,13 @@ def period_index(request): so here we just test that the PeriodArray behavior matches the PeriodIndex behavior. """ - freqstr = request.param # TODO: non-monotone indexes; NaTs, different start dates pi = pd.period_range(start=pd.Timestamp("2000-01-01"), periods=100, freq=freqstr) return pi -@pytest.fixture(params=["D", "B", "W", "M", "Q", "Y"]) -def datetime_index(request): +@pytest.fixture +def datetime_index(freqstr): """ A fixture to provide DatetimeIndex objects with different frequencies. @@ -40,14 +44,13 @@ def datetime_index(request): so here we just test that the DatetimeArray behavior matches the DatetimeIndex behavior. """ - freqstr = request.param # TODO: non-monotone indexes; NaTs, different start dates, timezones dti = pd.date_range(start=pd.Timestamp("2000-01-01"), periods=100, freq=freqstr) return dti @pytest.fixture -def timedelta_index(request): +def timedelta_index(): """ A fixture to provide TimedeltaIndex objects with different frequencies. 
Most TimedeltaArray behavior is already tested in TimedeltaIndex tests, @@ -438,16 +441,15 @@ class TestDatetimeArray(SharedTests): dtype = pd.Timestamp @pytest.fixture - def arr1d(self, tz_naive_fixture): + def arr1d(self, tz_naive_fixture, freqstr): tz = tz_naive_fixture - dti = pd.date_range("2016-01-01 01:01:00", periods=3, freq="H", tz=tz) + dti = pd.date_range("2016-01-01 01:01:00", periods=3, freq=freqstr, tz=tz) dta = dti._data return dta - def test_round(self, tz_naive_fixture): + def test_round(self, arr1d): # GH#24064 - tz = tz_naive_fixture - dti = pd.date_range("2016-01-01 01:01:00", periods=3, freq="H", tz=tz) + dti = self.index_cls(arr1d) result = dti.round(freq="2T") expected = dti - pd.Timedelta(minutes=1) @@ -501,11 +503,10 @@ def test_array_interface(self, datetime_index): expected = np.asarray(arr).astype(dtype) tm.assert_numpy_array_equal(result, expected) - def test_array_object_dtype(self, tz_naive_fixture): + def test_array_object_dtype(self, arr1d): # GH#23524 - tz = tz_naive_fixture - dti = pd.date_range("2016-01-01", periods=3, tz=tz) - arr = DatetimeArray(dti) + arr = arr1d + dti = self.index_cls(arr1d) expected = np.array(list(dti)) @@ -516,11 +517,10 @@ def test_array_object_dtype(self, tz_naive_fixture): result = np.array(dti, dtype=object) tm.assert_numpy_array_equal(result, expected) - def test_array_tz(self, tz_naive_fixture): + def test_array_tz(self, arr1d): # GH#23524 - tz = tz_naive_fixture - dti = pd.date_range("2016-01-01", periods=3, tz=tz) - arr = DatetimeArray(dti) + arr = arr1d + dti = self.index_cls(arr1d) expected = dti.asi8.view("M8[ns]") result = np.array(arr, dtype="M8[ns]") @@ -537,10 +537,9 @@ def test_array_tz(self, tz_naive_fixture): assert result.base is expected.base assert result.base is not None - def test_array_i8_dtype(self, tz_naive_fixture): - tz = tz_naive_fixture - dti = pd.date_range("2016-01-01", periods=3, tz=tz) - arr = DatetimeArray(dti) + def test_array_i8_dtype(self, arr1d): + arr = arr1d + dti = 
self.index_cls(arr1d) expected = dti.asi8 result = np.array(arr, dtype="i8") @@ -563,10 +562,9 @@ def test_from_array_keeps_base(self): dta = DatetimeArray(arr[:0]) assert dta._data.base is arr - def test_from_dti(self, tz_naive_fixture): - tz = tz_naive_fixture - dti = pd.date_range("2016-01-01", periods=3, tz=tz) - arr = DatetimeArray(dti) + def test_from_dti(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) assert list(dti) == list(arr) # Check that Index.__new__ knows what to do with DatetimeArray @@ -574,16 +572,15 @@ def test_from_dti(self, tz_naive_fixture): assert isinstance(dti2, pd.DatetimeIndex) assert list(dti2) == list(arr) - def test_astype_object(self, tz_naive_fixture): - tz = tz_naive_fixture - dti = pd.date_range("2016-01-01", periods=3, tz=tz) - arr = DatetimeArray(dti) + def test_astype_object(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) + asobj = arr.astype("O") assert isinstance(asobj, np.ndarray) assert asobj.dtype == "O" assert list(asobj) == list(dti) - @pytest.mark.parametrize("freqstr", ["D", "B", "W", "M", "Q", "Y"]) def test_to_perioddelta(self, datetime_index, freqstr): # GH#23113 dti = datetime_index @@ -602,7 +599,6 @@ def test_to_perioddelta(self, datetime_index, freqstr): # an EA-specific tm.assert_ function tm.assert_index_equal(pd.Index(result), pd.Index(expected)) - @pytest.mark.parametrize("freqstr", ["D", "B", "W", "M", "Q", "Y"]) def test_to_period(self, datetime_index, freqstr): dti = datetime_index arr = DatetimeArray(dti) @@ -616,10 +612,10 @@ def test_to_period(self, datetime_index, freqstr): tm.assert_index_equal(pd.Index(result), pd.Index(expected)) @pytest.mark.parametrize("propname", pd.DatetimeIndex._bool_ops) - def test_bool_properties(self, datetime_index, propname): + def test_bool_properties(self, arr1d, propname): # in this case _bool_ops is just `is_leap_year` - dti = datetime_index - arr = DatetimeArray(dti) + dti = self.index_cls(arr1d) + arr = arr1d assert dti.freq == arr.freq result = 
getattr(arr, propname) @@ -628,21 +624,21 @@ def test_bool_properties(self, datetime_index, propname): tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize("propname", pd.DatetimeIndex._field_ops) - def test_int_properties(self, datetime_index, propname): + def test_int_properties(self, arr1d, propname): if propname in ["week", "weekofyear"]: # GH#33595 Deprecate week and weekofyear return - dti = datetime_index - arr = DatetimeArray(dti) + dti = self.index_cls(arr1d) + arr = arr1d result = getattr(arr, propname) expected = np.array(getattr(dti, propname), dtype=result.dtype) tm.assert_numpy_array_equal(result, expected) - def test_take_fill_valid(self, datetime_index, tz_naive_fixture): - dti = datetime_index.tz_localize(tz_naive_fixture) - arr = DatetimeArray(dti) + def test_take_fill_valid(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) now = pd.Timestamp.now().tz_localize(dti.tz) result = arr.take([-1, 1], allow_fill=True, fill_value=now) @@ -677,10 +673,9 @@ def test_take_fill_valid(self, datetime_index, tz_naive_fixture): # require appropriate-dtype if we have a NA value arr.take([-1, 1], allow_fill=True, fill_value=value) - def test_concat_same_type_invalid(self, datetime_index): + def test_concat_same_type_invalid(self, arr1d): # different timezones - dti = datetime_index - arr = DatetimeArray(dti) + arr = arr1d if arr.tz is None: other = arr.tz_localize("UTC") @@ -708,8 +703,8 @@ def test_concat_same_type_different_freq(self): tm.assert_datetime_array_equal(result, expected) - def test_strftime(self, datetime_index): - arr = DatetimeArray(datetime_index) + def test_strftime(self, arr1d): + arr = arr1d result = arr.strftime("%Y %b") expected = np.array([ts.strftime("%Y %b") for ts in arr], dtype=object) @@ -854,9 +849,9 @@ class TestPeriodArray(SharedTests): def arr1d(self, period_index): return period_index._data - def test_from_pi(self, period_index): - pi = period_index - arr = PeriodArray(pi) + def test_from_pi(self, arr1d): 
+ pi = self.index_cls(arr1d) + arr = arr1d assert list(arr) == list(pi) # Check that Index.__new__ knows what to do with PeriodArray @@ -864,17 +859,16 @@ def test_from_pi(self, period_index): assert isinstance(pi2, pd.PeriodIndex) assert list(pi2) == list(arr) - def test_astype_object(self, period_index): - pi = period_index - arr = PeriodArray(pi) + def test_astype_object(self, arr1d): + pi = self.index_cls(arr1d) + arr = arr1d asobj = arr.astype("O") assert isinstance(asobj, np.ndarray) assert asobj.dtype == "O" assert list(asobj) == list(pi) - def test_take_fill_valid(self, period_index): - pi = period_index - arr = PeriodArray(pi) + def test_take_fill_valid(self, arr1d): + arr = arr1d value = pd.NaT.value msg = f"'fill_value' should be a {self.dtype}. Got '{value}'." @@ -889,9 +883,9 @@ def test_take_fill_valid(self, period_index): arr.take([-1, 1], allow_fill=True, fill_value=value) @pytest.mark.parametrize("how", ["S", "E"]) - def test_to_timestamp(self, how, period_index): - pi = period_index - arr = PeriodArray(pi) + def test_to_timestamp(self, how, arr1d): + pi = self.index_cls(arr1d) + arr = arr1d expected = DatetimeArray(pi.to_timestamp(how=how)) result = arr.to_timestamp(how=how) @@ -912,10 +906,10 @@ def test_to_timestamp_out_of_bounds(self): pi._data.to_timestamp() @pytest.mark.parametrize("propname", PeriodArray._bool_ops) - def test_bool_properties(self, period_index, propname): + def test_bool_properties(self, arr1d, propname): # in this case _bool_ops is just `is_leap_year` - pi = period_index - arr = PeriodArray(pi) + pi = self.index_cls(arr1d) + arr = arr1d result = getattr(arr, propname) expected = np.array(getattr(pi, propname)) @@ -923,17 +917,17 @@ def test_bool_properties(self, period_index, propname): tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize("propname", PeriodArray._field_ops) - def test_int_properties(self, period_index, propname): - pi = period_index - arr = PeriodArray(pi) + def test_int_properties(self, 
arr1d, propname): + pi = self.index_cls(arr1d) + arr = arr1d result = getattr(arr, propname) expected = np.array(getattr(pi, propname)) tm.assert_numpy_array_equal(result, expected) - def test_array_interface(self, period_index): - arr = PeriodArray(period_index) + def test_array_interface(self, arr1d): + arr = arr1d # default asarray gives objects result = np.asarray(arr) @@ -956,8 +950,8 @@ def test_array_interface(self, period_index): expected = np.asarray(arr).astype("S20") tm.assert_numpy_array_equal(result, expected) - def test_strftime(self, period_index): - arr = PeriodArray(period_index) + def test_strftime(self, arr1d): + arr = arr1d result = arr.strftime("%Y") expected = np.array([per.strftime("%Y") for per in arr], dtype=object) diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index a32529cb58ba3..b3b8f4d55e4de 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -61,6 +61,7 @@ def test_copy(self): class TestTimedeltaArray: + # TODO: de-duplicate with test_npsum below def test_np_sum(self): # GH#25282 vals = np.arange(5, dtype=np.int64).view("m8[h]").astype("m8[ns]") @@ -76,35 +77,6 @@ def test_from_sequence_dtype(self): with pytest.raises(ValueError, match=msg): TimedeltaArray._from_sequence([], dtype=object) - def test_abs(self): - vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]") - arr = TimedeltaArray(vals) - - evals = np.array([3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]") - expected = TimedeltaArray(evals) - - result = abs(arr) - tm.assert_timedelta_array_equal(result, expected) - - def test_neg(self): - vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]") - arr = TimedeltaArray(vals) - - evals = np.array([3600 * 10 ** 9, "NaT", -7200 * 10 ** 9], dtype="m8[ns]") - expected = TimedeltaArray(evals) - - result = -arr - tm.assert_timedelta_array_equal(result, expected) - - def test_neg_freq(self): - tdi = 
pd.timedelta_range("2 Days", periods=4, freq="H") - arr = TimedeltaArray(tdi, freq=tdi.freq) - - expected = TimedeltaArray(-tdi._data, freq=-tdi.freq) - - result = -arr - tm.assert_timedelta_array_equal(result, expected) - @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"]) def test_astype_int(self, dtype): arr = TimedeltaArray._from_sequence([pd.Timedelta("1H"), pd.Timedelta("2H")]) @@ -171,6 +143,37 @@ def test_searchsorted_invalid_types(self, other, index): arr.searchsorted(other) +class TestUnaryOps: + def test_abs(self): + vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]") + arr = TimedeltaArray(vals) + + evals = np.array([3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]") + expected = TimedeltaArray(evals) + + result = abs(arr) + tm.assert_timedelta_array_equal(result, expected) + + def test_neg(self): + vals = np.array([-3600 * 10 ** 9, "NaT", 7200 * 10 ** 9], dtype="m8[ns]") + arr = TimedeltaArray(vals) + + evals = np.array([3600 * 10 ** 9, "NaT", -7200 * 10 ** 9], dtype="m8[ns]") + expected = TimedeltaArray(evals) + + result = -arr + tm.assert_timedelta_array_equal(result, expected) + + def test_neg_freq(self): + tdi = pd.timedelta_range("2 Days", periods=4, freq="H") + arr = TimedeltaArray(tdi, freq=tdi.freq) + + expected = TimedeltaArray(-tdi._data, freq=-tdi.freq) + + result = -arr + tm.assert_timedelta_array_equal(result, expected) + + class TestReductions: @pytest.mark.parametrize("name", ["sum", "std", "min", "max", "median"]) @pytest.mark.parametrize("skipna", [True, False])
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36902
2020-10-05T22:34:11Z
2020-10-06T03:33:27Z
2020-10-06T03:33:27Z
2020-10-06T14:59:18Z
REF: collect reduction tests
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 6ba55ce3c74b9..1a469d3e3d88b 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -9,12 +9,6 @@ class TestSeriesAnalytics: - def test_prod_numpy16_bug(self): - s = Series([1.0, 1.0, 1.0], index=range(3)) - result = s.prod() - - assert not isinstance(result, Series) - def test_matmul(self): # matmul test is for GH #10259 a = Series(np.random.randn(4), index=["p", "q", "r", "s"]) @@ -125,74 +119,3 @@ def test_is_monotonic(self): s = Series(list(reversed(s.tolist()))) assert s.is_monotonic is False assert s.is_monotonic_decreasing is True - - @pytest.mark.parametrize("func", [np.any, np.all]) - @pytest.mark.parametrize("kwargs", [dict(keepdims=True), dict(out=object())]) - def test_validate_any_all_out_keepdims_raises(self, kwargs, func): - s = pd.Series([1, 2]) - param = list(kwargs)[0] - name = func.__name__ - - msg = ( - f"the '{param}' parameter is not " - "supported in the pandas " - fr"implementation of {name}\(\)" - ) - with pytest.raises(ValueError, match=msg): - func(s, **kwargs) - - def test_validate_sum_initial(self): - s = pd.Series([1, 2]) - msg = ( - r"the 'initial' parameter is not " - r"supported in the pandas " - r"implementation of sum\(\)" - ) - with pytest.raises(ValueError, match=msg): - np.sum(s, initial=10) - - def test_validate_median_initial(self): - s = pd.Series([1, 2]) - msg = ( - r"the 'overwrite_input' parameter is not " - r"supported in the pandas " - r"implementation of median\(\)" - ) - with pytest.raises(ValueError, match=msg): - # It seems like np.median doesn't dispatch, so we use the - # method instead of the ufunc. 
- s.median(overwrite_input=True) - - def test_validate_stat_keepdims(self): - s = pd.Series([1, 2]) - msg = ( - r"the 'keepdims' parameter is not " - r"supported in the pandas " - r"implementation of sum\(\)" - ) - with pytest.raises(ValueError, match=msg): - np.sum(s, keepdims=True) - - def test_td64_summation_overflow(self): - # GH 9442 - s = pd.Series(pd.date_range("20130101", periods=100000, freq="H")) - s[0] += pd.Timedelta("1s 1ms") - - # mean - result = (s - s.min()).mean() - expected = pd.Timedelta((pd.TimedeltaIndex(s - s.min()).asi8 / len(s)).sum()) - - # the computation is converted to float so - # might be some loss of precision - assert np.allclose(result.value / 1000, expected.value / 1000) - - # sum - msg = "overflow in timedelta operation" - with pytest.raises(ValueError, match=msg): - (s - s.min()).sum() - - s1 = s[0:10000] - with pytest.raises(ValueError, match=msg): - (s1 - s1.min()).sum() - s2 = s[0:1000] - (s2 - s2.min()).sum() diff --git a/pandas/tests/series/test_reductions.py b/pandas/tests/series/test_reductions.py index be9330a14f9c9..28d29c69f6526 100644 --- a/pandas/tests/series/test_reductions.py +++ b/pandas/tests/series/test_reductions.py @@ -1,3 +1,6 @@ +import numpy as np +import pytest + import pandas as pd from pandas import Series @@ -9,3 +12,86 @@ def test_reductions_td64_with_nat(): assert ser.median() == exp assert ser.min() == exp assert ser.max() == exp + + +def test_td64_summation_overflow(): + # GH#9442 + ser = Series(pd.date_range("20130101", periods=100000, freq="H")) + ser[0] += pd.Timedelta("1s 1ms") + + # mean + result = (ser - ser.min()).mean() + expected = pd.Timedelta((pd.TimedeltaIndex(ser - ser.min()).asi8 / len(ser)).sum()) + + # the computation is converted to float so + # might be some loss of precision + assert np.allclose(result.value / 1000, expected.value / 1000) + + # sum + msg = "overflow in timedelta operation" + with pytest.raises(ValueError, match=msg): + (ser - ser.min()).sum() + + s1 = ser[0:10000] 
+ with pytest.raises(ValueError, match=msg): + (s1 - s1.min()).sum() + s2 = ser[0:1000] + (s2 - s2.min()).sum() + + +def test_prod_numpy16_bug(): + ser = Series([1.0, 1.0, 1.0], index=range(3)) + result = ser.prod() + + assert not isinstance(result, Series) + + +@pytest.mark.parametrize("func", [np.any, np.all]) +@pytest.mark.parametrize("kwargs", [dict(keepdims=True), dict(out=object())]) +def test_validate_any_all_out_keepdims_raises(kwargs, func): + ser = Series([1, 2]) + param = list(kwargs)[0] + name = func.__name__ + + msg = ( + f"the '{param}' parameter is not " + "supported in the pandas " + fr"implementation of {name}\(\)" + ) + with pytest.raises(ValueError, match=msg): + func(ser, **kwargs) + + +def test_validate_sum_initial(): + ser = Series([1, 2]) + msg = ( + r"the 'initial' parameter is not " + r"supported in the pandas " + r"implementation of sum\(\)" + ) + with pytest.raises(ValueError, match=msg): + np.sum(ser, initial=10) + + +def test_validate_median_initial(): + ser = Series([1, 2]) + msg = ( + r"the 'overwrite_input' parameter is not " + r"supported in the pandas " + r"implementation of median\(\)" + ) + with pytest.raises(ValueError, match=msg): + # It seems like np.median doesn't dispatch, so we use the + # method instead of the ufunc. 
+ ser.median(overwrite_input=True) + + +def test_validate_stat_keepdims(): + ser = Series([1, 2]) + msg = ( + r"the 'keepdims' parameter is not " + r"supported in the pandas " + r"implementation of sum\(\)" + ) + with pytest.raises(ValueError, match=msg): + np.sum(ser, keepdims=True) diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py index b6f59807eaa15..c9c34916be32b 100644 --- a/pandas/tests/test_lib.py +++ b/pandas/tests/test_lib.py @@ -1,9 +1,8 @@ import numpy as np import pytest -from pandas._libs import lib, writers as libwriters +from pandas._libs import Timestamp, lib, writers as libwriters -import pandas as pd from pandas import Index import pandas._testing as tm @@ -41,7 +40,7 @@ def test_fast_unique_multiple_list_gen_sort(self): tm.assert_numpy_array_equal(np.array(out), expected) def test_fast_unique_multiple_unsortable_runtimewarning(self): - arr = [np.array(["foo", pd.Timestamp("2000")])] + arr = [np.array(["foo", Timestamp("2000")])] with tm.assert_produces_warning(RuntimeWarning): lib.fast_unique_multiple(arr, sort=None) diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index deb7434694d01..1c9fd46ae451f 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -298,7 +298,7 @@ def verify_order(df): "outer": np.ones(len(out), dtype="bool"), } - for how in "left", "right", "outer", "inner": + for how in ["left", "right", "outer", "inner"]: mask = jmask[how] frame = align(out[mask].copy()) assert mask.all() ^ mask.any() or how == "outer"
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36901
2020-10-05T21:20:01Z
2020-10-06T03:44:14Z
2020-10-06T03:44:14Z
2020-10-06T14:59:50Z
DEPR: Index.ravel returning an ndarray
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index a269580bc4453..47ebd962b367c 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -268,6 +268,7 @@ Deprecations - Deprecated :meth:`Index.is_all_dates` (:issue:`27744`) - Deprecated automatic alignment on comparison operations between :class:`DataFrame` and :class:`Series`, do ``frame, ser = frame.align(ser, axis=1, copy=False)`` before e.g. ``frame == ser`` (:issue:`28759`) - :meth:`Rolling.count` with ``min_periods=None`` will default to the size of the window in a future version (:issue:`31302`) +- :meth:`Index.ravel` returning a ``np.ndarray`` is deprecated, in the future this will return a view on the same index (:issue:`19956`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ff3d8bf05f9a5..d603797370ce3 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -659,6 +659,12 @@ def ravel(self, order="C"): -------- numpy.ndarray.ravel """ + warnings.warn( + "Index.ravel returning ndarray is deprecated; in a future version " + "this will return a view on self.", + FutureWarning, + stacklevel=2, + ) values = self._get_engine_target() return values.ravel(order=order) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index b9d41f142c2b5..13010bb2ef147 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1677,7 +1677,8 @@ def is_dates_only( values: Union[np.ndarray, DatetimeArray, Index, DatetimeIndex] ) -> bool: # return a boolean if we are only dates (and don't have a timezone) - values = values.ravel() + if not isinstance(values, Index): + values = values.ravel() values = DatetimeIndex(values) if values.tz is not None: diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index 675ae388a28a4..e2dea7828b3ad 100644 --- 
a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -399,6 +399,11 @@ def test_astype_preserves_name(self, index, dtype): else: assert result.name == index.name + def test_ravel_deprecation(self, index): + # GH#19956 ravel returning ndarray is deprecated + with tm.assert_produces_warning(FutureWarning): + index.ravel() + @pytest.mark.parametrize("na_position", [None, "middle"]) def test_sort_values_invalid_na_position(index_with_missing, na_position):
- [x] closes #19956 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Make Index.ravel() behavior match every other ravel() method in existence.
https://api.github.com/repos/pandas-dev/pandas/pulls/36900
2020-10-05T20:42:51Z
2020-10-06T03:40:19Z
2020-10-06T03:40:18Z
2020-10-14T15:34:11Z
Backport PR #36689 on branch 1.1.x (DOC: Start v1.1.4 release notes)
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index 33c0750c1dc16..b8abc71ca64a2 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -16,6 +16,7 @@ Version 1.1 .. toctree:: :maxdepth: 2 + v1.1.4 v1.1.3 v1.1.2 v1.1.1 diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index 2323afbe00e5d..e752eb54d0c15 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -75,4 +75,4 @@ Other Contributors ~~~~~~~~~~~~ -.. contributors:: v1.1.2..v1.1.3|HEAD +.. contributors:: v1.1.2..v1.1.3 diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst new file mode 100644 index 0000000000000..e63912ebc8fee --- /dev/null +++ b/doc/source/whatsnew/v1.1.4.rst @@ -0,0 +1,42 @@ +.. _whatsnew_114: + +What's new in 1.1.4 (??) +------------------------ + +These are the changes in pandas 1.1.4. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_114.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_114.bug_fixes: + +Bug fixes +~~~~~~~~~ +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_114.other: + +Other +~~~~~ +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_114.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.1.3..v1.1.4|HEAD
Backport PR #36689: DOC: Start v1.1.4 release notes
https://api.github.com/repos/pandas-dev/pandas/pulls/36899
2020-10-05T20:06:04Z
2020-10-06T10:28:43Z
2020-10-06T10:28:43Z
2020-10-06T10:28:43Z
BUG: Add trailing trailing newline in to_json
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ae4d5ea692066..90d0dad78ce40 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -417,6 +417,7 @@ I/O - Removed ``private_key`` and ``verbose`` from :func:`read_gbq` as they are no longer supported in ``pandas-gbq`` (:issue:`34654`, :issue:`30200`) - Bumped minimum pytables version to 3.5.1 to avoid a ``ValueError`` in :meth:`read_hdf` (:issue:`24839`) - Bug in :func:`read_table` and :func:`read_csv` when ``delim_whitespace=True`` and ``sep=default`` (:issue:`36583`) +- Bug in :meth:`to_json` with ``lines=True`` and ``orient='records'`` the last line of the record is not appended with 'new line character' (:issue:`36888`) - Bug in :meth:`read_parquet` with fixed offset timezones. String representation of timezones was not recognized (:issue:`35997`, :issue:`36004`) Plotting diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx index f6823c3cb0d3f..06f180eef0c65 100644 --- a/pandas/_libs/writers.pyx +++ b/pandas/_libs/writers.pyx @@ -108,7 +108,7 @@ def convert_json_to_lines(arr: object) -> str: if not in_quotes: num_open_brackets_seen -= 1 - return narr.tobytes().decode('utf-8') + return narr.tobytes().decode('utf-8') + '\n' # GH:36888 # stata, pytables diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 6abd8a010ea69..e12424888f4af 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1291,19 +1291,19 @@ def test_to_jsonl(self): # GH9180 df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) result = df.to_json(orient="records", lines=True) - expected = '{"a":1,"b":2}\n{"a":1,"b":2}' + expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n' assert result == expected df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"]) result = df.to_json(orient="records", lines=True) - expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}' + expected = 
'{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n' assert result == expected tm.assert_frame_equal(pd.read_json(result, lines=True), df) # GH15096: escaped characters in columns and data df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"]) result = df.to_json(orient="records", lines=True) - expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}' + expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n' assert result == expected tm.assert_frame_equal(pd.read_json(result, lines=True), df) diff --git a/pandas/tests/io/json/test_readlines.py b/pandas/tests/io/json/test_readlines.py index b475fa2c514ff..a6ffa7e97d375 100644 --- a/pandas/tests/io/json/test_readlines.py +++ b/pandas/tests/io/json/test_readlines.py @@ -45,23 +45,31 @@ def test_to_jsonl(): # GH9180 df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) result = df.to_json(orient="records", lines=True) - expected = '{"a":1,"b":2}\n{"a":1,"b":2}' + expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n' assert result == expected df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"]) result = df.to_json(orient="records", lines=True) - expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}' + expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n' assert result == expected tm.assert_frame_equal(read_json(result, lines=True), df) # GH15096: escaped characters in columns and data df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"]) result = df.to_json(orient="records", lines=True) - expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}' + expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n' assert result == expected tm.assert_frame_equal(read_json(result, lines=True), df) +def test_to_jsonl_count_new_lines(): + # GH36888 + df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + actual_new_lines_count = df.to_json(orient="records", lines=True).count("\n") + expected_new_lines_count 
= 2 + assert actual_new_lines_count == expected_new_lines_count + + @pytest.mark.parametrize("chunksize", [1, 1.0]) def test_readjson_chunks(lines_json_df, chunksize): # Basic test that read_json(chunks=True) gives the same result as
- [x] closes #36888 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36898
2020-10-05T19:48:07Z
2020-10-10T22:55:57Z
2020-10-10T22:55:57Z
2020-10-10T22:56:01Z
regression fix for merging DF with datetime index with empty DF
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 6f137302d4994..167af1e5e282e 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -467,7 +467,6 @@ MultiIndex - Bug in :meth:`DataFrame.xs` when used with :class:`IndexSlice` raises ``TypeError`` with message ``"Expected label or tuple of labels"`` (:issue:`35301`) - Bug in :meth:`DataFrame.reset_index` with ``NaT`` values in index raises ``ValueError`` with message ``"cannot convert float NaN to integer"`` (:issue:`36541`) -- I/O ^^^ @@ -534,6 +533,7 @@ Reshaping - Bug in :meth:`DataFrame.pivot` did not preserve :class:`MultiIndex` level names for columns when rows and columns both multiindexed (:issue:`36360`) - Bug in :func:`join` returned a non deterministic level-order for the resulting :class:`MultiIndex` (:issue:`36910`) - Bug in :meth:`DataFrame.combine_first()` caused wrong alignment with dtype ``string`` and one level of ``MultiIndex`` containing only ``NA`` (:issue:`37591`) +- Fixed regression in :func:`merge` on merging DatetimeIndex with empty DataFrame (:issue:`36895`) Sparse ^^^^^^ diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 5012be593820e..516ae90360be7 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -830,12 +830,15 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer): rvals = algos.take_1d(take_right, right_indexer, fill_value=rfill) # if we have an all missing left_indexer - # make sure to just use the right values - mask = left_indexer == -1 - if mask.all(): + # make sure to just use the right values or vice-versa + mask_left = left_indexer == -1 + mask_right = right_indexer == -1 + if mask_left.all(): key_col = rvals + elif mask_right.all(): + key_col = lvals else: - key_col = Index(lvals).where(~mask, rvals) + key_col = Index(lvals).where(~mask_left, rvals) if result._is_label_reference(name): result[name] = key_col diff --git 
a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py index b1922241c7843..260a0e9d486b2 100644 --- a/pandas/tests/reshape/merge/test_multi.py +++ b/pandas/tests/reshape/merge/test_multi.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -from pandas import DataFrame, Index, MultiIndex, Series +from pandas import DataFrame, Index, MultiIndex, Series, Timestamp import pandas._testing as tm from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import merge @@ -481,6 +481,53 @@ def test_merge_datetime_index(self, klass): result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize("merge_type", ["left", "right"]) + def test_merge_datetime_multi_index_empty_df(self, merge_type): + # see gh-36895 + + left = DataFrame( + data={ + "data": [1.5, 1.5], + }, + index=MultiIndex.from_tuples( + [[Timestamp("1950-01-01"), "A"], [Timestamp("1950-01-02"), "B"]], + names=["date", "panel"], + ), + ) + + right = DataFrame( + index=MultiIndex.from_tuples([], names=["date", "panel"]), columns=["state"] + ) + + expected_index = MultiIndex.from_tuples( + [[Timestamp("1950-01-01"), "A"], [Timestamp("1950-01-02"), "B"]], + names=["date", "panel"], + ) + + if merge_type == "left": + expected = DataFrame( + data={ + "data": [1.5, 1.5], + "state": [None, None], + }, + index=expected_index, + ) + results_merge = left.merge(right, how="left", on=["date", "panel"]) + results_join = left.join(right, how="left") + else: + expected = DataFrame( + data={ + "state": [None, None], + "data": [1.5, 1.5], + }, + index=expected_index, + ) + results_merge = right.merge(left, how="right", on=["date", "panel"]) + results_join = right.join(left, how="right") + + tm.assert_frame_equal(results_merge, expected) + tm.assert_frame_equal(results_join, expected) + def test_join_multi_levels(self): # GH 3662
- [X ] closes https://github.com/pandas-dev/pandas/issues/36895 - [X ] tests added / passed - [X ] passes `black pandas` - [ X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36897
2020-10-05T19:20:49Z
2020-11-03T13:30:04Z
2020-11-03T13:30:03Z
2020-11-03T18:34:08Z
Backport PR #36887 on branch 1.1.x (DOC: 1.1.3 release date)
diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index af714b1bb2ab1..2323afbe00e5d 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -1,7 +1,7 @@ .. _whatsnew_113: -What's new in 1.1.3 (??) ------------------------- +What's new in 1.1.3 (October 5, 2020) +------------------------------------- These are the changes in pandas 1.1.3. See :ref:`release` for a full changelog including other versions of pandas.
Backport PR #36887: DOC: 1.1.3 release date
https://api.github.com/repos/pandas-dev/pandas/pulls/36891
2020-10-05T14:03:43Z
2020-10-05T15:17:12Z
2020-10-05T15:17:11Z
2020-10-05T15:17:12Z
DOC: 1.1.3 release date
diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index af714b1bb2ab1..2323afbe00e5d 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -1,7 +1,7 @@ .. _whatsnew_113: -What's new in 1.1.3 (??) ------------------------- +What's new in 1.1.3 (October 5, 2020) +------------------------------------- These are the changes in pandas 1.1.3. See :ref:`release` for a full changelog including other versions of pandas.
removed mention of py39 support @jreback
https://api.github.com/repos/pandas-dev/pandas/pulls/36887
2020-10-05T12:24:21Z
2020-10-05T14:03:30Z
2020-10-05T14:03:30Z
2020-10-05T14:03:43Z
Backport PR #36864 on branch 1.1.x (CI: Update error message for np_dev)
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 1fafdf00393e1..fbdac2bb2d8e8 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -367,14 +367,17 @@ def test_2d_to_1d_assignment_raises(): x = np.random.randn(2, 2) y = pd.Series(range(2)) - msg = ( - r"shape mismatch: value array of shape \(2,2\) could not be " - r"broadcast to indexing result of shape \(2,\)" + msg = "|".join( + [ + r"shape mismatch: value array of shape \(2,2\) could not be " + r"broadcast to indexing result of shape \(2,\)", + r"cannot reshape array of size 4 into shape \(2,\)", + ] ) with pytest.raises(ValueError, match=msg): y.loc[range(2)] = x - msg = r"could not broadcast input array from shape \(2,2\) into shape \(2\)" + msg = r"could not broadcast input array from shape \(2,2\) into shape \(2,?\)" with pytest.raises(ValueError, match=msg): y.loc[:] = x
Backport PR #36864: CI: Update error message for np_dev
https://api.github.com/repos/pandas-dev/pandas/pulls/36886
2020-10-05T10:46:45Z
2020-10-05T12:03:14Z
2020-10-05T12:03:14Z
2020-10-05T12:03:15Z
TYP: check_untyped_defs core.arrays.base
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index c2fc72ff753a8..94d6428b44043 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1176,22 +1176,22 @@ def _create_arithmetic_method(cls, op): @classmethod def _add_arithmetic_ops(cls): - cls.__add__ = cls._create_arithmetic_method(operator.add) - cls.__radd__ = cls._create_arithmetic_method(ops.radd) - cls.__sub__ = cls._create_arithmetic_method(operator.sub) - cls.__rsub__ = cls._create_arithmetic_method(ops.rsub) - cls.__mul__ = cls._create_arithmetic_method(operator.mul) - cls.__rmul__ = cls._create_arithmetic_method(ops.rmul) - cls.__pow__ = cls._create_arithmetic_method(operator.pow) - cls.__rpow__ = cls._create_arithmetic_method(ops.rpow) - cls.__mod__ = cls._create_arithmetic_method(operator.mod) - cls.__rmod__ = cls._create_arithmetic_method(ops.rmod) - cls.__floordiv__ = cls._create_arithmetic_method(operator.floordiv) - cls.__rfloordiv__ = cls._create_arithmetic_method(ops.rfloordiv) - cls.__truediv__ = cls._create_arithmetic_method(operator.truediv) - cls.__rtruediv__ = cls._create_arithmetic_method(ops.rtruediv) - cls.__divmod__ = cls._create_arithmetic_method(divmod) - cls.__rdivmod__ = cls._create_arithmetic_method(ops.rdivmod) + setattr(cls, "__add__", cls._create_arithmetic_method(operator.add)) + setattr(cls, "__radd__", cls._create_arithmetic_method(ops.radd)) + setattr(cls, "__sub__", cls._create_arithmetic_method(operator.sub)) + setattr(cls, "__rsub__", cls._create_arithmetic_method(ops.rsub)) + setattr(cls, "__mul__", cls._create_arithmetic_method(operator.mul)) + setattr(cls, "__rmul__", cls._create_arithmetic_method(ops.rmul)) + setattr(cls, "__pow__", cls._create_arithmetic_method(operator.pow)) + setattr(cls, "__rpow__", cls._create_arithmetic_method(ops.rpow)) + setattr(cls, "__mod__", cls._create_arithmetic_method(operator.mod)) + setattr(cls, "__rmod__", cls._create_arithmetic_method(ops.rmod)) + setattr(cls, "__floordiv__", 
cls._create_arithmetic_method(operator.floordiv)) + setattr(cls, "__rfloordiv__", cls._create_arithmetic_method(ops.rfloordiv)) + setattr(cls, "__truediv__", cls._create_arithmetic_method(operator.truediv)) + setattr(cls, "__rtruediv__", cls._create_arithmetic_method(ops.rtruediv)) + setattr(cls, "__divmod__", cls._create_arithmetic_method(divmod)) + setattr(cls, "__rdivmod__", cls._create_arithmetic_method(ops.rdivmod)) @classmethod def _create_comparison_method(cls, op): @@ -1199,12 +1199,12 @@ def _create_comparison_method(cls, op): @classmethod def _add_comparison_ops(cls): - cls.__eq__ = cls._create_comparison_method(operator.eq) - cls.__ne__ = cls._create_comparison_method(operator.ne) - cls.__lt__ = cls._create_comparison_method(operator.lt) - cls.__gt__ = cls._create_comparison_method(operator.gt) - cls.__le__ = cls._create_comparison_method(operator.le) - cls.__ge__ = cls._create_comparison_method(operator.ge) + setattr(cls, "__eq__", cls._create_comparison_method(operator.eq)) + setattr(cls, "__ne__", cls._create_comparison_method(operator.ne)) + setattr(cls, "__lt__", cls._create_comparison_method(operator.lt)) + setattr(cls, "__gt__", cls._create_comparison_method(operator.gt)) + setattr(cls, "__le__", cls._create_comparison_method(operator.le)) + setattr(cls, "__ge__", cls._create_comparison_method(operator.ge)) @classmethod def _create_logical_method(cls, op): @@ -1212,12 +1212,12 @@ def _create_logical_method(cls, op): @classmethod def _add_logical_ops(cls): - cls.__and__ = cls._create_logical_method(operator.and_) - cls.__rand__ = cls._create_logical_method(ops.rand_) - cls.__or__ = cls._create_logical_method(operator.or_) - cls.__ror__ = cls._create_logical_method(ops.ror_) - cls.__xor__ = cls._create_logical_method(operator.xor) - cls.__rxor__ = cls._create_logical_method(ops.rxor) + setattr(cls, "__and__", cls._create_logical_method(operator.and_)) + setattr(cls, "__rand__", cls._create_logical_method(ops.rand_)) + setattr(cls, "__or__", 
cls._create_logical_method(operator.or_)) + setattr(cls, "__ror__", cls._create_logical_method(ops.ror_)) + setattr(cls, "__xor__", cls._create_logical_method(operator.xor)) + setattr(cls, "__rxor__", cls._create_logical_method(ops.rxor)) class ExtensionScalarOpsMixin(ExtensionOpsMixin): diff --git a/setup.cfg b/setup.cfg index 3279a485c9bf3..75722f2a7809f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -142,9 +142,6 @@ check_untyped_defs=False [mypy-pandas.core.apply] check_untyped_defs=False -[mypy-pandas.core.arrays.base] -check_untyped_defs=False - [mypy-pandas.core.arrays.datetimelike] check_untyped_defs=False
xref https://github.com/pandas-dev/pandas/issues/31160 from https://docs.python.org/3/library/functions.html?highlight=setattr#setattr > For example, setattr(x, 'foobar', 123) is equivalent to x.foobar = 123. The changes here, don't 'fix' the issue. The correct solution is to create these methods with a statically defined interface, see https://github.com/pandas-dev/pandas/issues/31160#issuecomment-699193016 In the meantime, the changes here allows untyped defs in this module to be checked. (without adding type ignores) pandas\core\arrays\base.py:1179: error: Unsupported left operand type for + ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1180: error: "Type[ExtensionOpsMixin]" has no attribute "__radd__" [attr-defined] pandas\core\arrays\base.py:1181: error: Unsupported left operand type for - ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1182: error: "Type[ExtensionOpsMixin]" has no attribute "__rsub__" [attr-defined] pandas\core\arrays\base.py:1183: error: Unsupported left operand type for * ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1184: error: "Type[ExtensionOpsMixin]" has no attribute "__rmul__" [attr-defined] pandas\core\arrays\base.py:1185: error: Unsupported left operand type for ** ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1186: error: "Type[ExtensionOpsMixin]" has no attribute "__rpow__" [attr-defined] pandas\core\arrays\base.py:1187: error: Unsupported left operand type for % ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1188: error: "Type[ExtensionOpsMixin]" has no attribute "__rmod__" [attr-defined] pandas\core\arrays\base.py:1189: error: Unsupported left operand type for // ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1190: error: "Type[ExtensionOpsMixin]" has no attribute "__rfloordiv__" [attr-defined] pandas\core\arrays\base.py:1191: error: Unsupported left operand type for / ("Type[ExtensionOpsMixin]") [operator] 
pandas\core\arrays\base.py:1192: error: "Type[ExtensionOpsMixin]" has no attribute "__rtruediv__" [attr-defined] pandas\core\arrays\base.py:1193: error: Unsupported left operand type for divmod ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1194: error: "Type[ExtensionOpsMixin]" has no attribute "__rdivmod__" [attr-defined] pandas\core\arrays\base.py:1202: error: Cannot assign to a method [assignment] pandas\core\arrays\base.py:1203: error: Cannot assign to a method [assignment] pandas\core\arrays\base.py:1204: error: Unsupported left operand type for < ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1205: error: Unsupported left operand type for > ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1206: error: Unsupported left operand type for <= ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1207: error: Unsupported left operand type for >= ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1215: error: Unsupported left operand type for & ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1216: error: "Type[ExtensionOpsMixin]" has no attribute "__rand__" [attr-defined] pandas\core\arrays\base.py:1217: error: Unsupported left operand type for | ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1218: error: "Type[ExtensionOpsMixin]" has no attribute "__ror__" [attr-defined] pandas\core\arrays\base.py:1219: error: Unsupported left operand type for ^ ("Type[ExtensionOpsMixin]") [operator] pandas\core\arrays\base.py:1220: error: "Type[ExtensionOpsMixin]" has no attribute "__rxor__" [attr-defined]
https://api.github.com/repos/pandas-dev/pandas/pulls/36885
2020-10-05T10:33:22Z
2020-10-05T12:58:39Z
2020-10-05T12:58:39Z
2020-10-05T13:15:12Z
TYP: check_untyped_defs compat.pickle_compat
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index ef9f36705a7ee..80ee1f2e20154 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -274,7 +274,7 @@ def patch_pickle(): """ orig_loads = pkl.loads try: - pkl.loads = loads + setattr(pkl, "loads", loads) yield finally: - pkl.loads = orig_loads + setattr(pkl, "loads", orig_loads) diff --git a/setup.cfg b/setup.cfg index 3279a485c9bf3..93564bc3908d6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -136,9 +136,6 @@ check_untyped_defs=False [mypy-pandas._version] check_untyped_defs=False -[mypy-pandas.compat.pickle_compat] -check_untyped_defs=False - [mypy-pandas.core.apply] check_untyped_defs=False
pandas\compat\pickle_compat.py:277: error: Incompatible types in assignment (expression has type "Callable[[bytes, DefaultNamedArg(bool, 'fix_imports'), DefaultNamedArg(str, 'encoding'), DefaultNamedArg(str, 'errors')], Any]", variable has type "Callable[[bytes, DefaultNamedArg(bool, 'fix_imports'), DefaultNamedArg(str, 'encoding'), DefaultNamedArg(str, 'errors'), DefaultNamedArg(Optional[Iterable[Any]], 'buffers')], Any]") [assignment]
https://api.github.com/repos/pandas-dev/pandas/pulls/36884
2020-10-05T10:24:51Z
2020-10-05T12:59:08Z
2020-10-05T12:59:08Z
2020-10-05T13:12:30Z
DOC: sync release notes on 1.1.x with master
diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index acf1dafc59885..af714b1bb2ab1 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -16,7 +16,7 @@ Enhancements Added support for new Python version ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Pandas 1.1.3 now supports Python 3.9 (:issue:`36296`). +pandas 1.1.3 now supports Python 3.9 (:issue:`36296`). Development Changes ^^^^^^^^^^^^^^^^^^^
xref #36845
https://api.github.com/repos/pandas-dev/pandas/pulls/36883
2020-10-05T10:20:49Z
2020-10-05T12:04:41Z
2020-10-05T12:04:41Z
2020-10-05T12:04:47Z
upgrade flake8 to 3.8.4 #36879
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d0c9f12614d0d..6a311c6f702e8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,7 @@ repos: hooks: - id: black - repo: https://gitlab.com/pycqa/flake8 - rev: 3.8.3 + rev: 3.8.4 hooks: - id: flake8 additional_dependencies: [flake8-comprehensions>=3.1.0]
closes #36879 Here are the logs after the upgrade to 3.8.4: ```console (base) root@33d4c5d14a5b:/home/pandas-tazminia# pre-commit run flake8 --all [INFO] Initializing environment for https://github.com/python/black. [INFO] Initializing environment for https://gitlab.com/pycqa/flake8. [INFO] Initializing environment for https://gitlab.com/pycqa/flake8:flake8-comprehensions>=3.1.0. [INFO] Initializing environment for https://github.com/PyCQA/isort. [INFO] Initializing environment for https://github.com/asottile/pyupgrade. [INFO] Initializing environment for https://github.com/pre-commit/pygrep-hooks. [INFO] Initializing environment for https://github.com/asottile/yesqa. [INFO] Installing environment for https://gitlab.com/pycqa/flake8. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://gitlab.com/pycqa/flake8. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... flake8...................................................................Passed flake8-pyx...............................................................Passed flake8-pxd...............................................................Passed ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36882
2020-10-05T10:03:21Z
2020-10-05T10:15:12Z
2020-10-05T10:15:11Z
2020-10-08T12:47:02Z
CLN: Remove the duplicate configuration of flake8-rst in setup.cfg
diff --git a/setup.cfg b/setup.cfg index a7c0f3484517f..3279a485c9bf3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,7 +42,6 @@ bootstrap = ignore = E203, # space before : (needed for how black formats slicing) E402, # module level import not at top of file W503, # line break before binary operator - E203, # space before : (needed for how black formats slicing) # Classes/functions in different blocks can generate those errors E302, # expected 2 blank lines, found 0 E305, # expected 2 blank lines after class or function definition, found 0
Remove duplicate `ignore = E203` configuration in setup.cfg[flake8-rst]
https://api.github.com/repos/pandas-dev/pandas/pulls/36877
2020-10-05T07:24:23Z
2020-10-05T08:05:51Z
2020-10-05T08:05:51Z
2020-10-05T08:17:31Z
BUG: in DataFrame.reset_index() only call maybe_upcast_putmask with ndarrays
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 2b4b10c39602a..bd3112403b31b 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -392,6 +392,7 @@ Indexing - Bug in :meth:`Index.sort_values` where, when empty values were passed, the method would break by trying to compare missing values instead of pushing them to the end of the sort order. (:issue:`35584`) - Bug in :meth:`Index.get_indexer` and :meth:`Index.get_indexer_non_unique` where int64 arrays are returned instead of intp. (:issue:`36359`) - Bug in :meth:`DataFrame.sort_index` where parameter ascending passed as a list on a single level index gives wrong result. (:issue:`32334`) +- Bug in :meth:`DataFrame.reset_index` was incorrectly raising a ``ValueError`` for input with a :class:`MultiIndex` with missing values in a level with ``Categorical`` dtype (:issue:`24206`) Missing ^^^^^^^ diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 1cea817abbaa3..a7379376c2f78 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -493,7 +493,10 @@ def maybe_casted_values(index, codes=None): values = values._data # TODO: can we de-kludge yet? 
if mask.any(): - values, _ = maybe_upcast_putmask(values, mask, np.nan) + if isinstance(values, np.ndarray): + values, _ = maybe_upcast_putmask(values, mask, np.nan) + else: + values[mask] = np.nan if issubclass(values_type, DatetimeLikeArrayMixin): values = values_type(values, dtype=values_dtype) diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 486855f5c37cd..deac1792737a1 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -12,10 +12,12 @@ from pandas import ( Categorical, + CategoricalIndex, DataFrame, DatetimeIndex, Index, IntervalIndex, + MultiIndex, Series, Timestamp, cut, @@ -171,21 +173,6 @@ def test_assign_columns(self, float_frame): tm.assert_series_equal(float_frame["C"], df["baz"], check_names=False) tm.assert_series_equal(float_frame["hi"], df["foo2"], check_names=False) - def test_set_index_preserve_categorical_dtype(self): - # GH13743, GH13854 - df = DataFrame( - { - "A": [1, 2, 1, 1, 2], - "B": [10, 16, 22, 28, 34], - "C1": Categorical(list("abaab"), categories=list("bac"), ordered=False), - "C2": Categorical(list("abaab"), categories=list("bac"), ordered=True), - } - ) - for cols in ["C1", "C2", ["A", "C1"], ["A", "C2"], ["C1", "C2"]]: - result = df.set_index(cols).reset_index() - result = result.reindex(columns=df.columns) - tm.assert_frame_equal(result, df) - def test_rename_signature(self): sig = inspect.signature(DataFrame.rename) parameters = set(sig.parameters) @@ -266,3 +253,47 @@ def test_set_reset_index(self): df = df.set_index("B") df = df.reset_index() + + +class TestCategoricalIndex: + def test_set_index_preserve_categorical_dtype(self): + # GH13743, GH13854 + df = DataFrame( + { + "A": [1, 2, 1, 1, 2], + "B": [10, 16, 22, 28, 34], + "C1": Categorical(list("abaab"), categories=list("bac"), ordered=False), + "C2": Categorical(list("abaab"), categories=list("bac"), ordered=True), + } + ) + for cols in ["C1", "C2", ["A", "C1"], ["A", "C2"], ["C1", 
"C2"]]: + result = df.set_index(cols).reset_index() + result = result.reindex(columns=df.columns) + tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize( + "codes", ([[0, 0, 1, 1], [0, 1, 0, 1]], [[0, 0, -1, 1], [0, 1, 0, 1]]) + ) + def test_reindexing_with_missing_values(self, codes): + # GH 24206 + + index = MultiIndex( + [CategoricalIndex(["A", "B"]), CategoricalIndex(["a", "b"])], codes + ) + data = {"col": range(len(index))} + df = DataFrame(data=data, index=index) + + expected = DataFrame( + { + "level_0": Categorical.from_codes(codes[0], categories=["A", "B"]), + "level_1": Categorical.from_codes(codes[1], categories=["a", "b"]), + "col": range(4), + } + ) + + res = df.reset_index() + tm.assert_frame_equal(res, expected) + + # roundtrip + res = expected.set_index(["level_0", "level_1"]).reset_index() + tm.assert_frame_equal(res, expected)
- [x] closes #24206 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36876
2020-10-05T04:10:06Z
2020-10-11T20:01:58Z
2020-10-11T20:01:58Z
2020-10-11T20:05:50Z
DOC: Fix GL01 docstring errors in some functions
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 14363dabfcdf3..40c16d7cf0b8f 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -4,7 +4,7 @@ _shared_docs[ "aggregate" -] = """\ +] = """ Aggregate using one or more operations over the specified axis. Parameters @@ -46,7 +46,7 @@ _shared_docs[ "compare" -] = """\ +] = """ Compare to another %(klass)s and show the differences. .. versionadded:: 1.1.0 @@ -75,7 +75,7 @@ _shared_docs[ "groupby" -] = """\ +] = """ Group %(klass)s using a mapper or by a Series of columns. A groupby operation involves some combination of splitting the @@ -144,7 +144,7 @@ _shared_docs[ "melt" -] = """\ +] = """ Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. This function is useful to massage a DataFrame into a format where one @@ -258,7 +258,7 @@ _shared_docs[ "transform" -] = """\ +] = """ Call ``func`` on self producing a {klass} with transformed values. Produced {klass} will have same axis length as self.
- [x] fixes #36874 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry There are some GL01 pandas docstring errors in `shared_docs.py`. ``` GL01 Docstring text (summary) should start in the line immediately after the opening quotes (not in the same line, or leaving a blank line in between. ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36875
2020-10-05T03:49:23Z
2020-10-16T01:32:19Z
2020-10-16T01:32:19Z
2020-10-16T01:32:24Z
REF: dont consolidate in is_mixed_type
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 28a6f6b8c6621..e9a5191ea23a1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -630,7 +630,6 @@ def _is_homogeneous_type(self) -> bool: if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: - # Note: consolidates inplace return not self._is_mixed_type @property diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f0fb34dadb257..5fe9348baade1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5472,8 +5472,15 @@ def _consolidate(self, inplace: bool_t = False): @property def _is_mixed_type(self) -> bool_t: - f = lambda: self._mgr.is_mixed_type - return self._protect_consolidate(f) + if len(self._mgr.blocks) == 1: + return False + + if self._mgr.any_extension_types: + # Even if they have the same dtype, we cant consolidate them, + # so we pretend this is "mixed'" + return True + + return self.dtypes.nunique() > 1 def _check_inplace_setting(self, value) -> bool_t: """ check whether we allow in-place setting with this type of value """ @@ -6253,8 +6260,7 @@ def fillna( axis = self._get_axis_number(axis) if value is None: - - if self._is_mixed_type and axis == 1: + if len(self._mgr.blocks) > 1 and axis == 1: if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 7b4b779e80481..9c985b1752222 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1547,8 +1547,7 @@ def _setitem_with_indexer(self, indexer, value): info_axis = self.obj._info_axis_number # maybe partial set - # _is_mixed_type has the side effect of consolidating in-place - take_split_path = self.obj._is_mixed_type + take_split_path = len(self.obj._mgr.blocks) > 1 # if there is only one block/type, still have to take split path # unless the block is one-dimensional or it can hold the value diff --git a/pandas/core/internals/managers.py 
b/pandas/core/internals/managers.py index f2480adce89b4..78ef48cc972ed 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -651,12 +651,6 @@ def _consolidate_check(self) -> None: self._is_consolidated = len(dtypes) == len(set(dtypes)) self._known_consolidated = True - @property - def is_mixed_type(self) -> bool: - # Warning, consolidation needs to get checked upstairs - self._consolidate_inplace() - return len(self.blocks) > 1 - @property def is_numeric_mixed_type(self) -> bool: return all(block.is_numeric for block in self.blocks) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 53d417dc10014..5917520802519 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -230,18 +230,6 @@ def test_constructor_list_str_na(self, string_dtype): def test_is_homogeneous_type(self, data, expected): assert data._is_homogeneous_type is expected - def test_is_homogeneous_type_clears_cache(self): - ser = pd.Series([1, 2, 3]) - df = ser.to_frame("A") - df["B"] = ser - - assert len(df._mgr.blocks) == 2 - - a = df["B"] # caches lookup - df._is_homogeneous_type # _should_ clear cache - assert len(df._mgr.blocks) == 1 - assert df["B"] is not a - def test_asarray_homogenous(self): df = pd.DataFrame({"A": pd.Categorical([1, 2]), "B": pd.Categorical([1, 2])}) result = np.asarray(df) diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 2567f704a4a8d..cd2f5a903d8cc 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -272,13 +272,6 @@ def test_attrs(self): assert mgr.nblocks == 2 assert len(mgr) == 6 - def test_is_mixed_dtype(self): - assert not create_mgr("a,b:f8").is_mixed_type - assert not create_mgr("a:f8-1; b:f8-2").is_mixed_type - - assert create_mgr("a,b:f8; c,d: f4").is_mixed_type - assert create_mgr("a,b:f8; c,d: object").is_mixed_type - def 
test_duplicate_ref_loc_failure(self): tmp_mgr = create_mgr("a:bool; a: f8")
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Avoid copies, side-effects.
https://api.github.com/repos/pandas-dev/pandas/pulls/36873
2020-10-05T02:44:21Z
2020-10-08T01:21:41Z
2020-10-08T01:21:41Z
2020-10-08T01:25:21Z
PERF: Improve RollingGroupby.count
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 5c2d099ed3119..a269580bc4453 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -284,6 +284,7 @@ Performance improvements - ``Styler`` uuid method altered to compress data transmission over web whilst maintaining reasonably low table collision probability (:issue:`36345`) - Performance improvement in :meth:`pd.to_datetime` with non-ns time unit for ``float`` ``dtype`` columns (:issue:`20445`) - Performance improvement in setting values on a :class:`IntervalArray` (:issue:`36310`) +- Performance improvement in :meth:`RollingGroupby.count` (:issue:`35625`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index 6452eb8c6b3a9..2e7e7cd47c336 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -58,7 +58,6 @@ def __init__(self, obj, *args, **kwargs): self._groupby.grouper.mutated = True super().__init__(obj, *args, **kwargs) - count = _dispatch("count") corr = _dispatch("corr", other=None, pairwise=None) cov = _dispatch("cov", other=None, pairwise=None)
- [x] closes #35625 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry ``` In [1]: import pandas as pd ...: ...: # Generate sample df ...: df = pd.DataFrame({'column1': range(600), 'group': 5*['l'+str(i) for i in range(120)]}) ...: ...: # sort by group for easy/efficient joining of new columns to df ...: df=df.sort_values('group',kind='mergesort').reset_index(drop=True) In [2]: %timeit df['mean']=df.groupby('group').rolling(3,min_periods=1)['column1'].mean().values 5.59 ms ± 310 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) In [3]: %timeit df['sum']=df.groupby('group').rolling(3,min_periods=1)['column1'].sum().values ...: 5.34 ms ± 343 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) In [4]: %timeit df['count']=df.groupby('group').rolling(3,min_periods=1)['column1'].count().values ...: 4.97 ms ± 51.7 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36872
2020-10-05T02:27:35Z
2020-10-05T13:01:38Z
2020-10-05T13:01:37Z
2020-10-05T16:52:34Z
CLN: minor changes to `_translate` to simplify and setup for other chgs.
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 8c72c06a96322..292c459919398 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -375,11 +375,15 @@ def _translate(self): cellstyle_map: DefaultDict[Tuple[CSSPair, ...], List[str]] = defaultdict(list) # copied attributes - table_styles = self.table_styles or [] - caption = self.caption hidden_index = self.hidden_index hidden_columns = self.hidden_columns - uuid = self.uuid + + # construct render dict + d = { + "uuid": self.uuid, + "table_styles": _format_table_styles(self.table_styles or []), + "caption": self.caption, + } # for sparsifying a MultiIndex idx_lengths = _get_level_lengths(self.index) @@ -468,6 +472,7 @@ def _translate(self): ) head.append(index_header_row) + d.update({"head": head}) body = [] for r, row_tup in enumerate(self.data.itertuples()): @@ -517,11 +522,13 @@ def _translate(self): if props: # (), [] won't be in cellstyle_map, cellstyle respectively cellstyle_map[tuple(props)].append(f"row{r}_col{c}") body.append(row_es) + d.update({"body": body}) cellstyle: List[Dict[str, Union[CSSList, List[str]]]] = [ {"props": list(props), "selectors": selectors} for props, selectors in cellstyle_map.items() ] + d.update({"cellstyle": cellstyle}) table_attr = self.table_attributes use_mathjax = get_option("display.html.use_mathjax") @@ -531,16 +538,8 @@ def _translate(self): table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ') else: table_attr += ' class="tex2jax_ignore"' + d.update({"table_attributes": table_attr}) - d = { - "head": head, - "cellstyle": cellstyle, - "body": body, - "uuid": uuid, - "table_styles": _format_table_styles(table_styles), - "caption": caption, - "table_attributes": table_attr, - } if self.tooltips: d = self.tooltips._translate(self.data, self.uuid, d)
minor changes here to decouple the render dict `d` into sections for code readability. this is also first part of other changes to improve readability of this method.
https://api.github.com/repos/pandas-dev/pandas/pulls/40736
2021-04-01T18:53:48Z
2021-04-01T21:24:52Z
2021-04-01T21:24:52Z
2021-04-02T06:58:52Z
Backport PR #40723 on branch 1.2.x (BUG: fix comparison of NaT with numpy array)
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index 26d768f830830..9cef1307278e8 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -17,6 +17,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.sum` when ``min_count`` greater than the :class:`DataFrame` shape was passed resulted in a ``ValueError`` (:issue:`39738`) - Fixed regression in :meth:`DataFrame.to_json` raising ``AttributeError`` when run on PyPy (:issue:`39837`) +- Fixed regression in (in)equality comparison of ``pd.NaT`` with a non-datetimelike numpy array returning a scalar instead of an array (:issue:`40722`) - Fixed regression in :meth:`DataFrame.where` not returning a copy in the case of an all True condition (:issue:`39595`) - Fixed regression in :meth:`DataFrame.replace` raising ``IndexError`` when ``regex`` was a multi-key dictionary (:issue:`39338`) - diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 3a61de62daf39..51863f0749790 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -124,6 +124,10 @@ cdef class _NaT(datetime): result.fill(_nat_scalar_rules[op]) elif other.dtype.kind == "O": result = np.array([PyObject_RichCompare(self, x, op) for x in other]) + elif op == Py_EQ: + result = np.zeros(other.shape, dtype=bool) + elif op == Py_NE: + result = np.ones(other.shape, dtype=bool) else: return NotImplemented return result diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 20de0effc30e1..84b9c51b7d387 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -575,6 +575,47 @@ def test_nat_comparisons_invalid(other, op): op(other, NaT) +@pytest.mark.parametrize( + "other", + [ + np.array(["foo"] * 2, dtype=object), + np.array([2, 3], dtype="int64"), + np.array([2.0, 3.5], dtype="float64"), + ], + ids=["str", "int", "float"], +) +def test_nat_comparisons_invalid_ndarray(other): + # GH#40722 + 
expected = np.array([False, False]) + result = NaT == other + tm.assert_numpy_array_equal(result, expected) + result = other == NaT + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([True, True]) + result = NaT != other + tm.assert_numpy_array_equal(result, expected) + result = other != NaT + tm.assert_numpy_array_equal(result, expected) + + for symbol, op in [ + ("<=", operator.le), + ("<", operator.lt), + (">=", operator.ge), + (">", operator.gt), + ]: + msg = f"'{symbol}' not supported between" + + with pytest.raises(TypeError, match=msg): + op(NaT, other) + + if other.dtype == np.dtype("object"): + # uses the reverse operator, so symbol changes + msg = None + with pytest.raises(TypeError, match=msg): + op(other, NaT) + + def test_compare_date(): # GH#39151 comparing NaT with date object is deprecated # See also: tests.scalar.timestamps.test_comparisons::test_compare_date
Backport PR #40723: BUG: fix comparison of NaT with numpy array
https://api.github.com/repos/pandas-dev/pandas/pulls/40734
2021-04-01T18:18:26Z
2021-04-06T13:30:24Z
2021-04-06T13:30:24Z
2021-04-06T13:30:24Z
REF: implement groupby.ops.WrappedCythonFunc
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 4c086f3b8612e..bcf2b6be15953 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -97,54 +97,172 @@ get_indexer_dict, ) -_CYTHON_FUNCTIONS = { - "aggregate": { - "add": "group_add", - "prod": "group_prod", - "min": "group_min", - "max": "group_max", - "mean": "group_mean", - "median": "group_median", - "var": "group_var", - "first": "group_nth", - "last": "group_last", - "ohlc": "group_ohlc", - }, - "transform": { - "cumprod": "group_cumprod", - "cumsum": "group_cumsum", - "cummin": "group_cummin", - "cummax": "group_cummax", - "rank": "group_rank", - }, -} - - -@functools.lru_cache(maxsize=None) -def _get_cython_function(kind: str, how: str, dtype: np.dtype, is_numeric: bool): - - dtype_str = dtype.name - ftype = _CYTHON_FUNCTIONS[kind][how] - - # see if there is a fused-type version of function - # only valid for numeric - f = getattr(libgroupby, ftype, None) - if f is not None: - if is_numeric: - return f - elif dtype == object: - if "object" not in f.__signatures__: - # raise NotImplementedError here rather than TypeError later + +class WrappedCythonOp: + """ + Dispatch logic for functions defined in _libs.groupby + """ + + def __init__(self, kind: str, how: str): + self.kind = kind + self.how = how + + _CYTHON_FUNCTIONS = { + "aggregate": { + "add": "group_add", + "prod": "group_prod", + "min": "group_min", + "max": "group_max", + "mean": "group_mean", + "median": "group_median", + "var": "group_var", + "first": "group_nth", + "last": "group_last", + "ohlc": "group_ohlc", + }, + "transform": { + "cumprod": "group_cumprod", + "cumsum": "group_cumsum", + "cummin": "group_cummin", + "cummax": "group_cummax", + "rank": "group_rank", + }, + } + + _cython_arity = {"ohlc": 4} # OHLC + + # Note: we make this a classmethod and pass kind+how so that caching + # works at the class level and not the instance level + @classmethod + @functools.lru_cache(maxsize=None) + def 
_get_cython_function( + cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool + ): + + dtype_str = dtype.name + ftype = cls._CYTHON_FUNCTIONS[kind][how] + + # see if there is a fused-type version of function + # only valid for numeric + f = getattr(libgroupby, ftype, None) + if f is not None: + if is_numeric: + return f + elif dtype == object: + if "object" not in f.__signatures__: + # raise NotImplementedError here rather than TypeError later + raise NotImplementedError( + f"function is not implemented for this dtype: " + f"[how->{how},dtype->{dtype_str}]" + ) + return f + + raise NotImplementedError( + f"function is not implemented for this dtype: " + f"[how->{how},dtype->{dtype_str}]" + ) + + def get_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool): + """ + Find the appropriate cython function, casting if necessary. + + Parameters + ---------- + values : np.ndarray + is_numeric : bool + + Returns + ------- + func : callable + values : np.ndarray + """ + how = self.how + kind = self.kind + + if how in ["median", "cumprod"]: + # these two only have float64 implementations + if is_numeric: + values = ensure_float64(values) + else: raise NotImplementedError( f"function is not implemented for this dtype: " - f"[how->{how},dtype->{dtype_str}]" + f"[how->{how},dtype->{values.dtype.name}]" ) - return f + func = getattr(libgroupby, f"group_{how}_float64") + return func, values - raise NotImplementedError( - f"function is not implemented for this dtype: " - f"[how->{how},dtype->{dtype_str}]" - ) + func = self._get_cython_function(kind, how, values.dtype, is_numeric) + + if values.dtype.kind in ["i", "u"]: + if how in ["add", "var", "prod", "mean", "ohlc"]: + # result may still include NaN, so we have to cast + values = ensure_float64(values) + + return func, values + + def disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False): + """ + Check if we can do this operation with our cython functions. 
+ + Raises + ------ + NotImplementedError + This is either not a valid function for this dtype, or + valid but not implemented in cython. + """ + how = self.how + + if is_numeric: + # never an invalid op for those dtypes, so return early as fastpath + return + + if is_categorical_dtype(dtype) or is_sparse(dtype): + # categoricals are only 1d, so we + # are not setup for dim transforming + raise NotImplementedError(f"{dtype} dtype not supported") + elif is_datetime64_any_dtype(dtype): + # we raise NotImplemented if this is an invalid operation + # entirely, e.g. adding datetimes + if how in ["add", "prod", "cumsum", "cumprod"]: + raise NotImplementedError( + f"datetime64 type does not support {how} operations" + ) + elif is_timedelta64_dtype(dtype): + if how in ["prod", "cumprod"]: + raise NotImplementedError( + f"timedelta64 type does not support {how} operations" + ) + + def get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape: + how = self.how + kind = self.kind + + arity = self._cython_arity.get(how, 1) + + out_shape: Shape + if how == "ohlc": + out_shape = (ngroups, 4) + elif arity > 1: + raise NotImplementedError( + "arity of more than 1 is not supported for the 'how' argument" + ) + elif kind == "transform": + out_shape = values.shape + else: + out_shape = (ngroups,) + values.shape[1:] + return out_shape + + def get_out_dtype(self, dtype: np.dtype) -> np.dtype: + how = self.how + + if how == "rank": + out_dtype = "float64" + else: + if is_numeric_dtype(dtype): + out_dtype = f"{dtype.kind}{dtype.itemsize}" + else: + out_dtype = "object" + return np.dtype(out_dtype) class BaseGrouper: @@ -437,8 +555,6 @@ def get_group_levels(self) -> List[Index]: # ------------------------------------------------------------ # Aggregation functions - _cython_arity = {"ohlc": 4} # OHLC - @final def _is_builtin_func(self, arg): """ @@ -447,80 +563,6 @@ def _is_builtin_func(self, arg): """ return SelectionMixin._builtin_table.get(arg, arg) - @final - def 
_get_cython_func_and_vals( - self, kind: str, how: str, values: np.ndarray, is_numeric: bool - ): - """ - Find the appropriate cython function, casting if necessary. - - Parameters - ---------- - kind : str - how : str - values : np.ndarray - is_numeric : bool - - Returns - ------- - func : callable - values : np.ndarray - """ - if how in ["median", "cumprod"]: - # these two only have float64 implementations - if is_numeric: - values = ensure_float64(values) - else: - raise NotImplementedError( - f"function is not implemented for this dtype: " - f"[how->{how},dtype->{values.dtype.name}]" - ) - func = getattr(libgroupby, f"group_{how}_float64") - return func, values - - func = _get_cython_function(kind, how, values.dtype, is_numeric) - - if values.dtype.kind in ["i", "u"]: - if how in ["add", "var", "prod", "mean", "ohlc"]: - # result may still include NaN, so we have to cast - values = ensure_float64(values) - - return func, values - - @final - def _disallow_invalid_ops( - self, dtype: DtypeObj, how: str, is_numeric: bool = False - ): - """ - Check if we can do this operation with our cython functions. - - Raises - ------ - NotImplementedError - This is either not a valid function for this dtype, or - valid but not implemented in cython. - """ - if is_numeric: - # never an invalid op for those dtypes, so return early as fastpath - return - - if is_categorical_dtype(dtype) or is_sparse(dtype): - # categoricals are only 1d, so we - # are not setup for dim transforming - raise NotImplementedError(f"{dtype} dtype not supported") - elif is_datetime64_any_dtype(dtype): - # we raise NotImplemented if this is an invalid operation - # entirely, e.g. 
adding datetimes - if how in ["add", "prod", "cumsum", "cumprod"]: - raise NotImplementedError( - f"datetime64 type does not support {how} operations" - ) - elif is_timedelta64_dtype(dtype): - if how in ["prod", "cumprod"]: - raise NotImplementedError( - f"timedelta64 type does not support {how} operations" - ) - @final def _ea_wrap_cython_operation( self, kind: str, values, how: str, axis: int, min_count: int = -1, **kwargs @@ -593,9 +635,11 @@ def _cython_operation( dtype = values.dtype is_numeric = is_numeric_dtype(dtype) + cy_op = WrappedCythonOp(kind=kind, how=how) + # can we do this operation with our cython functions # if not raise NotImplementedError - self._disallow_invalid_ops(dtype, how, is_numeric) + cy_op.disallow_invalid_ops(dtype, is_numeric) if is_extension_array_dtype(dtype): return self._ea_wrap_cython_operation( @@ -637,43 +681,23 @@ def _cython_operation( if not is_complex_dtype(dtype): values = ensure_float64(values) - arity = self._cython_arity.get(how, 1) ngroups = self.ngroups + comp_ids, _, _ = self.group_info assert axis == 1 values = values.T - if how == "ohlc": - out_shape = (ngroups, 4) - elif arity > 1: - raise NotImplementedError( - "arity of more than 1 is not supported for the 'how' argument" - ) - elif kind == "transform": - out_shape = values.shape - else: - out_shape = (ngroups,) + values.shape[1:] - - func, values = self._get_cython_func_and_vals(kind, how, values, is_numeric) - - if how == "rank": - out_dtype = "float" - else: - if is_numeric: - out_dtype = f"{values.dtype.kind}{values.dtype.itemsize}" - else: - out_dtype = "object" - codes, _, _ = self.group_info + out_shape = cy_op.get_output_shape(ngroups, values) + func, values = cy_op.get_cython_func_and_vals(values, is_numeric) + out_dtype = cy_op.get_out_dtype(values.dtype) result = maybe_fill(np.empty(out_shape, dtype=out_dtype)) if kind == "aggregate": - counts = np.zeros(self.ngroups, dtype=np.int64) - result = self._aggregate(result, counts, values, codes, func, 
min_count) + counts = np.zeros(ngroups, dtype=np.int64) + func(result, counts, values, comp_ids, min_count) elif kind == "transform": # TODO: min_count - result = self._transform( - result, values, codes, func, is_datetimelike, **kwargs - ) + func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs) if is_integer_dtype(result.dtype) and not is_datetimelike: mask = result == iNaT @@ -697,28 +721,6 @@ def _cython_operation( return op_result - @final - def _aggregate( - self, result, counts, values, comp_ids, agg_func, min_count: int = -1 - ): - if agg_func is libgroupby.group_nth: - # different signature from the others - agg_func(result, counts, values, comp_ids, min_count, rank=1) - else: - agg_func(result, counts, values, comp_ids, min_count) - - return result - - @final - def _transform( - self, result, values, comp_ids, transform_func, is_datetimelike: bool, **kwargs - ): - - _, _, ngroups = self.group_info - transform_func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs) - - return result - def agg_series(self, obj: Series, func: F): # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0
I think this is much closer to "the correct" abstraction here (I find the layered abstractions with similar names in core.groupby hard to follow). Contains approximately zero groupby logic (takes ngroups as an argument in some methods) and is only about how to wrap/call/unwrap the libgroupby functions. There are a few more parts of _cython_operation I'd like to move, but I'm holding off on that pending some non-refactor work that touches those pieces. No specific plans, but I'm hopeful that something like this can be re-used to de-duplicate logic in nanops and maybe algorithms. This uses classmethods/staticmethods (I like the explicit statelessness) and passes how+kind as arguments, but an alternative would be to instantiate with how+kind passed to `__init__`. The only potentially-logical change is that when defining `out_dtype`, this returns "float64" instead of "float". I haven't checked if that makes a difference on 32-bit builds, but after weeks of dealing with np.intp I much prefer the explicitness.
https://api.github.com/repos/pandas-dev/pandas/pulls/40733
2021-04-01T16:43:21Z
2021-04-02T01:43:12Z
2021-04-02T01:43:12Z
2021-04-02T18:24:33Z
ENH: `Styler.to_latex` conversion from CSS
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index dd95f9088e3da..d61d6f9be1b98 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -136,7 +136,7 @@ which has been revised and improved (:issue:`39720`, :issue:`39317`, :issue:`404 - Many features of the :class:`.Styler` class are now either partially or fully usable on a DataFrame with a non-unique indexes or columns (:issue:`41143`) - One has greater control of the display through separate sparsification of the index or columns using the :ref:`new styler options <options.available>`, which are also usable via :func:`option_context` (:issue:`41142`) - Added the option ``styler.render.max_elements`` to avoid browser overload when styling large DataFrames (:issue:`40712`) - - Added the method :meth:`.Styler.to_latex` (:issue:`21673`) + - Added the method :meth:`.Styler.to_latex` (:issue:`21673`), which also allows some limited CSS conversion (:issue:`40731`) - Added the method :meth:`.Styler.to_html` (:issue:`13379`) .. _whatsnew_130.enhancements.dataframe_honors_copy_with_dict: diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 93c3843b36846..8d5e0a0022cd9 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -426,6 +426,7 @@ def to_latex( multicol_align: str = "r", siunitx: bool = False, encoding: str | None = None, + convert_css: bool = False, ): r""" Write Styler to a file, buffer or string in LaTeX format. @@ -482,6 +483,10 @@ def to_latex( Set to ``True`` to structure LaTeX compatible with the {siunitx} package. encoding : str, default "utf-8" Character encoding setting. + convert_css : bool, default False + Convert simple cell-styles from CSS to LaTeX format. Any CSS not found in + conversion table is dropped. A style can be forced by adding option + `--latex`. See notes. 
Returns ------- @@ -661,6 +666,45 @@ def to_latex( & ix2 & \$3 & 4.400 & CATS \\ L1 & ix3 & \$2 & 6.600 & COWS \\ \end{tabular} + + **CSS Conversion** + + This method can convert a Styler constructured with HTML-CSS to LaTeX using + the following limited conversions. + + ================== ==================== ============= ========================== + CSS Attribute CSS value LaTeX Command LaTeX Options + ================== ==================== ============= ========================== + font-weight | bold | bfseries + | bolder | bfseries + font-style | italic | itshape + | oblique | slshape + background-color | red cellcolor | {red}--lwrap + | #fe01ea | [HTML]{FE01EA}--lwrap + | #f0e | [HTML]{FF00EE}--lwrap + | rgb(128,255,0) | [rgb]{0.5,1,0}--lwrap + | rgba(128,0,0,0.5) | [rgb]{0.5,0,0}--lwrap + | rgb(25%,255,50%) | [rgb]{0.25,1,0.5}--lwrap + color | red color | {red} + | #fe01ea | [HTML]{FE01EA} + | #f0e | [HTML]{FF00EE} + | rgb(128,255,0) | [rgb]{0.5,1,0} + | rgba(128,0,0,0.5) | [rgb]{0.5,0,0} + | rgb(25%,255,50%) | [rgb]{0.25,1,0.5} + ================== ==================== ============= ========================== + + It is also possible to add user-defined LaTeX only styles to a HTML-CSS Styler + using the ``--latex`` flag, and to add LaTeX parsing options that the + converter will detect within a CSS-comment. + + >>> df = pd.DataFrame([[1]]) + >>> df.style.set_properties( + ... **{"font-weight": "bold /* --dwrap */", "Huge": "--latex--rwrap"} + ... 
).to_latex(css_convert=True) + \begin{tabular}{lr} + {} & {0} \\ + 0 & {\bfseries}{\Huge{1}} \\ + \end{tabular} """ table_selectors = ( [style["selector"] for style in self.table_styles] @@ -740,6 +784,7 @@ def to_latex( sparse_columns=sparse_columns, multirow_align=multirow_align, multicol_align=multicol_align, + convert_css=convert_css, ) return save_to_buffer(latex, buf=buf, encoding=encoding) diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index 7686d8a340c37..5aeeb40879d07 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -2,6 +2,7 @@ from collections import defaultdict from functools import partial +import re from typing import ( Any, Callable, @@ -1253,7 +1254,9 @@ def _parse_latex_table_styles(table_styles: CSSStyles, selector: str) -> str | N return None -def _parse_latex_cell_styles(latex_styles: CSSList, display_value: str) -> str: +def _parse_latex_cell_styles( + latex_styles: CSSList, display_value: str, convert_css: bool = False +) -> str: r""" Mutate the ``display_value`` string including LaTeX commands from ``latex_styles``. @@ -1279,6 +1282,8 @@ def _parse_latex_cell_styles(latex_styles: CSSList, display_value: str) -> str: For example for styles: `[('c1', 'o1--wrap'), ('c2', 'o2')]` this returns: `{\c1o1 \c2o2{display_value}} """ + if convert_css: + latex_styles = _parse_latex_css_conversion(latex_styles) for (command, options) in latex_styles[::-1]: # in reverse for most recent style formatter = { "--wrap": f"{{\\{command}--to_parse {display_value}}}", @@ -1351,6 +1356,82 @@ def _parse_latex_options_strip(value: str | int | float, arg: str) -> str: return str(value).replace(arg, "").replace("/*", "").replace("*/", "").strip() +def _parse_latex_css_conversion(styles: CSSList) -> CSSList: + """ + Convert CSS (attribute,value) pairs to equivalent LaTeX (command,options) pairs. + + Ignore conversion if tagged with `--latex` option, skipped if no conversion found. 
+ """ + + def font_weight(value, arg): + if value == "bold" or value == "bolder": + return "bfseries", f"{arg}" + return None + + def font_style(value, arg): + if value == "italic": + return "itshape", f"{arg}" + elif value == "oblique": + return "slshape", f"{arg}" + return None + + def color(value, user_arg, command, comm_arg): + """ + CSS colors have 5 formats to process: + + - 6 digit hex code: "#ff23ee" --> [HTML]{FF23EE} + - 3 digit hex code: "#f0e" --> [HTML]{FF00EE} + - rgba: rgba(128, 255, 0, 0.5) --> [rgb]{0.502, 1.000, 0.000} + - rgb: rgb(128, 255, 0,) --> [rbg]{0.502, 1.000, 0.000} + - string: red --> {red} + + Additionally rgb or rgba can be expressed in % which is also parsed. + """ + arg = user_arg if user_arg != "" else comm_arg + + if value[0] == "#" and len(value) == 7: # color is hex code + return command, f"[HTML]{{{value[1:].upper()}}}{arg}" + if value[0] == "#" and len(value) == 4: # color is short hex code + val = f"{value[1].upper()*2}{value[2].upper()*2}{value[3].upper()*2}" + return command, f"[HTML]{{{val}}}{arg}" + elif value[:3] == "rgb": # color is rgb or rgba + r = re.findall("(?<=\\()[0-9\\s%]+(?=,)", value)[0].strip() + r = float(r[:-1]) / 100 if "%" in r else int(r) / 255 + g = re.findall("(?<=,)[0-9\\s%]+(?=,)", value)[0].strip() + g = float(g[:-1]) / 100 if "%" in g else int(g) / 255 + if value[3] == "a": # color is rgba + b = re.findall("(?<=,)[0-9\\s%]+(?=,)", value)[1].strip() + else: # color is rgb + b = re.findall("(?<=,)[0-9\\s%]+(?=\\))", value)[0].strip() + b = float(b[:-1]) / 100 if "%" in b else int(b) / 255 + return command, f"[rgb]{{{r:.3f}, {g:.3f}, {b:.3f}}}{arg}" + else: + return command, f"{{{value}}}{arg}" # color is likely string-named + + CONVERTED_ATTRIBUTES: dict[str, Callable] = { + "font-weight": font_weight, + "background-color": partial(color, command="cellcolor", comm_arg="--lwrap"), + "color": partial(color, command="color", comm_arg=""), + "font-style": font_style, + } + + latex_styles: CSSList = [] + 
for (attribute, value) in styles: + if isinstance(value, str) and "--latex" in value: + # return the style without conversion but drop '--latex' + latex_styles.append((attribute, value.replace("--latex", ""))) + if attribute in CONVERTED_ATTRIBUTES.keys(): + arg = "" + for x in ["--wrap", "--nowrap", "--lwrap", "--dwrap", "--rwrap"]: + if x in str(value): + arg, value = x, _parse_latex_options_strip(value, x) + break + latex_style = CONVERTED_ATTRIBUTES[attribute](value, arg) + if latex_style is not None: + latex_styles.extend([latex_style]) + return latex_styles + + def _escape_latex(s): r""" Replace the characters ``&``, ``%``, ``$``, ``#``, ``_``, ``{``, ``}``, diff --git a/pandas/io/formats/templates/latex.tpl b/pandas/io/formats/templates/latex.tpl index 66fe99642850f..fe081676d87af 100644 --- a/pandas/io/formats/templates/latex.tpl +++ b/pandas/io/formats/templates/latex.tpl @@ -39,7 +39,7 @@ {% endif %} {% for row in body %} {% for c in row %}{% if not loop.first %} & {% endif %} - {%- if c.type == 'th' %}{{parse_header(c, multirow_align, multicol_align)}}{% else %}{{parse_cell(c.cellstyle, c.display_value)}}{% endif %} + {%- if c.type == 'th' %}{{parse_header(c, multirow_align, multicol_align)}}{% else %}{{parse_cell(c.cellstyle, c.display_value, convert_css)}}{% endif %} {%- endfor %} \\ {% endfor %} {% set bottomrule = parse_table(table_styles, 'bottomrule') %} diff --git a/pandas/tests/io/formats/style/test_to_latex.py b/pandas/tests/io/formats/style/test_to_latex.py index 97347bddaa187..91ac652e1f652 100644 --- a/pandas/tests/io/formats/style/test_to_latex.py +++ b/pandas/tests/io/formats/style/test_to_latex.py @@ -12,6 +12,7 @@ from pandas.io.formats.style import Styler from pandas.io.formats.style_render import ( _parse_latex_cell_styles, + _parse_latex_css_conversion, _parse_latex_header_span, _parse_latex_table_styles, _parse_latex_table_wrapping, @@ -443,3 +444,48 @@ def test_parse_latex_table_wrapping(styler): def test_short_caption(styler): 
result = styler.to_latex(caption=("full cap", "short cap")) assert "\\caption[short cap]{full cap}" in result + + +@pytest.mark.parametrize( + "css, expected", + [ + ([("color", "red")], [("color", "{red}")]), # test color and input format types + ( + [("color", "rgb(128, 128, 128 )")], + [("color", "[rgb]{0.502, 0.502, 0.502}")], + ), + ( + [("color", "rgb(128, 50%, 25% )")], + [("color", "[rgb]{0.502, 0.500, 0.250}")], + ), + ( + [("color", "rgba(128,128,128,1)")], + [("color", "[rgb]{0.502, 0.502, 0.502}")], + ), + ([("color", "#FF00FF")], [("color", "[HTML]{FF00FF}")]), + ([("color", "#F0F")], [("color", "[HTML]{FF00FF}")]), + ([("font-weight", "bold")], [("bfseries", "")]), # test font-weight and types + ([("font-weight", "bolder")], [("bfseries", "")]), + ([("font-weight", "normal")], []), + ([("background-color", "red")], [("cellcolor", "{red}--lwrap")]), + ( + [("background-color", "#FF00FF")], # test background-color command and wrap + [("cellcolor", "[HTML]{FF00FF}--lwrap")], + ), + ([("font-style", "italic")], [("itshape", "")]), # test font-style and types + ([("font-style", "oblique")], [("slshape", "")]), + ([("font-style", "normal")], []), + ([("color", "red /*--dwrap*/")], [("color", "{red}--dwrap")]), # css comments + ([("background-color", "red /* --dwrap */")], [("cellcolor", "{red}--dwrap")]), + ], +) +def test_parse_latex_css_conversion(css, expected): + result = _parse_latex_css_conversion(css) + assert result == expected + + +def test_parse_latex_css_conversion_option(): + css = [("command", "option--latex--wrap")] + expected = [("command", "option--wrap")] + result = _parse_latex_css_conversion(css) + assert result == expected
This is an extension PR to #40422 (most of this code is included there), and must be committed after it. This PR only adds the `_parse_latex_css_conversion` function and associated `convert_css` argument. Below is the relevant section added to the user guide: ![Screen Shot 2021-04-01 at 16 37 12](https://user-images.githubusercontent.com/24256554/113310637-bf186300-9308-11eb-8f9a-2ea5e2c67bb0.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/40731
2021-04-01T14:39:30Z
2021-06-16T00:20:10Z
2021-06-16T00:20:10Z
2021-06-16T13:25:12Z
REF: only fallback to masked op for object dtype
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 8205534c9d48b..957a493925405 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -104,11 +104,20 @@ def _evaluate_numexpr(op, op_str, a, b): a_value = a b_value = b - result = ne.evaluate( - f"a_value {op_str} b_value", - local_dict={"a_value": a_value, "b_value": b_value}, - casting="safe", - ) + try: + result = ne.evaluate( + f"a_value {op_str} b_value", + local_dict={"a_value": a_value, "b_value": b_value}, + casting="safe", + ) + except TypeError: + # numexpr raises eg for array ** array with integers + # (https://github.com/pydata/numexpr/issues/379) + pass + + if is_reversed: + # reverse order to original for fallback + a, b = b, a if _TEST_MODE: _store_test_result(result is not None) diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 1d7c16de0c05d..5c26377f44c2b 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -158,12 +158,14 @@ def _na_arithmetic_op(left, right, op, is_cmp: bool = False): try: result = expressions.evaluate(op, left, right) except TypeError: - if is_cmp: - # numexpr failed on comparison op, e.g. ndarray[float] > datetime - # In this case we do not fall back to the masked op, as that - # will handle complex numbers incorrectly, see GH#32047 + if is_object_dtype(left) or is_object_dtype(right) and not is_cmp: + # For object dtype, fallback to a masked operation (only operating + # on the non-missing values) + # Don't do this for comparisons, as that will handle complex numbers + # incorrectly, see GH#32047 + result = _masked_arith_op(left, right, op) + else: raise - result = _masked_arith_op(left, right, op) if is_cmp and (is_scalar(result) or result is NotImplemented): # numpy returned a scalar instead of operating element-wise
While investigating https://github.com/pandas-dev/pandas/pull/40727, I noticed that we often try `_masked_arith_op` even though it will end up giving the exact same error (for example when using ints, which don't have NaNs). From what I understand, I *think* that, in theory, we only need to fall back to a masked version of the op when we have object dtype (I might be wrong here, though). So this PR tries that. For numeric dtypes, all operations should simply work with NaNs? For object dtype, we can have NaN or None, which doesn't necessarily work out of the box, so here we need to fall back to a masked operation. ExtensionArrays don't end up here, and datetimelike dtypes get wrapped into EAs before getting here.
https://api.github.com/repos/pandas-dev/pandas/pulls/40728
2021-04-01T12:49:22Z
2021-04-02T15:09:05Z
2021-04-02T15:09:05Z
2021-04-02T15:12:16Z
PERF: numexpr doesn't support floordiv, so don't try
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 8205534c9d48b..43b6a6fb64314 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -128,8 +128,9 @@ def _evaluate_numexpr(op, op_str, a, b): roperator.rsub: "-", operator.truediv: "/", roperator.rtruediv: "/", - operator.floordiv: "//", - roperator.rfloordiv: "//", + # floordiv not supported by numexpr 2.x + operator.floordiv: None, + roperator.rfloordiv: None, # we require Python semantics for mod of negative for backwards compatibility # see https://github.com/pydata/numexpr/issues/365 # so sticking with unaccelerated for now
See overview of supported operators: https://numexpr.readthedocs.io/projects/NumExpr3/en/latest/user_guide.html#supported-operators. By trying to use it, we were falling back to the (slower) `_masked_arith_op` Discovered by investigating the arithmetic benchmarks at https://github.com/pandas-dev/pandas/issues/39146#issuecomment-799482699 For example using the `arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function floordiv>)` benchmark: ```python import operator dtype = np.float64 arr = np.random.randn(20000, 100) df = pd.DataFrame(arr.astype(dtype)) scalar = 3.0 op = operator.floordiv ``` ``` In [2]: %timeit op(df, scalar) 56.4 ms ± 723 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) <-- master 24.3 ms ± 236 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) <-- PR ```
https://api.github.com/repos/pandas-dev/pandas/pulls/40727
2021-04-01T12:15:39Z
2021-04-01T22:28:08Z
2021-04-01T22:28:08Z
2021-04-02T06:29:24Z
BUG: Do not allow boolean values for indexing with .loc without a boolean index.
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 5e95cd6e5ee10..21e6f0ea57451 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -674,6 +674,7 @@ Indexing - Bug in setting ``numpy.timedelta64`` values into an object-dtype :class:`Series` using a boolean indexer (:issue:`39488`) - Bug in setting numeric values into a into a boolean-dtypes :class:`Series` using ``at`` or ``iat`` failing to cast to object-dtype (:issue:`39582`) - Bug in :meth:`DataFrame.loc.__setitem__` when setting-with-expansion incorrectly raising when the index in the expanding axis contains duplicates (:issue:`40096`) +- Bug in :meth:`DataFrame.loc` incorrectly matching non-boolean index elements (:issue:`20432`) - Bug in :meth:`Series.__delitem__` with ``ExtensionDtype`` incorrectly casting to ``ndarray`` (:issue:`40386`) Missing diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 3d8d189046d8a..59550927299fe 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1386,7 +1386,7 @@ def is_bool_dtype(arr_or_dtype) -> bool: # we don't have a boolean Index class # so its object, we need to infer to # guess this - return arr_or_dtype.is_object and arr_or_dtype.inferred_type == "boolean" + return arr_or_dtype.is_object() and arr_or_dtype.inferred_type == "boolean" elif is_extension_array_dtype(arr_or_dtype): return getattr(dtype, "_is_boolean", False) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 4c8a6a200b196..143f7aadc1594 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -961,12 +961,19 @@ class _LocIndexer(_LocationIndexer): @doc(_LocationIndexer._validate_key) def _validate_key(self, key, axis: int): - # valid for a collection of labels (we check their presence later) # slice of labels (where start-end in labels) # slice of integers (only if in the labels) - # boolean - pass + # boolean not in slice and with boolean index + if 
isinstance(key, bool) and not is_bool_dtype(self.obj.index): + raise KeyError( + f"{key}: boolean label can not be used without a boolean index" + ) + + if isinstance(key, slice) and ( + isinstance(key.start, bool) or isinstance(key.stop, bool) + ): + raise TypeError(f"{key}: boolean values can not be used in a slice") def _has_valid_setitem_indexer(self, indexer) -> bool: return True diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 928b42b915b18..e2121fa2318eb 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -944,20 +944,17 @@ def test_getitem_ix_boolean_duplicates_multiple(self): exp = df[df[0] > 0] tm.assert_frame_equal(result, exp) - def test_getitem_setitem_ix_bool_keyerror(self): + @pytest.mark.parametrize("bool_value", [True, False]) + def test_getitem_setitem_ix_bool_keyerror(self, bool_value): # #2199 df = DataFrame({"a": [1, 2, 3]}) - - with pytest.raises(KeyError, match=r"^False$"): - df.loc[False] - with pytest.raises(KeyError, match=r"^True$"): - df.loc[True] + message = f"{bool_value}: boolean label can not be used without a boolean index" + with pytest.raises(KeyError, match=message): + df.loc[bool_value] msg = "cannot use a single bool to index into setitem" with pytest.raises(KeyError, match=msg): - df.loc[False] = 0 - with pytest.raises(KeyError, match=msg): - df.loc[True] = 0 + df.loc[bool_value] = 0 # TODO: rename? remove? 
def test_single_element_ix_dont_upcast(self, float_frame): diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 734cf13289c1f..eac46fb64b65e 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -2116,6 +2116,35 @@ def test_loc_getitem_slice_columns_mixed_dtype(self): tm.assert_frame_equal(df.loc[:, 1:], expected) +class TestLocBooleanLabelsAndSlices(Base): + @pytest.mark.parametrize("bool_value", [True, False]) + def test_loc_bool_incompatible_index_raises( + self, index, frame_or_series, bool_value + ): + # GH20432 + message = f"{bool_value}: boolean label can not be used without a boolean index" + if index.inferred_type != "boolean": + obj = frame_or_series(index=index, dtype="object") + with pytest.raises(KeyError, match=message): + obj.loc[bool_value] + + @pytest.mark.parametrize("bool_value", [True, False]) + def test_loc_bool_should_not_raise(self, frame_or_series, bool_value): + obj = frame_or_series( + index=Index([True, False], dtype="boolean"), dtype="object" + ) + obj.loc[bool_value] + + def test_loc_bool_slice_raises(self, index, frame_or_series): + # GH20432 + message = ( + r"slice\(True, False, None\): boolean values can not be used in a slice" + ) + obj = frame_or_series(index=index, dtype="object") + with pytest.raises(TypeError, match=message): + obj.loc[True:False] + + class TestLocBooleanMask: def test_loc_setitem_bool_mask_timedeltaindex(self): # GH#14946
- [x] closes #20432 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40726
2021-04-01T12:11:41Z
2021-04-06T19:18:52Z
2021-04-06T19:18:52Z
2021-04-06T19:25:50Z
BUG: [ArrowStringArray] Recognize ArrowStringArray in infer_dtype
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 4b423175172d2..e816bd4cd4026 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1111,6 +1111,7 @@ _TYPE_MAP = { "complex128": "complex", "c": "complex", "string": "string", + str: "string", "S": "bytes", "U": "string", "bool": "boolean", diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index b3c6015475674..907991b97ead1 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1267,9 +1267,9 @@ def test_interval(self): @pytest.mark.parametrize("klass", [pd.array, Series]) @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.parametrize("data", [["a", "b", "c"], ["a", "b", pd.NA]]) - def test_string_dtype(self, data, skipna, klass): + def test_string_dtype(self, data, skipna, klass, nullable_string_dtype): # StringArray - val = klass(data, dtype="string") + val = klass(data, dtype=nullable_string_dtype) inferred = lib.infer_dtype(val, skipna=skipna) assert inferred == "string"
This PR is a draft. It is a follow-up to #40679 (https://github.com/pandas-dev/pandas/pull/40679#issuecomment-809642594), which is not yet merged, and I need to make a window to run benchmarks (https://github.com/pandas-dev/pandas/pull/40679#pullrequestreview-623553887). On master: ``` >>> pd.__version__ '1.3.0.dev0+1180.gdd697e1cca' >>> >>> from pandas.core.arrays.string_arrow import ArrowStringDtype >>> >>> arr = ArrowStringDtype.construct_array_type()._from_sequence(["B", pd.NA, "A"]) >>> >>> arr <ArrowStringArray> ['B', <NA>, 'A'] Length: 3, dtype: arrow_string >>> >>> pd._libs.lib.infer_dtype(arr) 'unknown-array' >>> ```
https://api.github.com/repos/pandas-dev/pandas/pulls/40725
2021-04-01T10:39:32Z
2021-04-09T01:41:44Z
2021-04-09T01:41:44Z
2021-04-09T08:59:41Z
BUG: fix comparison of NaT with numpy array
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst index 26d768f830830..9cef1307278e8 100644 --- a/doc/source/whatsnew/v1.2.4.rst +++ b/doc/source/whatsnew/v1.2.4.rst @@ -17,6 +17,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.sum` when ``min_count`` greater than the :class:`DataFrame` shape was passed resulted in a ``ValueError`` (:issue:`39738`) - Fixed regression in :meth:`DataFrame.to_json` raising ``AttributeError`` when run on PyPy (:issue:`39837`) +- Fixed regression in (in)equality comparison of ``pd.NaT`` with a non-datetimelike numpy array returning a scalar instead of an array (:issue:`40722`) - Fixed regression in :meth:`DataFrame.where` not returning a copy in the case of an all True condition (:issue:`39595`) - Fixed regression in :meth:`DataFrame.replace` raising ``IndexError`` when ``regex`` was a multi-key dictionary (:issue:`39338`) - diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index d86d3261d404e..0c598beb6ad16 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -127,6 +127,10 @@ cdef class _NaT(datetime): result.fill(_nat_scalar_rules[op]) elif other.dtype.kind == "O": result = np.array([PyObject_RichCompare(self, x, op) for x in other]) + elif op == Py_EQ: + result = np.zeros(other.shape, dtype=bool) + elif op == Py_NE: + result = np.ones(other.shape, dtype=bool) else: return NotImplemented return result diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 96aea4da9fac5..08c5ea706111a 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -590,6 +590,47 @@ def test_nat_comparisons_invalid(other_and_type, symbol_and_op): op(other, NaT) +@pytest.mark.parametrize( + "other", + [ + np.array(["foo"] * 2, dtype=object), + np.array([2, 3], dtype="int64"), + np.array([2.0, 3.5], dtype="float64"), + ], + ids=["str", "int", "float"], +) +def 
test_nat_comparisons_invalid_ndarray(other): + # GH#40722 + expected = np.array([False, False]) + result = NaT == other + tm.assert_numpy_array_equal(result, expected) + result = other == NaT + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([True, True]) + result = NaT != other + tm.assert_numpy_array_equal(result, expected) + result = other != NaT + tm.assert_numpy_array_equal(result, expected) + + for symbol, op in [ + ("<=", operator.le), + ("<", operator.lt), + (">=", operator.ge), + (">", operator.gt), + ]: + msg = f"'{symbol}' not supported between" + + with pytest.raises(TypeError, match=msg): + op(NaT, other) + + if other.dtype == np.dtype("object"): + # uses the reverse operator, so symbol changes + msg = None + with pytest.raises(TypeError, match=msg): + op(other, NaT) + + def test_compare_date(): # GH#39151 comparing NaT with date object is deprecated # See also: tests.scalar.timestamps.test_comparisons::test_compare_date
Closes #40722
https://api.github.com/repos/pandas-dev/pandas/pulls/40723
2021-04-01T09:14:42Z
2021-04-01T18:17:35Z
2021-04-01T18:17:35Z
2021-04-01T18:17:40Z
COMPAT: matplotlib 3.4.0
diff --git a/pandas/plotting/_matplotlib/compat.py b/pandas/plotting/_matplotlib/compat.py index 964596d9b6319..729d2bf1f019a 100644 --- a/pandas/plotting/_matplotlib/compat.py +++ b/pandas/plotting/_matplotlib/compat.py @@ -22,3 +22,4 @@ def inner(): mpl_ge_3_1_0 = _mpl_version("3.1.0", operator.ge) mpl_ge_3_2_0 = _mpl_version("3.2.0", operator.ge) mpl_ge_3_3_0 = _mpl_version("3.3.0", operator.ge) +mpl_ge_3_4_0 = _mpl_version("3.4.0", operator.ge) diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 500d570835493..03d73d1d36953 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -392,6 +392,11 @@ def handle_shared_axes( row_num = lambda x: x.rowNum col_num = lambda x: x.colNum + if compat.mpl_ge_3_4_0(): + is_first_col = lambda x: x.get_subplotspec().is_first_col() + else: + is_first_col = lambda x: x.is_first_col() + if nrows > 1: try: # first find out the ax layout, @@ -423,7 +428,7 @@ def handle_shared_axes( # only the first column should get y labels -> set all other to # off as we only have labels in the first column and we always # have a subplot there, we can skip the layout test - if ax.is_first_col(): + if is_first_col(ax): continue if sharey or _has_externally_shared_axis(ax, "y"): _remove_labels_from_axis(ax.yaxis) diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index bed60be169e57..e23abc1eee167 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -746,7 +746,9 @@ def test_plot_scatter_with_categorical_data(self, x, y): _check_plot_works(df.plot.scatter, x=x, y=y) - def test_plot_scatter_with_c(self): + def test_plot_scatter_with_c(self, request): + from pandas.plotting._matplotlib.compat import mpl_ge_3_4_0 + df = DataFrame( np.random.randn(6, 4), index=list(string.ascii_letters[:6]), @@ -758,9 +760,10 @@ def test_plot_scatter_with_c(self): # default to Greys 
assert ax.collections[0].cmap.name == "Greys" - # n.b. there appears to be no public method - # to get the colorbar label - assert ax.collections[0].colorbar._label == "z" + if mpl_ge_3_4_0(): + assert ax.collections[0].colorbar.ax.get_ylabel() == "z" + else: + assert ax.collections[0].colorbar._label == "z" cm = "cubehelix" ax = df.plot.scatter(x="x", y="y", c="z", colormap=cm)
- [ ] closes #40714 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40718
2021-04-01T00:58:37Z
2021-04-01T19:27:51Z
2021-04-01T19:27:50Z
2021-04-04T14:26:51Z
REF: RangeIndex dont subclass Int64Index
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index f37faa4ab844b..b0911cc0ce556 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -46,6 +46,7 @@ from pandas.core.indexes.numeric import ( Float64Index, Int64Index, + NumericIndex, ) from pandas.core.ops.common import unpack_zerodim_and_defer @@ -55,7 +56,7 @@ _empty_range = range(0) -class RangeIndex(Int64Index): +class RangeIndex(NumericIndex): """ Immutable Index implementing a monotonic integer range. @@ -97,6 +98,7 @@ class RangeIndex(Int64Index): _typ = "rangeindex" _engine_type = libindex.Int64Engine + _can_hold_na = False _range: range # -------------------------------------------------------------------- @@ -381,6 +383,10 @@ def __contains__(self, key: Any) -> bool: return False return key in self._range + @property + def inferred_type(self) -> str: + return "integer" + # -------------------------------------------------------------------- # Indexing Methods diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 3999f91a7b141..d032c54395c6d 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -56,7 +56,7 @@ def to_feather( # validate that we have only a default index # raise on anything else as we don't serialize the index - if not isinstance(df.index, Int64Index): + if not isinstance(df.index, (Int64Index, RangeIndex)): typ = type(df.index) raise ValueError( f"feather does not support serializing {typ} " diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index e5a24e9b938e2..ab2b2db7eec53 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -516,7 +516,7 @@ def test_hasnans_isnans(self, index_flat): return elif isinstance(index, DatetimeIndexOpsMixin): values[1] = iNaT - elif isinstance(index, (Int64Index, UInt64Index)): + elif isinstance(index, (Int64Index, UInt64Index, RangeIndex)): return else: values[1] = np.nan @@ -555,7 +555,7 @@ def 
test_fillna(self, index): if isinstance(index, DatetimeIndexOpsMixin): values[1] = iNaT - elif isinstance(index, (Int64Index, UInt64Index)): + elif isinstance(index, (Int64Index, UInt64Index, RangeIndex)): return else: values[1] = np.nan diff --git a/pandas/tests/indexes/test_numpy_compat.py b/pandas/tests/indexes/test_numpy_compat.py index 1ee7c5547ecf9..59c30c3abac03 100644 --- a/pandas/tests/indexes/test_numpy_compat.py +++ b/pandas/tests/indexes/test_numpy_compat.py @@ -12,6 +12,7 @@ Index, Int64Index, PeriodIndex, + RangeIndex, TimedeltaIndex, UInt64Index, ) @@ -55,7 +56,7 @@ def test_numpy_ufuncs_basic(index, func): with tm.external_error_raised((TypeError, AttributeError)): with np.errstate(all="ignore"): func(index) - elif isinstance(index, (Float64Index, Int64Index, UInt64Index)): + elif isinstance(index, (Float64Index, Int64Index, UInt64Index, RangeIndex)): # coerces to float (e.g. np.sin) with np.errstate(all="ignore"): result = func(index) @@ -104,7 +105,7 @@ def test_numpy_ufuncs_other(index, func, request): with tm.external_error_raised(TypeError): func(index) - elif isinstance(index, (Float64Index, Int64Index, UInt64Index)): + elif isinstance(index, (Float64Index, Int64Index, UInt64Index, RangeIndex)): # Results in bool array result = func(index) assert isinstance(result, np.ndarray)
This makes it much simpler if we want to put constructors in cython.
https://api.github.com/repos/pandas-dev/pandas/pulls/40717
2021-03-31T23:31:13Z
2021-04-02T01:52:49Z
2021-04-02T01:52:49Z
2021-04-02T02:20:08Z
TYP: groupby.pyi
diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi new file mode 100644 index 0000000000000..8721624e9881c --- /dev/null +++ b/pandas/_libs/groupby.pyi @@ -0,0 +1,168 @@ +from typing import Literal + +import numpy as np + +def group_median_float64( + out: np.ndarray, # ndarray[float64_t, ndim=2] + counts: np.ndarray, # ndarray[int64_t] + values: np.ndarray, # ndarray[float64_t, ndim=2] + labels: np.ndarray, # ndarray[int64_t] + min_count: int = ..., # Py_ssize_t +) -> None: ... + +def group_cumprod_float64( + out: np.ndarray, # float64_t[:, ::1] + values: np.ndarray, # const float64_t[:, :] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + skipna: bool = ..., +) -> None: ... + +def group_cumsum( + out: np.ndarray, # numeric[:, ::1] + values: np.ndarray, # ndarray[numeric, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + skipna: bool = ..., +) -> None: ... + + +def group_shift_indexer( + out: np.ndarray, # int64_t[::1] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + periods: int, +) -> None: ... + + +def group_fillna_indexer( + out: np.ndarray, # ndarray[int64_t] + labels: np.ndarray, # ndarray[int64_t] + mask: np.ndarray, # ndarray[uint8_t] + direction: Literal["ffill", "bfill"], + limit: int, # int64_t + dropna: bool, +) -> None: ... + + +def group_any_all( + out: np.ndarray, # uint8_t[::1] + values: np.ndarray, # const uint8_t[::1] + labels: np.ndarray, # const int64_t[:] + mask: np.ndarray, # const uint8_t[::1] + val_test: Literal["any", "all"], + skipna: bool, +) -> None: ... + +def group_add( + out: np.ndarray, # complexfloating_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[complexfloating_t, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ... +) -> None: ... 
+ +def group_prod( + out: np.ndarray, # floating[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floating, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ... +) -> None: ... + +def group_var( + out: np.ndarray, # floating[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floating, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ..., # Py_ssize_t + ddof: int = ..., # int64_t +) -> None: ... + +def group_mean( + out: np.ndarray, # floating[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floating, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ... +) -> None: ... + +def group_ohlc( + out: np.ndarray, # floating[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floating, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ... +) -> None: ... + +def group_quantile( + out: np.ndarray, # ndarray[float64_t] + values: np.ndarray, # ndarray[numeric, ndim=1] + labels: np.ndarray, # ndarray[int64_t] + mask: np.ndarray, # ndarray[uint8_t] + q: float, # float64_t + interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"], +) -> None: ... + +def group_last( + out: np.ndarray, # rank_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + min_count: int = ..., # Py_ssize_t +) -> None: ... + +def group_nth( + out: np.ndarray, # rank_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + min_count: int = ..., # int64_t + rank: int = ..., # int64_t +) -> None: ... 
+ +def group_rank( + out: np.ndarray, # float64_t[:, ::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + ties_method: Literal["aveage", "min", "max", "first", "dense"] = ..., + ascending: bool = ..., + pct: bool = ..., + na_option: Literal["keep", "top", "bottom"] = ..., +) -> None: ... + +def group_max( + out: np.ndarray, # groupby_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + min_count: int = ..., +) -> None: ... + +def group_min( + out: np.ndarray, # groupby_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + min_count: int = ..., +) -> None: ... + +def group_cummin( + out: np.ndarray, # groupby_t[:, ::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, +) -> None: ... + +def group_cummax( + out: np.ndarray, # groupby_t[:, ::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, +) -> None: ... diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index ed8911b6cd929..e7cd7cd898d5b 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -1239,12 +1239,12 @@ def group_min(groupby_t[:, ::1] out, @cython.boundscheck(False) @cython.wraparound(False) -def group_cummin_max(groupby_t[:, ::1] out, - ndarray[groupby_t, ndim=2] values, - const intp_t[:] labels, - int ngroups, - bint is_datetimelike, - bint compute_max): +cdef group_cummin_max(groupby_t[:, ::1] out, + ndarray[groupby_t, ndim=2] values, + const intp_t[:] labels, + int ngroups, + bint is_datetimelike, + bint compute_max): """ Cumulative minimum/maximum of columns of `values`, in row groups `labels`.
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40716
2021-03-31T23:28:13Z
2021-04-02T01:37:10Z
2021-04-02T01:37:10Z
2021-04-02T10:02:55Z
CLN: follow-up cleanups
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index b8d79d0835fb8..4b423175172d2 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2440,6 +2440,9 @@ class NoDefault(Enum): # 2) because mypy does not understand singletons no_default = "NO_DEFAULT" + def __repr__(self) -> str: + return "<no_default>" + # Note: no_default is exported to the public API in pandas.api.extensions no_default = NoDefault.no_default # Sentinel indicating the default value. diff --git a/pandas/core/base.py b/pandas/core/base.py index 18fc76fe79a5a..b0c2af89ad0c7 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1164,7 +1164,7 @@ def is_monotonic_decreasing(self) -> bool: return Index(self).is_monotonic_decreasing - def memory_usage(self, deep=False): + def _memory_usage(self, deep: bool = False) -> int: """ Memory usage of the values. diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 29c2f7cfcf00d..c3f3476618bf0 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -12,7 +12,6 @@ FrozenSet, Hashable, List, - NewType, Optional, Sequence, Set, @@ -195,9 +194,6 @@ _o_dtype = np.dtype("object") -_Identity = NewType("_Identity", object) - - def disallow_kwargs(kwargs: Dict[str, Any]): if kwargs: raise TypeError(f"Unexpected keyword arguments {repr(set(kwargs))}") @@ -4405,9 +4401,9 @@ def _get_engine_target(self) -> np.ndarray: # ndarray]", expected "ndarray") return self._values # type: ignore[return-value] - @doc(IndexOpsMixin.memory_usage) + @doc(IndexOpsMixin._memory_usage) def memory_usage(self, deep: bool = False) -> int: - result = super().memory_usage(deep=deep) + result = self._memory_usage(deep=deep) # include our engine hashtable result += self._engine.sizeof(deep=deep) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 944844dfbbb5b..e605486e0044e 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -207,6 +207,7 @@ def 
is_bool(self) -> bool: def external_values(self): return external_values(self.values) + @final def internal_values(self): """ The array that Series._values returns (internal values). @@ -593,8 +594,6 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"): Block """ values = self.values - if values.dtype.kind in ["m", "M"]: - values = self.array_values new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) @@ -1763,10 +1762,6 @@ def is_view(self) -> bool: # check the ndarray values of the DatetimeIndex values return self.values._ndarray.base is not None - def internal_values(self): - # Override to return DatetimeArray and TimedeltaArray - return self.values - def get_values(self, dtype: Optional[DtypeObj] = None) -> np.ndarray: """ return object dtype as boxed values, such as Timestamps/Timedelta @@ -1878,7 +1873,6 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeLikeBlockMixin): is_extension = True is_numeric = False - internal_values = Block.internal_values diff = DatetimeBlock.diff where = DatetimeBlock.where putmask = DatetimeLikeBlockMixin.putmask diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index de0a5687aeb8b..3c8d942554575 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1085,17 +1085,11 @@ def value_getitem(placement): else: if value.ndim == 2: value = value.T - - if value.ndim == self.ndim - 1: - value = ensure_block_shape(value, ndim=2) - - def value_getitem(placement): - return value - else: + value = ensure_block_shape(value, ndim=2) - def value_getitem(placement): - return value[placement.indexer] + def value_getitem(placement): + return value[placement.indexer] if value.shape[1:] != self.shape[1:]: raise AssertionError( diff --git a/pandas/core/series.py b/pandas/core/series.py index 27ebf7f228bc0..4ade9992e9e3e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4609,7 +4609,7 @@ def shift(self, periods=1, freq=None, axis=0, 
fill_value=None) -> Series: periods=periods, freq=freq, axis=axis, fill_value=fill_value ) - def memory_usage(self, index=True, deep=False): + def memory_usage(self, index: bool = True, deep: bool = False) -> int: """ Return the memory usage of the Series. @@ -4658,7 +4658,7 @@ def memory_usage(self, index=True, deep=False): >>> s.memory_usage(deep=True) 244 """ - v = super().memory_usage(deep=deep) + v = self._memory_usage(deep=deep) if index: v += self.index.memory_usage(deep=deep) return v
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40715
2021-03-31T23:07:12Z
2021-04-02T00:50:08Z
2021-04-02T00:50:08Z
2021-04-02T01:03:35Z
CLN: docstring cleanup
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index a9feaee825a4b..ed8911b6cd929 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -154,9 +154,9 @@ def group_cumprod_float64(float64_t[:, ::1] out, Parameters ---------- - out : float64 array + out : np.ndarray[np.float64, ndim=2] Array to store cumprod in. - values : float64 array + values : np.ndarray[np.float64, ndim=2] Values to take cumprod of. labels : np.ndarray[np.intp] Labels to group by. @@ -211,9 +211,9 @@ def group_cumsum(numeric[:, ::1] out, Parameters ---------- - out : array + out : np.ndarray[ndim=2] Array to store cumsum in. - values : array + values : np.ndarray[ndim=2] Values to take cumsum of. labels : np.ndarray[np.intp] Labels to group by. @@ -329,12 +329,15 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[intp_t] labels, Parameters ---------- - out : array of int64_t values which this method will write its results to - Missing values will be written to with a value of -1 + out : np.ndarray[np.uint8] + Values into which this method will write its results. labels : np.ndarray[np.intp] Array containing unique label for each group, with its ordering matching up to the corresponding record in `values`. - mask : array of int64_t values where a 1 indicates a missing value + values : np.ndarray[np.uint8] + Containing the truth value of each element. + mask : np.ndarray[np.uint8] + Indicating whether a value is na or not. direction : {'ffill', 'bfill'} Direction for fill to be applied (forwards or backwards, respectively) limit : Consecutive values to fill before stopping, or -1 for no limit @@ -396,12 +399,15 @@ def group_any_all(uint8_t[::1] out, Parameters ---------- - out : array of values which this method will write its results to + out : np.ndarray[np.uint8] + Values into which this method will write its results. 
labels : np.ndarray[np.intp] Array containing unique label for each group, with its ordering matching up to the corresponding record in `values` - values : array containing the truth value of each element - mask : array indicating whether a value is na or not + values : np.ndarray[np.uint8] + Containing the truth value of each element. + mask : np.ndarray[np.uint8] + Indicating whether a value is na or not. val_test : {'any', 'all'} String object dictating whether to use any or all truth testing skipna : bool @@ -721,14 +727,17 @@ def group_quantile(ndarray[float64_t] out, Parameters ---------- - out : ndarray + out : np.ndarray[np.float64] Array of aggregated values that will be written to. + values : np.ndarray + Array containing the values to apply the function against. labels : ndarray[np.intp] Array containing the unique group labels. values : ndarray Array containing the values to apply the function against. q : float The quantile value to search for. + interpolation : {'linear', 'lower', 'highest', 'nearest', 'midpoint'} Notes ----- @@ -1048,8 +1057,9 @@ def group_rank(float64_t[:, ::1] out, Parameters ---------- - out : array of float64_t values which this method will write its results to - values : array of rank_t values to be ranked + out : np.ndarray[np.float64, ndim=2] + Values to which this method will write its results. + values : np.ndarray of rank_t values to be ranked labels : np.ndarray[np.intp] Array containing unique label for each group, with its ordering matching up to the corresponding record in `values` @@ -1058,8 +1068,7 @@ def group_rank(float64_t[:, ::1] out, groupby functions. is_datetimelike : bool True if `values` contains datetime-like entries. 
- ties_method : {'average', 'min', 'max', 'first', 'dense'}, default - 'average' + ties_method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' * average: average rank of group * min: lowest rank in group * max: highest rank in group @@ -1120,9 +1129,9 @@ cdef group_min_max(groupby_t[:, ::1] out, Parameters ---------- - out : array + out : np.ndarray[groupby_t, ndim=2] Array to store result in. - counts : int64 array + counts : np.ndarray[int64] Input as a zeroed array, populated by group sizes during algorithm values : array Values to find column-wise min/max of. @@ -1241,9 +1250,9 @@ def group_cummin_max(groupby_t[:, ::1] out, Parameters ---------- - out : array + out : np.ndarray[groupby_t, ndim=2] Array to store cummin/max in. - values : array + values : np.ndarray[groupby_t, ndim=2] Values to take cummin/max of. labels : np.ndarray[np.intp] Labels to group by. diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 31b6935e9b2ba..4295db9d1613c 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -385,7 +385,7 @@ def get_blkno_indexers(int64_t[:] blknos, bint group=True): Returns ------- - iter : iterator of (int, slice or array) + list[tuple[int, slice | np.ndarray]] """ # There's blkno in this function's name because it's used in block & # blockno handling. 
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 646b5a05afcad..b8d79d0835fb8 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -916,7 +916,7 @@ def indices_fast(ndarray[intp_t] index, const int64_t[:] labels, list keys, """ Parameters ---------- - index : ndarray + index : ndarray[intp] labels : ndarray[int64] keys : list sorted_labels : list[ndarray[int64]] diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index 8cf48ef04ac31..959d83a55d4f3 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -52,7 +52,7 @@ def unstack(reshape_t[:, :] values, const uint8_t[:] mask, stride : int length : int width : int - new_values : typed ndarray + new_values : np.ndarray[bool] result array new_mask : np.ndarray[bool] result mask diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 893c0fa52cd15..1bda35206ccef 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -316,7 +316,7 @@ def datetime_to_datetime64(ndarray[object] values): Returns ------- - result : ndarray[int64_t] + result : ndarray[datetime64ns] inferred_tz : tzinfo or None """ cdef: diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index bf8acfb459cb8..1d99ebba3b9f0 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -315,6 +315,10 @@ def array_to_timedelta64(ndarray[object] values, str unit=None, str errors="rais """ Convert an ndarray to an array of timedeltas. If errors == 'coerce', coerce non-convertible objects to NaT. Otherwise, raise. 
+ + Returns + ------- + np.ndarray[timedelta64ns] """ cdef: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 91a7584b975c3..541dd8abee3c3 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -943,7 +943,7 @@ def duplicated(values: ArrayLike, keep: Union[str, bool] = "first") -> np.ndarra Returns ------- - duplicated : ndarray + duplicated : ndarray[bool] """ values, _ = _ensure_data(values) ndtype = values.dtype.name @@ -1631,7 +1631,7 @@ def diff(arr, n: int, axis: int = 0, stacklevel=3): number of periods axis : {0, 1} axis to shift on - stacklevel : int + stacklevel : int, default 3 The stacklevel for the lost dtype warning. Returns diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 08646c4d25a50..9aafea4b998a1 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1452,7 +1452,7 @@ def isna(self): Returns ------- - a boolean array of whether my values are null + np.ndarray[bool] of whether my values are null See Also -------- @@ -1474,7 +1474,7 @@ def notna(self): Returns ------- - a boolean array of whether my values are not null + np.ndarray[bool] of whether my values are not null See Also -------- diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index c0a8c20832fa8..d1f0f506766a8 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1031,7 +1031,7 @@ def to_pydatetime(self) -> np.ndarray: Returns ------- - datetimes : ndarray + datetimes : ndarray[object] """ return ints_to_pydatetime(self.asi8, tz=self.tz) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index f3889ff360aa8..1bf822c1ae3e5 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -853,7 +853,7 @@ def to_pytimedelta(self) -> np.ndarray: Returns ------- - datetimes : ndarray + timedeltas : ndarray[object] """ return tslibs.ints_to_pytimedelta(self.asi8) 
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b22fcbd9229e7..484b01f2c04f0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8191,7 +8191,7 @@ def _gotitem( Parameters ---------- key : string / list of selections - ndim : 1,2 + ndim : {1, 2} requested ndim of result subset : object, default None subset to act on diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8524907a84099..6b4e3c7caef50 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1656,9 +1656,9 @@ def _is_label_reference(self, key, axis=0) -> bool_t: Parameters ---------- - key: str + key : str Potential label name - axis: int, default 0 + axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) @@ -1687,14 +1687,14 @@ def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t: Parameters ---------- - key: str + key : str Potential label or level name - axis: int, default 0 + axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- - is_label_or_level: bool + bool """ return self._is_level_reference(key, axis=axis) or self._is_label_reference( key, axis=axis @@ -1710,9 +1710,9 @@ def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None: Parameters ---------- - key: str or object + key : str or object Label or level name. - axis: int, default 0 + axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). Raises @@ -1760,14 +1760,14 @@ def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray: Parameters ---------- - key: str + key : str Label or level name. 
- axis: int, default 0 + axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- - values: np.ndarray + values : np.ndarray Raises ------ diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py index ffe31147fe87d..6de8c1d789097 100644 --- a/pandas/core/groupby/categorical.py +++ b/pandas/core/groupby/categorical.py @@ -41,7 +41,7 @@ def recode_for_groupby( Returns ------- - New Categorical + Categorical If sort=False, the new categories are set to the order of appearance in codes (unless ordered=True, in which case the original order is preserved), followed by any unrepresented diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 4c086f3b8612e..8dd5e3b771f2f 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -808,7 +808,7 @@ class BinGrouper(BaseGrouper): binlabels : the label list filter_empty : bool, default False mutated : bool, default False - indexer : a intp array + indexer : np.ndarray[np.intp] Examples -------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index e85d09a479d16..71dc52663a4e7 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2412,7 +2412,7 @@ def isna(self) -> np.ndarray: Returns ------- - numpy.ndarray + numpy.ndarray[bool] A boolean array of whether my values are NA. See Also @@ -2470,7 +2470,7 @@ def notna(self) -> np.ndarray: Returns ------- - numpy.ndarray + numpy.ndarray[bool] Boolean array to indicate which entries are not NA. See Also @@ -5499,7 +5499,7 @@ def isin(self, values, level=None): Returns ------- - is_contained : ndarray + is_contained : ndarray[bool] NumPy array of boolean values. 
See Also diff --git a/pandas/core/series.py b/pandas/core/series.py index 4b89c09cdb898..27ebf7f228bc0 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3999,7 +3999,7 @@ def _gotitem(self, key, ndim, subset=None) -> Series: Parameters ---------- key : string / list of selections - ndim : 1,2 + ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 66dc80159af16..7619623bb9eda 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -576,7 +576,7 @@ def _adjust_to_origin(arg, origin, unit): date to be adjusted origin : 'julian' or Timestamp origin offset for the arg - unit : string + unit : str passed unit from to_datetime, must be 'D' Returns diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index b482934dd25d2..b90722857938e 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -239,7 +239,7 @@ def _gotitem(self, key, ndim, subset=None): Parameters ---------- key : str / list of selections - ndim : 1,2 + ndim : {1, 2} requested ndim of result subset : object, default None subset to act on diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index dbe483d021c63..648df0ff2b6d9 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -431,8 +431,8 @@ class ExcelFormatter: ---------- df : DataFrame or Styler na_rep: na representation - float_format : string, default None - Format string for floating point numbers + float_format : str, default None + Format string for floating point numbers cols : sequence, optional Columns to write header : bool or sequence of str, default True @@ -440,12 +440,12 @@ class ExcelFormatter: assumed to be aliases for the column names index : bool, default True output row names (index) - index_label : string or sequence, default None - Column label for index column(s) if desired. 
If None is given, and - `header` and `index` are True, then the index names are used. A - sequence should be given if the DataFrame uses MultiIndex. + index_label : str or sequence, default None + Column label for index column(s) if desired. If None is given, and + `header` and `index` are True, then the index names are used. A + sequence should be given if the DataFrame uses MultiIndex. merge_cells : bool, default False - Format MultiIndex and Hierarchical Rows as merged cells. + Format MultiIndex and Hierarchical Rows as merged cells. inf_rep : str, default `'inf'` representation for np.inf values (which aren't representable in Excel) A `'-'` sign will be added in front of -inf.
https://api.github.com/repos/pandas-dev/pandas/pulls/40713
2021-03-31T21:10:43Z
2021-04-01T22:35:24Z
2021-04-01T22:35:24Z
2021-04-01T23:11:09Z
CI: increase codecov target - append ArrayManager tests coverage
diff --git a/ci/run_tests.sh b/ci/run_tests.sh index d73940c1010ad..f5e3420b8c9b3 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -10,7 +10,7 @@ if [[ "not network" == *"$PATTERN"* ]]; then fi if [ "$COVERAGE" ]; then - COVERAGE="-s --cov=pandas --cov-report=xml" + COVERAGE="-s --cov=pandas --cov-report=xml --cov-append" fi # If no X server is found, we use xvfb to emulate it diff --git a/codecov.yml b/codecov.yml index 3f3df474956da..893e40db004a6 100644 --- a/codecov.yml +++ b/codecov.yml @@ -8,7 +8,7 @@ coverage: status: project: default: - target: '72' + target: '82' patch: default: target: '50'
Attempt to close #40670
https://api.github.com/repos/pandas-dev/pandas/pulls/40711
2021-03-31T19:59:57Z
2021-04-03T06:35:54Z
2021-04-03T06:35:54Z
2021-04-03T06:35:58Z
DOC: Add/Update Pandas-Genomics in Ecosystem files
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index f569fe7451fa7..56aa734deddd6 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -475,7 +475,7 @@ arrays can be stored inside pandas' Series and DataFrame. `Pandas-Genomics`_ ~~~~~~~~~~~~~~~~~~ -Pandas-Genomics provides extension types and extension arrays for working with genomics data +Pandas-Genomics provides extension types, extension arrays, and extension accessors for working with genomics data `Pint-Pandas`_ ~~~~~~~~~~~~~~ @@ -502,16 +502,17 @@ A directory of projects providing :ref:`extension accessors <extending.register-accessors>`. This is for users to discover new accessors and for library authors to coordinate on the namespace. -=============== ============ ==================================== =============================================================== -Library Accessor Classes Description -=============== ============ ==================================== =============================================================== -`cyberpandas`_ ``ip`` ``Series`` Provides common operations for working with IP addresses. -`pdvega`_ ``vgplot`` ``Series``, ``DataFrame`` Provides plotting functions from the Altair_ library. -`pandas_path`_ ``path`` ``Index``, ``Series`` Provides `pathlib.Path`_ functions for Series. -`pint-pandas`_ ``pint`` ``Series``, ``DataFrame`` Provides units support for numeric Series and DataFrames. -`composeml`_ ``slice`` ``DataFrame`` Provides a generator for enhanced data slicing. -`datatest`_ ``validate`` ``Series``, ``DataFrame``, ``Index`` Provides validation, differences, and acceptance managers. 
-=============== ============ ==================================== =============================================================== +================== ============ ==================================== =============================================================================== +Library Accessor Classes Description +================== ============ ==================================== =============================================================================== +`cyberpandas`_ ``ip`` ``Series`` Provides common operations for working with IP addresses. +`pdvega`_ ``vgplot`` ``Series``, ``DataFrame`` Provides plotting functions from the Altair_ library. +`pandas-genomics`_ ``genomics`` ``Series``, ``DataFrame`` Provides common operations for quality control and analysis of genomics data +`pandas_path`_ ``path`` ``Index``, ``Series`` Provides `pathlib.Path`_ functions for Series. +`pint-pandas`_ ``pint`` ``Series``, ``DataFrame`` Provides units support for numeric Series and DataFrames. +`composeml`_ ``slice`` ``DataFrame`` Provides a generator for enhanced data slicing. +`datatest`_ ``validate`` ``Series``, ``DataFrame``, ``Index`` Provides validation, differences, and acceptance managers. +================== ============ ==================================== =============================================================================== .. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest .. _pdvega: https://altair-viz.github.io/pdvega/ diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md index 7cf78958370ac..547a5f30e0516 100644 --- a/web/pandas/community/ecosystem.md +++ b/web/pandas/community/ecosystem.md @@ -360,6 +360,12 @@ Cyberpandas provides an extension type for storing arrays of IP Addresses. These arrays can be stored inside pandas' Series and DataFrame. 
+### [Pandas-Genomics](https://pandas-genomics.readthedocs.io/en/latest/) + +Pandas-Genomics provides an extension type and extension array for working + with genomics data. It also includes `genomics` accessors for many useful properties + and methods related to QC and analysis of genomics data. + ### [Pint-Pandas](https://github.com/hgrecco/pint-pandas) Pint-Pandas provides an extension type for storing numeric arrays with units. @@ -373,10 +379,11 @@ A directory of projects providing `extension accessors <extending.register-accessors>`. This is for users to discover new accessors and for library authors to coordinate on the namespace. - | Library | Accessor | Classes | - | --------------------------------------------------------------|----------|-----------------------| - | [cyberpandas](https://cyberpandas.readthedocs.io/en/latest) | `ip` | `Series` | - | [pdvega](https://altair-viz.github.io/pdvega/) | `vgplot` | `Series`, `DataFrame` | - | [pandas_path](https://github.com/drivendataorg/pandas-path/) | `path` | `Index`, `Series` | - | [pint-pandas](https://github.com/hgrecco/pint-pandas) | `pint` | `Series`, `DataFrame` | - | [composeml](https://github.com/FeatureLabs/compose) | `slice` | `DataFrame` | + | Library | Accessor | Classes | + | ---------------------------------------------------------------------|------------|-----------------------| + | [cyberpandas](https://cyberpandas.readthedocs.io/en/latest) | `ip` | `Series` | + | [pdvega](https://altair-viz.github.io/pdvega/) | `vgplot` | `Series`, `DataFrame` | + | [pandas-genomics](https://pandas-genomics.readthedocs.io/en/latest/) | `genomics` | `Series`, `DataFrame` | + | [pandas_path](https://github.com/drivendataorg/pandas-path/) | `path` | `Index`, `Series` | + | [pint-pandas](https://github.com/hgrecco/pint-pandas) | `pint` | `Series`, `DataFrame` | + | [composeml](https://github.com/FeatureLabs/compose) | `slice` | `DataFrame` |
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40710
2021-03-31T19:53:49Z
2021-04-01T15:55:56Z
2021-04-01T15:55:56Z
2021-04-01T15:56:00Z
REF: combine Block _can_hold_element methods
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 979b70c30d1b0..edc43bc68b2a8 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -113,6 +113,9 @@ from pandas.core.arrays import ( DatetimeArray, ExtensionArray, + IntervalArray, + PeriodArray, + TimedeltaArray, ) _int8_max = np.iinfo(np.int8).max @@ -2169,24 +2172,51 @@ def validate_numeric_casting(dtype: np.dtype, value: Scalar) -> None: raise ValueError(f"Cannot assign {type(value).__name__} to bool series") -def can_hold_element(dtype: np.dtype, element: Any) -> bool: +def can_hold_element(arr: ArrayLike, element: Any) -> bool: """ Can we do an inplace setitem with this element in an array with this dtype? Parameters ---------- - dtype : np.dtype + arr : np.ndarray or ExtensionArray element : Any Returns ------- bool """ + dtype = arr.dtype + if not isinstance(dtype, np.dtype) or dtype.kind in ["m", "M"]: + if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)): + # np.dtype here catches datetime64ns and timedelta64ns; we assume + # in this case that we have DatetimeArray/TimedeltaArray + arr = cast( + "PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr + ) + try: + arr._validate_setitem_value(element) + return True + except (ValueError, TypeError): + return False + + # This is technically incorrect, but maintains the behavior of + # ExtensionBlock._can_hold_element + return True + tipo = maybe_infer_dtype_type(element) if dtype.kind in ["i", "u"]: if tipo is not None: - return tipo.kind in ["i", "u"] and dtype.itemsize >= tipo.itemsize + if tipo.kind not in ["i", "u"]: + # Anything other than integer we cannot hold + return False + elif dtype.itemsize < tipo.itemsize: + return False + elif not isinstance(tipo, np.dtype): + # i.e. 
nullable IntegerDtype; we can put this into an ndarray + # losslessly iff it has no NAs + return not element._mask.any() + return True # We have not inferred an integer from the dtype # check if we have a builtin int or a float equal to an int @@ -2194,7 +2224,16 @@ def can_hold_element(dtype: np.dtype, element: Any) -> bool: elif dtype.kind == "f": if tipo is not None: - return tipo.kind in ["f", "i", "u"] + # TODO: itemsize check? + if tipo.kind not in ["f", "i", "u"]: + # Anything other than float/integer we cannot hold + return False + elif not isinstance(tipo, np.dtype): + # i.e. nullable IntegerDtype or FloatingDtype; + # we can put this into an ndarray losslessly iff it has no NAs + return not element._mask.any() + return True + return lib.is_integer(element) or lib.is_float(element) elif dtype.kind == "c": @@ -2212,4 +2251,11 @@ def can_hold_element(dtype: np.dtype, element: Any) -> bool: elif dtype == object: return True + elif dtype.kind == "S": + # TODO: test tests.frame.methods.test_replace tests get here, + # need more targeted tests. xref phofl has a PR about this + if tipo is not None: + return tipo.kind == "S" and tipo.itemsize <= dtype.itemsize + return isinstance(element, bytes) and len(element) <= dtype.itemsize + raise NotImplementedError(dtype) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index e85d09a479d16..937322a50dcac 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4482,7 +4482,7 @@ def _validate_fill_value(self, value): TypeError If the value cannot be inserted into an array of this dtype. 
""" - if not can_hold_element(self.dtype, value): + if not can_hold_element(self._values, value): raise TypeError return value diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index a77ea61d9e6de..944844dfbbb5b 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -5,7 +5,6 @@ from typing import ( TYPE_CHECKING, Any, - Callable, List, Optional, Tuple, @@ -18,8 +17,6 @@ import numpy as np from pandas._libs import ( - Interval, - Period, Timestamp, algos as libalgos, internals as libinternals, @@ -102,6 +99,7 @@ PeriodArray, TimedeltaArray, ) +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com import pandas.core.computation.expressions as expressions @@ -122,7 +120,6 @@ Float64Index, Index, ) - from pandas.core.arrays._mixins import NDArrayBackedExtensionArray # comparison is faster than is_object_dtype _dtype_obj = np.dtype("object") @@ -625,9 +622,11 @@ def convert( """ return [self.copy()] if copy else [self] + @final def _can_hold_element(self, element: Any) -> bool: """ require the same dtype as ourselves """ - raise NotImplementedError("Implemented on subclasses") + element = extract_array(element, extract_numpy=True) + return can_hold_element(self.values, element) @final def should_store(self, value: ArrayLike) -> bool: @@ -1545,7 +1544,7 @@ def setitem(self, indexer, value): be a compatible shape. """ if not self._can_hold_element(value): - # This is only relevant for DatetimeTZBlock, ObjectValuesExtensionBlock, + # This is only relevant for DatetimeTZBlock, PeriodDtype, IntervalDtype, # which has a non-trivial `_can_hold_element`. 
# https://github.com/pandas-dev/pandas/issues/24020 # Need a dedicated setitem until GH#24020 (type promotion in setitem @@ -1597,10 +1596,6 @@ def take_nd( return self.make_block_same_class(new_values, new_mgr_locs) - def _can_hold_element(self, element: Any) -> bool: - # TODO: We may need to think about pushing this onto the array. - return True - def _slice(self, slicer): """ Return a slice of my values. @@ -1746,54 +1741,22 @@ def _unstack(self, unstacker, fill_value, new_placement): return blocks, mask -class HybridMixin: - """ - Mixin for Blocks backed (maybe indirectly) by ExtensionArrays. - """ - - array_values: Callable - - def _can_hold_element(self, element: Any) -> bool: - values = self.array_values - - try: - # error: "Callable[..., Any]" has no attribute "_validate_setitem_value" - values._validate_setitem_value(element) # type: ignore[attr-defined] - return True - except (ValueError, TypeError): - return False - - -class ObjectValuesExtensionBlock(HybridMixin, ExtensionBlock): - """ - Block providing backwards-compatibility for `.values`. - - Used by PeriodArray and IntervalArray to ensure that - Series[T].values is an ndarray of objects. 
- """ - - pass - - class NumericBlock(Block): __slots__ = () is_numeric = True - def _can_hold_element(self, element: Any) -> bool: - element = extract_array(element, extract_numpy=True) - if isinstance(element, (IntegerArray, FloatingArray)): - if element._mask.any(): - return False - return can_hold_element(self.dtype, element) - -class NDArrayBackedExtensionBlock(HybridMixin, Block): +class NDArrayBackedExtensionBlock(Block): """ Block backed by an NDArrayBackedExtensionArray """ values: NDArrayBackedExtensionArray + @property + def array_values(self) -> NDArrayBackedExtensionArray: + return self.values + @property def is_view(self) -> bool: """ return a boolean if I am possibly a view """ @@ -1901,10 +1864,6 @@ class DatetimeLikeBlockMixin(NDArrayBackedExtensionBlock): is_numeric = False - @cache_readonly - def array_values(self): - return self.values - class DatetimeBlock(DatetimeLikeBlockMixin): __slots__ = () @@ -1920,7 +1879,6 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeLikeBlockMixin): is_numeric = False internal_values = Block.internal_values - _can_hold_element = DatetimeBlock._can_hold_element diff = DatetimeBlock.diff where = DatetimeBlock.where putmask = DatetimeLikeBlockMixin.putmask @@ -1983,9 +1941,6 @@ def convert( res_values = ensure_block_shape(res_values, self.ndim) return [self.make_block(res_values)] - def _can_hold_element(self, element: Any) -> bool: - return True - class CategoricalBlock(ExtensionBlock): # this Block type is kept for backwards-compatibility @@ -2052,8 +2007,6 @@ def get_block_type(values, dtype: Optional[Dtype] = None): cls = CategoricalBlock elif vtype is Timestamp: cls = DatetimeTZBlock - elif vtype is Interval or vtype is Period: - cls = ObjectValuesExtensionBlock elif isinstance(dtype, ExtensionDtype): # Note: need to be sure PandasArray is unwrapped before we get here cls = ExtensionBlock diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 6681015856d6b..de0a5687aeb8b 100644 
--- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -73,7 +73,6 @@ CategoricalBlock, DatetimeTZBlock, ExtensionBlock, - ObjectValuesExtensionBlock, ensure_block_shape, extend_blocks, get_block_type, @@ -1841,14 +1840,6 @@ def _form_blocks( blocks.extend(external_blocks) - if len(items_dict["ObjectValuesExtensionBlock"]): - external_blocks = [ - new_block(array, klass=ObjectValuesExtensionBlock, placement=i, ndim=2) - for i, array in items_dict["ObjectValuesExtensionBlock"] - ] - - blocks.extend(external_blocks) - if len(extra_locs): shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index 051871513a14e..e11e74f16030c 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -18,6 +18,7 @@ import pandas.util._test_decorators as td +from pandas.core.dtypes.cast import can_hold_element from pandas.core.dtypes.dtypes import ( ExtensionDtype, PandasDtype, @@ -27,7 +28,10 @@ import pandas as pd import pandas._testing as tm from pandas.core.arrays.numpy_ import PandasArray -from pandas.core.internals import managers +from pandas.core.internals import ( + blocks, + managers, +) from pandas.tests.extension import base # TODO(ArrayManager) PandasArray @@ -45,6 +49,12 @@ def _extract_array_patched(obj): return obj +def _can_hold_element_patched(obj, element) -> bool: + if isinstance(element, PandasArray): + element = element.to_numpy() + return can_hold_element(obj, element) + + @pytest.fixture(params=["float", "object"]) def dtype(request): return PandasDtype(np.dtype(request.param)) @@ -70,6 +80,7 @@ def allow_in_pandas(monkeypatch): with monkeypatch.context() as m: m.setattr(PandasArray, "_typ", "extension") m.setattr(managers, "_extract_array", _extract_array_patched) + m.setattr(blocks, "can_hold_element", _can_hold_element_patched) yield
which in turn lets us get rid of ObjectValuesExtensionBlock and HybridMixin.
https://api.github.com/repos/pandas-dev/pandas/pulls/40709
2021-03-31T19:31:13Z
2021-04-01T22:36:41Z
2021-04-01T22:36:41Z
2021-04-01T23:13:45Z
ENH: [ArrowStringArray] Enable the string methods for the arrow-backed StringArray
diff --git a/pandas/conftest.py b/pandas/conftest.py index 3fdde3261bd68..35affa62ccf68 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1144,6 +1144,8 @@ def nullable_string_dtype(request): * 'string' * 'arrow_string' """ + from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401 + return request.param diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index faca868873efa..fd47597b2191f 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -24,6 +24,10 @@ from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.common import ( + is_object_dtype, + is_string_dtype, +) from pandas.core.dtypes.dtypes import register_extension_dtype from pandas.core.dtypes.missing import isna @@ -41,6 +45,7 @@ check_array_indexer, validate_indices, ) +from pandas.core.strings.object_array import ObjectStringArrayMixin try: import pyarrow as pa @@ -149,7 +154,12 @@ def __eq__(self, other) -> bool: return False -class ArrowStringArray(OpsMixin, ExtensionArray): +# TODO: Inherit directly from BaseStringArrayMethods. Currently we inherit from +# ObjectStringArrayMixin because we want to have the object-dtype based methods as +# fallback for the ones that pyarrow doesn't yet support + + +class ArrowStringArray(OpsMixin, ExtensionArray, ObjectStringArrayMixin): """ Extension array for string data in a ``pyarrow.ChunkedArray``. @@ -676,3 +686,71 @@ def value_counts(self, dropna: bool = True) -> Series: raise NotImplementedError("yo") return Series(counts, index=index).astype("Int64") + + # ------------------------------------------------------------------------ + # String methods interface + + _str_na_value = ArrowStringDtype.na_value + + def _str_map(self, f, na_value=None, dtype: Dtype | None = None): + # TODO: de-duplicate with StringArray method. This method is moreless copy and + # paste. 
+ + from pandas.arrays import ( + BooleanArray, + IntegerArray, + ) + + if dtype is None: + dtype = self.dtype + if na_value is None: + na_value = self.dtype.na_value + + mask = isna(self) + arr = np.asarray(self) + + if is_integer_dtype(dtype) or is_bool_dtype(dtype): + constructor: type[IntegerArray] | type[BooleanArray] + if is_integer_dtype(dtype): + constructor = IntegerArray + else: + constructor = BooleanArray + + na_value_is_na = isna(na_value) + if na_value_is_na: + na_value = 1 + result = lib.map_infer_mask( + arr, + f, + mask.view("uint8"), + convert=False, + na_value=na_value, + # error: Value of type variable "_DTypeScalar" of "dtype" cannot be + # "object" + # error: Argument 1 to "dtype" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected + # "Type[object]" + dtype=np.dtype(dtype), # type: ignore[type-var,arg-type] + ) + + if not na_value_is_na: + mask[:] = False + + # error: Argument 1 to "IntegerArray" has incompatible type + # "Union[ExtensionArray, ndarray]"; expected "ndarray" + # error: Argument 1 to "BooleanArray" has incompatible type + # "Union[ExtensionArray, ndarray]"; expected "ndarray" + return constructor(result, mask) # type: ignore[arg-type] + + elif is_string_dtype(dtype) and not is_object_dtype(dtype): + # i.e. StringDtype + result = lib.map_infer_mask( + arr, f, mask.view("uint8"), convert=False, na_value=na_value + ) + return self._from_sequence(result) + else: + # This is when the result type is object. We reach this when + # -> We know the result type is truly object (e.g. .encode returns bytes + # or .findall returns a list). + # -> We don't know the result type. E.g. `.get` can return anything. 
+ return lib.map_infer_mask(arr, f, mask.view("uint8")) diff --git a/pandas/core/strings/__init__.py b/pandas/core/strings/__init__.py index 943686fc85a05..28aba7c9ce0b3 100644 --- a/pandas/core/strings/__init__.py +++ b/pandas/core/strings/__init__.py @@ -25,6 +25,7 @@ # - StringArray # - PandasArray # - Categorical +# - ArrowStringArray from pandas.core.strings.accessor import StringMethods from pandas.core.strings.base import BaseStringArrayMethods diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 85da954ec842e..0b5613e302175 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -154,10 +154,11 @@ class StringMethods(NoNewAttributesMixin): def __init__(self, data): from pandas.core.arrays.string_ import StringDtype + from pandas.core.arrays.string_arrow import ArrowStringDtype self._inferred_dtype = self._validate(data) self._is_categorical = is_categorical_dtype(data.dtype) - self._is_string = isinstance(data.dtype, StringDtype) + self._is_string = isinstance(data.dtype, (StringDtype, ArrowStringDtype)) self._data = data self._index = self._name = None @@ -316,7 +317,7 @@ def cons_row(x): # This is a mess. dtype: Optional[str] if self._is_string and returns_string: - dtype = "string" + dtype = self._orig.dtype else: dtype = None diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 8b84a510c01e6..749f3d0aee8a5 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -91,17 +91,11 @@ def test_setitem_with_scalar_string(dtype): @pytest.mark.parametrize( "input, method", [ - (["a", "b", "c"], operator.methodcaller("capitalize")), (["a", "b", "c"], operator.methodcaller("capitalize")), (["a b", "a bc. 
de"], operator.methodcaller("capitalize")), ], ) -def test_string_methods(input, method, dtype, request): - if dtype == "arrow_string": - reason = "AttributeError: 'ArrowStringDtype' object has no attribute 'base'" - mark = pytest.mark.xfail(reason=reason) - request.node.add_marker(mark) - +def test_string_methods(input, method, dtype): a = pd.Series(input, dtype=dtype) b = pd.Series(input, dtype="object") result = method(a.str) diff --git a/pandas/tests/strings/test_string_array.py b/pandas/tests/strings/test_string_array.py index b51132caf7573..23c9b14c5a36a 100644 --- a/pandas/tests/strings/test_string_array.py +++ b/pandas/tests/strings/test_string_array.py @@ -11,14 +11,22 @@ ) -def test_string_array(any_string_method): +def test_string_array(nullable_string_dtype, any_string_method, request): method_name, args, kwargs = any_string_method if method_name == "decode": pytest.skip("decode requires bytes.") + if nullable_string_dtype == "arrow_string" and method_name in { + "extract", + "extractall", + }: + reason = "extract/extractall does not yet dispatch to array" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + data = ["a", "bb", np.nan, "ccc"] a = Series(data, dtype=object) - b = Series(data, dtype="string") + b = Series(data, dtype=nullable_string_dtype) expected = getattr(a.str, method_name)(*args, **kwargs) result = getattr(b.str, method_name)(*args, **kwargs) @@ -27,7 +35,7 @@ def test_string_array(any_string_method): if expected.dtype == "object" and lib.is_string_array( expected.dropna().values, ): - assert result.dtype == "string" + assert result.dtype == nullable_string_dtype result = result.astype(object) elif expected.dtype == "object" and lib.is_bool_array( @@ -46,7 +54,7 @@ def test_string_array(any_string_method): elif isinstance(expected, DataFrame): columns = expected.select_dtypes(include="object").columns - assert all(result[columns].dtypes == "string") + assert all(result[columns].dtypes == nullable_string_dtype) 
result[columns] = result[columns].astype(object) tm.assert_equal(result, expected) @@ -60,8 +68,8 @@ def test_string_array(any_string_method): ("rindex", [2, None]), ], ) -def test_string_array_numeric_integer_array(method, expected): - s = Series(["aba", None], dtype="string") +def test_string_array_numeric_integer_array(nullable_string_dtype, method, expected): + s = Series(["aba", None], dtype=nullable_string_dtype) result = getattr(s.str, method)("a") expected = Series(expected, dtype="Int64") tm.assert_series_equal(result, expected) @@ -73,33 +81,39 @@ def test_string_array_numeric_integer_array(method, expected): ("isdigit", [False, None, True]), ("isalpha", [True, None, False]), ("isalnum", [True, None, True]), - ("isdigit", [False, None, True]), + ("isnumeric", [False, None, True]), ], ) -def test_string_array_boolean_array(method, expected): - s = Series(["a", None, "1"], dtype="string") +def test_string_array_boolean_array(nullable_string_dtype, method, expected): + s = Series(["a", None, "1"], dtype=nullable_string_dtype) result = getattr(s.str, method)() expected = Series(expected, dtype="boolean") tm.assert_series_equal(result, expected) -def test_string_array_extract(): +def test_string_array_extract(nullable_string_dtype, request): # https://github.com/pandas-dev/pandas/issues/30969 # Only expand=False & multiple groups was failing - a = Series(["a1", "b2", "cc"], dtype="string") + + if nullable_string_dtype == "arrow_string": + reason = "extract does not yet dispatch to array" + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + a = Series(["a1", "b2", "cc"], dtype=nullable_string_dtype) b = Series(["a1", "b2", "cc"], dtype="object") pat = r"(\w)(\d)" result = a.str.extract(pat, expand=False) expected = b.str.extract(pat, expand=False) - assert all(result.dtypes == "string") + assert all(result.dtypes == nullable_string_dtype) result = result.astype(object) tm.assert_equal(result, expected) -def 
test_str_get_stringarray_multiple_nans(): - s = Series(pd.array(["a", "ab", pd.NA, "abc"])) +def test_str_get_stringarray_multiple_nans(nullable_string_dtype): + s = Series(pd.array(["a", "ab", pd.NA, "abc"], dtype=nullable_string_dtype)) result = s.str.get(2) - expected = Series(pd.array([pd.NA, pd.NA, pd.NA, "c"])) + expected = Series(pd.array([pd.NA, pd.NA, pd.NA, "c"], dtype=nullable_string_dtype)) tm.assert_series_equal(result, expected)
from https://github.com/pandas-dev/pandas/issues/35169#issuecomment-712883859 > Enable the string methods for the arrow-backed StringArray. This might also need some additional changes in the string accessor code (eg to dispatch extract to the underlying array as well)
https://api.github.com/repos/pandas-dev/pandas/pulls/40708
2021-03-31T17:47:30Z
2021-04-15T08:50:29Z
2021-04-15T08:50:29Z
2021-04-15T08:51:24Z
BUG: RollingGroupby MultiIndex levels dropped
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 75b2dee4a5822..e1358b433a0b9 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -400,6 +400,38 @@ However, floating point artifacts may now exist in the results when rolling over s = pd.Series([7, 5, 5, 5]) s.rolling(3).var() +.. _whatsnew_130.notable_bug_fixes.rolling_groupby_multiindex: + +GroupBy.rolling with MultiIndex no longer drops levels in the result +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:class:`core.window.rolling.RollingGroupby` will no longer drop levels of a :class:`DataFrame` +with a :class:`MultiIndex` in the result. This can lead to a perceived duplication of levels in the resulting +:class:`MultiIndex`, but this change restores the behavior that was present in version 1.1.3 (:issue:`38787`, :issue:`38523`). + + +.. ipython:: python + + index = pd.MultiIndex.from_tuples([('idx1', 'idx2')], names=['label1', 'label2']) + df = pd.DataFrame({'a': [1], 'b': [2]}, index=index) + df + +*Previous behavior*: + +.. code-block:: ipython + + In [1]: df.groupby('label1').rolling(1).sum() + Out[1]: + a b + label1 + idx1 1.0 2.0 + +*New behavior*: + +.. ipython:: python + + df.groupby('label1').rolling(1).sum() + .. 
_whatsnew_130.api_breaking.deps: diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index b90722857938e..c7fa6f99bfb1c 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -577,26 +577,23 @@ def _apply( numba_cache_key, **kwargs, ) - # Reconstruct the resulting MultiIndex from tuples + # Reconstruct the resulting MultiIndex # 1st set of levels = group by labels - # 2nd set of levels = original index - # Ignore 2nd set of levels if a group by label include an index level - result_index_names = copy.copy(self._grouper.names) - grouped_object_index = None + # 2nd set of levels = original DataFrame/Series index + grouped_object_index = self.obj.index + grouped_index_name = [*grouped_object_index.names] + groupby_keys = copy.copy(self._grouper.names) + result_index_names = groupby_keys + grouped_index_name - column_keys = [ + drop_columns = [ key - for key in result_index_names + for key in self._grouper.names if key not in self.obj.index.names or key is None ] - if len(column_keys) == len(result_index_names): - grouped_object_index = self.obj.index - grouped_index_name = [*grouped_object_index.names] - result_index_names += grouped_index_name - else: + if len(drop_columns) != len(groupby_keys): # Our result will have still kept the column in the result - result = result.drop(columns=column_keys, errors="ignore") + result = result.drop(columns=drop_columns, errors="ignore") codes = self._grouper.codes levels = copy.copy(self._grouper.levels) diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py index 5c2f69a9247e9..dd988a4abd9e1 100644 --- a/pandas/tests/window/test_groupby.py +++ b/pandas/tests/window/test_groupby.py @@ -588,23 +588,31 @@ def test_groupby_rolling_nans_in_index(self, rollings, key): with pytest.raises(ValueError, match=f"{key} must be monotonic"): df.groupby("c").rolling("60min", **rollings) - def test_groupby_rolling_group_keys(self): + 
@pytest.mark.parametrize("group_keys", [True, False]) + def test_groupby_rolling_group_keys(self, group_keys): # GH 37641 + # GH 38523: GH 37641 actually was not a bug. + # group_keys only applies to groupby.apply directly arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]] index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2")) s = Series([1, 2, 3], index=index) - result = s.groupby(["idx1", "idx2"], group_keys=False).rolling(1).mean() + result = s.groupby(["idx1", "idx2"], group_keys=group_keys).rolling(1).mean() expected = Series( [1.0, 2.0, 3.0], index=MultiIndex.from_tuples( - [("val1", "val1"), ("val1", "val1"), ("val2", "val2")], - names=["idx1", "idx2"], + [ + ("val1", "val1", "val1", "val1"), + ("val1", "val1", "val1", "val1"), + ("val2", "val2", "val2", "val2"), + ], + names=["idx1", "idx2", "idx1", "idx2"], ), ) tm.assert_series_equal(result, expected) def test_groupby_rolling_index_level_and_column_label(self): + # The groupby keys should not appear as a resulting column arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]] index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2")) @@ -613,7 +621,12 @@ def test_groupby_rolling_index_level_and_column_label(self): expected = DataFrame( {"B": [0.0, 1.0, 2.0]}, index=MultiIndex.from_tuples( - [("val1", 1), ("val1", 1), ("val2", 2)], names=["idx1", "A"] + [ + ("val1", 1, "val1", "val1"), + ("val1", 1, "val1", "val1"), + ("val2", 2, "val2", "val2"), + ], + names=["idx1", "A", "idx1", "idx2"], ), ) tm.assert_frame_equal(result, expected) @@ -695,6 +708,30 @@ def test_by_column_not_in_values(self, columns): assert "A" not in result.columns tm.assert_frame_equal(g.obj, original_obj) + def test_groupby_level(self): + # GH 38523, 38787 + arrays = [ + ["Falcon", "Falcon", "Parrot", "Parrot"], + ["Captive", "Wild", "Captive", "Wild"], + ] + index = MultiIndex.from_arrays(arrays, names=("Animal", "Type")) + df = DataFrame({"Max Speed": [390.0, 350.0, 30.0, 20.0]}, index=index) + result = 
df.groupby(level=0)["Max Speed"].rolling(2).sum() + expected = Series( + [np.nan, 740.0, np.nan, 50.0], + index=MultiIndex.from_tuples( + [ + ("Falcon", "Falcon", "Captive"), + ("Falcon", "Falcon", "Wild"), + ("Parrot", "Parrot", "Captive"), + ("Parrot", "Parrot", "Wild"), + ], + names=["Animal", "Animal", "Type"], + ), + name="Max Speed", + ) + tm.assert_series_equal(result, expected) + class TestExpanding: def setup_method(self):
- [x] closes #38523 - [x] closes #38787 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Essentially https://github.com/pandas-dev/pandas/pull/38737 but targeted for 1.3 as it appears that the current behavior is largely unexpected
https://api.github.com/repos/pandas-dev/pandas/pulls/40701
2021-03-31T06:28:03Z
2021-04-05T16:22:13Z
2021-04-05T16:22:13Z
2021-04-05T17:26:16Z
CI: lower codecov target
diff --git a/codecov.yml b/codecov.yml index 893e40db004a6..3f3df474956da 100644 --- a/codecov.yml +++ b/codecov.yml @@ -8,7 +8,7 @@ coverage: status: project: default: - target: '82' + target: '72' patch: default: target: '50'
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry xref #40670
https://api.github.com/repos/pandas-dev/pandas/pulls/40700
2021-03-31T02:36:51Z
2021-03-31T03:36:37Z
2021-03-31T03:36:37Z
2021-03-31T03:42:24Z
CLN: remove unused zip/enumerate/items
diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index a8a761b5f4aac..7a1a5f5b30590 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -162,7 +162,7 @@ def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[i order = [] columns, pairs = list(zip(*kwargs.items())) - for name, (column, aggfunc) in zip(columns, pairs): + for column, aggfunc in pairs: aggspec[column].append(aggfunc) order.append((column, com.get_callable_name(aggfunc) or aggfunc)) diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 51f7b44f6d69d..2b95eedf74b5c 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -789,7 +789,7 @@ def is_in_obj(gpr) -> bool: # lambda here return False - for i, (gpr, level) in enumerate(zip(keys, levels)): + for gpr, level in zip(keys, levels): if is_in_obj(gpr): # df.groupby(df['name']) in_axis, name = True, gpr.name diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index ae0f853db628e..efddd67ee9c7b 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2248,7 +2248,7 @@ def _convert_key(self, key, is_setter: bool = False): """ Require integer args. 
(and convert to label arguments) """ - for a, i in zip(self.obj.axes, key): + for i in key: if not is_integer(i): raise ValueError("iAt based indexing can only have integer indexers") return key diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py index df5cd66060659..be5224fe32ae1 100644 --- a/pandas/core/internals/ops.py +++ b/pandas/core/internals/ops.py @@ -26,7 +26,7 @@ def _iter_block_pairs( # At this point we have already checked the parent DataFrames for # assert rframe._indexed_same(lframe) - for n, blk in enumerate(left.blocks): + for blk in left.blocks: locs = blk.mgr_locs blk_vals = blk.values @@ -40,7 +40,7 @@ def _iter_block_pairs( # assert len(rblks) == 1, rblks # assert rblks[0].shape[0] == 1, rblks[0].shape - for k, rblk in enumerate(rblks): + for rblk in rblks: right_ea = rblk.values.ndim == 1 lvals, rvals = _get_same_shape_values(blk, rblk, left_ea, right_ea) diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 0278b22995089..2b1ebf0097778 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -101,12 +101,12 @@ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: table: List[List[Scalar]] = [] - for i, sheet_row in enumerate(sheet_rows): + for sheet_row in sheet_rows: sheet_cells = [x for x in sheet_row.childNodes if x.qname in cell_names] empty_cells = 0 table_row: List[Scalar] = [] - for j, sheet_cell in enumerate(sheet_cells): + for sheet_cell in sheet_cells: if sheet_cell.qname == table_cell_name: value = self._get_cell_value(sheet_cell, convert_float) else: diff --git a/pandas/io/formats/xml.py b/pandas/io/formats/xml.py index 5c7255d5e6ee4..c9dc87ec0588b 100644 --- a/pandas/io/formats/xml.py +++ b/pandas/io/formats/xml.py @@ -307,7 +307,7 @@ def build_tree(self) -> bytes: f"{self.prefix_uri}{self.root_name}", attrib=self.other_namespaces() ) - for k, d in self.frame_dicts.items(): + for d in self.frame_dicts.values(): self.d = d self.elem_row 
= SubElement(self.root, f"{self.prefix_uri}{self.row_name}") @@ -477,7 +477,7 @@ def build_tree(self) -> bytes: self.root = Element(f"{self.prefix_uri}{self.root_name}", nsmap=self.namespaces) - for k, d in self.frame_dicts.items(): + for d in self.frame_dicts.values(): self.d = d self.elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}") diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 8658bb654b787..5ee9a268bdc8c 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -3540,7 +3540,7 @@ def validate_min_itemsize(self, min_itemsize): return q = self.queryables() - for k, v in min_itemsize.items(): + for k in min_itemsize: # ok, apply generally if k == "values": diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index 1ec4efe7b4795..6a81e3ae43b5d 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -84,7 +84,7 @@ def _validate_color_args(self): if isinstance(self.color, dict): valid_keys = ["boxes", "whiskers", "medians", "caps"] - for key, values in self.color.items(): + for key in self.color: if key not in valid_keys: raise ValueError( f"color dict contains invalid key '{key}'. 
" diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py index 5093b88413110..e8d0a789e7cbd 100644 --- a/pandas/tests/frame/constructors/test_from_records.py +++ b/pandas/tests/frame/constructors/test_from_records.py @@ -149,7 +149,7 @@ def test_from_records_dictlike(self): # from the dict blocks = df._to_dict_of_blocks() columns = [] - for dtype, b in blocks.items(): + for b in blocks.values(): columns.extend(b.columns) asdict = {x: y for x, y in df.items()} diff --git a/pandas/tests/frame/methods/test_to_dict_of_blocks.py b/pandas/tests/frame/methods/test_to_dict_of_blocks.py index ca222180322bf..c81bed9d93cc4 100644 --- a/pandas/tests/frame/methods/test_to_dict_of_blocks.py +++ b/pandas/tests/frame/methods/test_to_dict_of_blocks.py @@ -20,7 +20,7 @@ def test_copy_blocks(self, float_frame): # use the default copy=True, change a column blocks = df._to_dict_of_blocks(copy=True) - for dtype, _df in blocks.items(): + for _df in blocks.values(): if column in _df: _df.loc[:, column] = _df[column] + 1 @@ -34,7 +34,7 @@ def test_no_copy_blocks(self, float_frame): # use the copy=False, change a column blocks = df._to_dict_of_blocks(copy=False) - for dtype, _df in blocks.items(): + for _df in blocks.values(): if column in _df: _df.loc[:, column] = _df[column] + 1 diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 6e71b56e8182b..6d269a27e2656 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -417,7 +417,7 @@ def test_finder_daily(self): xpl1 = xpl2 = [Period("1999-1-1", freq="B").ordinal] * len(day_lst) rs1 = [] rs2 = [] - for i, n in enumerate(day_lst): + for n in day_lst: rng = bdate_range("1999-1-1", periods=n) ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() @@ -439,7 +439,7 @@ def test_finder_quarterly(self): xpl1 = xpl2 = [Period("1988Q1").ordinal] * len(yrs) rs1 = [] 
rs2 = [] - for i, n in enumerate(yrs): + for n in yrs: rng = period_range("1987Q2", periods=int(n * 4), freq="Q") ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() @@ -461,7 +461,7 @@ def test_finder_monthly(self): xpl1 = xpl2 = [Period("Jan 1988").ordinal] * len(yrs) rs1 = [] rs2 = [] - for i, n in enumerate(yrs): + for n in yrs: rng = period_range("1987Q2", periods=int(n * 12), freq="M") ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() @@ -491,7 +491,7 @@ def test_finder_annual(self): xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170] xp = [Period(x, freq="A").ordinal for x in xp] rs = [] - for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]): + for nyears in [5, 10, 19, 49, 99, 199, 599, 1001]: rng = period_range("1987", periods=nyears, freq="A") ser = Series(np.random.randn(len(rng)), rng) _, ax = self.plt.subplots() diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 7dcd4dc979eb2..3cc81ef851306 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -646,7 +646,7 @@ def _ex(p): return p.start_time + Timedelta(days=1, nanoseconds=-1) return Timestamp((p + p.freq).start_time.value - 1) - for i, fcode in enumerate(from_lst): + for fcode in from_lst: p = Period("1982", freq=fcode) result = p.to_timestamp().to_period(fcode) assert result == p diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index c6b998e3dbddf..98de5b2b1eb84 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -223,7 +223,7 @@ def pandas_validate(func_name: str): ) if doc.see_also: - for rel_name, rel_desc in doc.see_also.items(): + for rel_name in doc.see_also: if rel_name.startswith("pandas."): result["errors"].append( pandas_error(
found by #40570
https://api.github.com/repos/pandas-dev/pandas/pulls/40699
2021-03-31T02:14:41Z
2021-03-31T13:12:50Z
2021-03-31T13:12:50Z
2021-06-05T20:50:46Z
CLN: boolean->bool, string->str in docstrings
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 0c17e8c3f8c19..040cb17578fa2 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -933,10 +933,10 @@ def rank_1d( * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups - ascending : boolean, default True + ascending : bool, default True False for ranks by high (1) to low (N) na_option : {'keep', 'top', 'bottom'}, default 'keep' - pct : boolean, default False + pct : bool, default False Compute percentage rank of data within each group na_option : {'keep', 'top', 'bottom'}, default 'keep' * keep: leave NA values where they are diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index e23fa9b82f12e..3064ca9261b56 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -402,9 +402,9 @@ def group_any_all(uint8_t[::1] out, ordering matching up to the corresponding record in `values` values : array containing the truth value of each element mask : array indicating whether a value is na or not - val_test : str {'any', 'all'} + val_test : {'any', 'all'} String object dictating whether to use any or all truth testing - skipna : boolean + skipna : bool Flag to ignore nan values during truth testing Notes @@ -1083,10 +1083,10 @@ def group_rank(float64_t[:, ::1] out, * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups - ascending : boolean, default True + ascending : bool, default True False for ranks by high (1) to low (N) na_option : {'keep', 'top', 'bottom'}, default 'keep' - pct : boolean, default False + pct : bool, default False Compute percentage rank of data within each group na_option : {'keep', 'top', 'bottom'}, default 'keep' * keep: leave NA values where they are diff --git a/pandas/_libs/hashtable_class_helper.pxi.in 
b/pandas/_libs/hashtable_class_helper.pxi.in index 6ace327ca3599..3b472b162cdff 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -523,7 +523,7 @@ cdef class {{name}}HashTable(HashTable): any value "val" satisfying val != val is considered missing. If na_value is not None, then _additionally_, any value "val" satisfying val == na_value is considered missing. - ignore_na : boolean, default False + ignore_na : bool, default False Whether NA-values should be ignored for calculating the uniques. If True, the labels corresponding to missing values will be set to na_sentinel. @@ -531,7 +531,7 @@ cdef class {{name}}HashTable(HashTable): If not None, the mask is used as indicator for missing values (True = missing, False = valid) instead of `na_value` or condition "val != val". - return_inverse : boolean, default False + return_inverse : bool, default False Whether the mapping of the original array values to their location in the vector of uniques should be returned. @@ -625,7 +625,7 @@ cdef class {{name}}HashTable(HashTable): ---------- values : ndarray[{{dtype}}] Array of values of which unique will be calculated - return_inverse : boolean, default False + return_inverse : bool, default False Whether the mapping of the original array values to their location in the vector of uniques should be returned. @@ -906,11 +906,11 @@ cdef class StringHashTable(HashTable): that is not a string is considered missing. If na_value is not None, then _additionally_ any value "val" satisfying val == na_value is considered missing. - ignore_na : boolean, default False + ignore_na : bool, default False Whether NA-values should be ignored for calculating the uniques. If True, the labels corresponding to missing values will be set to na_sentinel. 
- return_inverse : boolean, default False + return_inverse : bool, default False Whether the mapping of the original array values to their location in the vector of uniques should be returned. @@ -998,7 +998,7 @@ cdef class StringHashTable(HashTable): ---------- values : ndarray[object] Array of values of which unique will be calculated - return_inverse : boolean, default False + return_inverse : bool, default False Whether the mapping of the original array values to their location in the vector of uniques should be returned. @@ -1181,11 +1181,11 @@ cdef class PyObjectHashTable(HashTable): any value "val" satisfying val != val is considered missing. If na_value is not None, then _additionally_, any value "val" satisfying val == na_value is considered missing. - ignore_na : boolean, default False + ignore_na : bool, default False Whether NA-values should be ignored for calculating the uniques. If True, the labels corresponding to missing values will be set to na_sentinel. - return_inverse : boolean, default False + return_inverse : bool, default False Whether the mapping of the original array values to their location in the vector of uniques should be returned. @@ -1251,7 +1251,7 @@ cdef class PyObjectHashTable(HashTable): ---------- values : ndarray[object] Array of values of which unique will be calculated - return_inverse : boolean, default False + return_inverse : bool, default False Whether the mapping of the original array values to their location in the vector of uniques should be returned. 
diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index 05b255c40f4b2..8cf48ef04ac31 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -48,13 +48,13 @@ def unstack(reshape_t[:, :] values, const uint8_t[:] mask, Parameters ---------- values : typed ndarray - mask : boolean ndarray + mask : np.ndarray[bool] stride : int length : int width : int new_values : typed ndarray result array - new_mask : boolean ndarray + new_mask : np.ndarray[bool] result mask """ cdef: diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 56280d55e479d..893c0fa52cd15 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -267,7 +267,7 @@ def ensure_timedelta64ns(arr: ndarray, copy: bool=True): Parameters ---------- arr : ndarray - copy : boolean, default True + copy : bool, default True Returns ------- diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 79d6a42075e83..d6ca38e57d2d8 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -635,9 +635,9 @@ def get_locale_names(name_type: str, locale: object = None): Parameters ---------- - name_type : string, attribute of LocaleTime() in which to return localized - names - locale : string + name_type : str + Attribute of LocaleTime() in which to return localized names. 
+ locale : str Returns ------- diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 3cdb654642b9c..bf8acfb459cb8 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -540,7 +540,7 @@ cdef inline int64_t timedelta_as_neg(int64_t value, bint neg): Parameters ---------- value : int64_t of the timedelta value - neg : boolean if the a negative value + neg : bool if the a negative value """ if neg: return -value diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py index b38ca516c4393..adea9f6c19996 100644 --- a/pandas/_testing/__init__.py +++ b/pandas/_testing/__init__.py @@ -952,7 +952,7 @@ def get_op_from_name(op_name: str) -> Callable: Parameters ---------- - op_name : string + op_name : str The op name, in form of "add" or "__add__". Returns diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index f52aff424eb0b..91a7584b975c3 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -959,7 +959,7 @@ def mode(values, dropna: bool = True) -> Series: ---------- values : array-like Array over which to check for duplicate values. - dropna : boolean, default True + dropna : bool, default True Don't consider counts of NaN/NaT. .. versionadded:: 0.24.0 @@ -1025,9 +1025,9 @@ def rank( - ``keep``: rank each NaN value with a NaN ranking - ``top``: replace each NaN with either +/- inf so that they there are ranked at the top - ascending : boolean, default True + ascending : bool, default True Whether or not the elements should be ranked in ascending order. - pct : boolean, default False + pct : bool, default False Whether or not to the display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1). 
""" diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index 8318a02d5d214..d062b39725867 100644 --- a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -76,7 +76,7 @@ def take_nd( Axis to take from fill_value : any, default np.nan Fill value to replace -1 values with - allow_fill : boolean, default True + allow_fill : bool, default True If False, indexer is assumed to contain no -1 values so no filling will be done. This short-circuits computation of a mask. Result is undefined if allow_fill == False and -1 is present in indexer. diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index a46e74ae15a06..979b70c30d1b0 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -508,7 +508,7 @@ def maybe_upcast_putmask(result: np.ndarray, mask: np.ndarray) -> np.ndarray: result : ndarray The destination array. This will be mutated in-place if no upcasting is necessary. - mask : boolean ndarray + mask : np.ndarray[bool] Returns ------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6f2edaa300c93..bc9c7195b69f2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -231,7 +231,7 @@ If 0 or 'index': apply function to each column. If 1 or 'columns': apply function to each row.""", "inplace": """ - inplace : boolean, default False + inplace : bool, default False If True, performs operation inplace and returns None.""", "optional_by": """ by : str or list of str @@ -251,7 +251,7 @@ you to specify a location to update with some value.""", } -_numeric_only_doc = """numeric_only : boolean, default None +_numeric_only_doc = """numeric_only : bool or None, default None Include only float, int, boolean data. 
If None, will attempt to use everything, then use only numeric data """ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e47fa0eb45d94..8524907a84099 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -181,7 +181,7 @@ "axes_single_arg": "int or labels for object", "args_transpose": "axes to permute (int or label for object)", "inplace": """ - inplace : boolean, default False + inplace : bool, default False If True, performs operation inplace and returns None.""", "optional_by": """ by : str or list of str diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py index c9dd420ec33df..ffe31147fe87d 100644 --- a/pandas/core/groupby/categorical.py +++ b/pandas/core/groupby/categorical.py @@ -34,9 +34,9 @@ def recode_for_groupby( Parameters ---------- c : Categorical - sort : boolean + sort : bool The value of the sort parameter groupby was called with. - observed : boolean + observed : bool Account only for the observed values Returns @@ -93,7 +93,7 @@ def recode_from_groupby( Parameters ---------- c : Categorical - sort : boolean + sort : bool The value of the sort parameter groupby was called with. 
ci : CategoricalIndex The codes / categories to recode diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 51f7b44f6d69d..ae9f730ca1e97 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -293,7 +293,7 @@ def _get_grouper(self, obj, validate: bool = True): Parameters ---------- obj : the subject object - validate : boolean, default True + validate : bool, default True if True, validate the grouper Returns diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 5442f90a25580..9a12766febf11 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -817,8 +817,8 @@ class BinGrouper(BaseGrouper): ---------- bins : the split index of binlabels to group the item of axis binlabels : the label list - filter_empty : boolean, default False - mutated : boolean, default False + filter_empty : bool, default False + mutated : bool, default False indexer : a intp array Examples diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index ae0f853db628e..7e37048c77fa5 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2009,7 +2009,7 @@ def _align_series(self, indexer, ser: Series, multiindex_indexer: bool = False): Indexer used to get the locations that will be set to `ser`. ser : pd.Series Values to assign to the locations specified by `indexer`. - multiindex_indexer : boolean, optional + multiindex_indexer : bool, optional Defaults to False. Should be set to True if `indexer` was from a `pd.MultiIndex`, to avoid unnecessary broadcasting. 
diff --git a/pandas/core/missing.py b/pandas/core/missing.py index feaecec382704..53dce412f084f 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -789,7 +789,7 @@ def _interp_limit(invalid, fw_limit, bw_limit): Parameters ---------- - invalid : boolean ndarray + invalid : np.ndarray[bool] fw_limit : int or None forward limit to index bw_limit : int or None diff --git a/pandas/core/resample.py b/pandas/core/resample.py index abfd6932d7b21..70c9d2bc1e4e5 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1272,7 +1272,7 @@ def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- - method : string {'backfill', 'bfill', 'pad', 'ffill'} + method : {'backfill', 'bfill', 'pad', 'ffill'} Method for upsampling. limit : int, default None Maximum size gap to fill when reindexing. diff --git a/pandas/core/series.py b/pandas/core/series.py index 641a57a554a9b..4b89c09cdb898 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -162,7 +162,7 @@ "axes_single_arg": "{0 or 'index'}", "axis": """axis : {0 or 'index'} Parameter needed for compatibility with DataFrame.""", - "inplace": """inplace : boolean, default False + "inplace": """inplace : bool, default False If True, performs operation inplace and returns None.""", "unique": "np.ndarray", "duplicated": "Series", diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 6024c083fcc6b..816c1d9195778 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -278,7 +278,7 @@ def lexsort_indexer( ---------- keys : sequence of arrays Sequence of ndarrays to be sorted by the indexer - orders : boolean or list of booleans, optional + orders : bool or list of booleans, optional Determines the sorting order for each element in keys. If a list, it must be the same length as keys. 
This determines whether the corresponding element in keys should be sorted in ascending diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 5f33d00530361..66dc80159af16 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -171,7 +171,7 @@ def _maybe_cache( arg : listlike, tuple, 1-d array, Series format : string Strftime format to parse time - cache : boolean + cache : bool True attempts to create a cache of converted values convert_listlike : function Conversion function to apply on dates @@ -313,9 +313,9 @@ def _convert_listlike_datetimes( error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore' infer_datetime_format : bool, default False inferring format behavior from to_datetime - dayfirst : boolean + dayfirst : bool dayfirst parsing behavior from to_datetime - yearfirst : boolean + yearfirst : bool yearfirst parsing behavior from to_datetime exact : bool, default True exact format matching behavior from to_datetime diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index 54cb6b9f91137..00a99eb8a4480 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -86,7 +86,7 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover Parameters ---------- obj : the object to write to the clipboard - excel : boolean, defaults to True + excel : bool, defaults to True if True, use the provided separator, writing in a csv format for allowing easy pasting into excel. if False, write a string representation of the object diff --git a/pandas/io/common.py b/pandas/io/common.py index b87e8fcae1064..eab13cb7cd598 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -565,9 +565,9 @@ def get_handle( Passing compression options as keys in dict is now supported for compression modes 'gzip' and 'bz2' as well as 'zip'. - memory_map : boolean, default False + memory_map : bool, default False See parsers._parser_params for more information. 
- is_text : boolean, default True + is_text : bool, default True Whether the type of the content passed to the file/buffer is string or bytes. This is not the same as `"b" not in mode`. If a string content is passed to a binary file/buffer, a wrapper is inserted. diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 0278b22995089..567ef90802956 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -23,7 +23,7 @@ class ODFReader(BaseExcelReader): Parameters ---------- - filepath_or_buffer : string, path to be parsed or + filepath_or_buffer : str, path to be parsed or an open readable stream. storage_options : dict, optional passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``) diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 72950db72e067..20d9a998505cd 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -488,7 +488,7 @@ def __init__( Parameters ---------- - filepath_or_buffer : string, path object or Workbook + filepath_or_buffer : str, path object or Workbook Object to be parsed. storage_options : dict, optional passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``) diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py index 5eb88a694218a..eea0f1c03b998 100644 --- a/pandas/io/excel/_xlrd.py +++ b/pandas/io/excel/_xlrd.py @@ -15,7 +15,7 @@ def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None): Parameters ---------- - filepath_or_buffer : string, path object or Workbook + filepath_or_buffer : str, path object or Workbook Object to be parsed. 
storage_options : dict, optional passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``) diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 1ec2f7bfdd4be..dbe483d021c63 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -435,18 +435,18 @@ class ExcelFormatter: Format string for floating point numbers cols : sequence, optional Columns to write - header : boolean or sequence of str, default True + header : bool or sequence of str, default True Write out column names. If a list of string is given it is assumed to be aliases for the column names - index : boolean, default True + index : bool, default True output row names (index) index_label : string or sequence, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. - merge_cells : boolean, default False + merge_cells : bool, default False Format MultiIndex and Hierarchical Rows as merged cells. - inf_rep : string, default `'inf'` + inf_rep : str, default `'inf'` representation for np.inf values (which aren't representable in Excel) A `'-'` sign will be added in front of -inf. 
style_converter : callable, optional @@ -796,7 +796,7 @@ def write( """ writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter - sheet_name : string, default 'Sheet1' + sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame startrow : upper left cell row to dump data frame diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index acb17aee50b76..cbc407c2624f2 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -303,7 +303,7 @@ def format_object_summary( must be iterable and support __getitem__ formatter : callable string formatter for an element - is_justify : boolean + is_justify : bool should justify the display name : name, optional defaults to the class name of the obj diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 9250d861740fc..8c72c06a96322 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1280,7 +1280,7 @@ def set_table_styles( .. versionadded:: 1.2.0 - overwrite : boolean, default True + overwrite : bool, default True Styles are replaced if `True`, or extended if `False`. CSS rules are preserved so most recent styles set will dominate if selectors intersect. 
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index 4824dab764259..ea47dca4f079e 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -296,7 +296,7 @@ def parse_table_schema(json, precise_float): ---------- json : A JSON table schema - precise_float : boolean + precise_float : bool Flag controlling precision when decoding string to double values, as dictated by ``read_json`` diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 6adf1b20b769f..796d44dc7877a 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -403,7 +403,7 @@ def validate_integer(name, val, min_val=0): Parameters ---------- - name : string + name : str Parameter name (used for error reporting) val : int or float The value to check diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 8658bb654b787..b5e7cfacc55eb 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -980,7 +980,7 @@ def select_as_multiple( columns : the columns I want back start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection - iterator : boolean, return an iterator, default False + iterator : bool, return an iterator, default False chunksize : nrows to include in iteration, return an iterator auto_close : bool, default False Should automatically close the store when finished. @@ -1145,7 +1145,7 @@ def remove(self, key: str, where=None, start=None, stop=None): Parameters ---------- - key : string + key : str Node to remove or delete rows from where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 05cc742b45d83..cd9667bb1ce4b 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -137,10 +137,10 @@ class SAS7BDATReader(ReaderBase, abc.Iterator): contents. 
index : column identifier, defaults to None Column to use as index. - convert_dates : boolean, defaults to True + convert_dates : bool, defaults to True Attempt to convert dates to Pandas datetime values. Note that some rarely used SAS date formats may be unsupported. - blank_missing : boolean, defaults to True + blank_missing : bool, defaults to True Convert empty strings to missing values (SAS uses blanks to indicate missing character variables). chunksize : int, defaults to None diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index c71de542bbf77..f7e1c56cbb196 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -63,23 +63,23 @@ _base_params_doc = """\ Parameters ---------- -filepath_or_buffer : string or file-like object +filepath_or_buffer : str or file-like object Path to SAS file or object implementing binary read method.""" _params2_doc = """\ index : identifier of index column Identifier of column that should be used as index of the DataFrame. -encoding : string +encoding : str Encoding for text data. chunksize : int Read file `chunksize` lines at a time, returns iterator.""" _format_params_doc = """\ -format : string +format : str File format, only `xport` is currently supported.""" _iterator_doc = """\ -iterator : boolean, default False +iterator : bool, default False Return XportReader object for reading file incrementally.""" diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 200565b837dea..4b5baa0a18c90 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -666,7 +666,7 @@ def to_sql( - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. - index : boolean, default True + index : bool, default True Write DataFrame index as a column. index_label : str or sequence, optional Column label for index column(s). 
If None is given (default) and @@ -1341,11 +1341,11 @@ def read_table( Parameters ---------- - table_name : string + table_name : str Name of SQL table in database. index_col : string, optional, default: None Column to set as index. - coerce_float : boolean, default True + coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. This can result in loss of precision. @@ -1436,11 +1436,11 @@ def read_query( Parameters ---------- - sql : string + sql : str SQL query to be executed. index_col : string, optional, default: None Column name to use as index for the returned DataFrame object. - coerce_float : boolean, default True + coerce_float : bool, default True Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params : list, tuple or dict, optional, default: None @@ -1530,7 +1530,7 @@ def to_sql( - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. - index : boolean, default True + index : bool, default True Write DataFrame index as a column. index_label : string or sequence, default None Column label for index column(s). If None is given (default) and @@ -2028,7 +2028,7 @@ def to_sql( fail: If table exists, do nothing. replace: If table exists, drop it, recreate it, and insert data. append: If table exists, insert data. Create if it does not exist. - index : boolean, default True + index : bool, default True Write DataFrame index as a column index_label : string or sequence, default None Column label for index column(s). 
If None is given (default) and @@ -2135,7 +2135,7 @@ def get_schema( Parameters ---------- frame : DataFrame - name : string + name : str name of SQL table keys : string or sequence, default: None columns to use a primary key diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 677c3e791c72b..7c6a718b34e89 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -506,7 +506,7 @@ def period_break(dates: PeriodIndex, period: str) -> np.ndarray: ---------- dates : PeriodIndex Array of intervals to monitor. - period : string + period : str Name of the period to monitor. """ current = getattr(dates, period) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 616405c01cc2a..1304e861f948e 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -43,7 +43,7 @@ def assert_stat_op_calc( Parameters ---------- - opname : string + opname : str Name of the operator to test on frame alternative : function Function that opname is tested against; i.e. "frame.opname()" should @@ -146,7 +146,7 @@ def assert_stat_op_api(opname, float_frame, float_string_frame, has_numeric_only Parameters ---------- - opname : string + opname : str Name of the operator to test on frame float_frame : DataFrame DataFrame with columns of type float @@ -172,7 +172,7 @@ def assert_bool_op_calc(opname, alternative, frame, has_skipna=True): Parameters ---------- - opname : string + opname : str Name of the operator to test on frame alternative : function Function that opname is tested against; i.e. 
"frame.opname()" should @@ -237,7 +237,7 @@ def assert_bool_op_api( Parameters ---------- - opname : string + opname : str Name of the operator to test on frame float_frame : DataFrame DataFrame with columns of type float diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index ea60bc7f91a36..05b8885b7d9b7 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -564,7 +564,7 @@ def _maybe_coerce_freq(code) -> str: Parameters ---------- - source : string or DateOffset + source : str or DateOffset Frequency converting from Returns
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40698
2021-03-31T01:03:07Z
2021-03-31T12:52:35Z
2021-03-31T12:52:35Z
2021-03-31T14:16:33Z
CI: update exception catching for np1.21
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 7a2175a364a8a..a46e74ae15a06 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1733,6 +1733,8 @@ def maybe_cast_to_datetime( except ValueError: # TODO(GH#40048): only catch dateutil's ParserError # once we can reliably import it in all supported versions + if is_timedelta64: + raise pass # coerce datetimelike to object diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index b76a44b3c86be..5edeb72725d2f 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -89,7 +89,12 @@ def test_array_of_dt64_nat_with_td64dtype_raises(self, frame_or_series): if frame_or_series is DataFrame: arr = arr.reshape(1, 1) - msg = "Could not convert object to NumPy timedelta" + msg = "|".join( + [ + "Could not convert object to NumPy timedelta", + "Invalid type for timedelta scalar: <class 'numpy.datetime64'>", + ] + ) with pytest.raises(ValueError, match=msg): frame_or_series(arr, dtype="m8[ns]") diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index ab484e7ae9d8a..75474a29169a7 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1351,7 +1351,12 @@ def test_constructor_dtype_timedelta64(self): td.astype("int32") # this is an invalid casting - msg = "Could not convert object to NumPy timedelta" + msg = "|".join( + [ + "Could not convert object to NumPy timedelta", + "Could not convert 'foo' to NumPy timedelta", + ] + ) with pytest.raises(ValueError, match=msg): Series([timedelta(days=1), "foo"], dtype="m8[ns]")
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40697
2021-03-30T21:24:30Z
2021-03-31T01:02:24Z
2021-03-31T01:02:24Z
2021-03-31T01:04:04Z
CI: unpin pymysql, sync min version, and skip failing test
diff --git a/ci/deps/actions-37-db.yaml b/ci/deps/actions-37-db.yaml index 5381caaa242cf..8755e1a02c3cf 100644 --- a/ci/deps/actions-37-db.yaml +++ b/ci/deps/actions-37-db.yaml @@ -32,7 +32,7 @@ dependencies: - google-cloud-bigquery>=1.27.2 # GH 36436 - psycopg2 - pyarrow>=0.15.0 - - pymysql<0.10.0 # temporary pin, GH 36465 + - pymysql - pytables - python-snappy - python-dateutil diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 3a0e1b7568c91..6482f8bf8d6d9 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -486,7 +486,7 @@ Optional libraries below the lowest tested version may still work, but are not c +-----------------+-----------------+---------+ | pyarrow | 0.15.0 | | +-----------------+-----------------+---------+ -| pymysql | 0.7.11 | | +| pymysql | 0.8.1 | X | +-----------------+-----------------+---------+ | pytables | 3.5.1 | | +-----------------+-----------------+---------+ diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 4b5baa0a18c90..4a83814c52ade 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -1592,18 +1592,13 @@ def to_sql( ) table.create() - from sqlalchemy import exc + from sqlalchemy.exc import SQLAlchemyError try: table.insert(chunksize, method=method) - except exc.SQLAlchemyError as err: - # GH34431 - msg = "(1054, \"Unknown column 'inf' in 'field list'\")" - err_text = str(err.orig) - if re.search(msg, err_text): - raise ValueError("inf cannot be used with MySQL") from err - else: - raise err + except SQLAlchemyError as err: + # GH 34431 36465 + raise ValueError("inf cannot be used with MySQL") from err if not name.isdigit() and not name.islower(): # check for potentially case sensitivity issues (GH7815) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index c90f6ef956a65..04ddef57a9621 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -2007,12 +2007,22 @@ def main(connectable): "input", [{"foo": [np.inf]}, {"foo": 
[-np.inf]}, {"foo": [-np.inf], "infe0": ["bar"]}], ) - def test_to_sql_with_negative_npinf(self, input): + def test_to_sql_with_negative_npinf(self, input, request): # GH 34431 df = DataFrame(input) if self.flavor == "mysql": + # GH 36465 + # The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error + # for pymysql version >= 0.10 + # TODO: remove this version check after GH 36465 is fixed + import pymysql + + if pymysql.VERSION[0:3] >= (0, 10, 0) and "infe0" in df.columns: + mark = pytest.mark.xfail(reason="GH 36465") + request.node.add_marker(mark) + msg = "inf cannot be used with MySQL" with pytest.raises(ValueError, match=msg): df.to_sql("foobar", self.conn, index=False)
xref #36465
https://api.github.com/repos/pandas-dev/pandas/pulls/40696
2021-03-30T20:52:46Z
2021-04-09T15:51:17Z
2021-04-09T15:51:17Z
2022-11-18T02:21:55Z
TYP: tslibs.conversion, dtypes, fields
diff --git a/pandas/_libs/tslibs/conversion.pyi b/pandas/_libs/tslibs/conversion.pyi new file mode 100644 index 0000000000000..6470361542597 --- /dev/null +++ b/pandas/_libs/tslibs/conversion.pyi @@ -0,0 +1,41 @@ +from datetime import ( + datetime, + tzinfo, +) + +import numpy as np + +DT64NS_DTYPE: np.dtype +TD64NS_DTYPE: np.dtype + +class OutOfBoundsTimedelta(ValueError): ... + +def precision_from_unit( + unit: str, +) -> tuple[ + int, # int64_t + int, +]: ... + + +def ensure_datetime64ns( + arr: np.ndarray, # np.ndarray[datetime64[ANY]] + copy: bool = ..., +) -> np.ndarray: ... # np.ndarray[datetime64ns] + + +def ensure_timedelta64ns( + arr: np.ndarray, # np.ndarray[timedelta64[ANY]] + copy: bool = ..., +) -> np.ndarray: ... # np.ndarray[timedelta64ns] + + +def datetime_to_datetime64( + values: np.ndarray, # np.ndarray[object] +) -> tuple[ + np.ndarray, # np.ndarray[dt64ns] + tzinfo | None, +]: ... + + +def localize_pydatetime(dt: datetime, tz: tzinfo | None) -> datetime: ... diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi new file mode 100644 index 0000000000000..53752098bafe9 --- /dev/null +++ b/pandas/_libs/tslibs/dtypes.pyi @@ -0,0 +1,65 @@ +from enum import Enum + +from pandas._libs.tslibs.offsets import BaseOffset + +_attrname_to_abbrevs: dict[str, str] +_period_code_map: dict[str, int] + + +class PeriodDtypeBase: + # actually __cinit__ + def __new__(self, code: int): ... + + def freq_group_code(self) -> int: ... + def date_offset(self) -> BaseOffset: ... + + @classmethod + def from_date_offset(cls, offset: BaseOffset) -> PeriodDtypeBase: ... + + +class FreqGroup(Enum): + FR_ANN: int = ... + FR_QTR: int = ... + FR_MTH: int = ... + FR_WK: int = ... + FR_BUS: int = ... + FR_DAY: int = ... + FR_HR: int = ... + FR_MIN: int = ... + FR_SEC: int = ... + FR_MS: int = ... + FR_US: int = ... + FR_NS: int = ... + FR_UND: int = ... + + @staticmethod + def get_freq_group(code: int) -> FreqGroup: ... 
+ + +class Resolution(Enum): + RESO_NS: int = ... + RESO_US: int = ... + RESO_MS: int = ... + RESO_SEC: int = ... + RESO_MIN: int = ... + RESO_HR: int = ... + RESO_DAY: int = ... + RESO_MTH: int = ... + RESO_QTR: int = ... + RESO_YR: int = ... + + def __lt__(self, other: Resolution) -> bool: ... + + def __ge__(self, other: Resolution) -> bool: ... + + @property + def freq_group(self) -> FreqGroup: ... + + @property + def attrname(self) -> str: ... + + @classmethod + def from_attrname(cls, attrname: str) -> Resolution: ... + + @classmethod + def get_reso_from_freq(cls, freq: str) -> Resolution: ... diff --git a/pandas/_libs/tslibs/fields.pyi b/pandas/_libs/tslibs/fields.pyi new file mode 100644 index 0000000000000..22ae156d78b7d --- /dev/null +++ b/pandas/_libs/tslibs/fields.pyi @@ -0,0 +1,69 @@ +import numpy as np + +def build_field_sarray( + dtindex: np.ndarray, # const int64_t[:] +) -> np.ndarray: ... + +def month_position_check(fields, weekdays) -> str | None: ... + +def get_date_name_field( + dtindex: np.ndarray, # const int64_t[:] + field: str, + locale=..., +) -> np.ndarray: ... # np.ndarray[object] + +def get_start_end_field( + dtindex: np.ndarray, # const int64_t[:] + field: str, + freqstr: str | None = ..., + month_kw: int = ... +) -> np.ndarray: ... # np.ndarray[bool] + + +def get_date_field( + dtindex: np.ndarray, # const int64_t[:] + + field: str, +) -> np.ndarray: ... # np.ndarray[in32] + + +def get_timedelta_field( + tdindex: np.ndarray, # const int64_t[:] + field: str, +) -> np.ndarray: ... # np.ndarray[int32] + + +def isleapyear_arr( + years: np.ndarray, +) -> np.ndarray: ... # np.ndarray[bool] + +def build_isocalendar_sarray( + dtindex: np.ndarray, # const int64_t[:] +) -> np.ndarray: ... + + +def get_locale_names(name_type: str, locale: object = None): ... + + +class RoundTo: + @property + def MINUS_INFTY(self) -> int: ... + + @property + def PLUS_INFTY(self) -> int: ... + + @property + def NEAREST_HALF_EVEN(self) -> int: ... 
+ + @property + def NEAREST_HALF_PLUS_INFTY(self) -> int: ... + + @property + def NEAREST_HALF_MINUS_INFTY(self) -> int: ... + + +def round_nsint64( + values: np.ndarray, # np.ndarray[np.int64] + mode: RoundTo, + nanos: int, +) -> np.ndarray: ... # np.ndarray[np.int64] diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 67241a866ef35..cefb9bfa51280 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1727,6 +1727,7 @@ def _round(self, freq, mode, ambiguous, nonexistent): ) values = self.view("i8") + values = cast(np.ndarray, values) nanos = to_offset(freq).nanos result = round_nsint64(values, mode, nanos) result = self._maybe_mask_results(result, fill_value=iNaT)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40693
2021-03-30T16:14:38Z
2021-03-31T01:05:50Z
2021-03-31T01:05:50Z
2021-03-31T01:08:56Z
DOC: minor fixes to recent Styler guide PR
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb index b8119477407c0..8a10a6e4d4c2e 100644 --- a/doc/source/user_guide/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -243,7 +243,7 @@ "Table styles are flexible enough to control all individual parts of the table, including column headers and indexes. \n", "However, they can be unwieldy to type for individual data cells or for any kind of conditional formatting, so we recommend that table styles are used for broad styling, such as entire rows or columns at a time.\n", "\n", - "Table styles are also used to control features which can apply to the whole table at once such as greating a generic hover functionality. The `:hover` pseudo-selector, as well as other pseudo-selectors, can only be used this way.\n", + "Table styles are also used to control features which can apply to the whole table at once such as creating a generic hover functionality. The `:hover` pseudo-selector, as well as other pseudo-selectors, can only be used this way.\n", "\n", "To replicate the normal format of CSS selectors and properties (attribute value pairs), e.g. \n", "\n", @@ -295,7 +295,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next we just add a couple more styling artifacts targeting specific parts of the table, and we add some internally defined CSS classes that we need for the next section. Be careful here, since we are *chaining methods* we need to explicitly instruct the method **not to** ``overwrite`` the existing styles." + "Next we just add a couple more styling artifacts targeting specific parts of the table. Be careful here, since we are *chaining methods* we need to explicitly instruct the method **not to** ``overwrite`` the existing styles." 
] }, { @@ -308,11 +308,6 @@ " {'selector': 'th.col_heading', 'props': 'text-align: center;'},\n", " {'selector': 'th.col_heading.level0', 'props': 'font-size: 1.5em;'},\n", " {'selector': 'td', 'props': 'text-align: center; font-weight: bold;'},\n", - " # internal CSS classes\n", - " {'selector': '.true', 'props': 'background-color: #e6ffe6;'},\n", - " {'selector': '.false', 'props': 'background-color: #ffe6e6;'},\n", - " {'selector': '.border-red', 'props': 'border: 2px dashed red;'},\n", - " {'selector': '.border-green', 'props': 'border: 2px dashed green;'},\n", "], overwrite=False)" ] }, @@ -394,7 +389,7 @@ "\n", "*New in version 1.2.0*\n", "\n", - "The [.set_td_classes()][tdclass] method accepts a DataFrame with matching indices and columns to the underlying [Styler][styler]'s DataFrame. That DataFrame will contain strings as css-classes to add to individual data cells: the `<td>` elements of the `<table>`. Here we add our `.true` and `.false` classes that we created previously. We will save adding the borders until the [section on tooltips](#Tooltips).\n", + "The [.set_td_classes()][tdclass] method accepts a DataFrame with matching indices and columns to the underlying [Styler][styler]'s DataFrame. That DataFrame will contain strings as css-classes to add to individual data cells: the `<td>` elements of the `<table>`. Rather than use external CSS we will create our classes internally and add them to table style. 
We will save adding the borders until the [section on tooltips](#Tooltips).\n", "\n", "[tdclass]: ../reference/api/pandas.io.formats.style.Styler.set_td_classes.rst\n", "[styler]: ../reference/api/pandas.io.formats.style.Styler.rst" @@ -406,6 +401,10 @@ "metadata": {}, "outputs": [], "source": [ + "s.set_table_styles([ # create internal CSS classes\n", + " {'selector': '.true', 'props': 'background-color: #e6ffe6;'},\n", + " {'selector': '.false', 'props': 'background-color: #ffe6e6;'},\n", + "], overwrite=False)\n", "cell_color = pd.DataFrame([['true ', 'false ', 'true ', 'false '], \n", " ['false ', 'true ', 'false ', 'true ']], \n", " index=df.index, \n", @@ -622,7 +621,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The only thing left to do for our table is to add the highlighting borders to draw the audience attention to the tooltips. **Setting classes always overwrites** so we need to make sure we add the previous classes." + "The only thing left to do for our table is to add the highlighting borders to draw the audience attention to the tooltips. We will create internal CSS classes as before using table styles. **Setting classes always overwrites** so we need to make sure we add the previous classes." ] }, { @@ -631,6 +630,10 @@ "metadata": {}, "outputs": [], "source": [ + "s.set_table_styles([ # create internal CSS classes\n", + " {'selector': '.border-red', 'props': 'border: 2px dashed red;'},\n", + " {'selector': '.border-green', 'props': 'border: 2px dashed green;'},\n", + "], overwrite=False)\n", "cell_border = pd.DataFrame([['border-green ', ' ', ' ', 'border-red '], \n", " [' ', ' ', ' ', ' ']], \n", " index=df.index, \n", @@ -1381,7 +1384,7 @@ "source": [ "### HTML Escaping\n", "\n", - "Suppose you have to display HTML within HTML, that can be a bit of pain when the renderer can't distinguish. You can use the `escape` formatting option to handle this. Even use it within a formatter that contains HTML itself." 
+ "Suppose you have to display HTML within HTML, that can be a bit of pain when the renderer can't distinguish. You can use the `escape` formatting option to handle this, and even use it within a formatter that contains HTML itself." ] }, { @@ -1400,7 +1403,16 @@ "metadata": {}, "outputs": [], "source": [ - "# df4.style.format(escape=True)" + "df4.style.format(escape=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df4.style.format('<a href=\"https://pandas.pydata.org\" target=\"_blank\">{}</a>', escape=True)" ] }, {
small fixes to the previous doc edits: 1 spell correction. 1 uncomment and add an extra cell for clarity. 2 move creation of internal classes to relevant section.
https://api.github.com/repos/pandas-dev/pandas/pulls/40691
2021-03-30T08:25:53Z
2021-03-30T13:57:33Z
2021-03-30T13:57:33Z
2021-03-30T15:28:44Z
REF: de-duplicate pad/backfill code
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index a4bc2443e0eeb..0c17e8c3f8c19 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -794,68 +794,14 @@ def backfill(ndarray[algos_t] old, ndarray[algos_t] new, limit=None) -> ndarray: return indexer -@cython.boundscheck(False) -@cython.wraparound(False) def backfill_inplace(algos_t[:] values, uint8_t[:] mask, limit=None): - cdef: - Py_ssize_t i, N - algos_t val - uint8_t prev_mask - int lim, fill_count = 0 - - N = len(values) - - # GH#2778 - if N == 0: - return - - lim = validate_limit(N, limit) - - val = values[N - 1] - prev_mask = mask[N - 1] - for i in range(N - 1, -1, -1): - if mask[i]: - if fill_count >= lim: - continue - fill_count += 1 - values[i] = val - mask[i] = prev_mask - else: - fill_count = 0 - val = values[i] - prev_mask = mask[i] + pad_inplace(values[::-1], mask[::-1], limit=limit) -@cython.boundscheck(False) -@cython.wraparound(False) def backfill_2d_inplace(algos_t[:, :] values, const uint8_t[:, :] mask, limit=None): - cdef: - Py_ssize_t i, j, N, K - algos_t val - int lim, fill_count = 0 - - K, N = (<object>values).shape - - # GH#2778 - if N == 0: - return - - lim = validate_limit(N, limit) - - for j in range(K): - fill_count = 0 - val = values[j, N - 1] - for i in range(N - 1, -1, -1): - if mask[j, i]: - if fill_count >= lim: - continue - fill_count += 1 - values[j, i] = val - else: - fill_count = 0 - val = values[j, i] + pad_2d_inplace(values[:, ::-1], mask[:, ::-1], limit) @cython.boundscheck(False)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40689
2021-03-30T03:35:08Z
2021-03-31T01:01:49Z
2021-03-31T01:01:49Z
2021-03-31T01:04:25Z
CLN/TST: Some cleanups in apply.test_invalid_arg
diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py index c3cb3b245affd..698f85a04a757 100644 --- a/pandas/tests/apply/test_invalid_arg.py +++ b/pandas/tests/apply/test_invalid_arg.py @@ -102,26 +102,6 @@ def test_series_nested_renamer(renamer): s.agg(renamer) -def test_agg_dict_nested_renaming_depr_agg(): - - df = DataFrame({"A": range(5), "B": 5}) - - # nested renaming - msg = r"nested renamer is not supported" - with pytest.raises(SpecificationError, match=msg): - df.agg({"A": {"foo": "min"}, "B": {"bar": "max"}}) - - -def test_agg_dict_nested_renaming_depr_transform(): - df = DataFrame({"A": range(5), "B": 5}) - - # nested renaming - msg = r"nested renamer is not supported" - with pytest.raises(SpecificationError, match=msg): - # mypy identifies the argument as an invalid type - df.transform({"A": {"foo": "min"}, "B": {"bar": "max"}}) - - def test_apply_dict_depr(): tsdf = DataFrame( @@ -134,6 +114,17 @@ def test_apply_dict_depr(): tsdf.A.agg({"foo": ["sum", "mean"]}) +@pytest.mark.parametrize("method", ["agg", "transform"]) +def test_dict_nested_renaming_depr(method): + + df = DataFrame({"A": range(5), "B": 5}) + + # nested renaming + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + getattr(df, method)({"A": {"foo": "min"}, "B": {"bar": "max"}}) + + @pytest.mark.parametrize("method", ["apply", "agg", "transform"]) @pytest.mark.parametrize("func", [{"B": "sum"}, {"B": ["sum"]}]) def test_missing_column(method, func): @@ -288,25 +279,21 @@ def test_transform_none_to_type(): df.transform({"a": int}) -def test_apply_broadcast_error(int_frame_const_col): +@pytest.mark.parametrize( + "func", + [ + lambda x: np.array([1, 2]).reshape(-1, 2), + lambda x: [1, 2], + lambda x: Series([1, 2]), + ], +) +def test_apply_broadcast_error(int_frame_const_col, func): df = int_frame_const_col # > 1 ndim - msg = "too many dims to broadcast" + msg = "too many dims to broadcast|cannot broadcast 
result" with pytest.raises(ValueError, match=msg): - df.apply( - lambda x: np.array([1, 2]).reshape(-1, 2), - axis=1, - result_type="broadcast", - ) - - # cannot broadcast - msg = "cannot broadcast result" - with pytest.raises(ValueError, match=msg): - df.apply(lambda x: [1, 2], axis=1, result_type="broadcast") - - with pytest.raises(ValueError, match=msg): - df.apply(lambda x: Series([1, 2]), axis=1, result_type="broadcast") + df.apply(func, axis=1, result_type="broadcast") def test_transform_and_agg_err_agg(axis, float_frame): @@ -317,34 +304,47 @@ def test_transform_and_agg_err_agg(axis, float_frame): float_frame.agg(["max", "sqrt"], axis=axis) -def test_transform_and_agg_err_series(string_series): +@pytest.mark.parametrize( + "func, msg", + [ + (["sqrt", "max"], "cannot combine transform and aggregation"), + ( + {"foo": np.sqrt, "bar": "sum"}, + "cannot perform both aggregation and transformation", + ), + ], +) +def test_transform_and_agg_err_series(string_series, func, msg): # we are trying to transform with an aggregator - msg = "cannot combine transform and aggregation" with pytest.raises(ValueError, match=msg): with np.errstate(all="ignore"): - string_series.agg(["sqrt", "max"]) + string_series.agg(func) - msg = "cannot perform both aggregation and transformation" - with pytest.raises(ValueError, match=msg): - with np.errstate(all="ignore"): - string_series.agg({"foo": np.sqrt, "bar": "sum"}) - -def test_transform_and_agg_err_frame(axis, float_frame): +@pytest.mark.parametrize("func", [["max", "min"], ["max", "sqrt"]]) +def test_transform_wont_agg_frame(axis, float_frame, func): # GH 35964 # cannot both transform and agg msg = "Function did not transform" with pytest.raises(ValueError, match=msg): - float_frame.transform(["max", "min"], axis=axis) + float_frame.transform(func, axis=axis) + +@pytest.mark.parametrize("func", [["min", "max"], ["sqrt", "max"]]) +def test_transform_wont_agg_series(string_series, func): + # GH 35964 + # we are trying to transform 
with an aggregator msg = "Function did not transform" with pytest.raises(ValueError, match=msg): - float_frame.transform(["max", "sqrt"], axis=axis) + string_series.transform(func) -def test_transform_reducer_raises(all_reductions, frame_or_series): +@pytest.mark.parametrize( + "op_wrapper", [lambda x: x, lambda x: [x], lambda x: {"A": x}, lambda x: {"A": [x]}] +) +def test_transform_reducer_raises(all_reductions, frame_or_series, op_wrapper): # GH 35964 - op = all_reductions + op = op_wrapper(all_reductions) obj = DataFrame({"A": [1, 2, 3]}) if frame_or_series is not DataFrame: @@ -353,22 +353,3 @@ def test_transform_reducer_raises(all_reductions, frame_or_series): msg = "Function did not transform" with pytest.raises(ValueError, match=msg): obj.transform(op) - with pytest.raises(ValueError, match=msg): - obj.transform([op]) - with pytest.raises(ValueError, match=msg): - obj.transform({"A": op}) - with pytest.raises(ValueError, match=msg): - obj.transform({"A": [op]}) - - -def test_transform_wont_agg(string_series): - # GH 35964 - # we are trying to transform with an aggregator - msg = "Function did not transform" - with pytest.raises(ValueError, match=msg): - string_series.transform(["min", "max"]) - - msg = "Function did not transform" - with pytest.raises(ValueError, match=msg): - with np.errstate(all="ignore"): - string_series.transform(["sqrt", "max"])
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/40688
2021-03-30T00:32:16Z
2021-03-30T13:17:38Z
2021-03-30T13:17:38Z
2021-03-30T13:39:36Z
BUG: round_trip_pickle for lib.no_default
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 94a4d586b4f13..646b5a05afcad 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1,5 +1,6 @@ from collections import abc from decimal import Decimal +from enum import Enum import warnings import cython @@ -2433,8 +2434,15 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False, return objects +class NoDefault(Enum): + # We make this an Enum + # 1) because it round-trips through pickle correctly (see GH#40397) + # 2) because mypy does not understand singletons + no_default = "NO_DEFAULT" + + # Note: no_default is exported to the public API in pandas.api.extensions -no_default = object() # Sentinel indicating the default value. +no_default = NoDefault.no_default # Sentinel indicating the default value. @cython.boundscheck(False) diff --git a/pandas/tests/libs/test_lib.py b/pandas/tests/libs/test_lib.py index 0532de9998c5f..67bd5b309b634 100644 --- a/pandas/tests/libs/test_lib.py +++ b/pandas/tests/libs/test_lib.py @@ -206,3 +206,9 @@ def test_get_reverse_indexer(self): def test_cache_readonly_preserve_docstrings(): # GH18197 assert Index.hasnans.__doc__ is not None + + +def test_no_default_pickle(): + # GH#40397 + obj = tm.round_trip_pickle(lib.no_default) + assert obj is lib.no_default
- [x] closes #40397 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry This will also un-block making a .pyi for lib.pyx
https://api.github.com/repos/pandas-dev/pandas/pulls/40684
2021-03-29T22:15:16Z
2021-03-30T13:17:57Z
2021-03-30T13:17:57Z
2021-06-08T16:29:49Z
Agg functions for df not respecting numeric only with level
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 21e6f0ea57451..f735a1ffa16c3 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -754,6 +754,7 @@ Groupby/resample/rolling - Bug in :class:`core.window.ewm.ExponentialMovingWindow` when calling ``__getitem__`` would not retain ``com``, ``span``, ``alpha`` or ``halflife`` attributes (:issue:`40164`) - :class:`core.window.ewm.ExponentialMovingWindow` now raises a ``NotImplementedError`` when specifying ``times`` with ``adjust=False`` due to an incorrect calculation (:issue:`40098`) - Bug in :meth:`Series.asfreq` and :meth:`DataFrame.asfreq` dropping rows when the index is not sorted (:issue:`39805`) +- Bug in aggregation functions for :class:`DataFrame` not respecting ``numeric_only`` argument when ``level`` keyword was given (:issue:`40660`) Reshaping ^^^^^^^^^ @@ -809,7 +810,6 @@ Other - Bug in :func:`pandas.util.show_versions` where console JSON output was not proper JSON (:issue:`39701`) - Bug in :meth:`DataFrame.convert_dtypes` incorrectly raised ValueError when called on an empty DataFrame (:issue:`40393`) - .. --------------------------------------------------------------------------- .. 
_whatsnew_130.contributors: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4ef5aa1109074..ce1df8db82cb8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10493,7 +10493,9 @@ def _stat_function( if axis is None: axis = self._stat_axis_number if level is not None: - return self._agg_by_level(name, axis=axis, level=level, skipna=skipna) + return self._agg_by_level( + name, axis=axis, level=level, skipna=skipna, numeric_only=numeric_only + ) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) @@ -10554,7 +10556,12 @@ def _min_count_stat_function( axis = self._stat_axis_number if level is not None: return self._agg_by_level( - name, axis=axis, level=level, skipna=skipna, min_count=min_count + name, + axis=axis, + level=level, + skipna=skipna, + min_count=min_count, + numeric_only=numeric_only, ) return self._reduce( func, diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 1304e861f948e..72f0787d69b22 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1546,3 +1546,18 @@ def test_minmax_extensionarray(method, numeric_only): [getattr(int64_info, method)], index=Index(["Int64"], dtype="object") ) tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("meth", ["max", "min", "sum", "mean", "median"]) +def test_groupy_regular_arithmetic_equivalent(meth): + # GH#40660 + df = DataFrame( + {"a": [pd.Timedelta(hours=6), pd.Timedelta(hours=7)], "b": [12.1, 13.3]} + ) + expected = df.copy() + + result = getattr(df, meth)(level=0) + tm.assert_frame_equal(result, expected) + + result = getattr(df.groupby(level=0), meth)(numeric_only=False) + tm.assert_frame_equal(result, expected)
- [x] closes #40660 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry stumbled accross this one
https://api.github.com/repos/pandas-dev/pandas/pulls/40683
2021-03-29T21:55:04Z
2021-04-08T22:15:43Z
2021-04-08T22:15:42Z
2021-04-09T20:08:50Z
REF: simplify dispatch in groupby.ops
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index e23fa9b82f12e..b43a0d2eced93 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -455,11 +455,11 @@ ctypedef fused complexfloating_t: @cython.wraparound(False) @cython.boundscheck(False) -def _group_add(complexfloating_t[:, ::1] out, - int64_t[::1] counts, - ndarray[complexfloating_t, ndim=2] values, - const intp_t[:] labels, - Py_ssize_t min_count=0): +def group_add(complexfloating_t[:, ::1] out, + int64_t[::1] counts, + ndarray[complexfloating_t, ndim=2] values, + const intp_t[:] labels, + Py_ssize_t min_count=0): """ Only aggregates on axis=0 using Kahan summation """ @@ -506,19 +506,13 @@ def _group_add(complexfloating_t[:, ::1] out, out[i, j] = sumx[i, j] -group_add_float32 = _group_add['float32_t'] -group_add_float64 = _group_add['float64_t'] -group_add_complex64 = _group_add['float complex'] -group_add_complex128 = _group_add['double complex'] - - @cython.wraparound(False) @cython.boundscheck(False) -def _group_prod(floating[:, ::1] out, - int64_t[::1] counts, - ndarray[floating, ndim=2] values, - const intp_t[:] labels, - Py_ssize_t min_count=0): +def group_prod(floating[:, ::1] out, + int64_t[::1] counts, + ndarray[floating, ndim=2] values, + const intp_t[:] labels, + Py_ssize_t min_count=0): """ Only aggregates on axis=0 """ @@ -560,19 +554,15 @@ def _group_prod(floating[:, ::1] out, out[i, j] = prodx[i, j] -group_prod_float32 = _group_prod['float'] -group_prod_float64 = _group_prod['double'] - - @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) -def _group_var(floating[:, ::1] out, - int64_t[::1] counts, - ndarray[floating, ndim=2] values, - const intp_t[:] labels, - Py_ssize_t min_count=-1, - int64_t ddof=1): +def group_var(floating[:, ::1] out, + int64_t[::1] counts, + ndarray[floating, ndim=2] values, + const intp_t[:] labels, + Py_ssize_t min_count=-1, + int64_t ddof=1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) floating 
val, ct, oldmean @@ -619,17 +609,13 @@ def _group_var(floating[:, ::1] out, out[i, j] /= (ct - ddof) -group_var_float32 = _group_var['float'] -group_var_float64 = _group_var['double'] - - @cython.wraparound(False) @cython.boundscheck(False) -def _group_mean(floating[:, ::1] out, - int64_t[::1] counts, - ndarray[floating, ndim=2] values, - const intp_t[::1] labels, - Py_ssize_t min_count=-1): +def group_mean(floating[:, ::1] out, + int64_t[::1] counts, + ndarray[floating, ndim=2] values, + const intp_t[::1] labels, + Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) floating val, count, y, t @@ -675,10 +661,6 @@ def _group_mean(floating[:, ::1] out, out[i, j] = sumx[i, j] / count -group_mean_float32 = _group_mean['float'] -group_mean_float64 = _group_mean['double'] - - @cython.wraparound(False) @cython.boundscheck(False) def group_ohlc(floating[:, ::1] out, diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 0ecd798986c53..a6c3cb3ff5d0b 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1602,7 +1602,7 @@ def std(self, ddof: int = 1): Standard deviation of values within each group. 
""" return self._get_cythonized_result( - "group_var_float64", + "group_var", aggregate=True, needs_counts=True, needs_values=True, diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 5442f90a25580..20b8dd99b8cd3 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -129,31 +129,22 @@ def _get_cython_function(kind: str, how: str, dtype: np.dtype, is_numeric: bool) # see if there is a fused-type version of function # only valid for numeric f = getattr(libgroupby, ftype, None) - if f is not None and is_numeric: - return f - - # otherwise find dtype-specific version, falling back to object - for dt in [dtype_str, "object"]: - f2 = getattr(libgroupby, f"{ftype}_{dt}", None) - if f2 is not None: - return f2 - - if hasattr(f, "__signatures__"): - # inspect what fused types are implemented - if dtype_str == "object" and "object" not in f.__signatures__: - # disallow this function so we get a NotImplementedError below - # instead of a TypeError at runtime - f = None - - func = f - - if func is None: - raise NotImplementedError( - f"function is not implemented for this dtype: " - f"[how->{how},dtype->{dtype_str}]" - ) + if f is not None: + if is_numeric: + return f + elif dtype == object: + if "object" not in f.__signatures__: + # raise NotImplementedError here rather than TypeError later + raise NotImplementedError( + f"function is not implemented for this dtype: " + f"[how->{how},dtype->{dtype_str}]" + ) + return f - return func + raise NotImplementedError( + f"function is not implemented for this dtype: " + f"[how->{how},dtype->{dtype_str}]" + ) class BaseGrouper: @@ -475,25 +466,24 @@ def _get_cython_func_and_vals( func : callable values : np.ndarray """ - try: - func = _get_cython_function(kind, how, values.dtype, is_numeric) - except NotImplementedError: + if how in ["median", "cumprod"]: + # these two only have float64 implementations if is_numeric: - try: - values = ensure_float64(values) - except TypeError: - if 
lib.infer_dtype(values, skipna=False) == "complex": - values = values.astype(complex) - else: - raise - func = _get_cython_function(kind, how, values.dtype, is_numeric) + values = ensure_float64(values) else: - raise - else: - if values.dtype.kind in ["i", "u"]: - if how in ["ohlc"]: - # The output may still include nans, so we have to cast - values = ensure_float64(values) + raise NotImplementedError( + f"function is not implemented for this dtype: " + f"[how->{how},dtype->{values.dtype.name}]" + ) + func = getattr(libgroupby, f"group_{how}_float64") + return func, values + + func = _get_cython_function(kind, how, values.dtype, is_numeric) + + if values.dtype.kind in ["i", "u"]: + if how in ["add", "var", "prod", "mean", "ohlc"]: + # result may still include NaN, so we have to cast + values = ensure_float64(values) return func, values @@ -643,10 +633,9 @@ def _cython_operation( values = ensure_float64(values) else: values = ensure_int_or_float(values) - elif is_numeric and not is_complex_dtype(dtype): - values = ensure_float64(values) - else: - values = values.astype(object) + elif is_numeric: + if not is_complex_dtype(dtype): + values = ensure_float64(values) arity = self._cython_arity.get(how, 1) ngroups = self.ngroups diff --git a/pandas/tests/groupby/test_libgroupby.py b/pandas/tests/groupby/test_libgroupby.py index d776c34f5b5ec..7a9cadb6c8232 100644 --- a/pandas/tests/groupby/test_libgroupby.py +++ b/pandas/tests/groupby/test_libgroupby.py @@ -4,8 +4,7 @@ from pandas._libs.groupby import ( group_cumprod_float64, group_cumsum, - group_var_float32, - group_var_float64, + group_var, ) from pandas.core.dtypes.common import ensure_platform_int @@ -102,7 +101,7 @@ def test_group_var_constant(self): class TestGroupVarFloat64(GroupVarTestMixin): __test__ = True - algo = staticmethod(group_var_float64) + algo = staticmethod(group_var) dtype = np.float64 rtol = 1e-5 @@ -124,7 +123,7 @@ def test_group_var_large_inputs(self): class 
TestGroupVarFloat32(GroupVarTestMixin): __test__ = True - algo = staticmethod(group_var_float32) + algo = staticmethod(group_var) dtype = np.float32 rtol = 1e-2
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry only real perf impact is one case where we currently do a complex->object->complex cast that this removes
https://api.github.com/repos/pandas-dev/pandas/pulls/40681
2021-03-29T17:09:02Z
2021-03-31T12:57:39Z
2021-03-31T12:57:39Z
2021-03-31T14:17:13Z
TST: [ArrowStringArray] more parameterised testing - part 1
diff --git a/pandas/conftest.py b/pandas/conftest.py index f3356d2998ff8..aa43746d0e7d5 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1131,6 +1131,24 @@ def string_dtype(request): return request.param +@pytest.fixture( + params=[ + "string", + pytest.param( + "arrow_string", marks=td.skip_if_no("pyarrow", min_version="1.0.0") + ), + ] +) +def nullable_string_dtype(request): + """ + Parametrized fixture for string dtypes. + + * 'string' + * 'arrow_string' + """ + return request.param + + @pytest.fixture(params=tm.BYTES_DTYPES) def bytes_dtype(request): """ diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 406aec9d4c16e..616f46624bfd7 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -281,7 +281,10 @@ def test_is_string_dtype(): assert com.is_string_dtype(object) assert com.is_string_dtype(np.array(["a", "b"])) assert com.is_string_dtype(pd.StringDtype()) - assert com.is_string_dtype(pd.array(["a", "b"], dtype="string")) + + +def test_is_string_dtype_nullable(nullable_string_dtype): + assert com.is_string_dtype(pd.array(["a", "b"], dtype=nullable_string_dtype)) integer_dtypes: List = [] diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index 6c1161294dd17..a63c849d25a9f 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -39,6 +39,7 @@ ExtensionDtype, ) from pandas.api.types import is_bool_dtype +from pandas.core.arrays.string_arrow import ArrowStringDtype class JSONDtype(ExtensionDtype): @@ -194,7 +195,7 @@ def astype(self, dtype, copy=True): if copy: return self.copy() return self - elif isinstance(dtype, StringDtype): + elif isinstance(dtype, (StringDtype, ArrowStringDtype)): value = self.astype(str) # numpy doesn'y like nested dicts return dtype.construct_array_type()._from_sequence(value, copy=False) diff --git a/pandas/tests/frame/methods/test_combine_first.py 
b/pandas/tests/frame/methods/test_combine_first.py index b4d8a53e4b23f..dd91b32c8eb8c 100644 --- a/pandas/tests/frame/methods/test_combine_first.py +++ b/pandas/tests/frame/methods/test_combine_first.py @@ -381,15 +381,17 @@ def test_combine_first_with_asymmetric_other(self, val): tm.assert_frame_equal(res, exp) - def test_combine_first_string_dtype_only_na(self): + def test_combine_first_string_dtype_only_na(self, nullable_string_dtype): # GH: 37519 - df = DataFrame({"a": ["962", "85"], "b": [pd.NA] * 2}, dtype="string") - df2 = DataFrame({"a": ["85"], "b": [pd.NA]}, dtype="string") + df = DataFrame( + {"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype + ) + df2 = DataFrame({"a": ["85"], "b": [pd.NA]}, dtype=nullable_string_dtype) df.set_index(["a", "b"], inplace=True) df2.set_index(["a", "b"], inplace=True) result = df.combine_first(df2) expected = DataFrame( - {"a": ["962", "85"], "b": [pd.NA] * 2}, dtype="string" + {"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype ).set_index(["a", "b"]) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index fc8d82b9e00b2..869255505eb74 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1654,10 +1654,10 @@ def test_constructor_empty_with_string_dtype(self): df = DataFrame(index=[0, 1], columns=[0, 1], dtype="U5") tm.assert_frame_equal(df, expected) - def test_constructor_empty_with_string_extension(self): + def test_constructor_empty_with_string_extension(self, nullable_string_dtype): # GH 34915 - expected = DataFrame(index=[], columns=["c1"], dtype="string") - df = DataFrame(columns=["c1"], dtype="string") + expected = DataFrame(index=[], columns=["c1"], dtype=nullable_string_dtype) + df = DataFrame(columns=["c1"], dtype=nullable_string_dtype) tm.assert_frame_equal(df, expected) def test_constructor_single_value(self): diff --git 
a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py index 65aa189a3e965..30d6436c7e250 100644 --- a/pandas/tests/tools/test_to_numeric.py +++ b/pandas/tests/tools/test_to_numeric.py @@ -725,9 +725,9 @@ def test_precision_float_conversion(strrep): (["1", "2", "3.5"], Series([1, 2, 3.5])), ], ) -def test_to_numeric_from_nullable_string(values, expected): +def test_to_numeric_from_nullable_string(values, nullable_string_dtype, expected): # https://github.com/pandas-dev/pandas/issues/37262 - s = Series(values, dtype="string") + s = Series(values, dtype=nullable_string_dtype) result = to_numeric(s) tm.assert_series_equal(result, expected)
aim to reduce diff to address https://github.com/pandas-dev/pandas/pull/39908#discussion_r585573328, but good to do as follow-up to #35259 anyway. This also includes a change to inference, which could be broken off if not wanted as part of a TST PR. more tests to change once fixture in play and also some parameterisation to add for the str accessor testing (which could be a precursor with xfails for ArrowStringArray or combined with PR that adds the accessor.. will open a draft shortly for discussion)
https://api.github.com/repos/pandas-dev/pandas/pulls/40679
2021-03-29T13:56:47Z
2021-04-01T13:35:41Z
2021-04-01T13:35:41Z
2021-04-01T14:45:00Z
TST: [ArrowStringArray] remove xfail from test_repr
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 0574061a6a544..89defb278d321 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -42,23 +42,16 @@ def cls(request): return request.param -def test_repr(dtype, request): - if dtype == "arrow_string": - reason = ( - "AssertionError: assert ' A\n0 a\n1 None\n2 b' " - "== ' A\n0 a\n1 <NA>\n2 b'" - ) - mark = pytest.mark.xfail(reason=reason) - request.node.add_marker(mark) - +def test_repr(dtype): df = pd.DataFrame({"A": pd.array(["a", pd.NA, "b"], dtype=dtype)}) expected = " A\n0 a\n1 <NA>\n2 b" assert repr(df) == expected - expected = "0 a\n1 <NA>\n2 b\nName: A, dtype: string" + expected = f"0 a\n1 <NA>\n2 b\nName: A, dtype: {dtype}" assert repr(df.A) == expected - expected = "<StringArray>\n['a', <NA>, 'b']\nLength: 3, dtype: string" + arr_name = "ArrowStringArray" if dtype == "arrow_string" else "StringArray" + expected = f"<{arr_name}>\n['a', <NA>, 'b']\nLength: 3, dtype: {dtype}" assert repr(df.A.array) == expected
follow-up to #35259, xref https://github.com/pandas-dev/pandas/issues/35169#issuecomment-727217462
https://api.github.com/repos/pandas-dev/pandas/pulls/40678
2021-03-29T12:32:30Z
2021-03-29T13:50:04Z
2021-03-29T13:50:04Z
2021-04-09T11:58:42Z
TST: [ArrowStringArray] add dtype parameterisation to test_astype_float and test_fillna_args
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 0574061a6a544..373360bf788e1 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -371,9 +371,20 @@ def test_astype_int(dtype, request): tm.assert_extension_array_equal(result, expected) -def test_astype_float(any_float_allowed_nullable_dtype): +def test_astype_float(dtype, any_float_allowed_nullable_dtype, request): # Don't compare arrays (37974) - ser = pd.Series(["1.1", pd.NA, "3.3"], dtype="string") + + if dtype == "arrow_string": + if any_float_allowed_nullable_dtype in {"Float32", "Float64"}: + reason = "TypeError: Cannot interpret 'Float32Dtype()' as a data type" + else: + reason = ( + "TypeError: float() argument must be a string or a number, not 'NAType'" + ) + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + ser = pd.Series(["1.1", pd.NA, "3.3"], dtype=dtype) result = ser.astype(any_float_allowed_nullable_dtype) expected = pd.Series([1.1, np.nan, 3.3], dtype=any_float_allowed_nullable_dtype) @@ -436,17 +447,25 @@ def test_reduce_missing(skipna, dtype): assert pd.isna(result) -def test_fillna_args(): +def test_fillna_args(dtype, request): # GH 37987 - arr = pd.array(["a", pd.NA], dtype="string") + if dtype == "arrow_string": + reason = ( + "AssertionError: Regex pattern \"Cannot set non-string value '1' into " + "a StringArray.\" does not match 'Scalar must be NA or str'" + ) + mark = pytest.mark.xfail(reason=reason) + request.node.add_marker(mark) + + arr = pd.array(["a", pd.NA], dtype=dtype) res = arr.fillna(value="b") - expected = pd.array(["a", "b"], dtype="string") + expected = pd.array(["a", "b"], dtype=dtype) tm.assert_extension_array_equal(res, expected) res = arr.fillna(value=np.str_("b")) - expected = pd.array(["a", "b"], dtype="string") + expected = pd.array(["a", "b"], dtype=dtype) tm.assert_extension_array_equal(res, expected) msg = "Cannot set non-string 
value '1' into a StringArray."
#35259 added dtype parameterisation to existing tests in pandas/tests/arrays/string_/test_string.py and xfailed for ArrowStringArray where needed. test_astype_float and test_fillna_args were later added in #37974 and #37987 respectively. This PR doesn't fix (although will be looking at the all the xfailed tests in this module shortly, https://github.com/pandas-dev/pandas/issues/35169#issuecomment-727217462) just xfails to reduce the diff (slightly) for #39908
https://api.github.com/repos/pandas-dev/pandas/pulls/40677
2021-03-29T12:13:47Z
2021-03-29T13:52:08Z
2021-03-29T13:52:08Z
2021-04-01T12:35:05Z
REF: _cython_operation handle values.ndim==1 case up-front
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 1350848741ad1..8afb0e116fea8 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -603,6 +603,23 @@ def _cython_operation( kind, values, how, axis, min_count, **kwargs ) + elif values.ndim == 1: + # expand to 2d, dispatch, then squeeze if appropriate + values2d = values[None, :] + res = self._cython_operation( + kind=kind, + values=values2d, + how=how, + axis=1, + min_count=min_count, + **kwargs, + ) + if res.shape[0] == 1: + return res[0] + + # otherwise we have OHLC + return res.T + is_datetimelike = needs_i8_conversion(dtype) if is_datetimelike: @@ -623,22 +640,20 @@ def _cython_operation( values = values.astype(object) arity = self._cython_arity.get(how, 1) + ngroups = self.ngroups - vdim = values.ndim - swapped = False - if vdim == 1: - values = values[:, None] - out_shape = (self.ngroups, arity) + assert axis == 1 + values = values.T + if how == "ohlc": + out_shape = (ngroups, 4) + elif arity > 1: + raise NotImplementedError( + "arity of more than 1 is not supported for the 'how' argument" + ) + elif kind == "transform": + out_shape = values.shape else: - if axis > 0: - swapped = True - assert axis == 1, axis - values = values.T - if arity > 1: - raise NotImplementedError( - "arity of more than 1 is not supported for the 'how' argument" - ) - out_shape = (self.ngroups,) + values.shape[1:] + out_shape = (ngroups,) + values.shape[1:] func, values = self._get_cython_func_and_vals(kind, how, values, is_numeric) @@ -652,13 +667,11 @@ def _cython_operation( codes, _, _ = self.group_info + result = maybe_fill(np.empty(out_shape, dtype=out_dtype)) if kind == "aggregate": - result = maybe_fill(np.empty(out_shape, dtype=out_dtype)) counts = np.zeros(self.ngroups, dtype=np.int64) result = self._aggregate(result, counts, values, codes, func, min_count) elif kind == "transform": - result = maybe_fill(np.empty(values.shape, dtype=out_dtype)) - # TODO: min_count result = 
self._transform( result, values, codes, func, is_datetimelike, **kwargs @@ -674,11 +687,7 @@ def _cython_operation( assert result.ndim != 2 result = result[counts > 0] - if vdim == 1 and arity == 1: - result = result[:, 0] - - if swapped: - result = result.swapaxes(0, axis) + result = result.T if how not in base.cython_cast_blocklist: # e.g. if we are int64 and need to restore to datetime64/timedelta64 diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 41d7fed66469d..feaecec382704 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -861,7 +861,4 @@ def _rolling_window(a: np.ndarray, window: int): # https://stackoverflow.com/a/6811241 shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],) - # error: Module has no attribute "stride_tricks" - return np.lib.stride_tricks.as_strided( # type: ignore[attr-defined] - a, shape=shape, strides=strides - ) + return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40672
2021-03-29T00:37:08Z
2021-03-29T18:41:04Z
2021-03-29T18:41:04Z
2021-04-01T18:52:15Z
REF: move most of Block.interpolate to an array method
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index d1d0db913f854..7c2a31e63eeb3 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1106,128 +1106,34 @@ def interpolate( # If there are no NAs, then interpolate is a no-op return [self] if inplace else [self.copy()] - # a fill na type method try: m = missing.clean_fill_method(method) except ValueError: m = None + if m is None and self.dtype.kind != "f": + # only deal with floats + # bc we already checked that can_hold_na, we dont have int dtype here + # TODO: make a copy if not inplace? + return [self] - if m is not None: - if fill_value is not None: - # similar to validate_fillna_kwargs - raise ValueError("Cannot pass both fill_value and method") - - return self._interpolate_with_fill( - method=m, - axis=axis, - inplace=inplace, - limit=limit, - limit_area=limit_area, - downcast=downcast, - ) - # validate the interp method - m = missing.clean_interp_method(method, **kwargs) - - assert index is not None # for mypy + data = self.values if inplace else self.values.copy() + data = cast(np.ndarray, data) # bc overridden by ExtensionBlock - return self._interpolate( - method=m, - index=index, + interp_values = missing.interpolate_array_2d( + data, + method=method, axis=axis, + index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, - inplace=inplace, - downcast=downcast, **kwargs, ) - @final - def _interpolate_with_fill( - self, - method: str = "pad", - axis: int = 0, - inplace: bool = False, - limit: Optional[int] = None, - limit_area: Optional[str] = None, - downcast: Optional[str] = None, - ) -> List[Block]: - """ fillna but using the interpolate machinery """ - inplace = validate_bool_kwarg(inplace, "inplace") - - assert self._can_hold_na # checked by caller - - values = self.values if inplace else self.values.copy() - - values = missing.interpolate_2d( - values, - method=method, - axis=axis, - 
limit=limit, - limit_area=limit_area, - ) - - values = maybe_coerce_values(values) - blocks = [self.make_block_same_class(values)] - return self._maybe_downcast(blocks, downcast) - - @final - def _interpolate( - self, - method: str, - index: Index, - fill_value: Optional[Any] = None, - axis: int = 0, - limit: Optional[int] = None, - limit_direction: str = "forward", - limit_area: Optional[str] = None, - inplace: bool = False, - downcast: Optional[str] = None, - **kwargs, - ) -> List[Block]: - """ interpolate using scipy wrappers """ - inplace = validate_bool_kwarg(inplace, "inplace") - data = self.values if inplace else self.values.copy() - - # only deal with floats - if self.dtype.kind != "f": - # bc we already checked that can_hold_na, we dont have int dtype here - return [self] - - if is_valid_na_for_dtype(fill_value, self.dtype): - fill_value = self.fill_value - - if method in ("krogh", "piecewise_polynomial", "pchip"): - if not index.is_monotonic: - raise ValueError( - f"{method} interpolation requires that the index be monotonic." - ) - # process 1-d slices in the axis direction - - def func(yvalues: np.ndarray) -> np.ndarray: - - # process a 1-d slice, returning it - # should the axis argument be handled below in apply_along_axis? - # i.e. 
not an arg to missing.interpolate_1d - return missing.interpolate_1d( - xvalues=index, - yvalues=yvalues, - method=method, - limit=limit, - limit_direction=limit_direction, - limit_area=limit_area, - fill_value=fill_value, - bounds_error=False, - **kwargs, - ) - - # interp each column independently - interp_values = np.apply_along_axis(func, axis, data) interp_values = maybe_coerce_values(interp_values) - - blocks = [self.make_block_same_class(interp_values)] - return self._maybe_downcast(blocks, downcast) + nbs = [self.make_block_same_class(interp_values)] + return self._maybe_downcast(nbs, downcast) def take_nd( self, diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 53dce412f084f..21c79588317df 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -36,7 +36,11 @@ is_numeric_v_string_like, needs_i8_conversion, ) -from pandas.core.dtypes.missing import isna +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + na_value_for_dtype, +) if TYPE_CHECKING: from pandas import Index @@ -145,7 +149,7 @@ def clean_fill_method(method, allow_nearest: bool = False): ] -def clean_interp_method(method: str, **kwargs) -> str: +def clean_interp_method(method: str, index: Index, **kwargs) -> str: order = kwargs.get("order") if method in ("spline", "polynomial") and order is None: @@ -155,6 +159,12 @@ def clean_interp_method(method: str, **kwargs) -> str: if method not in valid: raise ValueError(f"method must be one of {valid}. Got '{method}' instead.") + if method in ("krogh", "piecewise_polynomial", "pchip"): + if not index.is_monotonic: + raise ValueError( + f"{method} interpolation requires that the index be monotonic." 
+ ) + return method @@ -195,6 +205,102 @@ def find_valid_index(values, *, how: str) -> Optional[int]: return idxpos +def interpolate_array_2d( + data: np.ndarray, + method: str = "pad", + axis: int = 0, + index: Optional[Index] = None, + limit: Optional[int] = None, + limit_direction: str = "forward", + limit_area: Optional[str] = None, + fill_value: Optional[Any] = None, + coerce: bool = False, + downcast: Optional[str] = None, + **kwargs, +): + """ + Wrapper to dispatch to either interpolate_2d or interpolate_2d_with_fill. + """ + try: + m = clean_fill_method(method) + except ValueError: + m = None + + if m is not None: + if fill_value is not None: + # similar to validate_fillna_kwargs + raise ValueError("Cannot pass both fill_value and method") + + interp_values = interpolate_2d( + data, + method=m, + axis=axis, + limit=limit, + limit_area=limit_area, + ) + else: + assert index is not None # for mypy + + interp_values = interpolate_2d_with_fill( + data=data, + index=index, + axis=axis, + method=method, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + fill_value=fill_value, + **kwargs, + ) + return interp_values + + +def interpolate_2d_with_fill( + data: np.ndarray, # floating dtype + index: Index, + axis: int, + method: str = "linear", + limit: Optional[int] = None, + limit_direction: str = "forward", + limit_area: Optional[str] = None, + fill_value: Optional[Any] = None, + **kwargs, +) -> np.ndarray: + """ + Column-wise application of interpolate_1d. + + Notes + ----- + The signature does differs from interpolate_1d because it only + includes what is needed for Block.interpolate. 
+ """ + # validate the interp method + clean_interp_method(method, index, **kwargs) + + if is_valid_na_for_dtype(fill_value, data.dtype): + fill_value = na_value_for_dtype(data.dtype, compat=False) + + def func(yvalues: np.ndarray) -> np.ndarray: + # process 1-d slices in the axis direction, returning it + + # should the axis argument be handled below in apply_along_axis? + # i.e. not an arg to interpolate_1d + return interpolate_1d( + xvalues=index, + yvalues=yvalues, + method=method, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + fill_value=fill_value, + bounds_error=False, + **kwargs, + ) + + # interp each column independently + return np.apply_along_axis(func, axis, data) + + def interpolate_1d( xvalues: Index, yvalues: np.ndarray, @@ -638,7 +744,7 @@ def interpolate_2d( Perform an actual interpolation of values, values will be make 2-d if needed fills inplace, returns the result. - Parameters + Parameters ---------- values: array-like Input array.
Not _quite_ at the point where it can be called directly from ArrayManager, but close.
https://api.github.com/repos/pandas-dev/pandas/pulls/40671
2021-03-29T00:27:36Z
2021-04-02T20:20:24Z
2021-04-02T20:20:24Z
2021-04-02T21:04:41Z
REF: better use of fused_types for group_ohlc
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 7ddc087df9b11..f0f96fdfcc0a1 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -681,18 +681,17 @@ group_mean_float64 = _group_mean['double'] @cython.wraparound(False) @cython.boundscheck(False) -def _group_ohlc(floating[:, ::1] out, - int64_t[::1] counts, - ndarray[floating, ndim=2] values, - const intp_t[:] labels, - Py_ssize_t min_count=-1): +def group_ohlc(floating[:, ::1] out, + int64_t[::1] counts, + ndarray[floating, ndim=2] values, + const intp_t[:] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ cdef: Py_ssize_t i, j, N, K, lab - floating val, count - Py_ssize_t ngroups = len(counts) + floating val assert min_count == -1, "'min_count' only used in add and prod" @@ -727,10 +726,6 @@ def _group_ohlc(floating[:, ::1] out, out[lab, 3] = val -group_ohlc_float32 = _group_ohlc['float'] -group_ohlc_float64 = _group_ohlc['double'] - - @cython.boundscheck(False) @cython.wraparound(False) def group_quantile(ndarray[float64_t] out, diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 1350848741ad1..99b9aea4f82df 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -486,6 +486,12 @@ def _get_cython_func_and_vals( func = _get_cython_function(kind, how, values.dtype, is_numeric) else: raise + else: + if values.dtype.kind in ["i", "u"]: + if how in ["ohlc"]: + # The output may still include nans, so we have to cast + values = ensure_float64(values) + return func, values @final diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 41d7fed66469d..feaecec382704 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -861,7 +861,4 @@ def _rolling_window(a: np.ndarray, window: int): # https://stackoverflow.com/a/6811241 shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],) - # error: Module has no attribute "stride_tricks" - return 
np.lib.stride_tricks.as_strided( # type: ignore[attr-defined] - a, shape=shape, strides=strides - ) + return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) diff --git a/pandas/tests/groupby/test_libgroupby.py b/pandas/tests/groupby/test_libgroupby.py index febc12edf0b32..d776c34f5b5ec 100644 --- a/pandas/tests/groupby/test_libgroupby.py +++ b/pandas/tests/groupby/test_libgroupby.py @@ -138,7 +138,7 @@ def _check(dtype): counts = np.zeros(len(out), dtype=np.int64) labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins]))) - func = getattr(libgroupby, f"group_ohlc_{dtype}") + func = libgroupby.group_ohlc func(out, counts, obj[:, None], labels) def _ohlc(group):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40668
2021-03-28T18:49:43Z
2021-03-29T13:09:14Z
2021-03-29T13:09:14Z
2021-03-29T14:39:59Z
DOC: Trivial typo fix in user_guide/window (#40666)
diff --git a/doc/source/user_guide/window.rst b/doc/source/user_guide/window.rst index d09c1ab9a1409..be9c04ae5d4f3 100644 --- a/doc/source/user_guide/window.rst +++ b/doc/source/user_guide/window.rst @@ -101,7 +101,7 @@ be calculated with :meth:`~Rolling.apply` by specifying a separate column of wei All windowing operations support a ``min_periods`` argument that dictates the minimum amount of non-``np.nan`` values a window must have; otherwise, the resulting value is ``np.nan``. -``min_peridos`` defaults to 1 for time-based windows and ``window`` for fixed windows +``min_periods`` defaults to 1 for time-based windows and ``window`` for fixed windows .. ipython:: python
Trivial string change: `min_periods` corrected from `min_peridos` Fixes Issue #40666
https://api.github.com/repos/pandas-dev/pandas/pulls/40667
2021-03-28T18:18:57Z
2021-03-28T21:46:37Z
2021-03-28T21:46:37Z
2021-03-29T03:53:21Z
BUG: Update Styler.clear method to clear all
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 92efb225682b7..78fb8838a9fa9 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -137,6 +137,7 @@ Other enhancements - :meth:`.Styler.apply` now more consistently accepts ndarray function returns, i.e. in all cases for ``axis`` is ``0, 1 or None`` (:issue:`39359`) - :meth:`.Styler.apply` and :meth:`.Styler.applymap` now raise errors if wrong format CSS is passed on render (:issue:`39660`) - :meth:`.Styler.format` adds keyword argument ``escape`` for optional HTML escaping (:issue:`40437`) +- :meth:`.Styler.clear` now clears :attr:`Styler.hidden_index` and :attr:`Styler.hidden_columns` as well (:issue:`40484`) - Builtin highlighting methods in :class:`Styler` have a more consistent signature and css customisability (:issue:`40242`) - :meth:`Series.loc.__getitem__` and :meth:`Series.loc.__setitem__` with :class:`MultiIndex` now raising helpful error message when indexer has too many dimensions (:issue:`35349`) - :meth:`pandas.read_stata` and :class:`StataReader` support reading data from compressed files. 
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 9250d861740fc..daab955cf1fbe 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -882,7 +882,12 @@ def clear(self) -> None: self.ctx.clear() self.tooltips = None self.cell_context.clear() - self._todo = [] + self._todo.clear() + + self.hidden_index = False + self.hidden_columns = [] + # self.format and self.table_styles may be dependent on user + # input in self.__init__() def _compute(self): """ diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py index 302019b702829..3422eb9dc64b7 100644 --- a/pandas/tests/io/formats/style/test_style.py +++ b/pandas/tests/io/formats/style/test_style.py @@ -174,10 +174,13 @@ def test_clear(self): tt = DataFrame({"A": [None, "tt"]}) css = DataFrame({"A": [None, "cls-a"]}) s = self.df.style.highlight_max().set_tooltips(tt).set_td_classes(css) + s = s.hide_index().hide_columns("A") # _todo, tooltips and cell_context items added to.. assert len(s._todo) > 0 assert s.tooltips assert len(s.cell_context) > 0 + assert s.hidden_index is True + assert len(s.hidden_columns) > 0 s = s._compute() # ctx item affected when a render takes place. _todo is maintained @@ -190,6 +193,8 @@ def test_clear(self): assert len(s._todo) == 0 assert not s.tooltips assert len(s.cell_context) == 0 + assert s.hidden_index is False + assert len(s.hidden_columns) == 0 def test_render(self): df = DataFrame({"A": [0, 1]})
- [ x ] closes #40484 - [ x ] tests added / passed - [ x ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ x ] whatsnew entry updated in docs
https://api.github.com/repos/pandas-dev/pandas/pulls/40664
2021-03-28T17:44:44Z
2021-04-02T17:20:24Z
2021-04-02T17:20:24Z
2021-04-02T17:50:01Z
BUG: rank treating min int as NaN
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 92efb225682b7..6bba650dc80a1 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -501,6 +501,7 @@ Numeric - Bug in :meth:`DataFrame.mode` and :meth:`Series.mode` not keeping consistent integer :class:`Index` for empty input (:issue:`33321`) - Bug in :meth:`DataFrame.rank` with ``np.inf`` and mixture of ``np.nan`` and ``np.inf`` (:issue:`32593`) - Bug in :meth:`DataFrame.rank` with ``axis=0`` and columns holding incomparable types raising ``IndexError`` (:issue:`38932`) +- Bug in ``rank`` method for :class:`Series`, :class:`DataFrame`, :class:`DataFrameGroupBy`, and :class:`SeriesGroupBy` treating the most negative ``int64`` value as missing (:issue:`32859`) - Bug in :func:`select_dtypes` different behavior between Windows and Linux with ``include="int"`` (:issue:`36569`) - Bug in :meth:`DataFrame.apply` and :meth:`DataFrame.agg` when passed argument ``func="size"`` would operate on the entire ``DataFrame`` instead of rows or columns (:issue:`39934`) - Bug in :meth:`DataFrame.transform` would raise ``SpecificationError`` when passed a dictionary and columns were missing; will now raise a ``KeyError`` instead (:issue:`40004`) diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 495160e65eec3..a4bc2443e0eeb 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -962,6 +962,7 @@ ctypedef fused rank_t: def rank_1d( ndarray[rank_t, ndim=1] values, const intp_t[:] labels, + bint is_datetimelike=False, ties_method="average", bint ascending=True, bint pct=False, @@ -977,6 +978,8 @@ def rank_1d( Array containing unique label for each group, with its ordering matching up to the corresponding record in `values`. If not called from a groupby operation, will be an array of 0's + is_datetimelike : bool, default False + True if `values` contains datetime-like entries. 
ties_method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' * average: average rank of group @@ -1032,7 +1035,7 @@ def rank_1d( if rank_t is object: mask = missing.isnaobj(masked_vals) - elif rank_t is int64_t: + elif rank_t is int64_t and is_datetimelike: mask = (masked_vals == NPY_NAT).astype(np.uint8) elif rank_t is float64_t: mask = np.isnan(masked_vals).astype(np.uint8) @@ -1059,7 +1062,7 @@ def rank_1d( if rank_t is object: nan_fill_val = NegInfinity() elif rank_t is int64_t: - nan_fill_val = np.iinfo(np.int64).min + nan_fill_val = NPY_NAT elif rank_t is uint64_t: nan_fill_val = 0 else: @@ -1275,6 +1278,7 @@ def rank_1d( def rank_2d( ndarray[rank_t, ndim=2] in_arr, int axis=0, + bint is_datetimelike=False, ties_method="average", bint ascending=True, na_option="keep", @@ -1299,7 +1303,9 @@ def rank_2d( tiebreak = tiebreakers[ties_method] keep_na = na_option == 'keep' - check_mask = rank_t is not uint64_t + + # For cases where a mask is not possible, we can avoid mask checks + check_mask = not (rank_t is uint64_t or (rank_t is int64_t and not is_datetimelike)) if axis == 0: values = np.asarray(in_arr).T.copy() @@ -1310,13 +1316,15 @@ def rank_2d( if values.dtype != np.object_: values = values.astype('O') - if rank_t is not uint64_t: + if check_mask: if ascending ^ (na_option == 'top'): if rank_t is object: nan_value = Infinity() elif rank_t is float64_t: nan_value = np.inf - elif rank_t is int64_t: + + # int64 and datetimelike + else: nan_value = np.iinfo(np.int64).max else: @@ -1324,14 +1332,18 @@ def rank_2d( nan_value = NegInfinity() elif rank_t is float64_t: nan_value = -np.inf - elif rank_t is int64_t: + + # int64 and datetimelike + else: nan_value = NPY_NAT if rank_t is object: mask = missing.isnaobj2d(values) elif rank_t is float64_t: mask = np.isnan(values) - elif rank_t is int64_t: + + # int64 and datetimelike + else: mask = values == NPY_NAT np.putmask(values, mask, nan_value) diff --git a/pandas/_libs/groupby.pyx 
b/pandas/_libs/groupby.pyx index 7ddc087df9b11..64373adf0217f 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -1079,9 +1079,8 @@ def group_rank(float64_t[:, ::1] out, ngroups : int This parameter is not used, is needed to match signatures of other groupby functions. - is_datetimelike : bool, default False - unused in this method but provided for call compatibility with other - Cython transformations + is_datetimelike : bool + True if `values` contains datetime-like entries. ties_method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' * average: average rank of group @@ -1109,6 +1108,7 @@ def group_rank(float64_t[:, ::1] out, result = rank_1d( values=values[:, 0], labels=labels, + is_datetimelike=is_datetimelike, ties_method=ties_method, ascending=ascending, pct=pct, diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 77b5a0148905e..f52aff424eb0b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1031,21 +1031,23 @@ def rank( Whether or not to the display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1). 
""" + is_datetimelike = needs_i8_conversion(values.dtype) + values = _get_values_for_rank(values) if values.ndim == 1: - values = _get_values_for_rank(values) ranks = algos.rank_1d( values, labels=np.zeros(len(values), dtype=np.intp), + is_datetimelike=is_datetimelike, ties_method=method, ascending=ascending, na_option=na_option, pct=pct, ) elif values.ndim == 2: - values = _get_values_for_rank(values) ranks = algos.rank_2d( values, axis=axis, + is_datetimelike=is_datetimelike, ties_method=method, ascending=ascending, na_option=na_option, diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py index ce46d1d8b1869..6538eda8cdeff 100644 --- a/pandas/tests/frame/methods/test_rank.py +++ b/pandas/tests/frame/methods/test_rank.py @@ -6,7 +6,6 @@ import numpy as np import pytest -from pandas._libs import iNaT from pandas._libs.algos import ( Infinity, NegInfinity, @@ -382,7 +381,7 @@ def test_pct_max_many_rows(self): "float32", ), ([np.iinfo(np.uint8).min, 1, 2, 100, np.iinfo(np.uint8).max], "uint8"), - pytest.param( + ( [ np.iinfo(np.int64).min, -100, @@ -394,20 +393,20 @@ def test_pct_max_many_rows(self): np.iinfo(np.int64).max, ], "int64", - marks=pytest.mark.xfail( - reason="iNaT is equivalent to minimum value of dtype" - "int64 pending issue GH#16674" - ), ), ([NegInfinity(), "1", "A", "BA", "Ba", "C", Infinity()], "object"), + ( + [datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 5)], + "datetime64", + ), ], ) def test_rank_inf_and_nan(self, contents, dtype, frame_or_series): dtype_na_map = { "float64": np.nan, "float32": np.nan, - "int64": iNaT, "object": None, + "datetime64": np.datetime64("nat"), } # Insert nans at random positions if underlying dtype has missing # value. 
Then adjust the expected order by adding nans accordingly diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py index 6116703ebd174..00641effac08d 100644 --- a/pandas/tests/groupby/test_rank.py +++ b/pandas/tests/groupby/test_rank.py @@ -1,9 +1,12 @@ +from datetime import datetime + import numpy as np import pytest import pandas as pd from pandas import ( DataFrame, + NaT, Series, concat, ) @@ -517,3 +520,25 @@ def test_rank_zero_div(input_key, input_value, output_value): result = df.groupby("A").rank(method="dense", pct=True) expected = DataFrame({"B": output_value}) tm.assert_frame_equal(result, expected) + + +def test_rank_min_int(): + # GH-32859 + df = DataFrame( + { + "grp": [1, 1, 2], + "int_col": [ + np.iinfo(np.int64).min, + np.iinfo(np.int64).max, + np.iinfo(np.int64).min, + ], + "datetimelike": [NaT, datetime(2001, 1, 1), NaT], + } + ) + + result = df.groupby("grp").rank() + expected = DataFrame( + {"int_col": [1.0, 2.0, 1.0], "datetimelike": [np.NaN, 1.0, np.NaN]} + ) + + tm.assert_frame_equal(result, expected)
- [x] closes #32859 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry ASV's look unaffected: <details> ``` before after ratio [029907c9] [b45f9193] <master> <bug/rank_datetimelike> 6.90±0.2ms 6.76±0.6ms 0.98 frame_methods.Rank.time_rank('float') 2.63±0.3ms 2.39±0.2ms ~0.91 frame_methods.Rank.time_rank('int') 45.7±1ms 45.3±1ms 0.99 frame_methods.Rank.time_rank('object') 2.47±0.2ms 2.22±0.1ms ~0.90 frame_methods.Rank.time_rank('uint') 10.8±0.09ms 10.8±0.8ms 1.01 series_methods.Rank.time_rank('float') 8.10±0.2ms 8.26±0.3ms 1.02 series_methods.Rank.time_rank('int') 51.3±2ms 51.2±1ms 1.00 series_methods.Rank.time_rank('object') 8.76±0.6ms 8.30±0.2ms 0.95 series_methods.Rank.time_rank('uint') ``` </details>
https://api.github.com/repos/pandas-dev/pandas/pulls/40659
2021-03-27T21:20:45Z
2021-03-29T14:21:48Z
2021-03-29T14:21:48Z
2021-03-29T15:51:23Z