Dataset schema (per-column string length ranges):

| column     | min | max   |
|------------|-----|-------|
| title      | 1   | 185   |
| diff       | 0   | 32.2M |
| body       | 0   | 123k  |
| url        | 57  | 58    |
| created_at | 20  | 20    |
| closed_at  | 20  | 20    |
| merged_at  | 20  | 20    |
| updated_at | 20  | 20    |
Backport PR #35750 on branch 1.1.x (Pass check_dtype to assert_extension_array_equal)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 10bdfdc10c87a..9c92c803fa677 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -37,6 +37,7 @@ Bug fixes ~~~~~~~~~ - Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`) (:issue:`35663`). +- Bug in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` where extension dtypes were not ignored when ``check_dtypes`` was set to ``False`` (:issue:`35715`). Categorical ^^^^^^^^^^^ diff --git a/pandas/_testing.py b/pandas/_testing.py index 713f29466f097..ef6232fa6d575 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -1377,12 +1377,18 @@ def assert_series_equal( ) elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype): assert_extension_array_equal( - left._values, right._values, index_values=np.asarray(left.index) + left._values, + right._values, + check_dtype=check_dtype, + index_values=np.asarray(left.index), ) elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype): # DatetimeArray or TimedeltaArray assert_extension_array_equal( - left._values, right._values, index_values=np.asarray(left.index) + left._values, + right._values, + check_dtype=check_dtype, + index_values=np.asarray(left.index), ) else: _testing.assert_almost_equal( diff --git a/pandas/tests/util/test_assert_extension_array_equal.py b/pandas/tests/util/test_assert_extension_array_equal.py index d9fdf1491c328..f9259beab5d13 100644 --- a/pandas/tests/util/test_assert_extension_array_equal.py +++ b/pandas/tests/util/test_assert_extension_array_equal.py @@ -1,6 +1,7 @@ import numpy as np import pytest +from pandas import array import pandas._testing as tm from pandas.core.arrays.sparse import SparseArray @@ -102,3 +103,11 @@ def test_assert_extension_array_equal_non_extension_array(side): with pytest.raises(AssertionError, match=msg): tm.assert_extension_array_equal(*args) + + +@pytest.mark.parametrize("right_dtype", ["Int32", "int64"]) +def test_assert_extension_array_equal_ignore_dtype_mismatch(right_dtype): + # https://github.com/pandas-dev/pandas/issues/35715 + left = array([1, 2, 3], dtype="Int64") + right = array([1, 2, 3], dtype=right_dtype) + tm.assert_extension_array_equal(left, right, check_dtype=False) diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index fe3e1ff906919..3aa3c64923b14 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -260,3 +260,11 @@ def test_assert_frame_equal_interval_dtype_mismatch(): with pytest.raises(AssertionError, match=msg): tm.assert_frame_equal(left, right, check_dtype=True) + + +@pytest.mark.parametrize("right_dtype", ["Int32", "int64"]) +def test_assert_frame_equal_ignore_extension_dtype_mismatch(right_dtype): + # https://github.com/pandas-dev/pandas/issues/35715 + left = pd.DataFrame({"a": [1, 2, 3]}, dtype="Int64") + right = pd.DataFrame({"a": [1, 2, 3]}, dtype=right_dtype) + tm.assert_frame_equal(left, right, check_dtype=False) diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index a7b5aeac560e4..f3c66052b1904 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -296,3 +296,11 @@ def test_series_equal_exact_for_nonnumeric(): tm.assert_series_equal(s1, s3, check_exact=True) with 
pytest.raises(AssertionError): tm.assert_series_equal(s3, s1, check_exact=True) + + +@pytest.mark.parametrize("right_dtype", ["Int32", "int64"]) +def test_assert_series_equal_ignore_extension_dtype_mismatch(right_dtype): + # https://github.com/pandas-dev/pandas/issues/35715 + left = pd.Series([1, 2, 3], dtype="Int64") + right = pd.Series([1, 2, 3], dtype=right_dtype) + tm.assert_series_equal(left, right, check_dtype=False)
Backport PR #35750: Pass check_dtype to assert_extension_array_equal
https://api.github.com/repos/pandas-dev/pandas/pulls/35773
2020-08-17T18:31:38Z
2020-08-17T20:37:01Z
2020-08-17T20:37:00Z
2020-08-17T20:37:01Z
CI: close sockets in SQL tests
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 29b787d39c09d..a7e3162ed7b73 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -263,7 +263,8 @@ def _get_all_tables(self): return table_list def _close_conn(self): - pass + # https://docs.sqlalchemy.org/en/13/core/connections.html#engine-disposal + self.conn.dispose() class PandasSQLTest: @@ -1242,7 +1243,7 @@ class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest): def setup_class(cls): cls.setup_import() cls.setup_driver() - conn = cls.connect() + conn = cls.conn = cls.connect() conn.connect() def load_test_data_and_sql(self):
Closes #35660, closes #29514. Broken off from #35711.
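For context, a minimal sketch of the SQLAlchemy engine-disposal pattern the diff adopts (the connection string is illustrative, not from the PR):

```python
from sqlalchemy import create_engine

# Disposing of the engine closes the connection pool's sockets; see
# https://docs.sqlalchemy.org/en/13/core/connections.html#engine-disposal
engine = create_engine("sqlite:///:memory:")
conn = engine.connect()
conn.close()      # returns the connection to the pool
engine.dispose()  # closes the pooled connections for real
```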
https://api.github.com/repos/pandas-dev/pandas/pulls/35772
2020-08-17T18:12:28Z
2020-08-17T20:50:14Z
2020-08-17T20:50:14Z
2020-08-18T15:46:19Z
Backport PR #35519 on branch 1.1.x (REF: StringArray._from_sequence, use less memory)
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index d7fb2775376c0..2023858181baa 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -7,6 +7,21 @@ from .pandas_vb_common import tm +class Construction: + + params = ["str", "string"] + param_names = ["dtype"] + + def setup(self, dtype): + self.data = tm.rands_array(nchars=10 ** 5, size=10) + + def time_construction(self, dtype): + Series(self.data, dtype=dtype) + + def peakmem_construction(self, dtype): + Series(self.data, dtype=dtype) + + class Methods: def setup(self): self.s = Series(tm.makeStringIndex(10 ** 5)) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index d93cd6edb983a..10bdfdc10c87a 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -75,6 +75,11 @@ Categorical - Bug in :class:`DataFrame` constructor failing to raise ``ValueError`` in some cases when data and index have mismatched lengths (:issue:`33437`) - +**Strings** + +- fix memory usage issue when instantiating large :class:`pandas.arrays.StringArray` (:issue:`35499`) + + .. --------------------------------------------------------------------------- .. _whatsnew_111.contributors: diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 5fa91ffee8ea8..eadfcefaac73d 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -618,35 +618,52 @@ def astype_intsafe(ndarray[object] arr, new_dtype): @cython.wraparound(False) @cython.boundscheck(False) -def astype_str(arr: ndarray, skipna: bool=False) -> ndarray[object]: - """ - Convert all elements in an array to string. +cpdef ndarray[object] ensure_string_array( + arr, + object na_value=np.nan, + bint convert_na_value=True, + bint copy=True, + bint skipna=True, +): + """Returns a new numpy array with object dtype and only strings and na values. Parameters ---------- - arr : ndarray - The array whose elements we are casting. - skipna : bool, default False + arr : array-like + The values to be converted to str, if needed. + na_value : Any + The value to use for na. For example, np.nan or pd.NA. + convert_na_value : bool, default True + If False, existing na values will be used unchanged in the new array. + copy : bool, default True + Whether to ensure that a new array is returned. + skipna : bool, default True Whether or not to coerce nulls to their stringified form - (e.g. NaN becomes 'nan'). + (e.g. if False, NaN becomes 'nan'). Returns ------- ndarray - A new array with the input array's elements casted. + An array with the input array's elements casted to str or nan-like. 
""" cdef: - object arr_i - Py_ssize_t i, n = arr.size - ndarray[object] result = np.empty(n, dtype=object) - - for i in range(n): - arr_i = arr[i] + Py_ssize_t i = 0, n = len(arr) - if not (skipna and checknull(arr_i)): - arr_i = str(arr_i) + result = np.asarray(arr, dtype="object") + if copy and result is arr: + result = result.copy() - result[i] = arr_i + for i in range(n): + val = result[i] + if not checknull(val): + result[i] = str(val) + else: + if convert_na_value: + val = na_value + if skipna: + result[i] = val + else: + result[i] = str(val) return result diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index fddd3af858f77..a4778869aee24 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -178,11 +178,10 @@ class StringArray(PandasArray): def __init__(self, values, copy=False): values = extract_array(values) - skip_validation = isinstance(values, type(self)) super().__init__(values, copy=copy) self._dtype = StringDtype() - if not skip_validation: + if not isinstance(values, type(self)): self._validate() def _validate(self): @@ -201,23 +200,11 @@ def _from_sequence(cls, scalars, dtype=None, copy=False): assert dtype == "string" result = np.asarray(scalars, dtype="object") - if copy and result is scalars: - result = result.copy() - - # Standardize all missing-like values to NA - # TODO: it would be nice to do this in _validate / lib.is_string_array - # We are already doing a scan over the values there. - na_values = isna(result) - has_nans = na_values.any() - if has_nans and result is scalars: - # force a copy now, if we haven't already - result = result.copy() - - # convert to str, then to object to avoid dtype like '<U3', then insert na_value - result = np.asarray(result, dtype=str) - result = np.asarray(result, dtype="object") - if has_nans: - result[na_values] = StringDtype.na_value + + # convert non-na-likes to str, and nan-likes to StringDtype.na_value + result = lib.ensure_string_array( + result, na_value=StringDtype.na_value, copy=copy + ) return cls(result) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 228329898b6a4..2697f42eb05a4 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -916,7 +916,7 @@ def astype_nansafe(arr, dtype, copy: bool = True, skipna: bool = False): dtype = pandas_dtype(dtype) if issubclass(dtype.type, str): - return lib.astype_str(arr.ravel(), skipna=skipna).reshape(arr.shape) + return lib.ensure_string_array(arr.ravel(), skipna=skipna).reshape(arr.shape) elif is_datetime64_dtype(arr): if is_object_dtype(dtype): @@ -1608,19 +1608,11 @@ def construct_1d_ndarray_preserving_na( >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype=np.dtype('str')) array(['1.0', '2.0', None], dtype=object) """ - subarr = np.array(values, dtype=dtype, copy=copy) if dtype is not None and dtype.kind == "U": - # GH-21083 - # We can't just return np.array(subarr, dtype='str') since - # NumPy will convert the non-string objects into strings - # Including NA values. Se we have to go - # string -> object -> update NA, which requires an - # additional pass over the data. 
- na_values = isna(values) - subarr2 = subarr.astype(object) - subarr2[na_values] = np.asarray(values, dtype=object)[na_values] - subarr = subarr2 + subarr = lib.ensure_string_array(values, convert_na_value=False, copy=copy) + else: + subarr = np.array(values, dtype=dtype, copy=copy) return subarr diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 6f9a1a5be4c43..efd5d29ae0717 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -206,12 +206,16 @@ def test_constructor_raises(): @pytest.mark.parametrize("copy", [True, False]) def test_from_sequence_no_mutate(copy): - a = np.array(["a", np.nan], dtype=object) - original = a.copy() - result = pd.arrays.StringArray._from_sequence(a, copy=copy) - expected = pd.arrays.StringArray(np.array(["a", pd.NA], dtype=object)) + nan_arr = np.array(["a", np.nan], dtype=object) + na_arr = np.array(["a", pd.NA], dtype=object) + + result = pd.arrays.StringArray._from_sequence(nan_arr, copy=copy) + expected = pd.arrays.StringArray(na_arr) + tm.assert_extension_array_equal(result, expected) - tm.assert_numpy_array_equal(a, original) + + expected = nan_arr if copy else na_arr + tm.assert_numpy_array_equal(nan_arr, expected) def test_astype_int():
Backport PR #35519: REF: StringArray._from_sequence, use less memory
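As a usage-level illustration (not part of the diff), the construction path this refactor optimizes:

```python
import numpy as np
import pandas as pd

# StringArray._from_sequence now delegates to lib.ensure_string_array,
# which casts to str and substitutes the NA value in a single pass over
# the data instead of the previous str -> object -> fixup round trips.
arr = pd.array(["a", np.nan], dtype="string")
# arr is StringArray(['a', <NA>]); the input ndarray is not mutated
# (see test_from_sequence_no_mutate in the diff).
```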
https://api.github.com/repos/pandas-dev/pandas/pulls/35770
2020-08-17T14:38:44Z
2020-08-17T15:32:36Z
2020-08-17T15:32:36Z
2020-08-17T15:32:36Z
BUG: Raise ValueError instead of bare Exception in sanitize_array
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 57e3c9dd66afb..1336fd7d83f7e 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -476,6 +476,7 @@ Other - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`) - Fixed metadata propagation in the :class:`Series.dt` accessor (:issue:`28283`) - Bug in :meth:`Index.union` behaving differently depending on whether operand is a :class:`Index` or other list-like (:issue:`36384`) +- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError``, from a bare ``Exception`` previously (:issue:`35744`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 4751f6076f869..7901e150a7ff4 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -510,7 +510,7 @@ def sanitize_array( elif subarr.ndim > 1: if isinstance(data, np.ndarray): - raise Exception("Data must be 1-dimensional") + raise ValueError("Data must be 1-dimensional") else: subarr = com.asarray_tuplesafe(data, dtype=dtype) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 4ad4917533422..a950ca78fc742 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -113,8 +113,8 @@ def test_constructor(self, datetime_series): with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): assert not Series().index._is_all_dates - # exception raised is of type Exception - with pytest.raises(Exception, match="Data must be 1-dimensional"): + # exception raised is of type ValueError GH35744 + with pytest.raises(ValueError, match="Data must be 1-dimensional"): Series(np.random.randn(3, 3), index=np.arange(3)) mixed.name = "Series"
- [x] closes #35744 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
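A quick demonstration of the behavior change, mirroring the updated test:

```python
import numpy as np
import pandas as pd
import pytest

# The Series constructor now raises ValueError (previously a bare
# Exception) when handed a 2-D ndarray.
with pytest.raises(ValueError, match="Data must be 1-dimensional"):
    pd.Series(np.random.randn(3, 3), index=np.arange(3))
```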
https://api.github.com/repos/pandas-dev/pandas/pulls/35769
2020-08-17T13:16:03Z
2020-10-10T16:46:25Z
2020-10-10T16:46:24Z
2021-01-25T21:15:38Z
Backport PR #35697 on branch 1.1.x (REGR: Don't ignore compiled patterns in replace)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 565b4a014bd0c..d93cd6edb983a 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -26,6 +26,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) - Fixed regression where :meth:`DataFrame.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) +- Fixed regression in :meth:`DataFrame.replace` and :meth:`Series.replace` where compiled regular expressions would be ignored during replacement (:issue:`35680`) - Fixed regression in :meth:`~pandas.core.groupby.DataFrameGroupBy.agg` where a list of functions would produce the wrong results if at least one of the functions did not aggregate. (:issue:`35490`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index e6e2b06e1873e..4c3805f812bb0 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -2,7 +2,17 @@ import itertools import operator import re -from typing import DefaultDict, Dict, List, Optional, Sequence, Tuple, TypeVar, Union +from typing import ( + DefaultDict, + Dict, + List, + Optional, + Pattern, + Sequence, + Tuple, + TypeVar, + Union, +) import warnings import numpy as np @@ -1922,7 +1932,10 @@ def _merge_blocks( def _compare_or_regex_search( - a: ArrayLike, b: Scalar, regex: bool = False, mask: Optional[ArrayLike] = None + a: ArrayLike, + b: Union[Scalar, Pattern], + regex: bool = False, + mask: Optional[ArrayLike] = None, ) -> Union[ArrayLike, bool]: """ Compare two array_like inputs of the same shape or two scalar values @@ -1933,7 +1946,7 @@ def _compare_or_regex_search( Parameters ---------- a : array_like - b : scalar + b : scalar or regex pattern regex : bool, default False mask : array_like or None (default) @@ -1943,7 +1956,7 @@ def _compare_or_regex_search( """ def _check_comparison_types( - result: Union[ArrayLike, bool], a: ArrayLike, b: Scalar, + result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern], ): """ Raises an error if the two arrays (a,b) cannot be compared. 
@@ -1964,7 +1977,7 @@ def _check_comparison_types( else: op = np.vectorize( lambda x: bool(re.search(b, x)) - if isinstance(x, str) and isinstance(b, str) + if isinstance(x, str) and isinstance(b, (str, Pattern)) else False ) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index a3f056dbf9648..8603bff0587b6 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1573,3 +1573,11 @@ def test_replace_dict_category_type(self, input_category_df, expected_category_d result = input_df.replace({"a": "z", "obj1": "obj9", "cat1": "catX"}) tm.assert_frame_equal(result, expected) + + def test_replace_with_compiled_regex(self): + # https://github.com/pandas-dev/pandas/issues/35680 + df = pd.DataFrame(["a", "b", "c"]) + regex = re.compile("^a$") + result = df.replace({regex: "z"}, regex=True) + expected = pd.DataFrame(["z", "b", "c"]) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index 11802c59a29da..f78a28c66e946 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -1,3 +1,5 @@ +import re + import numpy as np import pytest @@ -415,3 +417,11 @@ def test_replace_extension_other(self): # https://github.com/pandas-dev/pandas/issues/34530 ser = pd.Series(pd.array([1, 2, 3], dtype="Int64")) ser.replace("", "") # no exception + + def test_replace_with_compiled_regex(self): + # https://github.com/pandas-dev/pandas/issues/35680 + s = pd.Series(["a", "b", "c"]) + regex = re.compile("^a$") + result = s.replace({regex: "z"}, regex=True) + expected = pd.Series(["z", "b", "c"]) + tm.assert_series_equal(result, expected)
Backport PR #35697: REGR: Don't ignore compiled patterns in replace
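For illustration, the regression this backport fixes, mirroring the new test:

```python
import re

import pandas as pd

df = pd.DataFrame(["a", "b", "c"])
pattern = re.compile("^a$")

# Pre-compiled patterns were silently ignored during replacement; they
# now behave like the equivalent string pattern.
result = df.replace({pattern: "z"}, regex=True)
# result equals pd.DataFrame(["z", "b", "c"])
```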
https://api.github.com/repos/pandas-dev/pandas/pulls/35765
2020-08-17T11:00:57Z
2020-08-17T12:21:40Z
2020-08-17T12:21:40Z
2020-08-17T12:21:40Z
Backport PR #35543 on branch 1.1.x (REGR: Fix interpolation on empty dataframe)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 565b4a014bd0c..b1fd76157b9f1 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -19,6 +19,7 @@ Fixed regressions - Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`) - Fixed regression where :func:`pandas.testing.assert_series_equal` would raise an error when non-numeric dtypes were passed with ``check_exact=True`` (:issue:`35446`) - Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) +- Fixed regression where :meth:`DataFrame.interpolate` would raise a ``TypeError`` when the :class:`DataFrame` was empty (:issue:`35598`). - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in :meth:`DataFrame.diff` with read-only data (:issue:`35559`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a11ee6b5d9846..3a386a8df7075 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6799,6 +6799,9 @@ def interpolate( obj = self.T if should_transpose else self + if obj.empty: + return self + if method not in fillna_methods: axis = self._info_axis_number diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py index ddb5723e7bd3e..3c9d79397e4bd 100644 --- a/pandas/tests/frame/methods/test_interpolate.py +++ b/pandas/tests/frame/methods/test_interpolate.py @@ -34,6 +34,13 @@ def test_interp_basic(self): expected.loc[5, "B"] = 9 tm.assert_frame_equal(result, expected) + def test_interp_empty(self): + # https://github.com/pandas-dev/pandas/issues/35598 + df = DataFrame() + result = df.interpolate() + expected = df + tm.assert_frame_equal(result, expected) + def test_interp_bad_method(self): df = DataFrame( {
Backport PR #35543: REGR: Fix interpolation on empty dataframe
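A one-line reproduction of the regression, mirroring the added test:

```python
import pandas as pd

# In 1.1.0 this raised a TypeError; interpolate() on an empty
# DataFrame now returns the empty frame unchanged.
result = pd.DataFrame().interpolate()
```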
https://api.github.com/repos/pandas-dev/pandas/pulls/35764
2020-08-17T10:24:14Z
2020-08-17T20:37:25Z
2020-08-17T20:37:25Z
2020-08-17T20:37:25Z
MAINT: Initialize year to silence warning
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 8429aebbd85b8..7478179df3b75 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -381,7 +381,8 @@ cdef inline object _parse_dateabbr_string(object date_string, datetime default, object freq): cdef: object ret - int year, quarter = -1, month, mnum, date_len + # year initialized to prevent compiler warnings + int year = -1, quarter = -1, month, mnum, date_len # special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1 assert isinstance(date_string, str)
Initialize `year` to silence a compiler warning: a value is subtracted from `year` on a path the compiler cannot prove is either preceded by initialization or never reached. Closes #35622. - [X] closes #35622 - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
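A hypothetical Python analogue of the control-flow pattern behind the warning (the real code is Cython, where the read compiles to C):

```python
def parse(flag: bool) -> int:
    # year = -1  # the fix: initialize up front
    if flag:
        year = 2005
    # In Cython/C, reaching this line with flag=False reads an
    # uninitialized `year`, so the compiler warns even if that path is
    # never taken in practice; pure Python raises UnboundLocalError.
    return year - 1
```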
https://api.github.com/repos/pandas-dev/pandas/pulls/35763
2020-08-17T09:47:13Z
2020-08-17T18:27:45Z
2020-08-17T18:27:45Z
2020-08-17T18:27:49Z
Backport PR #35754 on branch 1.1.x (CI: Min Pytest Cov Version/Restrict xdist version)
diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml index 548660cabaa67..21b4e86918f3b 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-36.yaml @@ -8,7 +8,7 @@ dependencies: # tools - cython>=0.29.16 - pytest>=5.0.1 - - pytest-xdist>=1.21 + - pytest-xdist>=1.21,<2.0.0 # GH 35737 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 5bbd0e2795d7e..287d6877b9810 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -8,7 +8,7 @@ dependencies: # tools - cython>=0.29.16 - pytest>=5.0.1 - - pytest-xdist>=1.21 + - pytest-xdist>=1.21,<2.0.0 # GH 35737 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index 177e0d3f4c0af..2457c04e67759 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -10,7 +10,7 @@ dependencies: - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - - pytest-cov # this is only needed in the coverage build + - pytest-cov>=2.10.1 # this is only needed in the coverage build, ref: GH 35737 # pandas dependencies - beautifulsoup4
Backport PR #35754: CI: Min Pytest Cov Version/Restrict xdist version
https://api.github.com/repos/pandas-dev/pandas/pulls/35761
2020-08-17T08:52:00Z
2020-08-17T09:39:03Z
2020-08-17T09:39:03Z
2020-08-17T09:39:03Z
PERF: Allow jitting of groupby agg loop
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 3e6ed1cdf8f7e..09a5bcb0917c2 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -154,7 +154,7 @@ Deprecations Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- +- Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 60e23b14eaf09..2d46a545ba979 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -69,19 +69,16 @@ GroupBy, _agg_template, _apply_docs, + _group_selection_context, _transform_template, get_groupby, ) +from pandas.core.groupby.numba_ import generate_numba_func, split_for_numba from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same import pandas.core.indexes.base as ibase from pandas.core.internals import BlockManager, make_block from pandas.core.series import Series -from pandas.core.util.numba_ import ( - NUMBA_FUNC_CACHE, - generate_numba_func, - maybe_use_numba, - split_for_numba, -) +from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba from pandas.plotting import boxplot_frame_groupby @@ -229,6 +226,18 @@ def apply(self, func, *args, **kwargs): ) def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): + if maybe_use_numba(engine): + if not callable(func): + raise NotImplementedError( + "Numba engine can only be used with a single function." + ) + with _group_selection_context(self): + data = self._selected_obj + result, index = self._aggregate_with_numba( + data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + return self.obj._constructor(result.ravel(), index=index, name=data.name) + relabeling = func is None columns = None if relabeling: @@ -251,16 +260,11 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) return getattr(self, cyfunc)() if self.grouper.nkeys > 1: - return self._python_agg_general( - func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs - ) + return self._python_agg_general(func, *args, **kwargs) try: - return self._python_agg_general( - func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs - ) + return self._python_agg_general(func, *args, **kwargs) except (ValueError, KeyError): - # Do not catch Numba errors here, we want to raise and not fall back. # TODO: KeyError is raised in _python_agg_general, # see see test_groupby.test_basic result = self._aggregate_named(func, *args, **kwargs) @@ -936,12 +940,19 @@ class DataFrameGroupBy(GroupBy[DataFrame]): ) def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): - relabeling, func, columns, order = reconstruct_func(func, **kwargs) - if maybe_use_numba(engine): - return self._python_agg_general( - func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs + if not callable(func): + raise NotImplementedError( + "Numba engine can only be used with a single function." 
+ ) + with _group_selection_context(self): + data = self._selected_obj + result, index = self._aggregate_with_numba( + data, func, *args, engine_kwargs=engine_kwargs, **kwargs ) + return self.obj._constructor(result, index=index, columns=data.columns) + + relabeling, func, columns, order = reconstruct_func(func, **kwargs) result, how = self._aggregate(func, *args, **kwargs) if how is None: diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 0047877ef78ee..f96b488fb8d0d 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -34,7 +34,7 @@ class providing the base-class of operations. from pandas._config.config import option_context -from pandas._libs import Timestamp +from pandas._libs import Timestamp, lib import pandas._libs.groupby as libgroupby from pandas._typing import F, FrameOrSeries, FrameOrSeriesUnion, Scalar from pandas.compat.numpy import function as nv @@ -61,11 +61,11 @@ class providing the base-class of operations. import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame -from pandas.core.groupby import base, ops +from pandas.core.groupby import base, numba_, ops from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex from pandas.core.series import Series from pandas.core.sorting import get_group_index_sorter -from pandas.core.util.numba_ import maybe_use_numba +from pandas.core.util.numba_ import NUMBA_FUNC_CACHE _common_see_also = """ See Also @@ -384,7 +384,8 @@ class providing the base-class of operations. - dict of axis labels -> functions, function names or list of such. Can also accept a Numba JIT function with - ``engine='numba'`` specified. + ``engine='numba'`` specified. Only passing a single function is supported + with this engine. If the ``'numba'`` engine is chosen, the function must be a user defined function with ``values`` and ``index`` as the @@ -1053,12 +1054,43 @@ def _cython_agg_general( return self._wrap_aggregated_output(output, index=self.grouper.result_index) - def _python_agg_general( - self, func, *args, engine="cython", engine_kwargs=None, **kwargs - ): + def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs): + """ + Perform groupby aggregation routine with the numba engine. + + This routine mimics the data splitting routine of the DataSplitter class + to generate the indices of each group in the sorted data and then passes the + data and indices into a Numba jitted function. 
+ """ + group_keys = self.grouper._get_group_keys() + labels, _, n_groups = self.grouper.group_info + sorted_index = get_group_index_sorter(labels, n_groups) + sorted_labels = algorithms.take_nd(labels, sorted_index, allow_fill=False) + sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() + starts, ends = lib.generate_slices(sorted_labels, n_groups) + cache_key = (func, "groupby_agg") + if cache_key in NUMBA_FUNC_CACHE: + # Return an already compiled version of roll_apply if available + numba_agg_func = NUMBA_FUNC_CACHE[cache_key] + else: + numba_agg_func = numba_.generate_numba_agg_func( + tuple(args), kwargs, func, engine_kwargs + ) + result = numba_agg_func( + sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns), + ) + if cache_key not in NUMBA_FUNC_CACHE: + NUMBA_FUNC_CACHE[cache_key] = numba_agg_func + + if self.grouper.nkeys > 1: + index = MultiIndex.from_tuples(group_keys, names=self.grouper.names) + else: + index = Index(group_keys, name=self.grouper.names[0]) + return result, index + + def _python_agg_general(self, func, *args, **kwargs): func = self._is_builtin_func(func) - if engine != "numba": - f = lambda x: func(x, *args, **kwargs) + f = lambda x: func(x, *args, **kwargs) # iterate through "columns" ex exclusions to populate output dict output: Dict[base.OutputKey, np.ndarray] = {} @@ -1069,21 +1101,11 @@ def _python_agg_general( # agg_series below assumes ngroups > 0 continue - if maybe_use_numba(engine): - result, counts = self.grouper.agg_series( - obj, - func, - *args, - engine=engine, - engine_kwargs=engine_kwargs, - **kwargs, - ) - else: - try: - # if this function is invalid for this dtype, we will ignore it. - result, counts = self.grouper.agg_series(obj, f) - except TypeError: - continue + try: + # if this function is invalid for this dtype, we will ignore it. + result, counts = self.grouper.agg_series(obj, f) + except TypeError: + continue assert result is not None key = base.OutputKey(label=name, position=idx) diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py new file mode 100644 index 0000000000000..aebe60f797fcd --- /dev/null +++ b/pandas/core/groupby/numba_.py @@ -0,0 +1,172 @@ +"""Common utilities for Numba operations with groupby ops""" +import inspect +from typing import Any, Callable, Dict, Optional, Tuple + +import numpy as np + +from pandas._typing import FrameOrSeries, Scalar +from pandas.compat._optional import import_optional_dependency + +from pandas.core.util.numba_ import ( + NUMBA_FUNC_CACHE, + NumbaUtilError, + check_kwargs_and_nopython, + get_jit_arguments, + jit_user_function, +) + + +def split_for_numba(arg: FrameOrSeries) -> Tuple[np.ndarray, np.ndarray]: + """ + Split pandas object into its components as numpy arrays for numba functions. + + Parameters + ---------- + arg : Series or DataFrame + + Returns + ------- + (ndarray, ndarray) + values, index + """ + return arg.to_numpy(), arg.index.to_numpy() + + +def validate_udf(func: Callable) -> None: + """ + Validate user defined function for ops when using Numba with groupby ops. + + The first signature arguments should include: + + def f(values, index, ...): + ... 
+ + Parameters + ---------- + func : function, default False + user defined function + + Returns + ------- + None + + Raises + ------ + NumbaUtilError + """ + udf_signature = list(inspect.signature(func).parameters.keys()) + expected_args = ["values", "index"] + min_number_args = len(expected_args) + if ( + len(udf_signature) < min_number_args + or udf_signature[:min_number_args] != expected_args + ): + raise NumbaUtilError( + f"The first {min_number_args} arguments to {func.__name__} must be " + f"{expected_args}" + ) + + +def generate_numba_func( + func: Callable, + engine_kwargs: Optional[Dict[str, bool]], + kwargs: dict, + cache_key_str: str, +) -> Tuple[Callable, Tuple[Callable, str]]: + """ + Return a JITed function and cache key for the NUMBA_FUNC_CACHE + + This _may_ be specific to groupby (as it's only used there currently). + + Parameters + ---------- + func : function + user defined function + engine_kwargs : dict or None + numba.jit arguments + kwargs : dict + kwargs for func + cache_key_str : str + string representing the second part of the cache key tuple + + Returns + ------- + (JITed function, cache key) + + Raises + ------ + NumbaUtilError + """ + nopython, nogil, parallel = get_jit_arguments(engine_kwargs) + check_kwargs_and_nopython(kwargs, nopython) + validate_udf(func) + cache_key = (func, cache_key_str) + numba_func = NUMBA_FUNC_CACHE.get( + cache_key, jit_user_function(func, nopython, nogil, parallel) + ) + return numba_func, cache_key + + +def generate_numba_agg_func( + args: Tuple, + kwargs: Dict[str, Any], + func: Callable[..., Scalar], + engine_kwargs: Optional[Dict[str, bool]], +) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int], np.ndarray]: + """ + Generate a numba jitted agg function specified by values from engine_kwargs. + + 1. jit the user's function + 2. Return a groupby agg function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the rolling apply function. 
+ + Parameters + ---------- + args : tuple + *args to be passed into the function + kwargs : dict + **kwargs to be passed into the function + func : function + function to be applied to each window and will be JITed + engine_kwargs : dict + dictionary of arguments to be passed into numba.jit + + Returns + ------- + Numba function + """ + nopython, nogil, parallel = get_jit_arguments(engine_kwargs) + + check_kwargs_and_nopython(kwargs, nopython) + + validate_udf(func) + + numba_func = jit_user_function(func, nopython, nogil, parallel) + + numba = import_optional_dependency("numba") + + if parallel: + loop_range = numba.prange + else: + loop_range = range + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def group_apply( + values: np.ndarray, + index: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + num_groups: int, + num_columns: int, + ) -> np.ndarray: + result = np.empty((num_groups, num_columns)) + for i in loop_range(num_groups): + group_index = index[begin[i] : end[i]] + for j in loop_range(num_columns): + group = values[begin[i] : end[i], j] + result[i, j] = numba_func(group, group_index, *args) + return result + + return group_apply diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 64eb413fe78fa..c6171a55359fe 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -55,12 +55,6 @@ get_group_index_sorter, get_indexer_dict, ) -from pandas.core.util.numba_ import ( - NUMBA_FUNC_CACHE, - generate_numba_func, - maybe_use_numba, - split_for_numba, -) class BaseGrouper: @@ -610,21 +604,11 @@ def _transform( return result def agg_series( - self, - obj: Series, - func: F, - *args, - engine: str = "cython", - engine_kwargs=None, - **kwargs, + self, obj: Series, func: F, *args, **kwargs, ): # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0 - if maybe_use_numba(engine): - return self._aggregate_series_pure_python( - obj, func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs - ) if len(obj) == 0: # SeriesGrouper would raise if we were to call _aggregate_series_fast return self._aggregate_series_pure_python(obj, func) @@ -670,20 +654,8 @@ def _aggregate_series_fast(self, obj: Series, func: F): return result, counts def _aggregate_series_pure_python( - self, - obj: Series, - func: F, - *args, - engine: str = "cython", - engine_kwargs=None, - **kwargs, + self, obj: Series, func: F, *args, **kwargs, ): - - if maybe_use_numba(engine): - numba_func, cache_key = generate_numba_func( - func, engine_kwargs, kwargs, "groupby_agg" - ) - group_index, _, ngroups = self.group_info counts = np.zeros(ngroups, dtype=int) @@ -692,13 +664,7 @@ def _aggregate_series_pure_python( splitter = get_splitter(obj, group_index, ngroups, axis=0) for label, group in splitter: - if maybe_use_numba(engine): - values, index = split_for_numba(group) - res = numba_func(values, index, *args) - if cache_key not in NUMBA_FUNC_CACHE: - NUMBA_FUNC_CACHE[cache_key] = numba_func - else: - res = func(group, *args, **kwargs) + res = func(group, *args, **kwargs) if result is None: if isinstance(res, (Series, Index, np.ndarray)): @@ -876,13 +842,7 @@ def groupings(self) -> "List[grouper.Grouping]": ] def agg_series( - self, - obj: Series, - func: F, - *args, - engine: str = "cython", - engine_kwargs=None, - **kwargs, + self, obj: Series, func: F, *args, **kwargs, ): # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0 diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py index 
c9b7943478cdd..b951cd4f0cc2a 100644 --- a/pandas/core/util/numba_.py +++ b/pandas/core/util/numba_.py @@ -1,12 +1,10 @@ """Common utilities for Numba operations""" from distutils.version import LooseVersion -import inspect import types from typing import Callable, Dict, Optional, Tuple import numpy as np -from pandas._typing import FrameOrSeries from pandas.compat._optional import import_optional_dependency from pandas.errors import NumbaUtilError @@ -129,94 +127,3 @@ def impl(data, *_args): return impl return numba_func - - -def split_for_numba(arg: FrameOrSeries) -> Tuple[np.ndarray, np.ndarray]: - """ - Split pandas object into its components as numpy arrays for numba functions. - - Parameters - ---------- - arg : Series or DataFrame - - Returns - ------- - (ndarray, ndarray) - values, index - """ - return arg.to_numpy(), arg.index.to_numpy() - - -def validate_udf(func: Callable) -> None: - """ - Validate user defined function for ops when using Numba. - - The first signature arguments should include: - - def f(values, index, ...): - ... - - Parameters - ---------- - func : function, default False - user defined function - - Returns - ------- - None - - Raises - ------ - NumbaUtilError - """ - udf_signature = list(inspect.signature(func).parameters.keys()) - expected_args = ["values", "index"] - min_number_args = len(expected_args) - if ( - len(udf_signature) < min_number_args - or udf_signature[:min_number_args] != expected_args - ): - raise NumbaUtilError( - f"The first {min_number_args} arguments to {func.__name__} must be " - f"{expected_args}" - ) - - -def generate_numba_func( - func: Callable, - engine_kwargs: Optional[Dict[str, bool]], - kwargs: dict, - cache_key_str: str, -) -> Tuple[Callable, Tuple[Callable, str]]: - """ - Return a JITed function and cache key for the NUMBA_FUNC_CACHE - - This _may_ be specific to groupby (as it's only used there currently). 
- - Parameters - ---------- - func : function - user defined function - engine_kwargs : dict or None - numba.jit arguments - kwargs : dict - kwargs for func - cache_key_str : str - string representing the second part of the cache key tuple - - Returns - ------- - (JITed function, cache key) - - Raises - ------ - NumbaUtilError - """ - nopython, nogil, parallel = get_jit_arguments(engine_kwargs) - check_kwargs_and_nopython(kwargs, nopython) - validate_udf(func) - cache_key = (func, cache_key_str) - numba_func = NUMBA_FUNC_CACHE.get( - cache_key, jit_user_function(func, nopython, nogil, parallel) - ) - return numba_func, cache_key diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py index 690694b0e66f5..29e65e938f6f9 100644 --- a/pandas/tests/groupby/aggregate/test_numba.py +++ b/pandas/tests/groupby/aggregate/test_numba.py @@ -4,7 +4,7 @@ from pandas.errors import NumbaUtilError import pandas.util._test_decorators as td -from pandas import DataFrame, option_context +from pandas import DataFrame, NamedAgg, option_context import pandas._testing as tm from pandas.core.util.numba_ import NUMBA_FUNC_CACHE @@ -128,3 +128,25 @@ def func_1(values, index): with option_context("compute.use_numba", True): result = grouped.agg(func_1, engine=None) tm.assert_frame_equal(expected, result) + + +@td.skip_if_no("numba", "0.46.0") +@pytest.mark.parametrize( + "agg_func", + [ + ["min", "max"], + "min", + {"B": ["min", "max"], "C": "sum"}, + NamedAgg(column="B", aggfunc="min"), + ], +) +def test_multifunc_notimplimented(agg_func): + data = DataFrame( + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + ) + grouped = data.groupby(0) + with pytest.raises(NotImplementedError, match="Numba engine can"): + grouped.agg(agg_func, engine="numba") + + with pytest.raises(NotImplementedError, match="Numba engine can"): + grouped[1].agg(agg_func, engine="numba")
- [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry New performance comparison for 10,000 groups ``` In [1]: In [1]: df_g = pd.DataFrame({'a': range(10**4), 'b': range(10**4), 'c': range(10**4)}) In [2]: In [2]: def f(x): ...: ...: return np.sum(x) + 1 ...: In [3]: df_g.groupby('a').agg(f) Out[3]: b c a 0 1 1 1 2 2 2 3 3 3 4 4 4 5 5 ... ... ... 9995 9996 9996 9996 9997 9997 9997 9998 9998 9998 9999 9999 9999 10000 10000 [10000 rows x 2 columns] In [4]: %timeit df_g.groupby('a').agg(f) 1.2 s ± 70.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) In [5]: def f(values, index): ...: return np.sum(values) + 1 ...: In [6]: df_g.groupby('a').agg(f, engine='numba', engine_kwargs={'parallel': True}) Out[6]: b c a 0 1.0 1.0 1 2.0 2.0 2 3.0 3.0 3 4.0 4.0 4 5.0 5.0 ... ... ... 9995 9996.0 9996.0 9996 9997.0 9997.0 9997 9998.0 9998.0 9998 9999.0 9999.0 9999 10000.0 10000.0 In [8]: %timeit df_g.groupby('a').agg(f, engine='numba', engine_kwargs={'parallel': True}) 2.07 ms ± 64.3 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/35759
2020-08-17T06:01:31Z
2020-08-22T03:30:05Z
2020-08-22T03:30:05Z
2020-08-22T03:31:34Z
CI: Unpin Pytest + Pytest Asyncio Min Version
diff --git a/ci/deps/azure-38-locale.yaml b/ci/deps/azure-38-locale.yaml index c466a5929ea29..c7090d3a46a77 100644 --- a/ci/deps/azure-38-locale.yaml +++ b/ci/deps/azure-38-locale.yaml @@ -6,9 +6,9 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0 # https://github.com/pandas-dev/pandas/issues/35620 + - pytest>=5.0.1 - pytest-xdist>=1.21 - - pytest-asyncio + - pytest-asyncio>=0.12.0 - hypothesis>=3.58.0 - pytest-azurepipelines
- [x] closes #35620 Pytest 6.0.0+ will require pytest-asyncio>=0.12.0 for these async tests to run. Also worth noting `pytest-asyncio 0.12.0 requires pytest>=5.4.0`. (So maybe we should think about bumping min pytest version) ``` ____________________________________________________________________________________ ERROR collecting pandas/tests/indexes/test_base.py _____________________________________________________________________________________ ../../.conda/envs/pandas-dev/lib/python3.8/site-packages/pluggy/hooks.py:286: in __call__ return self._hookexec(self, self.get_hookimpls(), kwargs) ../../.conda/envs/pandas-dev/lib/python3.8/site-packages/pluggy/manager.py:93: in _hookexec return self._inner_hookexec(hook, methods, kwargs) ../../.conda/envs/pandas-dev/lib/python3.8/site-packages/pluggy/manager.py:84: in <lambda> self._inner_hookexec = lambda hook, methods, kwargs: hook.multicall( ../../.conda/envs/pandas-dev/lib/python3.8/site-packages/pytest_asyncio/plugin.py:39: in pytest_pycollect_makeitem item = pytest.Function(name, parent=collector) ../../.conda/envs/pandas-dev/lib/python3.8/site-packages/_pytest/nodes.py:95: in __call__ warnings.warn(NODE_USE_FROM_PARENT.format(name=self.__name__), stacklevel=2) E pytest.PytestDeprecationWarning: Direct construction of Function has been deprecated, please use Function.from_parent. E See https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent for more details. ``` cc @simonjayhawkins
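For reference, a minimal async test of the kind pytest-asyncio must collect (the test name is illustrative, not the actual pandas test):

```python
import pytest


@pytest.mark.asyncio  # marker provided by pytest-asyncio
async def test_collects_under_pytest6():
    # With pytest>=6 and pytest-asyncio<0.12.0, pytest fails while
    # merely collecting async tests, as in the traceback above.
    assert True
```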
https://api.github.com/repos/pandas-dev/pandas/pulls/35757
2020-08-17T00:09:59Z
2020-08-19T17:42:48Z
2020-08-19T17:42:48Z
2020-12-22T19:03:19Z
CI: Min Pytest Cov Version/Restrict xdist version
diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 4d745454afcab..f4c238ab8b173 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -8,7 +8,7 @@ dependencies: # tools - cython>=0.29.16 - pytest>=5.0.1 - - pytest-xdist>=1.21 + - pytest-xdist>=1.21,<2.0.0 # GH 35737 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index f428a6dadfaa2..1f383164b5328 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -8,7 +8,7 @@ dependencies: # tools - cython>=0.29.16 - pytest>=5.0.1 - - pytest-xdist>=1.21 + - pytest-xdist>=1.21,<2.0.0 # GH 35737 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/travis-37-cov.yaml b/ci/deps/travis-37-cov.yaml index 3a0827a16f97a..edc11bdf4ab35 100644 --- a/ci/deps/travis-37-cov.yaml +++ b/ci/deps/travis-37-cov.yaml @@ -10,7 +10,7 @@ dependencies: - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - - pytest-cov # this is only needed in the coverage build + - pytest-cov>=2.10.1 # this is only needed in the coverage build, ref: GH 35737 # pandas dependencies - beautifulsoup4
- [x] closes #35737
https://api.github.com/repos/pandas-dev/pandas/pulls/35754
2020-08-16T19:21:29Z
2020-08-17T08:50:01Z
2020-08-17T08:50:01Z
2020-08-17T08:51:47Z
BUG: DataFrame.groupby(., dropna=True, axis=0) incorrectly throws ShapeError
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 3545dd8a89159..621baa01fbded 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -224,6 +224,7 @@ Indexing Missing ^^^^^^^ +- Bug in :class:`Grouper` now correctly propagates ``dropna`` argument and :meth:`DataFrameGroupBy.transform` now correctly handles missing values for ``dropna=True`` (:issue:`35612`) - - diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 07ffb881495fa..16b00735cf694 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -553,7 +553,6 @@ def _transform_general(self, func, *args, **kwargs): result = maybe_downcast_numeric(result, self._selected_obj.dtype) result.name = self._selected_obj.name - result.index = self._selected_obj.index return result def _transform_fast(self, result) -> Series: diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 947f18901775b..cebbfac16019e 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -729,14 +729,28 @@ def _set_result_index_ordered( # set the result index on the passed values object and # return the new object, xref 8046 - # the values/counts are repeated according to the group index - # shortcut if we have an already ordered grouper - if not self.grouper.is_monotonic: - index = Index(np.concatenate(self._get_indices(self.grouper.result_index))) - result.set_axis(index, axis=self.axis, inplace=True) - result = result.sort_index(axis=self.axis) - - result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True) + if self.grouper.is_monotonic: + # shortcut if we have an already ordered grouper + result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True) + return result + + # row order is scrambled => sort the rows by position in original index + original_positions = Index( + np.concatenate(self._get_indices(self.grouper.result_index)) + ) + result.set_axis(original_positions, axis=self.axis, inplace=True) + result = result.sort_index(axis=self.axis) + + dropped_rows = len(result.index) < len(self.obj.index) + + if dropped_rows: + # get index by slicing original index according to original positions + # slice drops attrs => use set_axis when no rows were dropped + sorted_indexer = result.index + result.index = self._selected_obj.index[sorted_indexer] + else: + result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True) + return result @final diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py index e38fa5e8de87e..ab568e24ff029 100644 --- a/pandas/tests/groupby/test_groupby_dropna.py +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -171,36 +171,53 @@ def test_grouper_dropna_propagation(dropna): @pytest.mark.parametrize( - "dropna,df_expected,s_expected", + "dropna,input_index,expected_data,expected_index", [ - pytest.param( + (True, pd.RangeIndex(0, 4), {"B": [2, 2, 1]}, pd.RangeIndex(0, 3)), + (True, list("abcd"), {"B": [2, 2, 1]}, list("abc")), + ( True, - pd.DataFrame({"B": [2, 2, 1]}), - pd.Series(data=[2, 2, 1], name="B"), - marks=pytest.mark.xfail(raises=ValueError), + pd.MultiIndex.from_tuples( + [(1, "R"), (1, "B"), (2, "R"), (2, "B")], names=["num", "col"] + ), + {"B": [2, 2, 1]}, + pd.MultiIndex.from_tuples( + [(1, "R"), (1, "B"), (2, "R")], names=["num", "col"] + ), ), + (False, pd.RangeIndex(0, 4), {"B": [2, 2, 1, 1]}, pd.RangeIndex(0, 4)), + (False, list("abcd"), {"B": [2, 2, 1, 1]}, 
list("abcd")), ( False, - pd.DataFrame({"B": [2, 2, 1, 1]}), - pd.Series(data=[2, 2, 1, 1], name="B"), + pd.MultiIndex.from_tuples( + [(1, "R"), (1, "B"), (2, "R"), (2, "B")], names=["num", "col"] + ), + {"B": [2, 2, 1, 1]}, + pd.MultiIndex.from_tuples( + [(1, "R"), (1, "B"), (2, "R"), (2, "B")], names=["num", "col"] + ), ), ], ) -def test_slice_groupby_then_transform(dropna, df_expected, s_expected): - # GH35014 +def test_groupby_dataframe_slice_then_transform( + dropna, input_index, expected_data, expected_index +): + # GH35014 & GH35612 - df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]}) + df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]}, index=input_index) gb = df.groupby("A", dropna=dropna) - res = gb.transform(len) - tm.assert_frame_equal(res, df_expected) + result = gb.transform(len) + expected = pd.DataFrame(expected_data, index=expected_index) + tm.assert_frame_equal(result, expected) - gb_slice = gb[["B"]] - res = gb_slice.transform(len) - tm.assert_frame_equal(res, df_expected) + result = gb[["B"]].transform(len) + expected = pd.DataFrame(expected_data, index=expected_index) + tm.assert_frame_equal(result, expected) - res = gb["B"].transform(len) - tm.assert_series_equal(res, s_expected) + result = gb["B"].transform(len) + expected = pd.Series(expected_data["B"], index=expected_index, name="B") + tm.assert_series_equal(result, expected) @pytest.mark.parametrize( diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 1d2208592a06d..5205ca3777fc0 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -626,7 +626,7 @@ def test_list_grouper_with_nat(self): [ ( "transform", - Series(name=2, dtype=np.float64, index=pd.RangeIndex(0, 0, 1)), + Series(name=2, dtype=np.float64, index=Index([])), ), ( "agg",
- [x] closes #35612 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR makes a few more changes to propagate `dropna` correctly for sliced groupby objects. It depends on #35444 for the changes to `pandas/core/groupby/generic.py`. I put them in by hand for now so that the tests pass but there should be no diff once #35444 is merged.
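A condensed version of the fixed case, mirroring the new tests:

```python
import pandas as pd

df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]})
gb = df.groupby("A", dropna=True)

# With dropna=True the NaN group's rows are dropped, so the result has
# fewer rows than the input; the sliced groupby previously raised a
# ValueError here (the xfail-ed case) instead of returning this.
result = gb[["B"]].transform(len)  # B == [2, 2, 1] on index 0..2
```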
https://api.github.com/repos/pandas-dev/pandas/pulls/35751
2020-08-16T05:04:18Z
2020-12-19T02:34:45Z
2020-12-19T02:34:44Z
2020-12-19T02:34:50Z
Pass check_dtype to assert_extension_array_equal
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index c028fe6bea719..ff5bbccf63ffe 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -38,6 +38,7 @@ Bug fixes ~~~~~~~~~ - Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`) (:issue:`35663`). +- Bug in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` where extension dtypes were not ignored when ``check_dtypes`` was set to ``False`` (:issue:`35715`). Categorical ^^^^^^^^^^^ diff --git a/pandas/_testing.py b/pandas/_testing.py index 713f29466f097..ef6232fa6d575 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -1377,12 +1377,18 @@ def assert_series_equal( ) elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype): assert_extension_array_equal( - left._values, right._values, index_values=np.asarray(left.index) + left._values, + right._values, + check_dtype=check_dtype, + index_values=np.asarray(left.index), ) elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype): # DatetimeArray or TimedeltaArray assert_extension_array_equal( - left._values, right._values, index_values=np.asarray(left.index) + left._values, + right._values, + check_dtype=check_dtype, + index_values=np.asarray(left.index), ) else: _testing.assert_almost_equal( diff --git a/pandas/tests/util/test_assert_extension_array_equal.py b/pandas/tests/util/test_assert_extension_array_equal.py index d9fdf1491c328..f9259beab5d13 100644 --- a/pandas/tests/util/test_assert_extension_array_equal.py +++ b/pandas/tests/util/test_assert_extension_array_equal.py @@ -1,6 +1,7 @@ import numpy as np import pytest +from pandas import array import pandas._testing as tm from pandas.core.arrays.sparse import SparseArray @@ -102,3 +103,11 @@ def test_assert_extension_array_equal_non_extension_array(side): with pytest.raises(AssertionError, match=msg): tm.assert_extension_array_equal(*args) + + +@pytest.mark.parametrize("right_dtype", ["Int32", "int64"]) +def test_assert_extension_array_equal_ignore_dtype_mismatch(right_dtype): + # https://github.com/pandas-dev/pandas/issues/35715 + left = array([1, 2, 3], dtype="Int64") + right = array([1, 2, 3], dtype=right_dtype) + tm.assert_extension_array_equal(left, right, check_dtype=False) diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index fe3e1ff906919..3aa3c64923b14 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -260,3 +260,11 @@ def test_assert_frame_equal_interval_dtype_mismatch(): with pytest.raises(AssertionError, match=msg): tm.assert_frame_equal(left, right, check_dtype=True) + + +@pytest.mark.parametrize("right_dtype", ["Int32", "int64"]) +def test_assert_frame_equal_ignore_extension_dtype_mismatch(right_dtype): + # https://github.com/pandas-dev/pandas/issues/35715 + left = pd.DataFrame({"a": [1, 2, 3]}, dtype="Int64") + right = pd.DataFrame({"a": [1, 2, 3]}, dtype=right_dtype) + tm.assert_frame_equal(left, right, check_dtype=False) diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index a7b5aeac560e4..f3c66052b1904 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -296,3 +296,11 @@ def test_series_equal_exact_for_nonnumeric(): tm.assert_series_equal(s1, s3, check_exact=True) with 
pytest.raises(AssertionError): tm.assert_series_equal(s3, s1, check_exact=True) + + +@pytest.mark.parametrize("right_dtype", ["Int32", "int64"]) +def test_assert_series_equal_ignore_extension_dtype_mismatch(right_dtype): + # https://github.com/pandas-dev/pandas/issues/35715 + left = pd.Series([1, 2, 3], dtype="Int64") + right = pd.Series([1, 2, 3], dtype=right_dtype) + tm.assert_series_equal(left, right, check_dtype=False)
- [x] closes #35715 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
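For context, a minimal sketch of the fixed behavior (mirroring the new tests; not part of the patch): with `check_dtype=False`, a nullable extension dtype and a plain NumPy dtype should now compare equal.

```python
import pandas as pd

# Sketch mirroring the new tests: before this fix, the dtype mismatch
# below raised even though check_dtype=False was passed.
left = pd.Series([1, 2, 3], dtype="Int64")
right = pd.Series([1, 2, 3], dtype="int64")
pd.testing.assert_series_equal(left, right, check_dtype=False)
```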
https://api.github.com/repos/pandas-dev/pandas/pulls/35750
2020-08-16T04:14:10Z
2020-08-17T18:20:20Z
2020-08-17T18:20:20Z
2020-08-17T18:31:25Z
BUG: close file handles in mmap
diff --git a/pandas/io/common.py b/pandas/io/common.py index 54f35e689aac8..d1305c9cabe0e 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -18,6 +18,7 @@ Optional, Tuple, Type, + Union, ) from urllib.parse import ( urljoin, @@ -452,7 +453,7 @@ def get_handle( except ImportError: need_text_wrapping = (BufferedIOBase, RawIOBase) - handles: List[IO] = list() + handles: List[Union[IO, _MMapWrapper]] = list() f = path_or_buf # Convert pathlib.Path/py.path.local or string @@ -535,6 +536,8 @@ def get_handle( try: wrapped = _MMapWrapper(f) f.close() + handles.remove(f) + handles.append(wrapped) f = wrapped except Exception: # we catch any errors that may have occurred diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 3d5f6ae3a4af9..1d8d5a29686a4 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1836,6 +1836,7 @@ def test_raise_on_no_columns(all_parsers, nrows): parser.read_csv(StringIO(data)) +@td.check_file_leaks def test_memory_map(all_parsers, csv_dir_path): mmap_file = os.path.join(csv_dir_path, "test_mmap.csv") parser = all_parsers
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Broken off from #35711
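A hedged sketch of the leak being closed (the CSV path below is a placeholder, not from the patch):

```python
import pandas as pd

# With memory_map=True the parser wraps the open file in an mmap wrapper;
# before this change the original handle was closed but the wrapper itself
# was never tracked in `handles`, so it was left open after parsing.
df = pd.read_csv("test_mmap.csv", memory_map=True)  # placeholder path
```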
https://api.github.com/repos/pandas-dev/pandas/pulls/35748
2020-08-15T23:04:48Z
2020-08-17T18:58:32Z
2020-08-17T18:58:32Z
2020-08-17T19:44:57Z
REF: insert self.on column _after_ concat
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 966773b7c6982..ac96258cbc3c9 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -38,7 +38,7 @@ from pandas.core.base import DataError, PandasObject, SelectionMixin, ShallowMixin import pandas.core.common as com from pandas.core.construction import extract_array -from pandas.core.indexes.api import Index, MultiIndex, ensure_index +from pandas.core.indexes.api import Index, MultiIndex from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba from pandas.core.window.common import ( WindowGroupByMixin, @@ -402,36 +402,27 @@ def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries: return result final.append(result) - # if we have an 'on' column - # we want to put it back into the results - # in the same location - columns = self._selected_obj.columns - if self.on is not None and not self._on.equals(obj.index): - - name = self._on.name - final.append(Series(self._on, index=obj.index, name=name)) - - if self._selection is not None: - - selection = ensure_index(self._selection) - - # need to reorder to include original location of - # the on column (if its not already there) - if name not in selection: - columns = self.obj.columns - indexer = columns.get_indexer(selection.tolist() + [name]) - columns = columns.take(sorted(indexer)) - - # exclude nuisance columns so that they are not reindexed - if exclude is not None and exclude: - columns = [c for c in columns if c not in exclude] + exclude = exclude or [] + columns = [c for c in self._selected_obj.columns if c not in exclude] + if not columns and not len(final) and exclude: + raise DataError("No numeric types to aggregate") + elif not len(final): + return obj.astype("float64") - if not columns: - raise DataError("No numeric types to aggregate") + df = concat(final, axis=1).reindex(columns=columns, copy=False) - if not len(final): - return obj.astype("float64") - return concat(final, axis=1).reindex(columns=columns, copy=False) + # if we have an 'on' column we want to put it back into + # the results in the same location + if self.on is not None and not self._on.equals(obj.index): + name = self._on.name + extra_col = Series(self._on, index=obj.index, name=name) + if name not in df.columns and name not in df.index.names: + new_loc = len(df.columns) + df.insert(new_loc, name, extra_col) + elif name in df.columns: + # TODO: sure we want to overwrite results? + df[name] = extra_col + return df def _center_window(self, result, window) -> np.ndarray: """ @@ -2277,6 +2268,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: if isinstance(self.window, BaseIndexer): rolling_indexer = type(self.window) indexer_kwargs = self.window.__dict__ + assert isinstance(indexer_kwargs, dict) # We'll be using the index of each group later indexer_kwargs.pop("index_array", None) elif self.is_freq_type:
The idea here is to push towards #34714 by making `_wrap_results` do things in the same order as other similar methods do. cc @mroeschke LMK if there is a simpler way to accomplish this. Orthogonal to #35470, #35730, #35696.
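As a sketch of the case `_wrap_results` handles (my own example, assuming a monotonic datetime `on` column):

```python
import pandas as pd

# The "on" column is excluded from the window computation but must be
# re-inserted into the result in its original position after concat.
df = pd.DataFrame(
    {
        "ts": pd.date_range("2020-01-01", periods=4, freq="D"),
        "x": [1.0, 2.0, 3.0, 4.0],
    }
)
res = df.rolling("2D", on="ts").sum()  # "ts" reappears untouched in res
```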
https://api.github.com/repos/pandas-dev/pandas/pulls/35746
2020-08-15T22:17:21Z
2020-08-20T16:59:24Z
2020-08-20T16:59:23Z
2020-08-20T17:53:01Z
TST: encoding for URLs in read_csv
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 509ae89909699..b30a7b1ef34de 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -46,6 +46,21 @@ def check_compressed_urls(salaries_table, compression, extension, mode, engine): tm.assert_frame_equal(url_table, salaries_table) +@tm.network("https://raw.githubusercontent.com/", check_before_test=True) +def test_url_encoding_csv(): + """ + read_csv should honor the requested encoding for URLs. + + GH 10424 + """ + path = ( + "https://raw.githubusercontent.com/pandas-dev/pandas/master/" + + "pandas/tests/io/parser/data/unicode_series.csv" + ) + df = read_csv(path, encoding="latin-1", header=None) + assert df.loc[15, 1] == "Á köldum klaka (Cold Fever) (1994)" + + @pytest.fixture def tips_df(datapath): """DataFrame with the tips dataset."""
- [x] closes #10424 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Reading CSVs from URLs with a non-UTF-8 encoding should already work.
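Usage sketch matching the added test:

```python
import pandas as pd

# The requested encoding is now exercised for URL inputs; this is the
# same file and encoding the test above uses.
url = (
    "https://raw.githubusercontent.com/pandas-dev/pandas/master/"
    "pandas/tests/io/parser/data/unicode_series.csv"
)
df = pd.read_csv(url, encoding="latin-1", header=None)
```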
https://api.github.com/repos/pandas-dev/pandas/pulls/35742
2020-08-15T20:08:03Z
2020-08-17T18:59:43Z
2020-08-17T18:59:43Z
2020-08-17T19:53:42Z
DEPR: Deprecate pandas/io/date_converters.py
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index a1ce2f847d4b8..4dfabaa99fff6 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -930,7 +930,7 @@ take full advantage of the flexibility of the date parsing API: .. ipython:: python df = pd.read_csv('tmp.csv', header=None, parse_dates=date_spec, - date_parser=pd.io.date_converters.parse_date_time) + date_parser=pd.to_datetime) df Pandas will try to call the ``date_parser`` function in three different ways. If @@ -942,11 +942,6 @@ an exception is raised, the next one is tried: 2. If #1 fails, ``date_parser`` is called with all the columns concatenated row-wise into a single array (e.g., ``date_parser(['2013 1', '2013 2'])``). -3. If #2 fails, ``date_parser`` is called once for every row with one or more - string arguments from the columns indicated with `parse_dates` - (e.g., ``date_parser('2013', '1')`` for the first row, ``date_parser('2013', '2')`` - for the second, etc.). - Note that performance-wise, you should try these methods of parsing dates in order: 1. Try to infer the format using ``infer_datetime_format=True`` (see section below). @@ -958,14 +953,6 @@ Note that performance-wise, you should try these methods of parsing dates in ord For optimal performance, this should be vectorized, i.e., it should accept arrays as arguments. -You can explore the date parsing functionality in -`date_converters.py <https://github.com/pandas-dev/pandas/blob/master/pandas/io/date_converters.py>`__ -and add your own. We would love to turn this module into a community supported -set of date/time parsers. To get you started, ``date_converters.py`` contains -functions to parse dual date and time columns, year/month/day columns, -and year/month/day/hour/minute/second columns. It also contains a -``generic_parser`` function so you can curry it with a function that deals with -a single date rather than the entire array. .. ipython:: python :suppress: diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index bce6a735b7b07..7c7313219c15b 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -195,7 +195,7 @@ Deprecations ~~~~~~~~~~~~ - Deprecated parameter ``inplace`` in :meth:`MultiIndex.set_codes` and :meth:`MultiIndex.set_levels` (:issue:`35626`) - Deprecated parameter ``dtype`` in :~meth:`Index.copy` on method all index classes. Use the :meth:`Index.astype` method instead for changing dtype(:issue:`35853`) -- +- Date parser functions :func:`~pandas.io.date_converters.parse_date_time`, :func:`~pandas.io.date_converters.parse_date_fields`, :func:`~pandas.io.date_converters.parse_all_fields` and :func:`~pandas.io.date_converters.generic_parser` from ``pandas.io.date_converters`` are deprecated and will be removed in a future version; use :func:`to_datetime` instead (:issue:`35741`) .. --------------------------------------------------------------------------- diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py index 07919dbda63ae..f079a25f69fec 100644 --- a/pandas/io/date_converters.py +++ b/pandas/io/date_converters.py @@ -1,16 +1,46 @@ """This module is designed for community supported date conversion functions""" +import warnings + import numpy as np from pandas._libs.tslibs import parsing def parse_date_time(date_col, time_col): + """ + Parse columns with dates and times into a single datetime column. + + .. 
deprecated:: 1.2 + """ + warnings.warn( + """ + Use pd.to_datetime(date_col + " " + time_col) instead to get a Pandas Series. + Use pd.to_datetime(date_col + " " + time_col).to_pydatetime() instead to get a Numpy array. +""", # noqa: E501 + FutureWarning, + stacklevel=2, + ) date_col = _maybe_cast(date_col) time_col = _maybe_cast(time_col) return parsing.try_parse_date_and_time(date_col, time_col) def parse_date_fields(year_col, month_col, day_col): + """ + Parse columns with years, months and days into a single date column. + + .. deprecated:: 1.2 + """ + warnings.warn( + """ + Use pd.to_datetime({"year": year_col, "month": month_col, "day": day_col}) instead to get a Pandas Series. + Use ser = pd.to_datetime({"year": year_col, "month": month_col, "day": day_col}) and + np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array. +""", # noqa: E501 + FutureWarning, + stacklevel=2, + ) + year_col = _maybe_cast(year_col) month_col = _maybe_cast(month_col) day_col = _maybe_cast(day_col) @@ -18,6 +48,24 @@ def parse_date_fields(year_col, month_col, day_col): def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col, second_col): + """ + Parse columns with datetime information into a single datetime column. + + .. deprecated:: 1.2 + """ + + warnings.warn( + """ + Use pd.to_datetime({"year": year_col, "month": month_col, "day": day_col, + "hour": hour_col, "minute": minute_col, second": second_col}) instead to get a Pandas Series. + Use ser = pd.to_datetime({"year": year_col, "month": month_col, "day": day_col, + "hour": hour_col, "minute": minute_col, second": second_col}) and + np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array. +""", # noqa: E501 + FutureWarning, + stacklevel=2, + ) + year_col = _maybe_cast(year_col) month_col = _maybe_cast(month_col) day_col = _maybe_cast(day_col) @@ -30,6 +78,20 @@ def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col, second_ def generic_parser(parse_func, *cols): + """ + Use dateparser to parse columns with data information into a single datetime column. + + .. deprecated:: 1.2 + """ + + warnings.warn( + """ + Use pd.to_datetime instead. 
+""", + FutureWarning, + stacklevel=2, + ) + N = _check_columns(cols) results = np.empty(N, dtype=object) diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 833186b69c63b..662659982c0b3 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -370,7 +370,11 @@ def test_date_col_as_index_col(all_parsers): tm.assert_frame_equal(result, expected) -def test_multiple_date_cols_int_cast(all_parsers): +@pytest.mark.parametrize( + "date_parser, warning", + ([conv.parse_date_time, FutureWarning], [pd.to_datetime, None]), +) +def test_multiple_date_cols_int_cast(all_parsers, date_parser, warning): data = ( "KORD,19990127, 19:00:00, 18:56:00, 0.8100\n" "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n" @@ -382,13 +386,15 @@ def test_multiple_date_cols_int_cast(all_parsers): parse_dates = {"actual": [1, 2], "nominal": [1, 3]} parser = all_parsers - result = parser.read_csv( - StringIO(data), - header=None, - date_parser=conv.parse_date_time, - parse_dates=parse_dates, - prefix="X", - ) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=None, + date_parser=date_parser, + parse_dates=parse_dates, + prefix="X", + ) + expected = DataFrame( [ [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56), "KORD", 0.81], @@ -808,7 +814,9 @@ def test_parse_dates_custom_euro_format(all_parsers, kwargs): tm.assert_frame_equal(df, expected) else: msg = "got an unexpected keyword argument 'day_first'" - with pytest.raises(TypeError, match=msg): + with pytest.raises(TypeError, match=msg), tm.assert_produces_warning( + FutureWarning + ): parser.read_csv( StringIO(data), names=["time", "Q", "NTU"], @@ -1166,7 +1174,11 @@ def test_parse_dates_no_convert_thousands(all_parsers, data, kwargs, expected): tm.assert_frame_equal(result, expected) -def test_parse_date_time_multi_level_column_name(all_parsers): +@pytest.mark.parametrize( + "date_parser, warning", + ([conv.parse_date_time, FutureWarning], [pd.to_datetime, None]), +) +def test_parse_date_time_multi_level_column_name(all_parsers, date_parser, warning): data = """\ D,T,A,B date, time,a,b @@ -1174,12 +1186,13 @@ def test_parse_date_time_multi_level_column_name(all_parsers): 2001-01-06, 00:00:00, 1.0, 11. 
""" parser = all_parsers - result = parser.read_csv( - StringIO(data), - header=[0, 1], - parse_dates={"date_time": [0, 1]}, - date_parser=conv.parse_date_time, - ) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=[0, 1], + parse_dates={"date_time": [0, 1]}, + date_parser=date_parser, + ) expected_data = [ [datetime(2001, 1, 5, 9, 0, 0), 0.0, 10.0], @@ -1189,6 +1202,10 @@ def test_parse_date_time_multi_level_column_name(all_parsers): tm.assert_frame_equal(result, expected) +@pytest.mark.parametrize( + "date_parser, warning", + ([conv.parse_date_time, FutureWarning], [pd.to_datetime, None]), +) @pytest.mark.parametrize( "data,kwargs,expected", [ @@ -1261,9 +1278,10 @@ def test_parse_date_time_multi_level_column_name(all_parsers): ), ], ) -def test_parse_date_time(all_parsers, data, kwargs, expected): +def test_parse_date_time(all_parsers, data, kwargs, expected, date_parser, warning): parser = all_parsers - result = parser.read_csv(StringIO(data), date_parser=conv.parse_date_time, **kwargs) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv(StringIO(data), date_parser=date_parser, **kwargs) # Python can sometimes be flaky about how # the aggregated columns are entered, so @@ -1272,15 +1290,20 @@ def test_parse_date_time(all_parsers, data, kwargs, expected): tm.assert_frame_equal(result, expected) -def test_parse_date_fields(all_parsers): +@pytest.mark.parametrize( + "date_parser, warning", + ([conv.parse_date_fields, FutureWarning], [pd.to_datetime, None]), +) +def test_parse_date_fields(all_parsers, date_parser, warning): parser = all_parsers data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11." - result = parser.read_csv( - StringIO(data), - header=0, - parse_dates={"ymd": [0, 1, 2]}, - date_parser=conv.parse_date_fields, - ) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=0, + parse_dates={"ymd": [0, 1, 2]}, + date_parser=date_parser, + ) expected = DataFrame( [[datetime(2001, 1, 10), 10.0], [datetime(2001, 2, 1), 11.0]], @@ -1289,19 +1312,27 @@ def test_parse_date_fields(all_parsers): tm.assert_frame_equal(result, expected) -def test_parse_date_all_fields(all_parsers): +@pytest.mark.parametrize( + "date_parser, warning", + ( + [conv.parse_all_fields, FutureWarning], + [lambda x: pd.to_datetime(x, format="%Y %m %d %H %M %S"), None], + ), +) +def test_parse_date_all_fields(all_parsers, date_parser, warning): parser = all_parsers data = """\ year,month,day,hour,minute,second,a,b 2001,01,05,10,00,0,0.0,10. 2001,01,5,10,0,00,1.,11. 
""" - result = parser.read_csv( - StringIO(data), - header=0, - date_parser=conv.parse_all_fields, - parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, - ) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=0, + date_parser=date_parser, + parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, + ) expected = DataFrame( [ [datetime(2001, 1, 5, 10, 0, 0), 0.0, 10.0], @@ -1312,19 +1343,27 @@ def test_parse_date_all_fields(all_parsers): tm.assert_frame_equal(result, expected) -def test_datetime_fractional_seconds(all_parsers): +@pytest.mark.parametrize( + "date_parser, warning", + ( + [conv.parse_all_fields, FutureWarning], + [lambda x: pd.to_datetime(x, format="%Y %m %d %H %M %S.%f"), None], + ), +) +def test_datetime_fractional_seconds(all_parsers, date_parser, warning): parser = all_parsers data = """\ year,month,day,hour,minute,second,a,b 2001,01,05,10,00,0.123456,0.0,10. 2001,01,5,10,0,0.500000,1.,11. """ - result = parser.read_csv( - StringIO(data), - header=0, - date_parser=conv.parse_all_fields, - parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, - ) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=0, + date_parser=date_parser, + parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, + ) expected = DataFrame( [ [datetime(2001, 1, 5, 10, 0, 0, microsecond=123456), 0.0, 10.0], @@ -1339,12 +1378,13 @@ def test_generic(all_parsers): parser = all_parsers data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11." - result = parser.read_csv( - StringIO(data), - header=0, - parse_dates={"ym": [0, 1]}, - date_parser=lambda y, m: date(year=int(y), month=int(m), day=1), - ) + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=0, + parse_dates={"ym": [0, 1]}, + date_parser=lambda y, m: date(year=int(y), month=int(m), day=1), + ) expected = DataFrame( [[date(2001, 1, 1), 10, 10.0], [date(2001, 2, 1), 1, 11.0]], columns=["ym", "day", "a"], diff --git a/pandas/tests/io/test_date_converters.py b/pandas/tests/io/test_date_converters.py index cdb8eca02a3e5..a9fa27e091714 100644 --- a/pandas/tests/io/test_date_converters.py +++ b/pandas/tests/io/test_date_converters.py @@ -8,11 +8,12 @@ def test_parse_date_time(): + dates = np.array(["2007/1/3", "2008/2/4"], dtype=object) times = np.array(["05:07:09", "06:08:00"], dtype=object) expected = np.array([datetime(2007, 1, 3, 5, 7, 9), datetime(2008, 2, 4, 6, 8, 0)]) - - result = conv.parse_date_time(dates, times) + with tm.assert_produces_warning(FutureWarning): + result = conv.parse_date_time(dates, times) tm.assert_numpy_array_equal(result, expected) @@ -20,9 +21,10 @@ def test_parse_date_fields(): days = np.array([3, 4]) months = np.array([1, 2]) years = np.array([2007, 2008]) - result = conv.parse_date_fields(years, months, days) - expected = np.array([datetime(2007, 1, 3), datetime(2008, 2, 4)]) + + with tm.assert_produces_warning(FutureWarning): + result = conv.parse_date_fields(years, months, days) tm.assert_numpy_array_equal(result, expected) @@ -34,7 +36,8 @@ def test_parse_all_fields(): days = np.array([3, 4]) years = np.array([2007, 2008]) months = np.array([1, 2]) - - result = conv.parse_all_fields(years, months, days, hours, minutes, seconds) expected = np.array([datetime(2007, 1, 3, 5, 7, 9), datetime(2008, 2, 4, 6, 8, 0)]) + + with tm.assert_produces_warning(FutureWarning): + result = conv.parse_all_fields(years, months, days, hours, minutes, seconds) 
tm.assert_numpy_array_equal(result, expected)
- [x] closes #24518 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
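A sketch of the migration the new warnings recommend (my illustration, not from the patch): replace the `date_converters` helpers with a direct `pd.to_datetime` call.

```python
import pandas as pd

# Instead of pandas.io.date_converters.parse_date_fields(years, months, days),
# assemble the datetimes from a dict of year/month/day columns, as the
# deprecation warnings suggest.
ser = pd.to_datetime({"year": [2007, 2008], "month": [1, 2], "day": [3, 4]})
```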
https://api.github.com/repos/pandas-dev/pandas/pulls/35741
2020-08-15T19:26:47Z
2020-09-12T21:37:58Z
2020-09-12T21:37:58Z
2020-09-12T21:38:03Z
REF: _apply_blockwise define exclude in terms of skipped
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index ac96258cbc3c9..f516871f789d0 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -12,7 +12,7 @@ from pandas._libs.tslibs import BaseOffset, to_offset import pandas._libs.window.aggregations as window_aggregations -from pandas._typing import ArrayLike, Axis, FrameOrSeries, Scalar +from pandas._typing import ArrayLike, Axis, FrameOrSeries, Label from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, cache_readonly, doc @@ -381,21 +381,31 @@ def _wrap_result(self, result, block=None, obj=None): return type(obj)(result, index=index, columns=block.columns) return result - def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries: + def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeries: """ Wrap the results. Parameters ---------- results : list of ndarrays - blocks : list of blocks obj : conformed data (may be resampled) - exclude: list of columns to exclude, default to None + skipped: List[int] + Indices of blocks that are skipped. """ from pandas import Series, concat + exclude: List[Label] = [] + if obj.ndim == 2: + orig_blocks = list(obj._to_dict_of_blocks(copy=False).values()) + for i in skipped: + exclude.extend(orig_blocks[i].columns) + else: + orig_blocks = [obj] + + kept_blocks = [blk for i, blk in enumerate(orig_blocks) if i not in skipped] + final = [] - for result, block in zip(results, blocks): + for result, block in zip(results, kept_blocks): result = self._wrap_result(result, block=block, obj=obj) if result.ndim == 1: @@ -491,7 +501,6 @@ def _apply_blockwise( skipped: List[int] = [] results: List[ArrayLike] = [] - exclude: List[Scalar] = [] for i, b in enumerate(blocks): try: values = self._prep_values(b.values) @@ -499,7 +508,6 @@ def _apply_blockwise( except (TypeError, NotImplementedError) as err: if isinstance(obj, ABCDataFrame): skipped.append(i) - exclude.extend(b.columns) continue else: raise DataError("No numeric types to aggregate") from err @@ -507,8 +515,7 @@ def _apply_blockwise( result = homogeneous_func(values) results.append(result) - block_list = [blk for i, blk in enumerate(blocks) if i not in skipped] - return self._wrap_results(results, block_list, obj, exclude) + return self._wrap_results(results, obj, skipped) def _apply( self, @@ -1283,7 +1290,7 @@ def count(self): ).sum() results.append(result) - return self._wrap_results(results, blocks, obj) + return self._wrap_results(results, obj, skipped=[]) _shared_docs["apply"] = dedent( r"""
orthogonal to #35730
https://api.github.com/repos/pandas-dev/pandas/pulls/35740
2020-08-15T19:16:36Z
2020-08-21T21:56:51Z
2020-08-21T21:56:51Z
2020-08-21T22:06:35Z
Backport PR #35723 on branch 1.1.x (agg with list of non-aggregating functions)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 85e2a335c55c6..565b4a014bd0c 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -26,6 +26,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) - Fixed regression where :meth:`DataFrame.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) +- Fixed regression in :meth:`~pandas.core.groupby.DataFrameGroupBy.agg` where a list of functions would produce the wrong results if at least one of the functions did not aggregate. (:issue:`35490`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index c50b753cf3293..f5858c5c54f1d 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -322,11 +322,14 @@ def _aggregate_multiple_funcs(self, arg): # let higher level handle return results - output = self._wrap_aggregated_output(results) + output = self._wrap_aggregated_output(results, index=None) return self.obj._constructor_expanddim(output, columns=columns) + # TODO: index should not be Optional - see GH 35490 def _wrap_series_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]], index: Index, + self, + output: Mapping[base.OutputKey, Union[Series, np.ndarray]], + index: Optional[Index], ) -> Union[Series, DataFrame]: """ Wraps the output of a SeriesGroupBy operation into the expected result. @@ -335,7 +338,7 @@ def _wrap_series_output( ---------- output : Mapping[base.OutputKey, Union[Series, np.ndarray]] Data to wrap. - index : pd.Index + index : pd.Index or None Index to apply to the output. Returns @@ -363,8 +366,11 @@ def _wrap_series_output( return result + # TODO: Remove index argument, use self.grouper.result_index, see GH 35490 def _wrap_aggregated_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]] + self, + output: Mapping[base.OutputKey, Union[Series, np.ndarray]], + index: Optional[Index], ) -> Union[Series, DataFrame]: """ Wraps the output of a SeriesGroupBy aggregation into the expected result. @@ -383,9 +389,7 @@ def _wrap_aggregated_output( In the vast majority of cases output will only contain one element. The exception is operations that expand dimensions, like ohlc. """ - result = self._wrap_series_output( - output=output, index=self.grouper.result_index - ) + result = self._wrap_series_output(output=output, index=index) return self._reindex_output(result) def _wrap_transformed_output( @@ -1714,7 +1718,9 @@ def _insert_inaxis_grouper_inplace(self, result): result.insert(0, name, lev) def _wrap_aggregated_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]] + self, + output: Mapping[base.OutputKey, Union[Series, np.ndarray]], + index: Optional[Index], ) -> DataFrame: """ Wraps the output of DataFrameGroupBy aggregations into the expected result. 
@@ -1739,8 +1745,7 @@ def _wrap_aggregated_output( self._insert_inaxis_grouper_inplace(result) result = result._consolidate() else: - index = self.grouper.result_index - result.index = index + result.index = self.grouper.result_index if self.axis == 1: result = result.T diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index ac45222625569..11d0c8e42f745 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -973,7 +973,9 @@ def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs): return self._wrap_transformed_output(output) - def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]): + def _wrap_aggregated_output( + self, output: Mapping[base.OutputKey, np.ndarray], index: Optional[Index] + ): raise AbstractMethodError(self) def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]): @@ -1048,7 +1050,7 @@ def _cython_agg_general( if len(output) == 0: raise DataError("No numeric types to aggregate") - return self._wrap_aggregated_output(output) + return self._wrap_aggregated_output(output, index=self.grouper.result_index) def _python_agg_general( self, func, *args, engine="cython", engine_kwargs=None, **kwargs @@ -1101,7 +1103,7 @@ def _python_agg_general( output[key] = maybe_cast_result(values[mask], result) - return self._wrap_aggregated_output(output) + return self._wrap_aggregated_output(output, index=self.grouper.result_index) def _concat_objects(self, keys, values, not_indexed_same: bool = False): from pandas.core.reshape.concat import concat @@ -2521,7 +2523,7 @@ def _get_cythonized_result( raise TypeError(error_msg) if aggregate: - return self._wrap_aggregated_output(output) + return self._wrap_aggregated_output(output, index=self.grouper.result_index) else: return self._wrap_transformed_output(output) diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 40a20c8210052..ce9d4b892d775 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -1061,3 +1061,16 @@ def test_groupby_get_by_index(): res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])}) expected = pd.DataFrame(dict(A=["S", "W"], B=[1.0, 2.0])).set_index("A") pd.testing.assert_frame_equal(res, expected) + + +def test_nonagg_agg(): + # GH 35490 - Single/Multiple agg of non-agg function give same results + # TODO: agg should raise for functions that don't aggregate + df = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]}) + g = df.groupby("a") + + result = g.agg(["cumsum"]) + result.columns = result.columns.droplevel(-1) + expected = g.agg("cumsum") + + tm.assert_frame_equal(result, expected)
Backport PR #35723: agg with list of non-aggregating functions
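The regression being backported, in sketch form (taken from the new test):

```python
import pandas as pd

# A list containing a non-aggregating function should give the same
# values as calling that function directly.
df = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]})
g = df.groupby("a")
result = g.agg(["cumsum"])
result.columns = result.columns.droplevel(-1)
expected = g.agg("cumsum")  # result and expected should now match
```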
https://api.github.com/repos/pandas-dev/pandas/pulls/35738
2020-08-15T15:55:41Z
2020-08-15T16:59:36Z
2020-08-15T16:59:36Z
2020-08-15T16:59:37Z
BUG/ENH: to_pickle/read_pickle support compression for file ojects
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 8b28a4439e1da..44dd5ba122acd 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -295,6 +295,7 @@ I/O - :meth:`to_csv` passes compression arguments for `'gzip'` always to `gzip.GzipFile` (:issue:`28103`) - :meth:`to_csv` did not support zip compression for binary file object not having a filename (:issue: `35058`) - :meth:`to_csv` and :meth:`read_csv` did not honor `compression` and `encoding` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`) +- :meth:`to_picke` and :meth:`read_pickle` did not support compression for file-objects (:issue:`26237`, :issue:`29054`, and :issue:`29570`) Plotting ^^^^^^^^ diff --git a/pandas/_typing.py b/pandas/_typing.py index 74bfc9134c3af..b237013ac7805 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -116,7 +116,7 @@ # compression keywords and compression -CompressionDict = Mapping[str, Optional[Union[str, int, bool]]] +CompressionDict = Dict[str, Any] CompressionOptions = Optional[Union[str, CompressionDict]] @@ -138,6 +138,6 @@ class IOargs(Generic[ModeVar, EncodingVar]): filepath_or_buffer: FileOrBuffer encoding: EncodingVar - compression: CompressionOptions + compression: CompressionDict should_close: bool mode: Union[ModeVar, str] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c48bec9b670ad..1713743b98bff 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -27,7 +27,6 @@ Iterable, Iterator, List, - Mapping, Optional, Sequence, Set, @@ -49,6 +48,7 @@ ArrayLike, Axes, Axis, + CompressionOptions, Dtype, FilePathOrBuffer, FrameOrSeriesUnion, @@ -2062,7 +2062,7 @@ def to_stata( variable_labels: Optional[Dict[Label, str]] = None, version: Optional[int] = 114, convert_strl: Optional[Sequence[Label]] = None, - compression: Union[str, Mapping[str, str], None] = "infer", + compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> None: """ diff --git a/pandas/io/common.py b/pandas/io/common.py index 2b13d54ec3aed..a80b89569f429 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -205,11 +205,13 @@ def get_filepath_or_buffer( """ filepath_or_buffer = stringify_path(filepath_or_buffer) + # handle compression dict + compression_method, compression = get_compression_method(compression) + compression_method = infer_compression(filepath_or_buffer, compression_method) + compression = dict(compression, method=compression_method) + # bz2 and xz do not write the byte order mark for utf-16 and utf-32 # print a warning when writing such files - compression_method = infer_compression( - filepath_or_buffer, get_compression_method(compression)[0] - ) if ( mode and "w" in mode @@ -238,7 +240,7 @@ def get_filepath_or_buffer( content_encoding = req.headers.get("Content-Encoding", None) if content_encoding == "gzip": # Override compression based on Content-Encoding header - compression = "gzip" + compression = {"method": "gzip"} reader = BytesIO(req.read()) req.close() return IOargs( @@ -374,11 +376,7 @@ def get_compression_method( if isinstance(compression, Mapping): compression_args = dict(compression) try: - # error: Incompatible types in assignment (expression has type - # "Union[str, int, None]", variable has type "Optional[str]") - compression_method = compression_args.pop( # type: ignore[assignment] - "method" - ) + compression_method = compression_args.pop("method") except KeyError as err: raise ValueError("If 
mapping, compression must have key 'method'") from err else: @@ -652,12 +650,8 @@ def __init__( super().__init__(file, mode, **kwargs_zip) # type: ignore[arg-type] def write(self, data): - archive_name = self.filename - if self.archive_name is not None: - archive_name = self.archive_name - if archive_name is None: - # ZipFile needs a non-empty string - archive_name = "zip" + # ZipFile needs a non-empty string + archive_name = self.archive_name or self.filename or "zip" super().writestr(archive_name, data) @property diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 270caec022fef..15cd5c026c6b6 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -21,12 +21,7 @@ ) from pandas.core.dtypes.missing import notna -from pandas.io.common import ( - get_compression_method, - get_filepath_or_buffer, - get_handle, - infer_compression, -) +from pandas.io.common import get_filepath_or_buffer, get_handle class CSVFormatter: @@ -60,17 +55,15 @@ def __init__( if path_or_buf is None: path_or_buf = StringIO() - # Extract compression mode as given, if dict - compression, self.compression_args = get_compression_method(compression) - self.compression = infer_compression(path_or_buf, compression) - ioargs = get_filepath_or_buffer( path_or_buf, encoding=encoding, - compression=self.compression, + compression=compression, mode=mode, storage_options=storage_options, ) + self.compression = ioargs.compression.pop("method") + self.compression_args = ioargs.compression self.path_or_buf = ioargs.filepath_or_buffer self.should_close = ioargs.should_close self.mode = ioargs.mode diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 7a3b76ff7e3d0..a4d923fdbe45a 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -19,12 +19,7 @@ from pandas.core.construction import create_series_with_explicit_dtype from pandas.core.reshape.concat import concat -from pandas.io.common import ( - get_compression_method, - get_filepath_or_buffer, - get_handle, - infer_compression, -) +from pandas.io.common import get_compression_method, get_filepath_or_buffer, get_handle from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import build_table_schema, parse_table_schema from pandas.io.parsers import _validate_integer @@ -66,6 +61,7 @@ def to_json( ) path_or_buf = ioargs.filepath_or_buffer should_close = ioargs.should_close + compression = ioargs.compression if lines and orient != "records": raise ValueError("'lines' keyword only valid when 'orient' is records") @@ -616,9 +612,6 @@ def read_json( if encoding is None: encoding = "utf-8" - compression_method, compression = get_compression_method(compression) - compression_method = infer_compression(path_or_buf, compression_method) - compression = dict(compression, method=compression_method) ioargs = get_filepath_or_buffer( path_or_buf, encoding=encoding, diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index c6ef5221e7ead..a0466c5ac6b57 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -63,12 +63,7 @@ from pandas.core.series import Series from pandas.core.tools import datetimes as tools -from pandas.io.common import ( - get_filepath_or_buffer, - get_handle, - infer_compression, - validate_header_arg, -) +from pandas.io.common import get_filepath_or_buffer, get_handle, validate_header_arg from pandas.io.date_converters import generic_parser # BOM character (byte order mark) @@ -424,9 +419,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): if encoding 
is not None: encoding = re.sub("_", "-", encoding).lower() kwds["encoding"] = encoding - compression = kwds.get("compression", "infer") - compression = infer_compression(filepath_or_buffer, compression) # TODO: get_filepath_or_buffer could return # Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile] @@ -1976,6 +1969,10 @@ def __init__(self, src, **kwds): encoding = kwds.get("encoding") + # parsers.TextReader doesn't support compression dicts + if isinstance(kwds.get("compression"), dict): + kwds["compression"] = kwds["compression"]["method"] + if kwds.get("compression") is None and encoding: if isinstance(src, str): src = open(src, "rb") diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 857a2d1b69be4..655deb5ca3779 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -92,11 +92,8 @@ def to_pickle( mode="wb", storage_options=storage_options, ) - compression = ioargs.compression - if not isinstance(ioargs.filepath_or_buffer, str) and compression == "infer": - compression = None f, fh = get_handle( - ioargs.filepath_or_buffer, "wb", compression=compression, is_text=False + ioargs.filepath_or_buffer, "wb", compression=ioargs.compression, is_text=False ) if protocol < 0: protocol = pickle.HIGHEST_PROTOCOL @@ -196,11 +193,8 @@ def read_pickle( ioargs = get_filepath_or_buffer( filepath_or_buffer, compression=compression, storage_options=storage_options ) - compression = ioargs.compression - if not isinstance(ioargs.filepath_or_buffer, str) and compression == "infer": - compression = None f, fh = get_handle( - ioargs.filepath_or_buffer, "rb", compression=compression, is_text=False + ioargs.filepath_or_buffer, "rb", compression=ioargs.compression, is_text=False ) # 1) try standard library Pickle diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 34d520004cc65..b3b16e04a5d9e 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -16,18 +16,7 @@ from pathlib import Path import struct import sys -from typing import ( - Any, - AnyStr, - BinaryIO, - Dict, - List, - Mapping, - Optional, - Sequence, - Tuple, - Union, -) +from typing import Any, AnyStr, BinaryIO, Dict, List, Optional, Sequence, Tuple, Union import warnings from dateutil.relativedelta import relativedelta @@ -58,13 +47,7 @@ from pandas.core.indexes.base import Index from pandas.core.series import Series -from pandas.io.common import ( - get_compression_method, - get_filepath_or_buffer, - get_handle, - infer_compression, - stringify_path, -) +from pandas.io.common import get_filepath_or_buffer, get_handle, stringify_path _version_error = ( "Version of given Stata file is {version}. 
pandas supports importing " @@ -1976,9 +1959,6 @@ def _open_file_binary_write( return fname, False, None # type: ignore[return-value] elif isinstance(fname, (str, Path)): # Extract compression mode as given, if dict - compression_typ, compression_args = get_compression_method(compression) - compression_typ = infer_compression(fname, compression_typ) - compression = dict(compression_args, method=compression_typ) ioargs = get_filepath_or_buffer( fname, mode="wb", compression=compression, storage_options=storage_options ) @@ -2235,7 +2215,7 @@ def __init__( time_stamp: Optional[datetime.datetime] = None, data_label: Optional[str] = None, variable_labels: Optional[Dict[Label, str]] = None, - compression: Union[str, Mapping[str, str], None] = "infer", + compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ): super().__init__() @@ -3118,7 +3098,7 @@ def __init__( data_label: Optional[str] = None, variable_labels: Optional[Dict[Label, str]] = None, convert_strl: Optional[Sequence[Label]] = None, - compression: Union[str, Mapping[str, str], None] = "infer", + compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ): # Copy to new list since convert_strl might be modified later @@ -3523,7 +3503,7 @@ def __init__( variable_labels: Optional[Dict[Label, str]] = None, convert_strl: Optional[Sequence[Label]] = None, version: Optional[int] = None, - compression: Union[str, Mapping[str, str], None] = "infer", + compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ): if version is None: diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 6331113ab8945..d1c6705dd7a6f 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -14,7 +14,9 @@ import datetime import glob import gzip +import io import os +from pathlib import Path import pickle import shutil from warnings import catch_warnings, simplefilter @@ -486,3 +488,30 @@ def test_read_pickle_with_subclass(): tm.assert_series_equal(result[0], expected[0]) assert isinstance(result[1], MyTz) + + +def test_pickle_binary_object_compression(compression): + """ + Read/write from binary file-objects w/wo compression. + + GH 26237, GH 29054, and GH 29570 + """ + df = tm.makeDataFrame() + + # reference for compression + with tm.ensure_clean() as path: + df.to_pickle(path, compression=compression) + reference = Path(path).read_bytes() + + # write + buffer = io.BytesIO() + df.to_pickle(buffer, compression=compression) + buffer.seek(0) + + # gzip and zip safe the filename: cannot compare the compressed content + assert buffer.getvalue() == reference or compression in ("gzip", "zip") + + # read + read_df = pd.read_pickle(buffer, compression=compression) + buffer.seek(0) + tm.assert_frame_equal(df, read_df)
- [x] closes #26237, closes #29054, and closes #29570 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This was basically already supported, but `to_pickle`/`read_pickle` set `compression` to `None` for file objects. Some functions called `get_filepath_or_buffer` (which may convert a string to a file object) before calling `infer_compression` (which does not work with file objects). I moved `infer_compression` and `get_compression_method` inside `get_filepath_or_buffer`.
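Usage sketch of the newly supported behavior (mirrors the added test):

```python
import io

import pandas as pd

# Compression is now honored when a binary buffer is passed instead of a path.
df = pd.DataFrame({"a": [1, 2, 3]})
buf = io.BytesIO()
df.to_pickle(buf, compression="gzip")
buf.seek(0)
roundtripped = pd.read_pickle(buf, compression="gzip")
```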
https://api.github.com/repos/pandas-dev/pandas/pulls/35736
2020-08-15T15:24:31Z
2020-09-05T14:50:04Z
2020-09-05T14:50:04Z
2020-09-05T17:47:58Z
BLD: update min versions #35732
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 81eac490fe5b9..689c7c889ef66 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -11,25 +11,25 @@ "fsspec": "0.7.4", "fastparquet": "0.3.2", "gcsfs": "0.6.0", - "lxml.etree": "3.8.0", - "matplotlib": "2.2.2", - "numexpr": "2.6.2", + "lxml.etree": "4.3.0", + "matplotlib": "2.2.3", + "numexpr": "2.6.8", "odfpy": "1.3.0", "openpyxl": "2.5.7", "pandas_gbq": "0.12.0", - "pyarrow": "0.13.0", - "pytables": "3.4.3", + "pyarrow": "0.15.0", + "pytables": "3.4.4", "pytest": "5.0.1", "pyxlsb": "1.0.6", "s3fs": "0.4.0", "scipy": "1.2.0", - "sqlalchemy": "1.1.4", - "tables": "3.4.3", + "sqlalchemy": "1.2.8", + "tables": "3.4.4", "tabulate": "0.8.3", - "xarray": "0.8.2", + "xarray": "0.12.0", "xlrd": "1.2.0", - "xlwt": "1.2.0", - "xlsxwriter": "0.9.8", + "xlwt": "1.3.0", + "xlsxwriter": "1.0.2", "numba": "0.46.0", }
- [x] closes #35732
https://api.github.com/repos/pandas-dev/pandas/pulls/35733
2020-08-14T22:45:03Z
2020-08-17T13:11:54Z
2020-08-17T13:11:54Z
2020-08-17T15:39:20Z
REF: move towards making _apply_blockwise actually block-wise
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index f516871f789d0..f7e81f41b8675 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -6,13 +6,23 @@ from functools import partial import inspect from textwrap import dedent -from typing import Callable, Dict, List, Optional, Set, Tuple, Type, Union +from typing import ( + TYPE_CHECKING, + Callable, + Dict, + List, + Optional, + Set, + Tuple, + Type, + Union, +) import numpy as np from pandas._libs.tslibs import BaseOffset, to_offset import pandas._libs.window.aggregations as window_aggregations -from pandas._typing import ArrayLike, Axis, FrameOrSeries, Label +from pandas._typing import ArrayLike, Axis, FrameOrSeriesUnion, Label from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, cache_readonly, doc @@ -55,6 +65,9 @@ ) from pandas.core.window.numba_ import generate_numba_apply_func +if TYPE_CHECKING: + from pandas import Series + def calculate_center_offset(window) -> int: """ @@ -145,7 +158,7 @@ class _Window(PandasObject, ShallowMixin, SelectionMixin): def __init__( self, - obj: FrameOrSeries, + obj: FrameOrSeriesUnion, window=None, min_periods: Optional[int] = None, center: bool = False, @@ -219,7 +232,7 @@ def _validate_get_window_bounds_signature(window: BaseIndexer) -> None: f"get_window_bounds" ) - def _create_blocks(self, obj: FrameOrSeries): + def _create_blocks(self, obj: FrameOrSeriesUnion): """ Split data into blocks & return conformed data. """ @@ -381,7 +394,7 @@ def _wrap_result(self, result, block=None, obj=None): return type(obj)(result, index=index, columns=block.columns) return result - def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeries: + def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeriesUnion: """ Wrap the results. 
@@ -394,22 +407,23 @@ def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeries: """ from pandas import Series, concat + if obj.ndim == 1: + if not results: + raise DataError("No numeric types to aggregate") + assert len(results) == 1 + return Series(results[0], index=obj.index, name=obj.name) + exclude: List[Label] = [] - if obj.ndim == 2: - orig_blocks = list(obj._to_dict_of_blocks(copy=False).values()) - for i in skipped: - exclude.extend(orig_blocks[i].columns) - else: - orig_blocks = [obj] + orig_blocks = list(obj._to_dict_of_blocks(copy=False).values()) + for i in skipped: + exclude.extend(orig_blocks[i].columns) kept_blocks = [blk for i, blk in enumerate(orig_blocks) if i not in skipped] final = [] for result, block in zip(results, kept_blocks): - result = self._wrap_result(result, block=block, obj=obj) - if result.ndim == 1: - return result + result = type(obj)(result, index=obj.index, columns=block.columns) final.append(result) exclude = exclude or [] @@ -488,13 +502,31 @@ def _get_window_indexer(self, window: int) -> BaseIndexer: return VariableWindowIndexer(index_array=self._on.asi8, window_size=window) return FixedWindowIndexer(window_size=window) + def _apply_series(self, homogeneous_func: Callable[..., ArrayLike]) -> "Series": + """ + Series version of _apply_blockwise + """ + _, obj = self._create_blocks(self._selected_obj) + values = obj.values + + try: + values = self._prep_values(obj.values) + except (TypeError, NotImplementedError) as err: + raise DataError("No numeric types to aggregate") from err + + result = homogeneous_func(values) + return obj._constructor(result, index=obj.index, name=obj.name) + def _apply_blockwise( self, homogeneous_func: Callable[..., ArrayLike] - ) -> FrameOrSeries: + ) -> FrameOrSeriesUnion: """ Apply the given function to the DataFrame broken down into homogeneous sub-frames. """ + if self._selected_obj.ndim == 1: + return self._apply_series(homogeneous_func) + # This isn't quite blockwise, since `blocks` is actually a collection # of homogenenous DataFrames. blocks, obj = self._create_blocks(self._selected_obj) @@ -505,12 +537,9 @@ def _apply_blockwise( try: values = self._prep_values(b.values) - except (TypeError, NotImplementedError) as err: - if isinstance(obj, ABCDataFrame): - skipped.append(i) - continue - else: - raise DataError("No numeric types to aggregate") from err + except (TypeError, NotImplementedError): + skipped.append(i) + continue result = homogeneous_func(values) results.append(result) @@ -2234,7 +2263,7 @@ def _apply( def _constructor(self): return Rolling - def _create_blocks(self, obj: FrameOrSeries): + def _create_blocks(self, obj: FrameOrSeriesUnion): """ Split data into blocks & return conformed data. """ @@ -2275,7 +2304,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: if isinstance(self.window, BaseIndexer): rolling_indexer = type(self.window) indexer_kwargs = self.window.__dict__ - assert isinstance(indexer_kwargs, dict) + assert isinstance(indexer_kwargs, dict) # for mypy # We'll be using the index of each group later indexer_kwargs.pop("index_array", None) elif self.is_freq_type:
A step towards #34714, orthogonal to the other outstanding PR in this vein, #35696.
https://api.github.com/repos/pandas-dev/pandas/pulls/35730
2020-08-14T21:42:51Z
2020-08-23T00:06:17Z
2020-08-23T00:06:17Z
2020-08-23T01:26:18Z
DOC: Fix broken link in cookbook.rst
diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst index 49487ac327e73..7542e1dc7df6f 100644 --- a/doc/source/user_guide/cookbook.rst +++ b/doc/source/user_guide/cookbook.rst @@ -765,7 +765,7 @@ Timeseries <https://stackoverflow.com/questions/13893227/vectorized-look-up-of-values-in-pandas-dataframe>`__ `Aggregation and plotting time series -<http://nipunbatra.github.io/2015/06/timeseries/>`__ +<https://nipunbatra.github.io/blog/visualisation/2013/05/01/aggregation-timeseries.html>`__ Turn a matrix with hours in columns and days in rows into a continuous row sequence in the form of a time series. `How to rearrange a Python pandas DataFrame?
The original link [Aggregation and plotting time series](http://nipunbatra.github.io/2015/06/timeseries/) found in the [Pandas Cookbook](https://pandas.pydata.org/pandas-docs/stable/user_guide/cookbook.html?highlight=get_group#timeseries) is broken. This appears to have been moved to the author's [blog](https://nipunbatra.github.io/blog/visualisation/2013/05/01/aggregation-timeseries.html). While the date does not match (2013/05 vs 2015/06), the contents appear identical. I was able to confirm this by viewing the archive at the [Wayback Machine](https://web.archive.org/web/20161202094122/http://nipunbatra.github.io/2015/06/timeseries/). - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35729
2020-08-14T18:29:27Z
2020-08-14T21:01:49Z
2020-08-14T21:01:49Z
2020-08-14T22:06:03Z
BLD: bump xlrd min version to 1.2.0
diff --git a/ci/deps/azure-37-locale_slow.yaml b/ci/deps/azure-37-locale_slow.yaml index 3ccb66e09fe7e..8000f3e6b9a9c 100644 --- a/ci/deps/azure-37-locale_slow.yaml +++ b/ci/deps/azure-37-locale_slow.yaml @@ -24,7 +24,7 @@ dependencies: - pytz=2017.3 - scipy - sqlalchemy=1.2.8 - - xlrd=1.1.0 + - xlrd=1.2.0 - xlsxwriter=1.0.2 - xlwt=1.3.0 - html5lib=1.0.1 diff --git a/ci/deps/azure-37-minimum_versions.yaml b/ci/deps/azure-37-minimum_versions.yaml index 94cc5812bcc10..05b1957198bc4 100644 --- a/ci/deps/azure-37-minimum_versions.yaml +++ b/ci/deps/azure-37-minimum_versions.yaml @@ -25,7 +25,7 @@ dependencies: - pytz=2017.3 - pyarrow=0.15 - scipy=1.2 - - xlrd=1.1.0 + - xlrd=1.2.0 - xlsxwriter=1.0.2 - xlwt=1.3.0 - html5lib=1.0.1 diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 7ab150394bf51..4c270117e079e 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -287,7 +287,7 @@ s3fs 0.4.0 Amazon S3 access tabulate 0.8.3 Printing in Markdown-friendly format (see `tabulate`_) xarray 0.12.0 pandas-like API for N-dimensional data xclip Clipboard I/O on linux -xlrd 1.1.0 Excel reading +xlrd 1.2.0 Excel reading xlwt 1.3.0 Excel writing xsel Clipboard I/O on linux zlib Compression for HDF5 diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index a3bb6dfd86bd2..42f95d88d74ac 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -122,7 +122,7 @@ Optional libraries below the lowest tested version may still work, but are not c +-----------------+-----------------+---------+ | xarray | 0.12.0 | X | +-----------------+-----------------+---------+ -| xlrd | 1.1.0 | | +| xlrd | 1.2.0 | X | +-----------------+-----------------+---------+ | xlsxwriter | 1.0.2 | X | +-----------------+-----------------+---------+ diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 6423064732def..81eac490fe5b9 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -27,7 +27,7 @@ "tables": "3.4.3", "tabulate": "0.8.3", "xarray": "0.8.2", - "xlrd": "1.1.0", + "xlrd": "1.2.0", "xlwt": "1.2.0", "xlsxwriter": "0.9.8", "numba": "0.46.0", diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index b610c5ec3a838..51fbbf836a03f 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -1,9 +1,7 @@ -import contextlib from datetime import datetime, time from functools import partial import os from urllib.error import URLError -import warnings import numpy as np import pytest @@ -14,22 +12,6 @@ from pandas import DataFrame, Index, MultiIndex, Series import pandas._testing as tm - -@contextlib.contextmanager -def ignore_xlrd_time_clock_warning(): - """ - Context manager to ignore warnings raised by the xlrd library, - regarding the deprecation of `time.clock` in Python 3.7. 
- """ - with warnings.catch_warnings(): - warnings.filterwarnings( - action="ignore", - message="time.clock has been deprecated", - category=DeprecationWarning, - ) - yield - - read_ext_params = [".xls", ".xlsx", ".xlsm", ".xlsb", ".ods"] engine_params = [ # Add any engines to test here @@ -134,21 +116,19 @@ def test_usecols_int(self, read_ext, df_ref): # usecols as int msg = "Passing an integer for `usecols`" with pytest.raises(ValueError, match=msg): - with ignore_xlrd_time_clock_warning(): - pd.read_excel( - "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=3 - ) + pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=3 + ) # usecols as int with pytest.raises(ValueError, match=msg): - with ignore_xlrd_time_clock_warning(): - pd.read_excel( - "test1" + read_ext, - sheet_name="Sheet2", - skiprows=[1], - index_col=0, - usecols=3, - ) + pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols=3, + ) def test_usecols_list(self, read_ext, df_ref): if pd.read_excel.keywords["engine"] == "pyxlsb": @@ -597,8 +577,7 @@ def test_sheet_name(self, read_ext, df_ref): df1 = pd.read_excel( filename + read_ext, sheet_name=sheet_name, index_col=0 ) # doc - with ignore_xlrd_time_clock_warning(): - df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name) + df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name) tm.assert_frame_equal(df1, df_ref, check_names=False) tm.assert_frame_equal(df2, df_ref, check_names=False)
Warnings about `time.clock` are filling up the py37 min_version build. xlrd 1.2.0 was released on Dec 15, 2018, so I think we're safe to bump it.
https://api.github.com/repos/pandas-dev/pandas/pulls/35728
2020-08-14T16:53:02Z
2020-08-14T21:01:03Z
2020-08-14T21:01:03Z
2020-08-15T16:02:08Z
CLN: remove unused variable
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 0306d4de2fc73..966773b7c6982 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -318,7 +318,7 @@ def __repr__(self) -> str: def __iter__(self): window = self._get_window(win_type=None) - blocks, obj = self._create_blocks(self._selected_obj) + _, obj = self._create_blocks(self._selected_obj) index = self._get_window_indexer(window=window) start, end = index.get_window_bounds(
Tiny step towards #34714.
https://api.github.com/repos/pandas-dev/pandas/pulls/35726
2020-08-14T16:26:34Z
2020-08-14T19:50:21Z
2020-08-14T19:50:21Z
2020-08-14T21:38:46Z
agg with list of non-aggregating functions
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index b37103910afab..81057297cbb71 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -24,6 +24,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) - Fixed regression where :meth:`DataFrame.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) +- Fixed regression in :meth:`~pandas.core.groupby.DataFrameGroupBy.agg` where a list of functions would produce the wrong results if at least one of the functions did not aggregate. (:issue:`35490`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b7280a9f7db3c..b806d9856d20f 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -322,11 +322,14 @@ def _aggregate_multiple_funcs(self, arg): # let higher level handle return results - output = self._wrap_aggregated_output(results) + output = self._wrap_aggregated_output(results, index=None) return self.obj._constructor_expanddim(output, columns=columns) + # TODO: index should not be Optional - see GH 35490 def _wrap_series_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]], index: Index, + self, + output: Mapping[base.OutputKey, Union[Series, np.ndarray]], + index: Optional[Index], ) -> Union[Series, DataFrame]: """ Wraps the output of a SeriesGroupBy operation into the expected result. @@ -335,7 +338,7 @@ def _wrap_series_output( ---------- output : Mapping[base.OutputKey, Union[Series, np.ndarray]] Data to wrap. - index : pd.Index + index : pd.Index or None Index to apply to the output. Returns @@ -363,8 +366,11 @@ def _wrap_series_output( return result + # TODO: Remove index argument, use self.grouper.result_index, see GH 35490 def _wrap_aggregated_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]] + self, + output: Mapping[base.OutputKey, Union[Series, np.ndarray]], + index: Optional[Index], ) -> Union[Series, DataFrame]: """ Wraps the output of a SeriesGroupBy aggregation into the expected result. @@ -383,9 +389,7 @@ def _wrap_aggregated_output( In the vast majority of cases output will only contain one element. The exception is operations that expand dimensions, like ohlc. """ - result = self._wrap_series_output( - output=output, index=self.grouper.result_index - ) + result = self._wrap_series_output(output=output, index=index) return self._reindex_output(result) def _wrap_transformed_output( @@ -1720,7 +1724,9 @@ def _insert_inaxis_grouper_inplace(self, result): result.insert(0, name, lev) def _wrap_aggregated_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]] + self, + output: Mapping[base.OutputKey, Union[Series, np.ndarray]], + index: Optional[Index], ) -> DataFrame: """ Wraps the output of DataFrameGroupBy aggregations into the expected result. 
@@ -1745,8 +1751,7 @@ def _wrap_aggregated_output( self._insert_inaxis_grouper_inplace(result) result = result._consolidate() else: - index = self.grouper.result_index - result.index = index + result.index = self.grouper.result_index if self.axis == 1: result = result.T diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 4597afeeaddbf..0047877ef78ee 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -974,7 +974,9 @@ def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs): return self._wrap_transformed_output(output) - def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]): + def _wrap_aggregated_output( + self, output: Mapping[base.OutputKey, np.ndarray], index: Optional[Index] + ): raise AbstractMethodError(self) def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]): @@ -1049,7 +1051,7 @@ def _cython_agg_general( if len(output) == 0: raise DataError("No numeric types to aggregate") - return self._wrap_aggregated_output(output) + return self._wrap_aggregated_output(output, index=self.grouper.result_index) def _python_agg_general( self, func, *args, engine="cython", engine_kwargs=None, **kwargs @@ -1102,7 +1104,7 @@ def _python_agg_general( output[key] = maybe_cast_result(values[mask], result) - return self._wrap_aggregated_output(output) + return self._wrap_aggregated_output(output, index=self.grouper.result_index) def _concat_objects(self, keys, values, not_indexed_same: bool = False): from pandas.core.reshape.concat import concat @@ -2534,7 +2536,7 @@ def _get_cythonized_result( raise TypeError(error_msg) if aggregate: - return self._wrap_aggregated_output(output) + return self._wrap_aggregated_output(output, index=self.grouper.result_index) else: return self._wrap_transformed_output(output) diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 40a20c8210052..ce9d4b892d775 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -1061,3 +1061,16 @@ def test_groupby_get_by_index(): res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])}) expected = pd.DataFrame(dict(A=["S", "W"], B=[1.0, 2.0])).set_index("A") pd.testing.assert_frame_equal(res, expected) + + +def test_nonagg_agg(): + # GH 35490 - Single/Multiple agg of non-agg function give same results + # TODO: agg should raise for functions that don't aggregate + df = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]}) + g = df.groupby("a") + + result = g.agg(["cumsum"]) + result.columns = result.columns.droplevel(-1) + expected = g.agg("cumsum") + + tm.assert_frame_equal(result, expected)
- [x] closes #35490
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

Reverts to 1.0.5 behavior while maintaining the bugfix that introduced the "regression". Ideally `agg` would instead raise in these scenarios ([ref](https://github.com/pandas-dev/pandas/issues/35490#issuecomment-672981745)), but that would be an API change. Much of this PR should be reverted once that is done; I've marked such places with TODOs.
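A quick reproduction of the restored behavior, adapted from the test added in this diff: a list containing a single non-aggregating function should match calling that function directly.

```python
import pandas as pd
import pandas._testing as tm  # internal testing module, as used in the test suite

df = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]})
g = df.groupby("a")

result = g.agg(["cumsum"])
result.columns = result.columns.droplevel(-1)  # drop the function-name level
expected = g.agg("cumsum")

tm.assert_frame_equal(result, expected)  # passes again with this fix
```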
https://api.github.com/repos/pandas-dev/pandas/pulls/35723
2020-08-14T15:59:40Z
2020-08-14T20:59:10Z
2020-08-14T20:59:10Z
2021-04-23T01:26:59Z
Backport PR #35664 on branch 1.1.x (BUG: Styler cell_ids fails on multiple renders)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 98d67e930ccc0..3f177b29d52b8 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -33,7 +33,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`). +- Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`) (:issue:`35663`). Categorical ^^^^^^^^^^^ diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 584f42a6cab12..3bbb5271bce61 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -390,16 +390,16 @@ def format_attr(pair): "is_visible": (c not in hidden_columns), } # only add an id if the cell has a style + props = [] if self.cell_ids or (r, c) in ctx: row_dict["id"] = "_".join(cs[1:]) + for x in ctx[r, c]: + # have to handle empty styles like [''] + if x.count(":"): + props.append(tuple(x.split(":"))) + else: + props.append(("", "")) row_es.append(row_dict) - props = [] - for x in ctx[r, c]: - # have to handle empty styles like [''] - if x.count(":"): - props.append(tuple(x.split(":"))) - else: - props.append(("", "")) cellstyle_map[tuple(props)].append(f"row{r}_col{c}") body.append(row_es) diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index 3ef5157655e78..6025649e9dbec 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -1684,8 +1684,11 @@ def f(a, b, styler): def test_no_cell_ids(self): # GH 35588 + # GH 35663 df = pd.DataFrame(data=[[0]]) - s = Styler(df, uuid="_", cell_ids=False).render() + styler = Styler(df, uuid="_", cell_ids=False) + styler.render() + s = styler.render() # render twice to ensure ctx is not updated assert s.find('<td class="data row0 col0" >') != -1
Backport PR #35664: BUG: Styler cell_ids fails on multiple renders
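A minimal sketch of the regression being backported, adapted from the test in the diff and using the ``Styler.render()`` API of this era: rendering twice with ``cell_ids=False`` must yield the same HTML, i.e. the first render must not mutate the shared ``ctx``.

```python
import pandas as pd
from pandas.io.formats.style import Styler

df = pd.DataFrame(data=[[0]])
styler = Styler(df, uuid="_", cell_ids=False)
styler.render()
html = styler.render()  # second render must not be affected by the first
assert '<td class="data row0 col0" >' in html
```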
https://api.github.com/repos/pandas-dev/pandas/pulls/35722
2020-08-14T14:37:59Z
2020-08-14T15:39:01Z
2020-08-14T15:39:01Z
2020-08-14T15:39:01Z
Backport PR #35707 on branch 1.1.x (REGR: fix DataFrame.diff with read-only data)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 98d67e930ccc0..22d34bef65aa9 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -16,10 +16,11 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression where :meth:`DataFrame.to_numpy` would raise a ``RuntimeError`` for mixed dtypes when converting to ``str`` (:issue:`35455`) -- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`). +- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`) - Fixed regression where :func:`pandas.testing.assert_series_equal` would raise an error when non-numeric dtypes were passed with ``check_exact=True`` (:issue:`35446`) - Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) +- Fixed regression in :meth:`DataFrame.diff` with read-only data (:issue:`35559`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) - Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 7e90a8cc681ef..0a70afda893cf 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -1200,14 +1200,15 @@ ctypedef fused out_t: @cython.boundscheck(False) @cython.wraparound(False) def diff_2d( - diff_t[:, :] arr, - out_t[:, :] out, + ndarray[diff_t, ndim=2] arr, # TODO(cython 3) update to "const diff_t[:, :] arr" + ndarray[out_t, ndim=2] out, Py_ssize_t periods, int axis, ): cdef: Py_ssize_t i, j, sx, sy, start, stop - bint f_contig = arr.is_f_contig() + bint f_contig = arr.flags.f_contiguous + # bint f_contig = arr.is_f_contig() # TODO(cython 3) # Disable for unsupported dtype combinations, # see https://github.com/cython/cython/issues/2646 diff --git a/pandas/tests/frame/methods/test_diff.py b/pandas/tests/frame/methods/test_diff.py index 45f134a93a23a..0486fb2d588b6 100644 --- a/pandas/tests/frame/methods/test_diff.py +++ b/pandas/tests/frame/methods/test_diff.py @@ -214,3 +214,12 @@ def test_diff_integer_na(self, axis, expected): # Test case for default behaviour of diff result = df.diff(axis=axis) tm.assert_frame_equal(result, expected) + + def test_diff_readonly(self): + # https://github.com/pandas-dev/pandas/issues/35559 + arr = np.random.randn(5, 2) + arr.flags.writeable = False + df = pd.DataFrame(arr) + result = df.diff() + expected = pd.DataFrame(np.array(df)).diff() + tm.assert_frame_equal(result, expected) diff --git a/setup.py b/setup.py index aebbdbf4d1e96..22da02360619e 100755 --- a/setup.py +++ b/setup.py @@ -457,6 +457,9 @@ def run(self): if sys.version_info[:2] == (3, 8): # GH 33239 extra_compile_args.append("-Wno-error=deprecated-declarations") + # https://github.com/pandas-dev/pandas/issues/35559 + extra_compile_args.append("-Wno-error=unreachable-code") + # enable coverage by building cython files by setting the environment variable # "PANDAS_CYTHON_COVERAGE" 
(with a Truthy value) or by running build_ext # with `--with-cython-coverage`enabled
Backport PR #35707: REGR: fix DataFrame.diff with read-only data
https://api.github.com/repos/pandas-dev/pandas/pulls/35721
2020-08-14T14:37:31Z
2020-08-14T15:38:21Z
2020-08-14T15:38:21Z
2020-08-14T15:38:21Z
CI: doctest failure for read_hdf on 1.1.x
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index b67a1c5781d91..5693ecc500e35 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -312,6 +312,10 @@ def read_hdf( mode : {'r', 'r+', 'a'}, default 'r' Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. + errors : str, default 'strict' + Specifies how encoding and decoding errors are to be handled. + See the errors argument for :func:`open` for a full list + of options. where : list, optional A list of Term (or convertible) objects. start : int, optional @@ -324,10 +328,6 @@ def read_hdf( Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. - errors : str, default 'strict' - Specifies how encoding and decoding errors are to be handled. - See the errors argument for :func:`open` for a full list - of options. **kwargs Additional keyword arguments passed to HDFStore.
NOTE: this PR is against the 1.1.x branch (fixed in #35214 on master). xref: https://github.com/pandas-dev/pandas/pull/35699#issuecomment-673392019
https://api.github.com/repos/pandas-dev/pandas/pulls/35718
2020-08-14T11:24:39Z
2020-08-14T12:12:39Z
2020-08-14T12:12:39Z
2020-08-14T12:12:48Z
CLN: remove extant uses of built-in filter function
diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py index 66865e1afb952..3933c8f3d519c 100644 --- a/pandas/_config/localization.py +++ b/pandas/_config/localization.py @@ -88,12 +88,14 @@ def _valid_locales(locales, normalize): valid_locales : list A list of valid locales. """ - if normalize: - normalizer = lambda x: locale.normalize(x.strip()) - else: - normalizer = lambda x: x.strip() - - return list(filter(can_set_locale, map(normalizer, locales))) + return [ + loc + for loc in ( + locale.normalize(loc.strip()) if normalize else loc.strip() + for loc in locales + ) + if can_set_locale(loc) + ] def _default_locale_getter(): diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index fcccc24ed7615..125ecb0d88036 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -167,10 +167,9 @@ def _is_type(t): # partition all AST nodes _all_nodes = frozenset( - filter( - lambda x: isinstance(x, type) and issubclass(x, ast.AST), - (getattr(ast, node) for node in dir(ast)), - ) + node + for node in (getattr(ast, name) for name in dir(ast)) + if isinstance(node, type) and issubclass(node, ast.AST) ) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 2349cb1dcc0c7..01e20f49917ac 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -2012,8 +2012,11 @@ def _sort_labels(uniques: np.ndarray, left, right): def _get_join_keys(llab, rlab, shape, sort: bool): # how many levels can be done without overflow - pred = lambda i: not is_int64_overflow_possible(shape[:i]) - nlev = next(filter(pred, range(len(shape), 0, -1))) + nlev = next( + lev + for lev in range(len(shape), 0, -1) + if not is_int64_overflow_possible(shape[:lev]) + ) # get keys for the first `nlev` levels stride = np.prod(shape[1:nlev], dtype="i8") diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 0d2b351926343..41a28d32521c0 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -754,8 +754,9 @@ def _combine_lines(self, lines) -> str: """ Combines a list of JSON objects into one JSON object. """ - lines = filter(None, map(lambda x: x.strip(), lines)) - return "[" + ",".join(lines) + "]" + return ( + f'[{",".join((line for line in (line.strip() for line in lines) if line))}]' + ) def read(self): """ diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 9dc0e1f71d13b..5d49757ce7d58 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -2161,9 +2161,7 @@ def read(self, nrows=None): if self.usecols is not None: columns = self._filter_usecols(columns) - col_dict = dict( - filter(lambda item: item[0] in columns, col_dict.items()) - ) + col_dict = {k: v for k, v in col_dict.items() if k in columns} return index, columns, col_dict diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 2abc570a04de3..f08e0514a68e1 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -99,22 +99,20 @@ def _ensure_str(name): def _ensure_term(where, scope_level: int): """ - ensure that the where is a Term or a list of Term - this makes sure that we are capturing the scope of variables - that are passed - create the terms here with a frame_level=2 (we are 2 levels down) + Ensure that the where is a Term or a list of Term. 
+ + This makes sure that we are capturing the scope of variables that are + passed create the terms here with a frame_level=2 (we are 2 levels down) """ # only consider list/tuple here as an ndarray is automatically a coordinate # list level = scope_level + 1 if isinstance(where, (list, tuple)): - wlist = [] - for w in filter(lambda x: x is not None, where): - if not maybe_expression(w): - wlist.append(w) - else: - wlist.append(Term(w, scope_level=level)) - where = wlist + where = [ + Term(term, scope_level=level + 1) if maybe_expression(term) else term + for term in where + if term is not None + ] elif maybe_expression(where): where = Term(where, scope_level=level) return where if where is None or len(where) else None diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 08d8d5ca342b7..853ab00853d1b 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -168,7 +168,7 @@ def setup_ops(self): def setup_method(self, method): self.setup_ops() self.setup_data() - self.current_engines = filter(lambda x: x != self.engine, _engines) + self.current_engines = (engine for engine in _engines if engine != self.engine) def teardown_method(self, method): del self.lhses, self.rhses, self.scalar_rhses, self.scalar_lhses @@ -774,11 +774,9 @@ def setup_class(cls): cls.parser = "python" def setup_ops(self): - self.cmp_ops = list( - filter(lambda x: x not in ("in", "not in"), expr._cmp_ops_syms) - ) + self.cmp_ops = [op for op in expr._cmp_ops_syms if op not in ("in", "not in")] self.cmp2_ops = self.cmp_ops[::-1] - self.bin_ops = [s for s in expr._bool_ops_syms if s not in ("and", "or")] + self.bin_ops = [op for op in expr._bool_ops_syms if op not in ("and", "or")] self.special_case_ops = _special_case_arith_ops_syms self.arith_ops = _good_arith_ops self.unary_ops = "+", "-", "~" @@ -1150,9 +1148,9 @@ def eval(self, *args, **kwargs): return pd.eval(*args, **kwargs) def test_simple_arith_ops(self): - ops = self.arith_ops + ops = (op for op in self.arith_ops if op != "//") - for op in filter(lambda x: x != "//", ops): + for op in ops: ex = f"1 {op} 1" ex2 = f"x {op} 1" ex3 = f"1 {op} (x + 1)" @@ -1637,8 +1635,11 @@ def setup_class(cls): super().setup_class() cls.engine = "numexpr" cls.parser = "python" - cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms - cls.arith_ops = filter(lambda x: x not in ("in", "not in"), cls.arith_ops) + cls.arith_ops = [ + op + for op in expr._arith_ops_syms + expr._cmp_ops_syms + if op not in ("in", "not in") + ] def test_check_many_exprs(self): a = 1 # noqa @@ -1726,8 +1727,11 @@ class TestOperationsPythonPython(TestOperationsNumExprPython): def setup_class(cls): super().setup_class() cls.engine = cls.parser = "python" - cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms - cls.arith_ops = filter(lambda x: x not in ("in", "not in"), cls.arith_ops) + cls.arith_ops = [ + op + for op in expr._arith_ops_syms + expr._cmp_ops_syms + if op not in ("in", "not in") + ] class TestOperationsPythonPandas(TestOperationsNumExprPandas):
No internet for a good part of yesterday, so I had some time to experiment with code cleanups.
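The changes all follow the same mechanical pattern; a toy before/after illustration (the operator list here is made up for the example):

```python
ops = ["+", "-", "*", "in", "not in"]

# before: filter + lambda
legacy = list(filter(lambda x: x not in ("in", "not in"), ops))

# after: a plain comprehension, no extra function call per element
idiomatic = [op for op in ops if op not in ("in", "not in")]

assert legacy == idiomatic == ["+", "-", "*"]
```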
https://api.github.com/repos/pandas-dev/pandas/pulls/35717
2020-08-14T11:01:42Z
2020-08-14T12:32:01Z
2020-08-14T12:32:01Z
2020-09-03T12:15:58Z
Backport PR #35673 on branch 1.1.x (REGR: DataFrame.reset_index() on empty DataFrame with MI and datetime level)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index b37103910afab..98d67e930ccc0 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) +- Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) - Fixed regression where :meth:`DataFrame.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b7286ce86d24e..041121d60ad33 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4778,7 +4778,7 @@ def _maybe_casted_values(index, labels=None): # we can have situations where the whole mask is -1, # meaning there is nothing found in labels, so make all nan's - if mask.all(): + if mask.size > 0 and mask.all(): dtype = index.dtype fill_value = na_value_for_dtype(dtype) values = construct_1d_arraylike_from_scalar( diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index da4bfa9be4881..b88ef0e6691cb 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -318,3 +318,33 @@ def test_reset_index_dtypes_on_empty_frame_with_multiindex(array, dtype): result = DataFrame(index=idx)[:0].reset_index().dtypes expected = Series({"level_0": np.int64, "level_1": np.float64, "level_2": dtype}) tm.assert_series_equal(result, expected) + + +def test_reset_index_empty_frame_with_datetime64_multiindex(): + # https://github.com/pandas-dev/pandas/issues/35606 + idx = MultiIndex( + levels=[[pd.Timestamp("2020-07-20 00:00:00")], [3, 4]], + codes=[[], []], + names=["a", "b"], + ) + df = DataFrame(index=idx, columns=["c", "d"]) + result = df.reset_index() + expected = DataFrame( + columns=list("abcd"), index=RangeIndex(start=0, stop=0, step=1) + ) + expected["a"] = expected["a"].astype("datetime64[ns]") + expected["b"] = expected["b"].astype("int64") + tm.assert_frame_equal(result, expected) + + +def test_reset_index_empty_frame_with_datetime64_multiindex_from_groupby(): + # https://github.com/pandas-dev/pandas/issues/35657 + df = DataFrame(dict(c1=[10.0], c2=["a"], c3=pd.to_datetime("2020-01-01"))) + df = df.head(0).groupby(["c2", "c3"])[["c1"]].sum() + result = df.reset_index() + expected = DataFrame( + columns=["c2", "c3", "c1"], index=RangeIndex(start=0, stop=0, step=1) + ) + expected["c3"] = expected["c3"].astype("datetime64[ns]") + expected["c1"] = expected["c1"].astype("float64") + tm.assert_frame_equal(result, expected)
Backport PR #35673: REGR: DataFrame.reset_index() on empty DataFrame with MI and datetime level
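For context, a sketch of the regression fixed by the backported change, adapted from the test in the diff:

```python
import pandas as pd

idx = pd.MultiIndex(
    levels=[[pd.Timestamp("2020-07-20")], [3, 4]],
    codes=[[], []],  # empty frame: no rows
    names=["a", "b"],
)
df = pd.DataFrame(index=idx, columns=["c", "d"])
result = df.reset_index()  # raised ValueError before the fix
print(result.dtypes)       # "a" stays datetime64[ns], "b" stays int64
```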
https://api.github.com/repos/pandas-dev/pandas/pulls/35716
2020-08-14T10:17:29Z
2020-08-14T11:15:48Z
2020-08-14T11:15:48Z
2020-08-14T11:15:48Z
PERF: RangeIndex.format performance
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 3cd920158f774..0f0f009307c75 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -540,7 +540,7 @@ with :attr:`numpy.nan` in the case of an empty :class:`DataFrame` (:issue:`26397 .. ipython:: python - df.describe() + df.describe() ``__str__`` methods now call ``__repr__`` rather than vice versa ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index af61354470a71..7739a483e3d38 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -15,8 +15,9 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) +- Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - -- + .. --------------------------------------------------------------------------- @@ -26,7 +27,7 @@ Bug fixes ~~~~~~~~~ - Bug in :meth:`DataFrame.eval` with ``object`` dtype column binary operations (:issue:`35794`) - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) -- +- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ceb109fdf6d7a..b1e5d5627e3f6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -933,7 +933,9 @@ def format( return self._format_with_header(header, na_rep=na_rep) - def _format_with_header(self, header, na_rep="NaN") -> List[str_t]: + def _format_with_header( + self, header: List[str_t], na_rep: str_t = "NaN" + ) -> List[str_t]: from pandas.io.formats.format import format_array values = self._values diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 4990e6a8e20e9..cbb30763797d1 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -347,7 +347,7 @@ def _format_attrs(self): attrs.append(("length", len(self))) return attrs - def _format_with_header(self, header, na_rep="NaN") -> List[str]: + def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]: from pandas.io.formats.printing import pprint_thing result = [ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 9d00f50a65a06..0e8d7c1b866b8 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -354,15 +354,20 @@ def format( """ header = [] if name: - fmt_name = ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) - header.append(fmt_name) + header.append( + ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) + if self.name is not None + else "" + ) if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, na_rep=na_rep, date_format=date_format) - def _format_with_header(self, header, na_rep="NaT", date_format=None) -> List[str]: + def _format_with_header( + self, header: List[str], na_rep: str = "NaT", date_format: Optional[str] = None + ) -> List[str]: return header + list( self._format_native_types(na_rep=na_rep, date_format=date_format) ) diff --git 
a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index e8d0a44324cc5..9281f8017761d 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -948,7 +948,7 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): # Rendering Methods # __repr__ associated methods are based on MultiIndex - def _format_with_header(self, header, na_rep="NaN") -> List[str]: + def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]: return header + list(self._format_native_types(na_rep=na_rep)) def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs): diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index c5572a9de7fa5..b85e2d3947cb1 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -1,7 +1,7 @@ from datetime import timedelta import operator from sys import getsizeof -from typing import Any +from typing import Any, List import warnings import numpy as np @@ -187,6 +187,15 @@ def _format_data(self, name=None): # we are formatting thru the attributes return None + def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]: + if not len(self._range): + return header + first_val_str = str(self._range[0]) + last_val_str = str(self._range[-1]) + max_length = max(len(first_val_str), len(last_val_str)) + + return header + [f"{x:<{max_length}}" for x in self._range] + # -------------------------------------------------------------------- _deprecation_message = ( "RangeIndex.{} is deprecated and will be " diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index e4d0b46f7c716..e95e7267f17ec 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -1,5 +1,5 @@ import gc -from typing import Optional, Type +from typing import Type import numpy as np import pytest @@ -33,7 +33,7 @@ class Base: """ base class for index sub-class tests """ - _holder: Optional[Type[Index]] = None + _holder: Type[Index] _compat_props = ["shape", "ndim", "size", "nbytes"] def create_index(self) -> Index: @@ -686,6 +686,12 @@ def test_format(self): expected = [str(x) for x in idx] assert idx.format() == expected + def test_format_empty(self): + # GH35712 + empty_idx = self._holder([]) + assert empty_idx.format() == [] + assert empty_idx.format(name=True) == [""] + def test_hasnans_isnans(self, index): # GH 11343, added tests for hasnans / isnans if isinstance(index, MultiIndex): diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 15a88ab3819ce..085d41aaa5b76 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -536,6 +536,12 @@ def test_contains_raise_error_if_period_index_is_in_multi_index(self, msg, key): with pytest.raises(KeyError, match=msg): df.loc[key] + def test_format_empty(self): + # GH35712 + empty_idx = self._holder([], freq="A") + assert empty_idx.format() == [] + assert empty_idx.format(name=True) == [""] + def test_maybe_convert_timedelta(): pi = PeriodIndex(["2000", "2001"], freq="D") diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index c4c242746e92c..172cd4a106ac1 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -171,8 +171,14 @@ def test_cache(self): pass assert idx._cache == {} + idx.format() + assert idx._cache == {} + df = pd.DataFrame({"a": range(10)}, index=idx) + str(df) + 
assert idx._cache == {} + df.loc[50] assert idx._cache == {} @@ -515,3 +521,9 @@ def test_engineless_lookup(self): idx.get_loc("a") assert "_engine" not in idx._cache + + def test_format_empty(self): + # GH35712 + empty_idx = self._holder(0) + assert empty_idx.format() == [] + assert empty_idx.format(name=True) == [""]
#35440 dropped ``RangeIndex._format_with_header``, which was not needed for correctness but was needed to avoid materializing an internal ndarray. This rectifies that and gives some performance improvements:

```python
>>> idx = pd.RangeIndex(1_000_000)
>>> %timeit idx.format()
4.6 s ± 102 ms per loop   # pandas v1.1.0
1.67 s ± 19.6 ms per loop # master
595 ms ± 2.35 ms per loop # this PR
```

Also, the ``_data`` attribute is no longer accessed, so this PR gives a performance & memory improvement in some use cases compared to master:

```python
>>> idx = pd.RangeIndex(1_000_000)
>>> idx.format()
>>> "_data" in idx._cache
False  # pandas v1.1.0
True   # master
False  # this PR
```
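The key observation behind the new ``_format_with_header`` is that a range is monotonic, so its widest rendered value sits at one of the endpoints. A small standalone sketch of that padding logic (variable names here are illustrative):

```python
rng = range(0, 1000, 7)

# One max() over two strings fixes the pad width for every element,
# so no ndarray of values ever needs to be materialized.
max_length = max(len(str(rng[0])), len(str(rng[-1])))
formatted = [f"{x:<{max_length}}" for x in rng]
assert all(len(s) == max_length for s in formatted)
```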
https://api.github.com/repos/pandas-dev/pandas/pulls/35712
2020-08-13T18:39:42Z
2020-08-26T11:12:56Z
2020-08-26T11:12:55Z
2020-08-26T13:29:51Z
add web/ directory to isort checks
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 816bb23865c04..852f66763683b 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -121,7 +121,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then # Imports - Check formatting using isort see setup.cfg for settings MSG='Check import format using isort' ; echo $MSG - ISORT_CMD="isort --quiet --check-only pandas asv_bench scripts" + ISORT_CMD="isort --quiet --check-only pandas asv_bench scripts web" if [[ "$GITHUB_ACTIONS" == "true" ]]; then eval $ISORT_CMD | awk '{print "##[error]" $0}'; RET=$(($RET + ${PIPESTATUS[0]})) else
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/35709
2020-08-13T16:04:03Z
2020-08-13T17:52:33Z
2020-08-13T17:52:33Z
2020-08-13T17:52:37Z
Reorganize imports to be compliant with isort (and conventional)
diff --git a/web/pandas_web.py b/web/pandas_web.py index e62deaa8cdc7f..7dd63175e69ac 100755 --- a/web/pandas_web.py +++ b/web/pandas_web.py @@ -34,13 +34,12 @@ import time import typing +import feedparser import jinja2 +import markdown import requests import yaml -import feedparser -import markdown - class Preprocessors: """
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
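For reference, the resulting import block of `web/pandas_web.py` (taken from the diff) follows the isort convention: standard library first, then third-party packages, each group alphabetized and separated by a blank line. The snippet assumes the third-party packages are installed.

```python
# standard library
import time
import typing

# third-party
import feedparser
import jinja2
import markdown
import requests
import yaml
```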
https://api.github.com/repos/pandas-dev/pandas/pulls/35708
2020-08-13T14:59:01Z
2020-08-13T16:21:47Z
2020-08-13T16:21:47Z
2020-08-13T16:21:47Z
REGR: fix DataFrame.diff with read-only data
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 3f177b29d52b8..85e2a335c55c6 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -16,10 +16,11 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression where :meth:`DataFrame.to_numpy` would raise a ``RuntimeError`` for mixed dtypes when converting to ``str`` (:issue:`35455`) -- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`). +- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`) - Fixed regression where :func:`pandas.testing.assert_series_equal` would raise an error when non-numeric dtypes were passed with ``check_exact=True`` (:issue:`35446`) - Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) +- Fixed regression in :meth:`DataFrame.diff` with read-only data (:issue:`35559`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) - Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 7e90a8cc681ef..0a70afda893cf 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -1200,14 +1200,15 @@ ctypedef fused out_t: @cython.boundscheck(False) @cython.wraparound(False) def diff_2d( - diff_t[:, :] arr, - out_t[:, :] out, + ndarray[diff_t, ndim=2] arr, # TODO(cython 3) update to "const diff_t[:, :] arr" + ndarray[out_t, ndim=2] out, Py_ssize_t periods, int axis, ): cdef: Py_ssize_t i, j, sx, sy, start, stop - bint f_contig = arr.is_f_contig() + bint f_contig = arr.flags.f_contiguous + # bint f_contig = arr.is_f_contig() # TODO(cython 3) # Disable for unsupported dtype combinations, # see https://github.com/cython/cython/issues/2646 diff --git a/pandas/tests/frame/methods/test_diff.py b/pandas/tests/frame/methods/test_diff.py index 45f134a93a23a..0486fb2d588b6 100644 --- a/pandas/tests/frame/methods/test_diff.py +++ b/pandas/tests/frame/methods/test_diff.py @@ -214,3 +214,12 @@ def test_diff_integer_na(self, axis, expected): # Test case for default behaviour of diff result = df.diff(axis=axis) tm.assert_frame_equal(result, expected) + + def test_diff_readonly(self): + # https://github.com/pandas-dev/pandas/issues/35559 + arr = np.random.randn(5, 2) + arr.flags.writeable = False + df = pd.DataFrame(arr) + result = df.diff() + expected = pd.DataFrame(np.array(df)).diff() + tm.assert_frame_equal(result, expected) diff --git a/setup.py b/setup.py index 43d19d525876b..f6f0cd9aabc0e 100755 --- a/setup.py +++ b/setup.py @@ -456,6 +456,9 @@ def run(self): if sys.version_info[:2] == (3, 8): # GH 33239 extra_compile_args.append("-Wno-error=deprecated-declarations") + # https://github.com/pandas-dev/pandas/issues/35559 + extra_compile_args.append("-Wno-error=unreachable-code") + # enable coverage by building cython files by setting the environment variable # "PANDAS_CYTHON_COVERAGE" 
(with a Truthy value) or by running build_ext # with `--with-cython-coverage`enabled
Closes #35559
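A sketch reproducing the regression, adapted from the test added in the diff. The typed memoryview signature in the old `diff_2d` required a writable buffer, so read-only input raised before this fix:

```python
import numpy as np
import pandas as pd
import pandas._testing as tm  # internal testing module, as used in the test

arr = np.random.randn(5, 2)
arr.flags.writeable = False  # simulate read-only data (e.g. a memory-mapped array)

df = pd.DataFrame(arr)
result = df.diff()  # raised on read-only input before this fix
expected = pd.DataFrame(np.array(df)).diff()
tm.assert_frame_equal(result, expected)
```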
https://api.github.com/repos/pandas-dev/pandas/pulls/35707
2020-08-13T13:49:48Z
2020-08-14T14:35:22Z
2020-08-14T14:35:21Z
2020-08-14T14:42:58Z
ENH: add na_action to DataFrame.applymap
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 2afa1f1a6199e..25f845a8bf012 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -100,8 +100,8 @@ For example: Other enhancements ^^^^^^^^^^^^^^^^^^ - - Added :meth:`~DataFrame.set_flags` for setting table-wide flags on a ``Series`` or ``DataFrame`` (:issue:`28394`) +- :meth:`DataFrame.applymap` now supports ``na_action`` (:issue:`23803`) - :class:`Index` with object dtype supports division and multiplication (:issue:`34160`) - :meth:`DataFrame.explode` and :meth:`Series.explode` now support exploding of sets (:issue:`35614`) - diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index eadfcefaac73d..7464fafee2b94 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2377,7 +2377,7 @@ def map_infer_mask(ndarray arr, object f, const uint8_t[:] mask, bint convert=Tr @cython.boundscheck(False) @cython.wraparound(False) -def map_infer(ndarray arr, object f, bint convert=True): +def map_infer(ndarray arr, object f, bint convert=True, bint ignore_na=False): """ Substitute for np.vectorize with pandas-friendly dtype inference. @@ -2385,6 +2385,9 @@ def map_infer(ndarray arr, object f, bint convert=True): ---------- arr : ndarray f : function + convert : bint + ignore_na : bint + If True, NA values will not have f applied Returns ------- @@ -2398,6 +2401,9 @@ def map_infer(ndarray arr, object f, bint convert=True): n = len(arr) result = np.empty(n, dtype=object) for i in range(n): + if ignore_na and checknull(arr[i]): + result[i] = arr[i] + continue val = f(arr[i]) if cnp.PyArray_IsZeroDim(val): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e1a889bf79d95..647857c8bab67 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7617,7 +7617,7 @@ def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds): ) return op.get_result() - def applymap(self, func) -> DataFrame: + def applymap(self, func, na_action: Optional[str] = None) -> DataFrame: """ Apply a function to a Dataframe elementwise. @@ -7628,6 +7628,10 @@ def applymap(self, func) -> DataFrame: ---------- func : callable Python function, returns a single value from a single value. + na_action : {None, 'ignore'}, default None + If ‘ignore’, propagate NaN values, without passing them to func. + + .. versionadded:: 1.2 Returns ------- @@ -7651,6 +7655,15 @@ def applymap(self, func) -> DataFrame: 0 3 4 1 5 5 + Like Series.map, NA values can be ignored: + + >>> df_copy = df.copy() + >>> df_copy.iloc[0, 0] = pd.NA + >>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore') + 0 1 + 0 <NA> 4 + 1 5 5 + Note that a vectorized version of `func` often exists, which will be much faster. You could square each number elementwise. @@ -7666,11 +7679,17 @@ def applymap(self, func) -> DataFrame: 0 1.000000 4.494400 1 11.262736 20.857489 """ + if na_action not in {"ignore", None}: + raise ValueError( + f"na_action must be 'ignore' or None. 
Got {repr(na_action)}" + ) + ignore_na = na_action == "ignore" + # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if x.empty: - return lib.map_infer(x, func) - return lib.map_infer(x.astype(object)._values, func) + return lib.map_infer(x, func, ignore_na=ignore_na) + return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na) return self.apply(infer) diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py index bc09501583e2c..1662f9e2fff56 100644 --- a/pandas/tests/frame/apply/test_frame_apply.py +++ b/pandas/tests/frame/apply/test_frame_apply.py @@ -630,6 +630,22 @@ def test_applymap(self, float_frame): result = frame.applymap(func) tm.assert_frame_equal(result, frame) + def test_applymap_na_ignore(self, float_frame): + # GH 23803 + strlen_frame = float_frame.applymap(lambda x: len(str(x))) + float_frame_with_na = float_frame.copy() + mask = np.random.randint(0, 2, size=float_frame.shape, dtype=bool) + float_frame_with_na[mask] = pd.NA + strlen_frame_na_ignore = float_frame_with_na.applymap( + lambda x: len(str(x)), na_action="ignore" + ) + strlen_frame_with_na = strlen_frame.copy() + strlen_frame_with_na[mask] = pd.NA + tm.assert_frame_equal(strlen_frame_na_ignore, strlen_frame_with_na) + + with pytest.raises(ValueError, match="na_action must be .*Got 'abc'"): + float_frame_with_na.applymap(lambda x: len(str(x)), na_action="abc") + def test_applymap_box_timestamps(self): # GH 2689, GH 2627 ser = pd.Series(date_range("1/1/2000", periods=10))
For symmetry with Series.map.

- [x] closes #23803
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
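A usage sketch mirroring the docstring example added in the diff: with ``na_action='ignore'``, NA values are propagated rather than passed to ``func``.

```python
import pandas as pd

df = pd.DataFrame([[pd.NA, 2.12], [3.356, 4.567]])
result = df.applymap(lambda x: len(str(x)), na_action="ignore")
# The NA cell is propagated untouched; func is applied everywhere else:
#       0  1
# 0  <NA>  4
# 1     5  5
```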
https://api.github.com/repos/pandas-dev/pandas/pulls/35704
2020-08-13T11:11:53Z
2020-09-11T17:40:12Z
2020-09-11T17:40:11Z
2020-09-11T17:40:18Z
Backport PR #35654 on branch 1.1.x (BUG: GH-35558 merge_asof tolerance error)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index cdc244ca193b4..b37103910afab 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) +- Fixed regression where :meth:`DataFrame.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 27b331babe692..2349cb1dcc0c7 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1667,7 +1667,7 @@ def _get_merge_keys(self): msg = ( f"incompatible tolerance {self.tolerance}, must be compat " - f"with type {repr(lk.dtype)}" + f"with type {repr(lt.dtype)}" ) if needs_i8_conversion(lt): diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 9b09f0033715d..895de2b748c34 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -1339,3 +1339,25 @@ def test_merge_index_column_tz(self): index=pd.Index([0, 1, 2, 3, 4]), ) tm.assert_frame_equal(result, expected) + + def test_left_index_right_index_tolerance(self): + # https://github.com/pandas-dev/pandas/issues/35558 + dr1 = pd.date_range( + start="1/1/2020", end="1/20/2020", freq="2D" + ) + pd.Timedelta(seconds=0.4) + dr2 = pd.date_range(start="1/1/2020", end="2/1/2020") + + df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1)) + df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2)) + + expected = pd.DataFrame( + {"val1": "foo", "val2": "bar"}, index=pd.DatetimeIndex(dr1) + ) + result = pd.merge_asof( + df1, + df2, + left_index=True, + right_index=True, + tolerance=pd.Timedelta(seconds=0.5), + ) + tm.assert_frame_equal(result, expected)
Backport PR #35654: BUG: GH-35558 merge_asof tolerance error
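The call pattern that used to raise ``UnboundLocalError``, adapted from the test in the backported diff: ``left_index``, ``right_index`` and ``tolerance`` all set at once.

```python
import pandas as pd

dr1 = pd.date_range("1/1/2020", "1/20/2020", freq="2D") + pd.Timedelta(seconds=0.4)
dr2 = pd.date_range("1/1/2020", "2/1/2020")

df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1))
df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2))

result = pd.merge_asof(
    df1, df2, left_index=True, right_index=True, tolerance=pd.Timedelta(seconds=0.5)
)
```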
https://api.github.com/repos/pandas-dev/pandas/pulls/35702
2020-08-13T10:17:28Z
2020-08-13T11:04:50Z
2020-08-13T11:04:50Z
2020-08-13T11:04:51Z
Backport PR #35647 on branch 1.1.x (BUG: Support custom BaseIndexers in groupby.rolling)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 415f9e508feb8..cdc244ca193b4 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) +- Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index bc36bdca982e8..7cbe34cdebf9f 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -1,6 +1,6 @@ """Indexer objects for computing start/end window bounds for rolling operations""" from datetime import timedelta -from typing import Dict, Optional, Tuple, Type, Union +from typing import Dict, Optional, Tuple, Type import numpy as np @@ -265,7 +265,8 @@ def __init__( index_array: Optional[np.ndarray], window_size: int, groupby_indicies: Dict, - rolling_indexer: Union[Type[FixedWindowIndexer], Type[VariableWindowIndexer]], + rolling_indexer: Type[BaseIndexer], + indexer_kwargs: Optional[Dict], **kwargs, ): """ @@ -276,7 +277,10 @@ def __init__( """ self.groupby_indicies = groupby_indicies self.rolling_indexer = rolling_indexer - super().__init__(index_array, window_size, **kwargs) + self.indexer_kwargs = indexer_kwargs or {} + super().__init__( + index_array, self.indexer_kwargs.pop("window_size", window_size), **kwargs + ) @Appender(get_window_bounds_doc) def get_window_bounds( @@ -298,7 +302,9 @@ def get_window_bounds( else: index_array = self.index_array indexer = self.rolling_indexer( - index_array=index_array, window_size=self.window_size, + index_array=index_array, + window_size=self.window_size, + **self.indexer_kwargs, ) start, end = indexer.get_window_bounds( len(indicies), min_periods, center, closed diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index ea03a7f2f8162..d727881f8285a 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -145,7 +145,7 @@ class _Window(PandasObject, ShallowMixin, SelectionMixin): def __init__( self, - obj, + obj: FrameOrSeries, window=None, min_periods: Optional[int] = None, center: bool = False, @@ -2255,10 +2255,16 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: ------- GroupbyRollingIndexer """ - rolling_indexer: Union[Type[FixedWindowIndexer], Type[VariableWindowIndexer]] - if self.is_freq_type: + rolling_indexer: Type[BaseIndexer] + indexer_kwargs: Optional[Dict] = None + index_array = self.obj.index.asi8 + if isinstance(self.window, BaseIndexer): + rolling_indexer = type(self.window) + indexer_kwargs = self.window.__dict__ + # We'll be using the index of each group later + indexer_kwargs.pop("index_array", None) + elif self.is_freq_type: rolling_indexer = VariableWindowIndexer - index_array = self.obj.index.asi8 else: rolling_indexer = FixedWindowIndexer index_array = None @@ -2267,6 +2273,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: window_size=window, groupby_indicies=self._groupby.indices, rolling_indexer=rolling_indexer, + 
indexer_kwargs=indexer_kwargs, ) return window_indexer diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index e1dcac06c39cc..a9590c7e1233a 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -305,6 +305,29 @@ def test_groupby_subselect_rolling(self): ) tm.assert_series_equal(result, expected) + def test_groupby_rolling_custom_indexer(self): + # GH 35557 + class SimpleIndexer(pd.api.indexers.BaseIndexer): + def get_window_bounds( + self, num_values=0, min_periods=None, center=None, closed=None + ): + min_periods = self.window_size if min_periods is None else 0 + end = np.arange(num_values, dtype=np.int64) + 1 + start = end.copy() - self.window_size + start[start < 0] = min_periods + return start, end + + df = pd.DataFrame( + {"a": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5 + ) + result = ( + df.groupby(df.index) + .rolling(SimpleIndexer(window_size=3), min_periods=1) + .sum() + ) + expected = df.groupby(df.index).rolling(window=3, min_periods=1).sum() + tm.assert_frame_equal(result, expected) + def test_groupby_rolling_subset_with_closed(self): # GH 35549 df = pd.DataFrame(
Backport PR #35647: BUG: Support custom BaseIndexers in groupby.rolling
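The newly supported pattern, adapted from the test in the diff: a custom ``BaseIndexer`` subclass passed to ``.groupby(...).rolling(...)`` instead of being silently ignored.

```python
import numpy as np
import pandas as pd

class SimpleIndexer(pd.api.indexers.BaseIndexer):
    # a toy trailing-window indexer, as in the test
    def get_window_bounds(self, num_values=0, min_periods=None, center=None, closed=None):
        min_periods = self.window_size if min_periods is None else 0
        end = np.arange(num_values, dtype=np.int64) + 1
        start = end.copy() - self.window_size
        start[start < 0] = min_periods
        return start, end

df = pd.DataFrame(
    {"a": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5
)
result = df.groupby(df.index).rolling(SimpleIndexer(window_size=3), min_periods=1).sum()
# matches df.groupby(df.index).rolling(window=3, min_periods=1).sum()
```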
https://api.github.com/repos/pandas-dev/pandas/pulls/35699
2020-08-13T06:14:44Z
2020-08-13T10:14:51Z
2020-08-13T10:14:51Z
2020-08-13T10:14:52Z
REGR: Don't ignore compiled patterns in replace
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 565b4a014bd0c..d93cd6edb983a 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -26,6 +26,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) - Fixed regression where :meth:`DataFrame.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) +- Fixed regression in :meth:`DataFrame.replace` and :meth:`Series.replace` where compiled regular expressions would be ignored during replacement (:issue:`35680`) - Fixed regression in :meth:`~pandas.core.groupby.DataFrameGroupBy.agg` where a list of functions would produce the wrong results if at least one of the functions did not aggregate. (:issue:`35490`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 371b721f08b27..5a215c4cd5fa3 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -2,7 +2,17 @@ import itertools import operator import re -from typing import DefaultDict, Dict, List, Optional, Sequence, Tuple, TypeVar, Union +from typing import ( + DefaultDict, + Dict, + List, + Optional, + Pattern, + Sequence, + Tuple, + TypeVar, + Union, +) import warnings import numpy as np @@ -1907,7 +1917,10 @@ def _merge_blocks( def _compare_or_regex_search( - a: ArrayLike, b: Scalar, regex: bool = False, mask: Optional[ArrayLike] = None + a: ArrayLike, + b: Union[Scalar, Pattern], + regex: bool = False, + mask: Optional[ArrayLike] = None, ) -> Union[ArrayLike, bool]: """ Compare two array_like inputs of the same shape or two scalar values @@ -1918,7 +1931,7 @@ def _compare_or_regex_search( Parameters ---------- a : array_like - b : scalar + b : scalar or regex pattern regex : bool, default False mask : array_like or None (default) @@ -1928,7 +1941,7 @@ def _compare_or_regex_search( """ def _check_comparison_types( - result: Union[ArrayLike, bool], a: ArrayLike, b: Scalar, + result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern], ): """ Raises an error if the two arrays (a,b) cannot be compared. 
@@ -1949,7 +1962,7 @@ def _check_comparison_types( else: op = np.vectorize( lambda x: bool(re.search(b, x)) - if isinstance(x, str) and isinstance(b, str) + if isinstance(x, str) and isinstance(b, (str, Pattern)) else False ) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index a3f056dbf9648..8603bff0587b6 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1573,3 +1573,11 @@ def test_replace_dict_category_type(self, input_category_df, expected_category_d result = input_df.replace({"a": "z", "obj1": "obj9", "cat1": "catX"}) tm.assert_frame_equal(result, expected) + + def test_replace_with_compiled_regex(self): + # https://github.com/pandas-dev/pandas/issues/35680 + df = pd.DataFrame(["a", "b", "c"]) + regex = re.compile("^a$") + result = df.replace({regex: "z"}, regex=True) + expected = pd.DataFrame(["z", "b", "c"]) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index 11802c59a29da..f78a28c66e946 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -1,3 +1,5 @@ +import re + import numpy as np import pytest @@ -415,3 +417,11 @@ def test_replace_extension_other(self): # https://github.com/pandas-dev/pandas/issues/34530 ser = pd.Series(pd.array([1, 2, 3], dtype="Int64")) ser.replace("", "") # no exception + + def test_replace_with_compiled_regex(self): + # https://github.com/pandas-dev/pandas/issues/35680 + s = pd.Series(["a", "b", "c"]) + regex = re.compile("^a$") + result = s.replace({regex: "z"}, regex=True) + expected = pd.Series(["z", "b", "c"]) + tm.assert_series_equal(result, expected)
- [x] closes #35680
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
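A usage sketch from the tests added here: a pre-compiled pattern passed in a ``replace`` mapping is honored when ``regex=True``.

```python
import re

import pandas as pd

s = pd.Series(["a", "b", "c"])
pattern = re.compile("^a$")
result = s.replace({pattern: "z"}, regex=True)
# result: Series(["z", "b", "c"]); the compiled pattern is no longer ignored
```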
https://api.github.com/repos/pandas-dev/pandas/pulls/35697
2020-08-13T02:19:55Z
2020-08-17T10:59:09Z
2020-08-17T10:59:09Z
2020-08-17T13:03:21Z
REF: implement reset_dropped_locs
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b806d9856d20f..1f0cdbd07560f 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1111,6 +1111,7 @@ def blk_func(block: "Block") -> List["Block"]: assert len(locs) == result.shape[1] for i, loc in enumerate(locs): agg_block = result.iloc[:, [i]]._mgr.blocks[0] + agg_block.mgr_locs = [loc] new_blocks.append(agg_block) else: result = result._mgr.blocks[0].values @@ -1124,7 +1125,6 @@ def blk_func(block: "Block") -> List["Block"]: return new_blocks skipped: List[int] = [] - new_items: List[np.ndarray] = [] for i, block in enumerate(data.blocks): try: nbs = blk_func(block) @@ -1136,33 +1136,13 @@ def blk_func(block: "Block") -> List["Block"]: deleted_items.append(block.mgr_locs.as_array) else: agg_blocks.extend(nbs) - new_items.append(block.mgr_locs.as_array) if not agg_blocks: raise DataError("No numeric types to aggregate") # reset the locs in the blocks to correspond to our # current ordering - indexer = np.concatenate(new_items) - agg_items = data.items.take(np.sort(indexer)) - - if deleted_items: - - # we need to adjust the indexer to account for the - # items we have removed - # really should be done in internals :< - - deleted = np.concatenate(deleted_items) - ai = np.arange(len(data)) - mask = np.zeros(len(data)) - mask[deleted] = 1 - indexer = (ai - mask.cumsum())[indexer] - - offset = 0 - for blk in agg_blocks: - loc = len(blk.mgr_locs) - blk.mgr_locs = indexer[offset : (offset + loc)] - offset += loc + agg_items = data.reset_dropped_locs(agg_blocks, skipped) return agg_blocks, agg_items diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 5a215c4cd5fa3..f05d4cf1c4be6 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1504,6 +1504,38 @@ def unstack(self, unstacker, fill_value) -> "BlockManager": bm = BlockManager(new_blocks, [new_columns, new_index]) return bm + def reset_dropped_locs(self, blocks: List[Block], skipped: List[int]) -> Index: + """ + Decrement the mgr_locs of the given blocks with `skipped` removed. + + Notes + ----- + Alters each block's mgr_locs inplace. + """ + ncols = len(self) + + new_locs = [blk.mgr_locs.as_array for blk in blocks] + indexer = np.concatenate(new_locs) + + new_items = self.items.take(np.sort(indexer)) + + if skipped: + # we need to adjust the indexer to account for the + # items we have removed + deleted_items = [self.blocks[i].mgr_locs.as_array for i in skipped] + deleted = np.concatenate(deleted_items) + ai = np.arange(ncols) + mask = np.zeros(ncols) + mask[deleted] = 1 + indexer = (ai - mask.cumsum())[indexer] + + offset = 0 + for blk in blocks: + loc = len(blk.mgr_locs) + blk.mgr_locs = indexer[offset : (offset + loc)] + offset += loc + return new_items + class SingleBlockManager(BlockManager): """ manage a single block with """
This lets us get rid of the comment `# really should be done in internals :<`. Making the window.rolling usage use this helper method is the next step.
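For readers unfamiliar with the cumsum-mask trick the helper inherits, here is a standalone sketch of how an indexer is shifted to account for deleted positions (the column counts are made up for illustration):

```python
import numpy as np

# suppose 6 original columns, columns 1 and 4 were dropped, and the
# surviving blocks reference original positions [0, 2, 3, 5]
ncols = 6
deleted = np.array([1, 4])
indexer = np.array([0, 2, 3, 5])

ai = np.arange(ncols)
mask = np.zeros(ncols)
mask[deleted] = 1
# each original position is decremented by the number of deletions before it
new_indexer = (ai - mask.cumsum())[indexer]
# new_indexer -> array([0., 1., 2., 3.])
```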
https://api.github.com/repos/pandas-dev/pandas/pulls/35696
2020-08-13T01:32:19Z
2020-08-17T18:28:28Z
2020-08-17T18:28:28Z
2020-08-17T19:45:25Z
CI: avoid file leaks in sas_xport tests
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index 291c9d1ee7f0c..fffdebda8c87a 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -6,7 +6,7 @@ from pandas._typing import FilePathOrBuffer, Label -from pandas.io.common import stringify_path +from pandas.io.common import get_filepath_or_buffer, stringify_path if TYPE_CHECKING: from pandas import DataFrame # noqa: F401 @@ -109,6 +109,10 @@ def read_sas( else: raise ValueError("unable to infer format of SAS file") + filepath_or_buffer, _, _, should_close = get_filepath_or_buffer( + filepath_or_buffer, encoding + ) + reader: ReaderBase if format.lower() == "xport": from pandas.io.sas.sas_xport import XportReader @@ -129,5 +133,7 @@ def read_sas( return reader data = reader.read() - reader.close() + + if should_close: + reader.close() return data diff --git a/pandas/tests/io/sas/test_xport.py b/pandas/tests/io/sas/test_xport.py index 2682bafedb8f1..939edb3d8e0b4 100644 --- a/pandas/tests/io/sas/test_xport.py +++ b/pandas/tests/io/sas/test_xport.py @@ -3,6 +3,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd import pandas._testing as tm @@ -26,10 +28,12 @@ def setup_method(self, datapath): self.dirpath = datapath("io", "sas", "data") self.file01 = os.path.join(self.dirpath, "DEMO_G.xpt") self.file02 = os.path.join(self.dirpath, "SSHSV1_A.xpt") - self.file02b = open(os.path.join(self.dirpath, "SSHSV1_A.xpt"), "rb") self.file03 = os.path.join(self.dirpath, "DRXFCD_G.xpt") self.file04 = os.path.join(self.dirpath, "paxraw_d_short.xpt") + with td.file_leak_context(): + yield + def test1_basic(self): # Tests with DEMO_G.xpt (all numeric file) @@ -127,7 +131,12 @@ def test2_binary(self): data_csv = pd.read_csv(self.file02.replace(".xpt", ".csv")) numeric_as_float(data_csv) - data = read_sas(self.file02b, format="xport") + with open(self.file02, "rb") as fd: + with td.file_leak_context(): + # GH#35693 ensure that if we pass an open file, we + # dont incorrectly close it in read_sas + data = read_sas(fd, format="xport") + tm.assert_frame_equal(data, data_csv) def test_multiple_types(self): diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index bdf633839b2cd..0dad8c7397e37 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -23,8 +23,8 @@ def test_foo(): For more information, refer to the ``pytest`` documentation on ``skipif``. """ +from contextlib import contextmanager from distutils.version import LooseVersion -from functools import wraps import locale from typing import Callable, Optional @@ -237,23 +237,36 @@ def documented_fixture(fixture): def check_file_leaks(func) -> Callable: """ - Decorate a test function tot check that we are not leaking file descriptors. + Decorate a test function to check that we are not leaking file descriptors. """ - psutil = safe_import("psutil") - if not psutil: + with file_leak_context(): return func - @wraps(func) - def new_func(*args, **kwargs): + +@contextmanager +def file_leak_context(): + """ + ContextManager analogue to check_file_leaks. 
+ """ + psutil = safe_import("psutil") + if not psutil: + yield + else: proc = psutil.Process() flist = proc.open_files() + conns = proc.connections() - func(*args, **kwargs) + yield flist2 = proc.open_files() - assert flist2 == flist - - return new_func + # on some builds open_files includes file position, which we _dont_ + # expect to remain unchanged, so we need to compare excluding that + flist_ex = [(x.path, x.fd) for x in flist] + flist2_ex = [(x.path, x.fd) for x in flist2] + assert flist2_ex == flist_ex, (flist2, flist) + + conns2 = proc.connections() + assert conns2 == conns, (conns2, conns) def async_mark():
Introduces a context-manager version of `td.check_file_leaks` so we can do more targeted versions of those checks for debugging.
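A sketch of how the new context manager might be used for a targeted check; the test body here is illustrative, not from the PR:

```python
import pandas as pd
import pandas.util._test_decorators as td


def test_no_file_leak(tmp_path):
    path = tmp_path / "data.csv"
    pd.DataFrame({"a": [1, 2]}).to_csv(path)
    # asserts on exit that open files and connections are unchanged
    with td.file_leak_context():
        pd.read_csv(path)
```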
https://api.github.com/repos/pandas-dev/pandas/pulls/35693
2020-08-12T17:15:52Z
2020-08-13T19:18:48Z
2020-08-13T19:18:48Z
2020-08-13T23:09:59Z
ENH: GH-35611 Test that top-level pandas functions are serializable
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index bcfed2d0d3a10..3d45a1f7389b7 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -10,6 +10,7 @@ import pandas as pd from pandas import Series, Timestamp +import pandas._testing as tm from pandas.core import ops import pandas.core.common as com @@ -157,3 +158,12 @@ def test_version_tag(): raise ValueError( "No git tags exist, please sync tags between upstream and your repo" ) + + +@pytest.mark.parametrize( + "obj", [(obj,) for obj in pd.__dict__.values() if callable(obj)] +) +def test_serializable(obj): + # GH 35611 + unpickled = tm.round_trip_pickle(obj) + assert type(obj) == type(unpickled)
- [x] closes #35611 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
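In essence the test asserts that every public callable survives a pickle round trip; a minimal sketch of the idea (`tm.round_trip_pickle` additionally goes through a temporary file):

```python
import pickle

import pandas as pd

# spot-check a few top-level callables; the real test covers all of them
for obj in (pd.concat, pd.isna, pd.DataFrame):
    unpickled = pickle.loads(pickle.dumps(obj))
    assert type(obj) == type(unpickled)
```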
https://api.github.com/repos/pandas-dev/pandas/pulls/35692
2020-08-12T16:52:24Z
2020-08-19T18:02:56Z
2020-08-19T18:02:56Z
2020-08-19T18:03:00Z
Fix GH-29442 DataFrame.groupby doesn't preserve _metadata
diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst index e63912ebc8fee..c9dde182ab831 100644 --- a/doc/source/whatsnew/v1.1.4.rst +++ b/doc/source/whatsnew/v1.1.4.rst @@ -22,7 +22,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- +- Bug causing ``groupby(...).sum()`` and similar to not preserve metadata (:issue:`29442`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 8340f964fb44b..4e43ff63b0959 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1001,8 +1001,9 @@ def _agg_general( ): with group_selection_context(self): # try a cython aggregation if we can + result = None try: - return self._cython_agg_general( + result = self._cython_agg_general( how=alias, alt=npfunc, numeric_only=numeric_only, @@ -1021,8 +1022,9 @@ def _agg_general( raise # apply a non-cython aggregation - result = self.aggregate(lambda x: npfunc(x, axis=self.axis)) - return result + if result is None: + result = self.aggregate(lambda x: npfunc(x, axis=self.axis)) + return result.__finalize__(self.obj, method="groupby") def _cython_agg_general( self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1 diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py index 6692102bc9008..5507e98d974c1 100644 --- a/pandas/tests/generic/test_finalize.py +++ b/pandas/tests/generic/test_finalize.py @@ -772,13 +772,27 @@ def test_categorical_accessor(method): [ operator.methodcaller("sum"), lambda x: x.agg("sum"), + ], +) +def test_groupby_finalize(obj, method): + obj.attrs = {"a": 1} + result = method(obj.groupby([0, 0])) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})] +) +@pytest.mark.parametrize( + "method", + [ lambda x: x.agg(["sum", "count"]), lambda x: x.transform(lambda y: y), lambda x: x.apply(lambda y: y), ], ) @not_implemented_mark -def test_groupby(obj, method): +def test_groupby_finalize_not_implemented(obj, method): obj.attrs = {"a": 1} result = method(obj.groupby([0, 0])) assert result.attrs == {"a": 1}
This bug is a regression in v1.1.0 and was introduced by the fix for GH-34214 in commit [6f065b]. The underlying cause is that the `*Splitter` classes do not use the `._constructor` property and do not call `__finalize__`. Please note that the method name used for the `__finalize__` calls was my best guess, since documentation for the value has been hard to find. [6f065b]: https://github.com/pandas-dev/pandas/commit/6f065b6d423ea211d803e8be93c27f547541c372 - [x] closes #29442 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
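A small illustration of the behavior the fix restores, based on the added `test_groupby_finalize` test (the `attrs` payload is arbitrary):

```python
import pandas as pd

df = pd.DataFrame({"A": [0, 1], "B": [1, 2]})
df.attrs = {"a": 1}
result = df.groupby([0, 0]).sum()
# with the fix, metadata propagates through the cython aggregation path
assert result.attrs == {"a": 1}
```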
https://api.github.com/repos/pandas-dev/pandas/pulls/35688
2020-08-12T13:47:15Z
2020-10-14T18:37:52Z
2020-10-14T18:37:51Z
2020-10-14T19:50:59Z
BUG: to_pickle/read_pickle do not close user-provided file objects
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 86f47a5826214..deb5697053ea8 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -234,7 +234,7 @@ I/O - Bug in :meth:`to_csv` caused a ``ValueError`` when it was called with a filename in combination with ``mode`` containing a ``b`` (:issue:`35058`) - In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`) -- +- :meth:`to_pickle` and :meth:`read_pickle` were closing user-provided file objects (:issue:`35679`) Plotting ^^^^^^^^ diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 549d55e65546d..eee6ec7c9feca 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -100,7 +100,9 @@ def to_pickle( try: f.write(pickle.dumps(obj, protocol=protocol)) finally: - f.close() + if f != filepath_or_buffer: + # do not close user-provided file objects GH 35679 + f.close() for _f in fh: _f.close() if should_close: @@ -215,7 +217,9 @@ def read_pickle( # e.g. can occur for files written in py27; see GH#28645 and GH#31988 return pc.load(f, encoding="latin-1") finally: - f.close() + if f != filepath_or_buffer: + # do not close user-provided file objects GH 35679 + f.close() for _f in fh: _f.close() if should_close: diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index e4d43db7834e3..6331113ab8945 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -183,6 +183,15 @@ def python_unpickler(path): result = python_unpickler(path) compare_element(result, expected, typ) + # and the same for file objects (GH 35679) + with open(path, mode="wb") as handle: + writer(expected, path) + handle.seek(0) # shouldn't close file handle + with open(path, mode="rb") as handle: + result = pd.read_pickle(handle) + handle.seek(0) # shouldn't close file handle + compare_element(result, expected, typ) + def test_pickle_path_pathlib(): df = tm.makeDataFrame()
- [x] closes #35679 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Do not close user-provided file objects in `to_pickle` and `read_pickle`.
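A minimal sketch of the guarantee the fix provides, analogous to the added test:

```python
import io

import pandas as pd

buf = io.BytesIO()
pd.DataFrame({"a": [1, 2]}).to_pickle(buf)
buf.seek(0)  # the user-provided handle must remain open and seekable
df = pd.read_pickle(buf)
buf.seek(0)  # still open after reading as well
```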
https://api.github.com/repos/pandas-dev/pandas/pulls/35686
2020-08-12T12:30:50Z
2020-08-12T22:23:26Z
2020-08-12T22:23:26Z
2020-08-12T22:30:28Z
BUG/ENH: compression for google cloud storage in to_csv
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 55570341cf4e8..dae2f98bc0b76 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -240,6 +240,8 @@ I/O - In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`) - :meth:`to_pickle` and :meth:`read_pickle` were closing user-provided file objects (:issue:`35679`) - :meth:`to_csv` passes compression arguments for `'gzip'` always to `gzip.GzipFile` (:issue:`28103`) +- :meth:`to_csv` did not support zip compression for binary file object not having a filename (:issue: `35058`) +- :meth:`to_csv` and :meth:`read_csv` did not honor `compression` and `encoding` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`) Plotting ^^^^^^^^ diff --git a/pandas/_typing.py b/pandas/_typing.py index 1b972030ef5a5..f8af92e07c674 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -1,4 +1,6 @@ +from dataclasses import dataclass from datetime import datetime, timedelta, tzinfo +from io import IOBase from pathlib import Path from typing import ( IO, @@ -8,6 +10,7 @@ Callable, Collection, Dict, + Generic, Hashable, List, Mapping, @@ -62,7 +65,8 @@ "ExtensionDtype", str, np.dtype, Type[Union[str, float, int, complex, bool]] ] DtypeObj = Union[np.dtype, "ExtensionDtype"] -FilePathOrBuffer = Union[str, Path, IO[AnyStr]] +FilePathOrBuffer = Union[str, Path, IO[AnyStr], IOBase] +FileOrBuffer = Union[str, IO[AnyStr], IOBase] # FrameOrSeriesUnion means either a DataFrame or a Series. E.g. # `def func(a: FrameOrSeriesUnion) -> FrameOrSeriesUnion: ...` means that if a Series @@ -114,3 +118,26 @@ # compression keywords and compression CompressionDict = Mapping[str, Optional[Union[str, int, bool]]] CompressionOptions = Optional[Union[str, CompressionDict]] + + +# let's bind types +ModeVar = TypeVar("ModeVar", str, None, Optional[str]) +EncodingVar = TypeVar("EncodingVar", str, None, Optional[str]) + + +@dataclass +class IOargs(Generic[ModeVar, EncodingVar]): + """ + Return value of io/common.py:get_filepath_or_buffer. + + Note (copy&past from io/parsers): + filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile] + though mypy handling of conditional imports is difficult. + See https://github.com/python/mypy/issues/1297 + """ + + filepath_or_buffer: FileOrBuffer + encoding: EncodingVar + compression: CompressionOptions + should_close: bool + mode: Union[ModeVar, str] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 312d449e36022..eaa27d3f2a857 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2281,14 +2281,11 @@ def to_markdown( result = tabulate.tabulate(self, **kwargs) if buf is None: return result - buf, _, _, should_close = get_filepath_or_buffer( - buf, mode=mode, storage_options=storage_options - ) - assert buf is not None # Help mypy. 
- assert not isinstance(buf, str) - buf.writelines(result) - if should_close: - buf.close() + ioargs = get_filepath_or_buffer(buf, mode=mode, storage_options=storage_options) + assert not isinstance(ioargs.filepath_or_buffer, str) + ioargs.filepath_or_buffer.writelines(result) + if ioargs.should_close: + ioargs.filepath_or_buffer.close() return None @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3bad2d6dd18b9..94eef26e57592 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2,6 +2,7 @@ from datetime import timedelta import functools import gc +from io import StringIO import json import operator import pickle @@ -3249,6 +3250,7 @@ def to_csv( formatter.save() if path_or_buf is None: + assert isinstance(formatter.path_or_buf, StringIO) return formatter.path_or_buf.getvalue() return None diff --git a/pandas/io/common.py b/pandas/io/common.py index d1305c9cabe0e..97dbc7f1031a2 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -27,12 +27,17 @@ uses_params, uses_relative, ) +import warnings import zipfile from pandas._typing import ( CompressionDict, CompressionOptions, + EncodingVar, + FileOrBuffer, FilePathOrBuffer, + IOargs, + ModeVar, StorageOptions, ) from pandas.compat import _get_lzma_file, _import_lzma @@ -69,9 +74,7 @@ def is_url(url) -> bool: return parse_url(url).scheme in _VALID_URLS -def _expand_user( - filepath_or_buffer: FilePathOrBuffer[AnyStr], -) -> FilePathOrBuffer[AnyStr]: +def _expand_user(filepath_or_buffer: FileOrBuffer[AnyStr]) -> FileOrBuffer[AnyStr]: """ Return the argument with an initial component of ~ or ~user replaced by that user's home directory. @@ -101,7 +104,7 @@ def validate_header_arg(header) -> None: def stringify_path( filepath_or_buffer: FilePathOrBuffer[AnyStr], -) -> FilePathOrBuffer[AnyStr]: +) -> FileOrBuffer[AnyStr]: """ Attempt to convert a path-like object to a string. @@ -134,9 +137,9 @@ def stringify_path( # "__fspath__" [union-attr] # error: Item "IO[bytes]" of "Union[str, Path, IO[bytes]]" has no # attribute "__fspath__" [union-attr] - return filepath_or_buffer.__fspath__() # type: ignore[union-attr] + filepath_or_buffer = filepath_or_buffer.__fspath__() # type: ignore[union-attr] elif isinstance(filepath_or_buffer, pathlib.Path): - return str(filepath_or_buffer) + filepath_or_buffer = str(filepath_or_buffer) return _expand_user(filepath_or_buffer) @@ -162,13 +165,13 @@ def is_fsspec_url(url: FilePathOrBuffer) -> bool: ) -def get_filepath_or_buffer( +def get_filepath_or_buffer( # type: ignore[assignment] filepath_or_buffer: FilePathOrBuffer, - encoding: Optional[str] = None, + encoding: EncodingVar = None, compression: CompressionOptions = None, - mode: Optional[str] = None, + mode: ModeVar = None, storage_options: StorageOptions = None, -): +) -> IOargs[ModeVar, EncodingVar]: """ If the filepath_or_buffer is a url, translate and return the buffer. Otherwise passthrough. @@ -191,14 +194,35 @@ def get_filepath_or_buffer( .. versionadded:: 1.2.0 - Returns - ------- - Tuple[FilePathOrBuffer, str, CompressionOptions, bool] - Tuple containing the filepath or buffer, the encoding, the compression - and should_close. + ..versionchange:: 1.2.0 + + Returns the dataclass IOargs. 
""" filepath_or_buffer = stringify_path(filepath_or_buffer) + # bz2 and xz do not write the byte order mark for utf-16 and utf-32 + # print a warning when writing such files + compression_method = infer_compression( + filepath_or_buffer, get_compression_method(compression)[0] + ) + if ( + mode + and "w" in mode + and compression_method in ["bz2", "xz"] + and encoding in ["utf-16", "utf-32"] + ): + warnings.warn( + f"{compression} will not write the byte order mark for {encoding}", + UnicodeWarning, + ) + + # Use binary mode when converting path-like objects to file-like objects (fsspec) + # except when text mode is explicitly requested. The original mode is returned if + # fsspec is not used. + fsspec_mode = mode or "rb" + if "t" not in fsspec_mode and "b" not in fsspec_mode: + fsspec_mode += "b" + if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer): # TODO: fsspec can also handle HTTP via requests, but leaving this unchanged if storage_options: @@ -212,7 +236,13 @@ def get_filepath_or_buffer( compression = "gzip" reader = BytesIO(req.read()) req.close() - return reader, encoding, compression, True + return IOargs( + filepath_or_buffer=reader, + encoding=encoding, + compression=compression, + should_close=True, + mode=fsspec_mode, + ) if is_fsspec_url(filepath_or_buffer): assert isinstance( @@ -244,7 +274,7 @@ def get_filepath_or_buffer( try: file_obj = fsspec.open( - filepath_or_buffer, mode=mode or "rb", **(storage_options or {}) + filepath_or_buffer, mode=fsspec_mode, **(storage_options or {}) ).open() # GH 34626 Reads from Public Buckets without Credentials needs anon=True except tuple(err_types_to_retry_with_anon): @@ -255,23 +285,41 @@ def get_filepath_or_buffer( storage_options = dict(storage_options) storage_options["anon"] = True file_obj = fsspec.open( - filepath_or_buffer, mode=mode or "rb", **(storage_options or {}) + filepath_or_buffer, mode=fsspec_mode, **(storage_options or {}) ).open() - return file_obj, encoding, compression, True + return IOargs( + filepath_or_buffer=file_obj, + encoding=encoding, + compression=compression, + should_close=True, + mode=fsspec_mode, + ) elif storage_options: raise ValueError( "storage_options passed with file object or non-fsspec file path" ) if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)): - return _expand_user(filepath_or_buffer), None, compression, False + return IOargs( + filepath_or_buffer=_expand_user(filepath_or_buffer), + encoding=encoding, + compression=compression, + should_close=False, + mode=mode, + ) if not is_file_like(filepath_or_buffer): msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}" raise ValueError(msg) - return filepath_or_buffer, None, compression, False + return IOargs( + filepath_or_buffer=filepath_or_buffer, + encoding=encoding, + compression=compression, + should_close=False, + mode=mode, + ) def file_path_to_url(path: str) -> str: @@ -452,6 +500,15 @@ def get_handle( need_text_wrapping = (BufferedIOBase, RawIOBase, S3File) except ImportError: need_text_wrapping = (BufferedIOBase, RawIOBase) + # fsspec is an optional dependency. If it is available, add its file-object + # class to the list of classes that need text wrapping. If fsspec is too old and is + # needed, get_filepath_or_buffer would already have thrown an exception. 
+ try: + from fsspec.spec import AbstractFileSystem + + need_text_wrapping = (*need_text_wrapping, AbstractFileSystem) + except ImportError: + pass handles: List[Union[IO, _MMapWrapper]] = list() f = path_or_buf @@ -583,12 +640,15 @@ def __init__( self.archive_name = archive_name kwargs_zip: Dict[str, Any] = {"compression": zipfile.ZIP_DEFLATED} kwargs_zip.update(kwargs) - super().__init__(file, mode, **kwargs_zip) + super().__init__(file, mode, **kwargs_zip) # type: ignore[arg-type] def write(self, data): archive_name = self.filename if self.archive_name is not None: archive_name = self.archive_name + if archive_name is None: + # ZipFile needs a non-empty string + archive_name = "zip" super().writestr(archive_name, data) @property diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index ead36c95556b1..9bc1d7fedcb31 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -352,9 +352,9 @@ def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None): if is_url(filepath_or_buffer): filepath_or_buffer = BytesIO(urlopen(filepath_or_buffer).read()) elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)): - filepath_or_buffer, _, _, _ = get_filepath_or_buffer( + filepath_or_buffer = get_filepath_or_buffer( filepath_or_buffer, storage_options=storage_options - ) + ).filepath_or_buffer if isinstance(filepath_or_buffer, self._workbook_class): self.book = filepath_or_buffer diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index fb606b5ec8aef..a98eebe1c6a2a 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -34,9 +34,7 @@ def to_feather(df: DataFrame, path, storage_options: StorageOptions = None, **kw import_optional_dependency("pyarrow") from pyarrow import feather - path, _, _, should_close = get_filepath_or_buffer( - path, mode="wb", storage_options=storage_options - ) + ioargs = get_filepath_or_buffer(path, mode="wb", storage_options=storage_options) if not isinstance(df, DataFrame): raise ValueError("feather only support IO with DataFrames") @@ -74,7 +72,11 @@ def to_feather(df: DataFrame, path, storage_options: StorageOptions = None, **kw if df.columns.inferred_type not in valid_types: raise ValueError("feather must have string column names") - feather.write_feather(df, path, **kwargs) + feather.write_feather(df, ioargs.filepath_or_buffer, **kwargs) + + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) + ioargs.filepath_or_buffer.close() def read_feather( @@ -122,14 +124,15 @@ def read_feather( import_optional_dependency("pyarrow") from pyarrow import feather - path, _, _, should_close = get_filepath_or_buffer( - path, storage_options=storage_options - ) + ioargs = get_filepath_or_buffer(path, storage_options=storage_options) - df = feather.read_feather(path, columns=columns, use_threads=bool(use_threads)) + df = feather.read_feather( + ioargs.filepath_or_buffer, columns=columns, use_threads=bool(use_threads) + ) # s3fs only validates the credentials when the file is closed. 
- if should_close: - path.close() + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) + ioargs.filepath_or_buffer.close() return df diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index c462a96da7133..270caec022fef 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -62,14 +62,19 @@ def __init__( # Extract compression mode as given, if dict compression, self.compression_args = get_compression_method(compression) + self.compression = infer_compression(path_or_buf, compression) - self.path_or_buf, _, _, self.should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( path_or_buf, encoding=encoding, - compression=compression, + compression=self.compression, mode=mode, storage_options=storage_options, ) + self.path_or_buf = ioargs.filepath_or_buffer + self.should_close = ioargs.should_close + self.mode = ioargs.mode + self.sep = sep self.na_rep = na_rep self.float_format = float_format @@ -78,12 +83,10 @@ def __init__( self.header = header self.index = index self.index_label = index_label - self.mode = mode if encoding is None: encoding = "utf-8" self.encoding = encoding self.errors = errors - self.compression = infer_compression(self.path_or_buf, compression) if quoting is None: quoting = csvlib.QUOTE_MINIMAL diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index fe5e172655ae1..7a3b76ff7e3d0 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -58,12 +58,14 @@ def to_json( ) if path_or_buf is not None: - path_or_buf, _, _, should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( path_or_buf, compression=compression, mode="wt", storage_options=storage_options, ) + path_or_buf = ioargs.filepath_or_buffer + should_close = ioargs.should_close if lines and orient != "records": raise ValueError("'lines' keyword only valid when 'orient' is records") @@ -102,6 +104,8 @@ def to_json( fh.write(s) finally: fh.close() + for handle in handles: + handle.close() elif path_or_buf is None: return s else: @@ -615,7 +619,7 @@ def read_json( compression_method, compression = get_compression_method(compression) compression_method = infer_compression(path_or_buf, compression_method) compression = dict(compression, method=compression_method) - filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, @@ -623,7 +627,7 @@ def read_json( ) json_reader = JsonReader( - filepath_or_buffer, + ioargs.filepath_or_buffer, orient=orient, typ=typ, dtype=dtype, @@ -633,10 +637,10 @@ def read_json( numpy=numpy, precise_float=precise_float, date_unit=date_unit, - encoding=encoding, + encoding=ioargs.encoding, lines=lines, chunksize=chunksize, - compression=compression, + compression=ioargs.compression, nrows=nrows, ) @@ -644,8 +648,9 @@ def read_json( return json_reader result = json_reader.read() - if should_close: - filepath_or_buffer.close() + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) + ioargs.filepath_or_buffer.close() return result diff --git a/pandas/io/orc.py b/pandas/io/orc.py index b556732e4d116..f1b1aa6a43cb5 100644 --- a/pandas/io/orc.py +++ b/pandas/io/orc.py @@ -50,7 +50,7 @@ def read_orc( import pyarrow.orc - path, _, _, _ = get_filepath_or_buffer(path) - orc_file = pyarrow.orc.ORCFile(path) + ioargs = get_filepath_or_buffer(path) + orc_file = pyarrow.orc.ORCFile(ioargs.filepath_or_buffer) result = orc_file.read(columns=columns, 
**kwargs).to_pandas() return result diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 7f0eef039a1e8..e5d6ac006e251 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -9,7 +9,7 @@ from pandas import DataFrame, get_option -from pandas.io.common import _expand_user, get_filepath_or_buffer, is_fsspec_url +from pandas.io.common import get_filepath_or_buffer, is_fsspec_url, stringify_path def get_engine(engine: str) -> "BaseImpl": @@ -113,7 +113,7 @@ def write( raise ValueError( "storage_options passed with file object or non-fsspec file path" ) - path = _expand_user(path) + path = stringify_path(path) if partition_cols is not None: # writes to multiple files under the given path self.api.parquet.write_to_dataset( @@ -143,10 +143,12 @@ def read( ) fs = kwargs.pop("filesystem", None) should_close = False - path = _expand_user(path) + path = stringify_path(path) if not fs: - path, _, _, should_close = get_filepath_or_buffer(path) + ioargs = get_filepath_or_buffer(path) + path = ioargs.filepath_or_buffer + should_close = ioargs.should_close kwargs["use_pandas_metadata"] = True result = self.api.parquet.read_table( @@ -205,7 +207,7 @@ def write( raise ValueError( "storage_options passed with file object or non-fsspec file path" ) - path, _, _, _ = get_filepath_or_buffer(path) + path = get_filepath_or_buffer(path).filepath_or_buffer with catch_warnings(record=True): self.api.write( @@ -228,7 +230,7 @@ def read( ).open() parquet_file = self.api.ParquetFile(path, open_with=open_with) else: - path, _, _, _ = get_filepath_or_buffer(path) + path = get_filepath_or_buffer(path).filepath_or_buffer parquet_file = self.api.ParquetFile(path) return parquet_file.to_pandas(columns=columns, **kwargs) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 983aa56324083..a917bff9d7ca7 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -432,10 +432,10 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): # Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile] # though mypy handling of conditional imports is difficult. # See https://github.com/python/mypy/issues/1297 - fp_or_buf, _, compression, should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( filepath_or_buffer, encoding, compression, storage_options=storage_options ) - kwds["compression"] = compression + kwds["compression"] = ioargs.compression if kwds.get("date_parser", None) is not None: if isinstance(kwds["parse_dates"], bool): @@ -450,7 +450,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): _validate_names(kwds.get("names", None)) # Create the parser. 
- parser = TextFileReader(fp_or_buf, **kwds) + parser = TextFileReader(ioargs.filepath_or_buffer, **kwds) if chunksize or iterator: return parser @@ -460,9 +460,10 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): finally: parser.close() - if should_close: + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) try: - fp_or_buf.close() + ioargs.filepath_or_buffer.close() except ValueError: pass diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index fc1d2e385cf72..857a2d1b69be4 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -86,15 +86,18 @@ def to_pickle( >>> import os >>> os.remove("./dummy.pkl") """ - fp_or_buf, _, compression, should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( filepath_or_buffer, compression=compression, mode="wb", storage_options=storage_options, ) - if not isinstance(fp_or_buf, str) and compression == "infer": + compression = ioargs.compression + if not isinstance(ioargs.filepath_or_buffer, str) and compression == "infer": compression = None - f, fh = get_handle(fp_or_buf, "wb", compression=compression, is_text=False) + f, fh = get_handle( + ioargs.filepath_or_buffer, "wb", compression=compression, is_text=False + ) if protocol < 0: protocol = pickle.HIGHEST_PROTOCOL try: @@ -105,9 +108,10 @@ def to_pickle( f.close() for _f in fh: _f.close() - if should_close: + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) try: - fp_or_buf.close() + ioargs.filepath_or_buffer.close() except ValueError: pass @@ -189,12 +193,15 @@ def read_pickle( >>> import os >>> os.remove("./dummy.pkl") """ - fp_or_buf, _, compression, should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( filepath_or_buffer, compression=compression, storage_options=storage_options ) - if not isinstance(fp_or_buf, str) and compression == "infer": + compression = ioargs.compression + if not isinstance(ioargs.filepath_or_buffer, str) and compression == "infer": compression = None - f, fh = get_handle(fp_or_buf, "rb", compression=compression, is_text=False) + f, fh = get_handle( + ioargs.filepath_or_buffer, "rb", compression=compression, is_text=False + ) # 1) try standard library Pickle # 2) try pickle_compat (older pandas version) to handle subclass changes @@ -222,8 +229,9 @@ def read_pickle( f.close() for _f in fh: _f.close() - if should_close: + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) try: - fp_or_buf.close() + ioargs.filepath_or_buffer.close() except ValueError: pass diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 3d9be7c15726b..76dac39d1889f 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -137,7 +137,7 @@ def __init__( self._current_row_on_page_index = 0 self._current_row_in_file_index = 0 - self._path_or_buf, _, _, _ = get_filepath_or_buffer(path_or_buf) + self._path_or_buf = get_filepath_or_buffer(path_or_buf).filepath_or_buffer if isinstance(self._path_or_buf, str): self._path_or_buf = open(self._path_or_buf, "rb") self.handle = self._path_or_buf diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index 6cf248b748107..e4d9324ce5130 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -253,12 +253,9 @@ def __init__( self._chunksize = chunksize if isinstance(filepath_or_buffer, str): - ( - filepath_or_buffer, - encoding, - compression, - should_close, - ) = get_filepath_or_buffer(filepath_or_buffer, encoding=encoding) + filepath_or_buffer = 
get_filepath_or_buffer( + filepath_or_buffer, encoding=encoding + ).filepath_or_buffer if isinstance(filepath_or_buffer, (str, bytes)): self.filepath_or_buffer = open(filepath_or_buffer, "rb") diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index fffdebda8c87a..ae9457a8e3147 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -109,22 +109,26 @@ def read_sas( else: raise ValueError("unable to infer format of SAS file") - filepath_or_buffer, _, _, should_close = get_filepath_or_buffer( - filepath_or_buffer, encoding - ) + ioargs = get_filepath_or_buffer(filepath_or_buffer, encoding) reader: ReaderBase if format.lower() == "xport": from pandas.io.sas.sas_xport import XportReader reader = XportReader( - filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize + ioargs.filepath_or_buffer, + index=index, + encoding=ioargs.encoding, + chunksize=chunksize, ) elif format.lower() == "sas7bdat": from pandas.io.sas.sas7bdat import SAS7BDATReader reader = SAS7BDATReader( - filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize + ioargs.filepath_or_buffer, + index=index, + encoding=ioargs.encoding, + chunksize=chunksize, ) else: raise ValueError("unknown SAS format") @@ -134,6 +138,6 @@ def read_sas( data = reader.read() - if should_close: + if ioargs.should_close: reader.close() return data diff --git a/pandas/io/stata.py b/pandas/io/stata.py index ec3819f1673a8..0074ebc4decb0 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1069,9 +1069,9 @@ def __init__( self._native_byteorder = _set_endianness(sys.byteorder) path_or_buf = stringify_path(path_or_buf) if isinstance(path_or_buf, str): - path_or_buf, encoding, _, should_close = get_filepath_or_buffer( + path_or_buf = get_filepath_or_buffer( path_or_buf, storage_options=storage_options - ) + ).filepath_or_buffer if isinstance(path_or_buf, (str, bytes)): self.path_or_buf = open(path_or_buf, "rb") @@ -1979,11 +1979,16 @@ def _open_file_binary_write( compression_typ, compression_args = get_compression_method(compression) compression_typ = infer_compression(fname, compression_typ) compression = dict(compression_args, method=compression_typ) - path_or_buf, _, compression, _ = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( fname, mode="wb", compression=compression, storage_options=storage_options, ) - f, _ = get_handle(path_or_buf, "wb", compression=compression, is_text=False) - return f, True, compression + f, _ = get_handle( + ioargs.filepath_or_buffer, + "wb", + compression=ioargs.compression, + is_text=False, + ) + return f, True, ioargs.compression else: raise TypeError("fname must be a binary file, buffer or path-like.") diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 5ce2233bc0cd0..85a12a13d19fb 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -105,21 +105,21 @@ def test_infer_compression_from_path(self, extension, expected, path_type): compression = icom.infer_compression(path, compression="infer") assert compression == expected - def test_get_filepath_or_buffer_with_path(self): - filename = "~/sometest" - filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer(filename) - assert filepath_or_buffer != filename - assert os.path.isabs(filepath_or_buffer) - assert os.path.expanduser(filename) == filepath_or_buffer - assert not should_close + @pytest.mark.parametrize("path_type", [str, CustomFSPath, Path]) + def test_get_filepath_or_buffer_with_path(self, path_type): + # 
ignore LocalPath: it creates strange paths: /absolute/~/sometest + filename = path_type("~/sometest") + ioargs = icom.get_filepath_or_buffer(filename) + assert ioargs.filepath_or_buffer != filename + assert os.path.isabs(ioargs.filepath_or_buffer) + assert os.path.expanduser(filename) == ioargs.filepath_or_buffer + assert not ioargs.should_close def test_get_filepath_or_buffer_with_buffer(self): input_buffer = StringIO() - filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer( - input_buffer - ) - assert filepath_or_buffer == input_buffer - assert not should_close + ioargs = icom.get_filepath_or_buffer(input_buffer) + assert ioargs.filepath_or_buffer == input_buffer + assert not ioargs.should_close def test_iterator(self): reader = pd.read_csv(StringIO(self.data1), chunksize=1) @@ -389,6 +389,25 @@ def test_binary_mode(self): df.to_csv(path, mode="w+b") tm.assert_frame_equal(df, pd.read_csv(path, index_col=0)) + @pytest.mark.parametrize("encoding", ["utf-16", "utf-32"]) + @pytest.mark.parametrize("compression_", ["bz2", "xz"]) + def test_warning_missing_utf_bom(self, encoding, compression_): + """ + bz2 and xz do not write the byte order mark (BOM) for utf-16/32. + + https://stackoverflow.com/questions/55171439 + + GH 35681 + """ + df = tm.makeDataFrame() + with tm.ensure_clean() as path: + with tm.assert_produces_warning(UnicodeWarning): + df.to_csv(path, compression=compression_, encoding=encoding) + + # reading should fail (otherwise we wouldn't need the warning) + with pytest.raises(Exception): + pd.read_csv(path, compression=compression_, encoding=encoding) + def test_is_fsspec_url(): assert icom.is_fsspec_url("gcs://pandas/somethingelse.com") diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index bc14b485f75e5..31e9ad4cf4416 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -124,6 +124,8 @@ def test_compression_binary(compression_only): GH22555 """ df = tm.makeDataFrame() + + # with a file with tm.ensure_clean() as path: with open(path, mode="wb") as file: df.to_csv(file, mode="wb", compression=compression_only) @@ -132,6 +134,14 @@ def test_compression_binary(compression_only): df, pd.read_csv(path, index_col=0, compression=compression_only) ) + # with BytesIO + file = io.BytesIO() + df.to_csv(file, mode="wb", compression=compression_only) + file.seek(0) # file shouldn't be closed + tm.assert_frame_equal( + df, pd.read_csv(file, index_col=0, compression=compression_only) + ) + def test_gzip_reproducibility_file_name(): """ diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py index eacf4fa08545d..18b5743a3375a 100644 --- a/pandas/tests/io/test_gcs.py +++ b/pandas/tests/io/test_gcs.py @@ -9,12 +9,32 @@ from pandas.util import _test_decorators as td -@td.skip_if_no("gcsfs") -def test_read_csv_gcs(monkeypatch): +@pytest.fixture +def gcs_buffer(monkeypatch): + """Emulate GCS using a binary buffer.""" from fsspec import AbstractFileSystem, registry registry.target.clear() # noqa # remove state + gcs_buffer = BytesIO() + gcs_buffer.close = lambda: True + + class MockGCSFileSystem(AbstractFileSystem): + def open(*args, **kwargs): + gcs_buffer.seek(0) + return gcs_buffer + + monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem) + + return gcs_buffer + + +@td.skip_if_no("gcsfs") +def test_read_csv_gcs(gcs_buffer): + from fsspec import registry + + registry.target.clear() # noqa # remove state + df1 = DataFrame( { "int": [1, 3], @@ -24,21 +44,19 @@ def 
test_read_csv_gcs(monkeypatch): } ) - class MockGCSFileSystem(AbstractFileSystem): - def open(*args, **kwargs): - return BytesIO(df1.to_csv(index=False).encode()) + gcs_buffer.write(df1.to_csv(index=False).encode()) - monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem) df2 = read_csv("gs://test/test.csv", parse_dates=["dt"]) tm.assert_frame_equal(df1, df2) @td.skip_if_no("gcsfs") -def test_to_csv_gcs(monkeypatch): - from fsspec import AbstractFileSystem, registry +def test_to_csv_gcs(gcs_buffer): + from fsspec import registry registry.target.clear() # noqa # remove state + df1 = DataFrame( { "int": [1, 3], @@ -47,29 +65,57 @@ def test_to_csv_gcs(monkeypatch): "dt": date_range("2018-06-18", periods=2), } ) - s = BytesIO() - s.close = lambda: True - - class MockGCSFileSystem(AbstractFileSystem): - def open(*args, **kwargs): - s.seek(0) - return s - monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem) df1.to_csv("gs://test/test.csv", index=True) - def mock_get_filepath_or_buffer(*args, **kwargs): - return BytesIO(df1.to_csv(index=True).encode()), None, None, False - - monkeypatch.setattr( - "pandas.io.common.get_filepath_or_buffer", mock_get_filepath_or_buffer - ) - df2 = read_csv("gs://test/test.csv", parse_dates=["dt"], index_col=0) tm.assert_frame_equal(df1, df2) +@td.skip_if_no("gcsfs") +@pytest.mark.parametrize("encoding", ["utf-8", "cp1251"]) +def test_to_csv_compression_encoding_gcs(gcs_buffer, compression_only, encoding): + """ + Compression and encoding should with GCS. + + GH 35677 (to_csv, compression), GH 26124 (to_csv, encoding), and + GH 32392 (read_csv, encoding) + """ + from fsspec import registry + + registry.target.clear() # noqa # remove state + df = tm.makeDataFrame() + + # reference of compressed and encoded file + compression = {"method": compression_only} + if compression_only == "gzip": + compression["mtime"] = 1 # be reproducible + buffer = BytesIO() + df.to_csv(buffer, compression=compression, encoding=encoding, mode="wb") + + # write compressed file with explicit compression + path_gcs = "gs://test/test.csv" + df.to_csv(path_gcs, compression=compression, encoding=encoding) + assert gcs_buffer.getvalue() == buffer.getvalue() + read_df = read_csv( + path_gcs, index_col=0, compression=compression_only, encoding=encoding + ) + tm.assert_frame_equal(df, read_df) + + # write compressed file with implicit compression + if compression_only == "gzip": + compression_only = "gz" + compression["method"] = "infer" + path_gcs += f".{compression_only}" + df.to_csv( + path_gcs, compression=compression, encoding=encoding, + ) + assert gcs_buffer.getvalue() == buffer.getvalue() + read_df = read_csv(path_gcs, index_col=0, compression="infer", encoding=encoding) + tm.assert_frame_equal(df, read_df) + + @td.skip_if_no("fastparquet") @td.skip_if_no("gcsfs") def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
- [x] closes #35677, closes #26124, and closes #32392 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Inferring the compression before converting the path to a file object makes `df.to_csv("gs://mybucket/test2.csv.gz", compression="infer", mode="wb")` work. Wrapping fsspec file objects in a `TextIOWrapper` makes `df.to_csv("gs://mybucket/test2.csv", mode="wb")` work as well. Path-like objects that are internally converted to file-like objects (in `get_filepath_or_buffer`) are now always opened in binary mode (unless text mode is explicitly requested), and the potentially changed mode is returned, so there is no need to specify `mode="wb"` for Google Cloud files. Since the Google Cloud file is now always opened in binary mode, the requested `encoding` is honored as well. This PR also fixes zip compression for file objects that do not have a name.
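Putting the pieces together, a hedged usage sketch (the bucket name is illustrative and `gcsfs` must be installed):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})

# compression is now inferred from the extension before the path is
# converted to an fsspec file object
df.to_csv("gs://mybucket/test.csv.gz", compression="infer")

# fsspec files are opened in binary mode, so the encoding is honored too
df.to_csv("gs://mybucket/test.csv", encoding="cp1251")
```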
https://api.github.com/repos/pandas-dev/pandas/pulls/35681
2020-08-12T01:38:04Z
2020-09-03T03:06:19Z
2020-09-03T03:06:18Z
2020-09-03T15:56:27Z
PERF: make RangeIndex iterate over ._range
diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py index b242de6a17208..9c05019c70396 100644 --- a/asv_bench/benchmarks/index_object.py +++ b/asv_bench/benchmarks/index_object.py @@ -57,8 +57,8 @@ def time_datetime_difference_disjoint(self): class Range: def setup(self): - self.idx_inc = RangeIndex(start=0, stop=10 ** 7, step=3) - self.idx_dec = RangeIndex(start=10 ** 7, stop=-1, step=-3) + self.idx_inc = RangeIndex(start=0, stop=10 ** 6, step=3) + self.idx_dec = RangeIndex(start=10 ** 6, stop=-1, step=-3) def time_max(self): self.idx_inc.max() @@ -73,15 +73,23 @@ def time_min_trivial(self): self.idx_inc.min() def time_get_loc_inc(self): - self.idx_inc.get_loc(900000) + self.idx_inc.get_loc(900_000) def time_get_loc_dec(self): - self.idx_dec.get_loc(100000) + self.idx_dec.get_loc(100_000) + + def time_iter_inc(self): + for _ in self.idx_inc: + pass + + def time_iter_dec(self): + for _ in self.idx_dec: + pass class IndexEquals: def setup(self): - idx_large_fast = RangeIndex(100000) + idx_large_fast = RangeIndex(100_000) idx_small_slow = date_range(start="1/1/2012", periods=1) self.mi_large_slow = MultiIndex.from_product([idx_large_fast, idx_small_slow]) @@ -94,7 +102,7 @@ def time_non_object_equals_multiindex(self): class IndexAppend: def setup(self): - N = 10000 + N = 10_000 self.range_idx = RangeIndex(0, 100) self.int_idx = self.range_idx.astype(int) self.obj_idx = self.int_idx.astype(str) @@ -168,7 +176,7 @@ def time_get_loc_non_unique_sorted(self, dtype): class Float64IndexMethod: # GH 13166 def setup(self): - N = 100000 + N = 100_000 a = np.arange(N) self.ind = Float64Index(a * 4.8000000418824129e-08) @@ -212,7 +220,7 @@ class GC: params = [1, 2, 5] def create_use_drop(self): - idx = Index(list(range(1000 * 1000))) + idx = Index(list(range(1_000_000))) idx._engine def peakmem_gc_instances(self, N): diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 3577a7aacc008..7efb70f0752e2 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -373,6 +373,10 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): def tolist(self): return list(self._range) + @doc(Int64Index.__iter__) + def __iter__(self): + yield from self._range + @doc(Int64Index._shallow_copy) def _shallow_copy(self, values=None, name: Label = no_default): name = self.name if name is no_default else name diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index ef4bb9a0869b0..c4c242746e92c 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -167,6 +167,10 @@ def test_cache(self): idx.any() assert idx._cache == {} + for _ in idx: + pass + assert idx._cache == {} + df = pd.DataFrame({"a": range(10)}, index=idx) df.loc[50]
A minor performance improvement. Adding a custom ``__iter__`` method to ``RangeIndex`` both avoids creating and caching the expensive ``_data`` attribute and is simply faster, since iterating over a ``range`` beats iterating over an ``ndarray``:

```python
>>> idx = pd.RangeIndex(100_000)
>>> %%timeit
... for _ in idx:
...     pass
10.9 ms ± 74.7 µs per loop  # master
6.11 ms ± 48.8 µs per loop  # this PR
>>> "_data" in idx._cache
True   # master
False  # this PR
```

xref #35432, #26565.
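The underlying effect can be reproduced outside pandas; a rough micro-benchmark sketch (absolute timings will vary by machine):

```python
import timeit

import numpy as np

r = range(100_000)
a = np.arange(100_000)

print(timeit.timeit(lambda: sum(1 for _ in r), number=100))  # plain range
print(timeit.timeit(lambda: sum(1 for _ in a), number=100))  # ndarray, slower
```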
https://api.github.com/repos/pandas-dev/pandas/pulls/35676
2020-08-11T18:35:41Z
2020-08-13T18:06:36Z
2020-08-13T18:06:36Z
2020-08-13T18:27:18Z
Avoid redirect
diff --git a/doc/source/getting_started/tutorials.rst b/doc/source/getting_started/tutorials.rst index 4c2d0621c6103..b8940d2efed2f 100644 --- a/doc/source/getting_started/tutorials.rst +++ b/doc/source/getting_started/tutorials.rst @@ -94,4 +94,4 @@ Various tutorials * `Intro to pandas data structures, by Greg Reda <http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/>`_ * `Pandas and Python: Top 10, by Manish Amde <https://manishamde.github.io/blog/2013/03/07/pandas-and-python-top-10/>`_ * `Pandas DataFrames Tutorial, by Karlijn Willems <https://www.datacamp.com/community/tutorials/pandas-tutorial-dataframe-python>`_ -* `A concise tutorial with real life examples <https://tutswiki.com/pandas-cookbook/chapter1>`_ +* `A concise tutorial with real life examples <https://tutswiki.com/pandas-cookbook/chapter1/>`_
A minor change to avoid a 301 redirect on the link.
https://api.github.com/repos/pandas-dev/pandas/pulls/35674
2020-08-11T16:25:43Z
2020-08-22T03:08:19Z
2020-08-22T03:08:19Z
2020-08-22T03:08:29Z
REGR: DataFrame.reset_index() on empty DataFrame with MI and datetime level
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index b37103910afab..98d67e930ccc0 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) +- Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) - Fixed regression where :meth:`DataFrame.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 547d86f221b5f..1587dd8798ec3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4816,7 +4816,7 @@ def _maybe_casted_values(index, labels=None): # we can have situations where the whole mask is -1, # meaning there is nothing found in labels, so make all nan's - if mask.all(): + if mask.size > 0 and mask.all(): dtype = index.dtype fill_value = na_value_for_dtype(dtype) values = construct_1d_arraylike_from_scalar( diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index da4bfa9be4881..b88ef0e6691cb 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -318,3 +318,33 @@ def test_reset_index_dtypes_on_empty_frame_with_multiindex(array, dtype): result = DataFrame(index=idx)[:0].reset_index().dtypes expected = Series({"level_0": np.int64, "level_1": np.float64, "level_2": dtype}) tm.assert_series_equal(result, expected) + + +def test_reset_index_empty_frame_with_datetime64_multiindex(): + # https://github.com/pandas-dev/pandas/issues/35606 + idx = MultiIndex( + levels=[[pd.Timestamp("2020-07-20 00:00:00")], [3, 4]], + codes=[[], []], + names=["a", "b"], + ) + df = DataFrame(index=idx, columns=["c", "d"]) + result = df.reset_index() + expected = DataFrame( + columns=list("abcd"), index=RangeIndex(start=0, stop=0, step=1) + ) + expected["a"] = expected["a"].astype("datetime64[ns]") + expected["b"] = expected["b"].astype("int64") + tm.assert_frame_equal(result, expected) + + +def test_reset_index_empty_frame_with_datetime64_multiindex_from_groupby(): + # https://github.com/pandas-dev/pandas/issues/35657 + df = DataFrame(dict(c1=[10.0], c2=["a"], c3=pd.to_datetime("2020-01-01"))) + df = df.head(0).groupby(["c2", "c3"])[["c1"]].sum() + result = df.reset_index() + expected = DataFrame( + columns=["c2", "c3", "c1"], index=RangeIndex(start=0, stop=0, step=1) + ) + expected["c3"] = expected["c3"].astype("datetime64[ns]") + expected["c1"] = expected["c1"].astype("float64") + tm.assert_frame_equal(result, expected)
- [ ] closes #35606 - [ ] closes #35657 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
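The one-line fix hinges on a NumPy subtlety worth spelling out: `all()` on an empty array is vacuously `True`, so an empty frame was wrongly sent down the all-missing branch:

```python
import numpy as np

mask = np.array([], dtype=bool)
print(mask.all())                    # True -- vacuous truth on empty input
print(mask.size > 0 and mask.all())  # False -- the guard added by the fix
```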
https://api.github.com/repos/pandas-dev/pandas/pulls/35673
2020-08-11T16:24:25Z
2020-08-14T01:50:52Z
2020-08-14T01:50:52Z
2020-08-14T10:23:32Z
CI/TST: change skip to xfail #35660
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index c84c0048cc838..3d5f6ae3a4af9 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1138,7 +1138,7 @@ def test_parse_integers_above_fp_precision(all_parsers): tm.assert_frame_equal(result, expected) -@pytest.mark.skip("unreliable test #35214") +@pytest.mark.xfail(reason="ResourceWarning #35660", strict=False) def test_chunks_have_consistent_numerical_type(all_parsers): parser = all_parsers integers = [str(i) for i in range(499999)] @@ -1152,7 +1152,7 @@ def test_chunks_have_consistent_numerical_type(all_parsers): assert result.a.dtype == float -@pytest.mark.skip("unreliable test #35214") +@pytest.mark.xfail(reason="ResourceWarning #35660", strict=False) def test_warn_if_chunks_have_mismatched_type(all_parsers): warning_type = None parser = all_parsers
Follow-up to #35660.
https://api.github.com/repos/pandas-dev/pandas/pulls/35672
2020-08-11T16:22:57Z
2020-08-11T21:57:57Z
2020-08-11T21:57:57Z
2020-08-19T17:37:41Z
ENH: Enable short_caption in to_latex
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 9fc094330fb36..b6d5fc0f35bc3 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -96,6 +96,32 @@ For example: buffer = io.BytesIO() data.to_csv(buffer, mode="w+b", encoding="utf-8", compression="gzip") +Support for short caption and table position in ``to_latex`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:meth:`DataFrame.to_latex` now allows one to specify +a floating table position (:issue:`35281`) +and a short caption (:issue:`36267`). + +New keyword ``position`` is implemented to set the position. + +.. ipython:: python + + data = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) + table = data.to_latex(position='ht') + print(table) + +Usage of keyword ``caption`` is extended. +Besides taking a single string as an argument, +one can optionally provide a tuple of ``(full_caption, short_caption)`` +to add a short caption macro. + +.. ipython:: python + + data = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) + table = data.to_latex(caption=('the full long caption', 'short caption')) + print(table) + .. _whatsnew_120.read_csv_table_precision_default: Change in default floating precision for ``read_csv`` and ``read_table`` diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 784e8877ef128..77be48ef29df8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3013,6 +3013,9 @@ def to_latex( .. versionchanged:: 1.0.0 Added caption and label arguments. + .. versionchanged:: 1.2.0 + Added position argument, changed meaning of caption argument. + Parameters ---------- buf : str, Path or StringIO-like, optional, default None @@ -3074,11 +3077,16 @@ def to_latex( centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. - caption : str, optional - The LaTeX caption to be placed inside ``\caption{{}}`` in the output. + caption : str or tuple, optional + Tuple (full_caption, short_caption), + which results in ``\caption[short_caption]{{full_caption}}``; + if a single string is passed, no short caption will be set. .. versionadded:: 1.0.0 + .. versionchanged:: 1.2.0 + Optionally allow caption to be a tuple ``(full_caption, short_caption)``. + label : str, optional The LaTeX label to be placed inside ``\label{{}}`` in the output. This is used with ``\ref{{}}`` in the main ``.tex`` file. @@ -3087,6 +3095,8 @@ def to_latex( position : str, optional The LaTeX positional argument for tables, to be placed after ``\begin{{}}`` in the output. + + .. versionadded:: 1.2.0 {returns} See Also -------- @@ -3097,8 +3107,8 @@ def to_latex( Examples -------- >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], - ... mask=['red', 'purple'], - ... weapon=['sai', 'bo staff'])) + ... mask=['red', 'purple'], + ... 
weapon=['sai', 'bo staff'])) >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE \begin{{tabular}}{{lll}} \toprule diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index dcd91b3a12294..7635cda56ba26 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1021,7 +1021,7 @@ def to_latex( multicolumn: bool = False, multicolumn_format: Optional[str] = None, multirow: bool = False, - caption: Optional[str] = None, + caption: Optional[Union[str, Tuple[str, str]]] = None, label: Optional[str] = None, position: Optional[str] = None, ) -> Optional[str]: diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index 170df193bef00..2eee0ce73291f 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -2,7 +2,7 @@ Module for formatting output data in Latex. """ from abc import ABC, abstractmethod -from typing import IO, Iterator, List, Optional, Type +from typing import IO, Iterator, List, Optional, Tuple, Type, Union import numpy as np @@ -11,6 +11,39 @@ from pandas.io.formats.format import DataFrameFormatter, TableFormatter +def _split_into_full_short_caption( + caption: Optional[Union[str, Tuple[str, str]]] +) -> Tuple[str, str]: + """Extract full and short captions from caption string/tuple. + + Parameters + ---------- + caption : str or tuple, optional + Either table caption string or tuple (full_caption, short_caption). + If string is provided, then it is treated as table full caption, + while short_caption is considered an empty string. + + Returns + ------- + full_caption, short_caption : tuple + Tuple of full_caption, short_caption strings. + """ + if caption: + if isinstance(caption, str): + full_caption = caption + short_caption = "" + else: + try: + full_caption, short_caption = caption + except ValueError as err: + msg = "caption must be either a string or a tuple of two strings" + raise ValueError(msg) from err + else: + full_caption = "" + short_caption = "" + return full_caption, short_caption + + class RowStringConverter(ABC): r"""Converter for dataframe rows into LaTeX strings. @@ -275,6 +308,8 @@ class TableBuilderAbstract(ABC): Use multirow to enhance MultiIndex rows. caption: str, optional Table caption. + short_caption: str, optional + Table short caption. label: str, optional LaTeX label. position: str, optional @@ -289,6 +324,7 @@ def __init__( multicolumn_format: Optional[str] = None, multirow: bool = False, caption: Optional[str] = None, + short_caption: Optional[str] = None, label: Optional[str] = None, position: Optional[str] = None, ): @@ -298,6 +334,7 @@ def __init__( self.multicolumn_format = multicolumn_format self.multirow = multirow self.caption = caption + self.short_caption = short_caption self.label = label self.position = position @@ -384,8 +421,23 @@ def _position_macro(self) -> str: @property def _caption_macro(self) -> str: - r"""Caption macro, extracted from self.caption, like \caption{cap}.""" - return f"\\caption{{{self.caption}}}" if self.caption else "" + r"""Caption macro, extracted from self.caption. + + With short caption: + \caption[short_caption]{caption_string}. + + Without short caption: + \caption{caption_string}. 
+ """ + if self.caption: + return "".join( + [ + r"\caption", + f"[{self.short_caption}]" if self.short_caption else "", + f"{{{self.caption}}}", + ] + ) + return "" @property def _label_macro(self) -> str: @@ -596,15 +648,32 @@ def env_end(self) -> str: class LatexFormatter(TableFormatter): - """ + r""" Used to render a DataFrame to a LaTeX tabular/longtable environment output. Parameters ---------- formatter : `DataFrameFormatter` + longtable : bool, default False + Use longtable environment. column_format : str, default None The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns + multicolumn : bool, default False + Use \multicolumn to enhance MultiIndex columns. + multicolumn_format : str, default 'l' + The alignment for multicolumns, similar to `column_format` + multirow : bool, default False + Use \multirow to enhance MultiIndex rows. + caption : str or tuple, optional + Tuple (full_caption, short_caption), + which results in \caption[short_caption]{full_caption}; + if a single string is passed, no short caption will be set. + label : str, optional + The LaTeX label to be placed inside ``\label{}`` in the output. + position : str, optional + The LaTeX positional argument for tables, to be placed after + ``\begin{}`` in the output. See Also -------- @@ -619,18 +688,18 @@ def __init__( multicolumn: bool = False, multicolumn_format: Optional[str] = None, multirow: bool = False, - caption: Optional[str] = None, + caption: Optional[Union[str, Tuple[str, str]]] = None, label: Optional[str] = None, position: Optional[str] = None, ): self.fmt = formatter self.frame = self.fmt.frame self.longtable = longtable - self.column_format = column_format # type: ignore[assignment] + self.column_format = column_format self.multicolumn = multicolumn self.multicolumn_format = multicolumn_format self.multirow = multirow - self.caption = caption + self.caption, self.short_caption = _split_into_full_short_caption(caption) self.label = label self.position = position @@ -658,6 +727,7 @@ def builder(self) -> TableBuilderAbstract: multicolumn_format=self.multicolumn_format, multirow=self.multirow, caption=self.caption, + short_caption=self.short_caption, label=self.label, position=self.position, ) @@ -671,7 +741,7 @@ def _select_builder(self) -> Type[TableBuilderAbstract]: return TabularBuilder @property - def column_format(self) -> str: + def column_format(self) -> Optional[str]: """Column format.""" return self._column_format diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index d3d865158309c..908fdea2f73d0 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -414,6 +414,11 @@ def caption_table(self): """Caption for table/tabular LaTeX environment.""" return "a table in a \\texttt{table/tabular} environment" + @pytest.fixture + def short_caption(self): + """Short caption for testing \\caption[short_caption]{full_caption}.""" + return "a table" + @pytest.fixture def label_table(self): """Label for table/tabular LaTeX environment.""" @@ -493,6 +498,107 @@ def test_to_latex_caption_and_label(self, df_short, caption_table, label_table): ) assert result == expected + def test_to_latex_caption_and_shortcaption( + self, + df_short, + caption_table, + short_caption, + ): + result = df_short.to_latex(caption=(caption_table, short_caption)) + expected = _dedent( + r""" + \begin{table} + \centering + \caption[a table]{a table in a 
\texttt{table/tabular} environment} + \begin{tabular}{lrl} + \toprule + {} & a & b \\ + \midrule + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ + \bottomrule + \end{tabular} + \end{table} + """ + ) + assert result == expected + + def test_to_latex_caption_and_shortcaption_list_is_ok(self, df_short): + caption = ("Long-long-caption", "Short") + result_tuple = df_short.to_latex(caption=caption) + result_list = df_short.to_latex(caption=list(caption)) + assert result_tuple == result_list + + def test_to_latex_caption_shortcaption_and_label( + self, + df_short, + caption_table, + short_caption, + label_table, + ): + # test when the short_caption is provided alongside caption and label + result = df_short.to_latex( + caption=(caption_table, short_caption), + label=label_table, + ) + expected = _dedent( + r""" + \begin{table} + \centering + \caption[a table]{a table in a \texttt{table/tabular} environment} + \label{tab:table_tabular} + \begin{tabular}{lrl} + \toprule + {} & a & b \\ + \midrule + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ + \bottomrule + \end{tabular} + \end{table} + """ + ) + assert result == expected + + @pytest.mark.parametrize( + "bad_caption", + [ + ("full_caption", "short_caption", "extra_string"), + ("full_caption", "short_caption", 1), + ("full_caption", "short_caption", None), + ("full_caption",), + (None,), + ], + ) + def test_to_latex_bad_caption_raises(self, bad_caption): + # test that wrong number of params is raised + df = pd.DataFrame({"a": [1]}) + msg = "caption must be either a string or a tuple of two strings" + with pytest.raises(ValueError, match=msg): + df.to_latex(caption=bad_caption) + + def test_to_latex_two_chars_caption(self, df_short): + # test that two chars caption is handled correctly + # it must not be unpacked into long_caption, short_caption. + result = df_short.to_latex(caption="xy") + expected = _dedent( + r""" + \begin{table} + \centering + \caption{xy} + \begin{tabular}{lrl} + \toprule + {} & a & b \\ + \midrule + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ + \bottomrule + \end{tabular} + \end{table} + """ + ) + assert result == expected + def test_to_latex_longtable_caption_only(self, df_short, caption_longtable): # GH 25436 # test when no caption and no label is provided @@ -595,6 +701,47 @@ def test_to_latex_longtable_caption_and_label( ) assert result == expected + def test_to_latex_longtable_caption_shortcaption_and_label( + self, + df_short, + caption_longtable, + short_caption, + label_longtable, + ): + # test when the caption, the short_caption and the label are provided + result = df_short.to_latex( + longtable=True, + caption=(caption_longtable, short_caption), + label=label_longtable, + ) + expected = _dedent( + r""" + \begin{longtable}{lrl} + \caption[a table]{a table in a \texttt{longtable} environment} + \label{tab:longtable}\\ + \toprule + {} & a & b \\ + \midrule + \endfirsthead + \caption[]{a table in a \texttt{longtable} environment} \\ + \toprule + {} & a & b \\ + \midrule + \endhead + \midrule + \multicolumn{3}{r}{{Continued on next page}} \\ + \midrule + \endfoot + + \bottomrule + \endlastfoot + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ + \end{longtable} + """ + ) + assert result == expected + class TestToLatexEscape: @pytest.fixture
- [x] closes #36267 - [x] tests added / passed - [x] passes black pandas - [x] passes git diff upstream/master -u -- "*.py" | flake8 --diff - [x] whatsnew entry Enable a short caption for ``DataFrame.to_latex`` by extending the meaning of the ``caption`` keyword. Optionally, ``caption`` can be a tuple ``(full_caption, short_caption)``. The resulting caption macro looks like this: ``` \caption[short_caption]{full_caption} ```
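For illustration, a minimal sketch of the extended API, following the whatsnew examples in this diff (the exact LaTeX output depends on the frame):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# Single string: full caption only, as before
print(df.to_latex(caption="the full long caption"))

# Tuple: adds the optional short caption -> \caption[short caption]{full ...}
print(df.to_latex(caption=("the full long caption", "short caption"), position="ht"))
```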
https://api.github.com/repos/pandas-dev/pandas/pulls/35668
2020-08-11T11:51:27Z
2020-10-17T16:23:33Z
2020-10-17T16:23:32Z
2020-11-06T15:34:10Z
Backport PR #35633 on branch 1.1.x (BUG: DataFrame.apply with func altering row in-place)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index e5860644fa371..415f9e508feb8 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) +- Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 6b8d7dc35fe95..6d44cf917a07a 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -389,6 +389,8 @@ def series_generator(self): blk = mgr.blocks[0] for (arr, name) in zip(values, self.index): + # GH#35462 re-pin mgr in case setitem changed it + ser._mgr = mgr blk.values = arr ser.name = name yield ser diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py index 3a32278e2a4b1..538978358c8e7 100644 --- a/pandas/tests/frame/apply/test_frame_apply.py +++ b/pandas/tests/frame/apply/test_frame_apply.py @@ -1522,3 +1522,22 @@ def test_apply_dtype(self, col): expected = df.dtypes tm.assert_series_equal(result, expected) + + +def test_apply_mutating(): + # GH#35462 case where applied func pins a new BlockManager to a row + df = pd.DataFrame({"a": range(100), "b": range(100, 200)}) + + def func(row): + mgr = row._mgr + row.loc["a"] += 1 + assert row._mgr is not mgr + return row + + expected = df.copy() + expected["a"] += 1 + + result = df.apply(func, axis=1) + + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(df, result)
Backport PR #35633: BUG: DataFrame.apply with func altering row in-place
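A minimal reproducer, adapted from the test added in this PR; before the fix, ``setitem`` re-pinned the row's BlockManager, so a function mutating its input in-place effectively operated on only a single row:

```python
import pandas as pd

df = pd.DataFrame({"a": range(100), "b": range(100, 200)})

def func(row):
    row.loc["a"] += 1  # mutates the row in place
    return row

result = df.apply(func, axis=1)
# With the fix, every row's "a" is incremented, not just the first one.
assert (result["a"] == pd.Series(range(1, 101))).all()
```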
https://api.github.com/repos/pandas-dev/pandas/pulls/35666
2020-08-11T08:40:57Z
2020-08-11T09:41:03Z
2020-08-11T09:41:03Z
2020-08-11T09:41:03Z
BUG: Styler cell_ids fails on multiple renders
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 98d67e930ccc0..3f177b29d52b8 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -33,7 +33,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`). +- Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`) (:issue:`35663`). Categorical ^^^^^^^^^^^ diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 584f42a6cab12..3bbb5271bce61 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -390,16 +390,16 @@ def format_attr(pair): "is_visible": (c not in hidden_columns), } # only add an id if the cell has a style + props = [] if self.cell_ids or (r, c) in ctx: row_dict["id"] = "_".join(cs[1:]) + for x in ctx[r, c]: + # have to handle empty styles like [''] + if x.count(":"): + props.append(tuple(x.split(":"))) + else: + props.append(("", "")) row_es.append(row_dict) - props = [] - for x in ctx[r, c]: - # have to handle empty styles like [''] - if x.count(":"): - props.append(tuple(x.split(":"))) - else: - props.append(("", "")) cellstyle_map[tuple(props)].append(f"row{r}_col{c}") body.append(row_es) diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index 3ef5157655e78..6025649e9dbec 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -1684,8 +1684,11 @@ def f(a, b, styler): def test_no_cell_ids(self): # GH 35588 + # GH 35663 df = pd.DataFrame(data=[[0]]) - s = Styler(df, uuid="_", cell_ids=False).render() + styler = Styler(df, uuid="_", cell_ids=False) + styler.render() + s = styler.render() # render twice to ensure ctx is not updated assert s.find('<td class="data row0 col0" >') != -1
- [x] closes #35663 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
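A condensed reproduction based on the updated test (jinja2 required for ``Styler``); with ``cell_ids=False``, a second render must not be affected by state accumulated during the first:

```python
import pandas as pd
from pandas.io.formats.style import Styler

df = pd.DataFrame(data=[[0]])
styler = Styler(df, uuid="_", cell_ids=False)

styler.render()         # first render
html = styler.render()  # render twice to ensure ctx is not updated
assert '<td class="data row0 col0" >' in html
```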
https://api.github.com/repos/pandas-dev/pandas/pulls/35664
2020-08-11T06:44:45Z
2020-08-14T12:36:10Z
2020-08-14T12:36:10Z
2020-08-15T16:28:19Z
Moto server
diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index a6552aa096a22..cc996f4077cd9 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -21,6 +21,7 @@ dependencies: - lxml - matplotlib>=3.3.0 - moto + - flask - nomkl - numexpr - numpy=1.16.* diff --git a/ci/deps/azure-37-slow.yaml b/ci/deps/azure-37-slow.yaml index e8ffd3d74ca5e..d17a8a2b0ed9b 100644 --- a/ci/deps/azure-37-slow.yaml +++ b/ci/deps/azure-37-slow.yaml @@ -27,9 +27,11 @@ dependencies: - python-dateutil - pytz - s3fs>=0.4.0 + - moto>=1.3.14 - scipy - sqlalchemy - xlrd - xlsxwriter - xlwt - moto + - flask diff --git a/ci/deps/azure-38-locale.yaml b/ci/deps/azure-38-locale.yaml index c7090d3a46a77..bb40127b672d3 100644 --- a/ci/deps/azure-38-locale.yaml +++ b/ci/deps/azure-38-locale.yaml @@ -14,6 +14,7 @@ dependencies: # pandas dependencies - beautifulsoup4 + - flask - html5lib - ipython - jinja2 @@ -32,6 +33,7 @@ dependencies: - xlrd - xlsxwriter - xlwt + - moto - pyarrow>=0.15 - pip - pip: diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index f4c238ab8b173..4894129915722 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -15,13 +15,14 @@ dependencies: # pandas dependencies - beautifulsoup4 - bottleneck - - fsspec>=0.7.4 + - fsspec>=0.8.0 - gcsfs>=0.6.0 - html5lib - jinja2 - lxml - matplotlib=2.2.* - - moto + - moto>=1.3.14 + - flask - numexpr - numpy=1.16.* - openpyxl @@ -29,7 +30,7 @@ dependencies: - pytables - python-dateutil - pytz - - s3fs>=0.4.0 + - s3fs>=0.4.2 - scipy - sqlalchemy - xlrd diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index 1f383164b5328..2853e12b28e35 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -16,7 +16,10 @@ dependencies: - blosc - bottleneck - fastparquet>=0.3.2 + - flask + - fsspec>=0.8.0 - matplotlib=3.1.3 + - moto>=1.3.14 - numba - numexpr - numpy=1.18.* @@ -26,6 +29,7 @@ dependencies: - pytables - python-dateutil - pytz + - s3fs>=0.4.0 - scipy - xlrd - xlsxwriter diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/travis-37-arm64.yaml index 5cb53489be225..ea29cbef1272b 100644 --- a/ci/deps/travis-37-arm64.yaml +++ b/ci/deps/travis-37-arm64.yaml @@ -17,5 +17,6 @@ dependencies: - python-dateutil - pytz - pip + - flask - pip: - moto diff --git a/ci/deps/travis-37-cov.yaml b/ci/deps/travis-37-cov.yaml index edc11bdf4ab35..33ee6dfffb1a3 100644 --- a/ci/deps/travis-37-cov.yaml +++ b/ci/deps/travis-37-cov.yaml @@ -23,7 +23,8 @@ dependencies: - geopandas - html5lib - matplotlib - - moto + - moto>=1.3.14 + - flask - nomkl - numexpr - numpy=1.16.* diff --git a/ci/deps/travis-37-locale.yaml b/ci/deps/travis-37-locale.yaml index 4427c1d940bf2..2cf0e12027401 100644 --- a/ci/deps/travis-37-locale.yaml +++ b/ci/deps/travis-37-locale.yaml @@ -21,12 +21,12 @@ dependencies: - jinja2 - lxml=4.3.0 - matplotlib=3.0.* - - moto - nomkl - numexpr - numpy - openpyxl - pandas-gbq=0.12.0 + - pyarrow>=0.17 - psycopg2=2.7 - pymysql=0.7.11 - pytables diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index e896233aac63c..26d6c2910a7cc 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -20,8 +20,8 @@ dependencies: - pyarrow - pytz - s3fs>=0.4.0 + - moto>=1.3.14 + - flask - tabulate - pyreadstat - pip - - pip: - - moto diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 8ec75b4846ae2..3e6ed1cdf8f7e 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -24,6 +24,9 @@ of 
the individual storage backends (detailed from the fsspec docs for `builtin implementations`_ and linked to `external ones`_). See Section :ref:`io.remote`. +:issue:`35655` added fsspec support (including ``storage_options``) +for reading excel files. + .. _builtin implementations: https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations .. _external ones: https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations diff --git a/environment.yml b/environment.yml index 806119631d5ee..6afc19c227512 100644 --- a/environment.yml +++ b/environment.yml @@ -51,6 +51,7 @@ dependencies: - botocore>=1.11 - hypothesis>=3.82 - moto # mock S3 + - flask - pytest>=5.0.1 - pytest-cov - pytest-xdist>=1.21 diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index b1bbda4a4b7e0..aaef71910c9ab 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -3,11 +3,12 @@ from io import BufferedIOBase, BytesIO, RawIOBase import os from textwrap import fill -from typing import Union +from typing import Any, Mapping, Union from pandas._config import config from pandas._libs.parsers import STR_NA_VALUES +from pandas._typing import StorageOptions from pandas.errors import EmptyDataError from pandas.util._decorators import Appender, deprecate_nonkeyword_arguments @@ -199,6 +200,15 @@ Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than 'X'...'X'. Passing in False will cause data to be overwritten if there are duplicate names in the columns. +storage_options : StorageOptions + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. 
versionadded:: 1.2.0 Returns ------- @@ -298,10 +308,11 @@ def read_excel( skipfooter=0, convert_float=True, mangle_dupe_cols=True, + storage_options: StorageOptions = None, ): if not isinstance(io, ExcelFile): - io = ExcelFile(io, engine=engine) + io = ExcelFile(io, storage_options=storage_options, engine=engine) elif engine and engine != io.engine: raise ValueError( "Engine should not be specified when passing " @@ -336,12 +347,14 @@ def read_excel( class _BaseExcelReader(metaclass=abc.ABCMeta): - def __init__(self, filepath_or_buffer): + def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None): # If filepath_or_buffer is a url, load the data into a BytesIO if is_url(filepath_or_buffer): filepath_or_buffer = BytesIO(urlopen(filepath_or_buffer).read()) elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)): - filepath_or_buffer, _, _, _ = get_filepath_or_buffer(filepath_or_buffer) + filepath_or_buffer, _, _, _ = get_filepath_or_buffer( + filepath_or_buffer, storage_options=storage_options + ) if isinstance(filepath_or_buffer, self._workbook_class): self.book = filepath_or_buffer @@ -837,14 +850,16 @@ class ExcelFile: from pandas.io.excel._pyxlsb import _PyxlsbReader from pandas.io.excel._xlrd import _XlrdReader - _engines = { + _engines: Mapping[str, Any] = { "xlrd": _XlrdReader, "openpyxl": _OpenpyxlReader, "odf": _ODFReader, "pyxlsb": _PyxlsbReader, } - def __init__(self, path_or_buffer, engine=None): + def __init__( + self, path_or_buffer, engine=None, storage_options: StorageOptions = None + ): if engine is None: engine = "xlrd" if isinstance(path_or_buffer, (BufferedIOBase, RawIOBase)): @@ -858,13 +873,14 @@ def __init__(self, path_or_buffer, engine=None): raise ValueError(f"Unknown engine: {engine}") self.engine = engine + self.storage_options = storage_options # Could be a str, ExcelFile, Book, etc. self.io = path_or_buffer # Always a string self._io = stringify_path(path_or_buffer) - self._reader = self._engines[engine](self._io) + self._reader = self._engines[engine](self._io, storage_options=storage_options) def __fspath__(self): return self._io diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 44abaf5d3b3c9..a6cd8f524503b 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -2,7 +2,7 @@ import numpy as np -from pandas._typing import FilePathOrBuffer, Scalar +from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions from pandas.compat._optional import import_optional_dependency import pandas as pd @@ -16,13 +16,19 @@ class _ODFReader(_BaseExcelReader): Parameters ---------- - filepath_or_buffer: string, path to be parsed or + filepath_or_buffer : string, path to be parsed or an open readable stream. 
+ storage_options : StorageOptions + passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ - def __init__(self, filepath_or_buffer: FilePathOrBuffer): + def __init__( + self, + filepath_or_buffer: FilePathOrBuffer, + storage_options: StorageOptions = None, + ): import_optional_dependency("odf") - super().__init__(filepath_or_buffer) + super().__init__(filepath_or_buffer, storage_options=storage_options) @property def _workbook_class(self): diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 03a30cbd62f9a..73239190604db 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -2,7 +2,7 @@ import numpy as np -from pandas._typing import FilePathOrBuffer, Scalar +from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions from pandas.compat._optional import import_optional_dependency from pandas.io.excel._base import ExcelWriter, _BaseExcelReader @@ -467,7 +467,11 @@ def write_cells( class _OpenpyxlReader(_BaseExcelReader): - def __init__(self, filepath_or_buffer: FilePathOrBuffer) -> None: + def __init__( + self, + filepath_or_buffer: FilePathOrBuffer, + storage_options: StorageOptions = None, + ) -> None: """ Reader using openpyxl engine. @@ -475,9 +479,11 @@ def __init__(self, filepath_or_buffer: FilePathOrBuffer) -> None: ---------- filepath_or_buffer : string, path object or Workbook Object to be parsed. + storage_options : StorageOptions + passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ import_optional_dependency("openpyxl") - super().__init__(filepath_or_buffer) + super().__init__(filepath_or_buffer, storage_options=storage_options) @property def _workbook_class(self): diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py index 0d96c8c4acdb8..c0e281ff6c2da 100644 --- a/pandas/io/excel/_pyxlsb.py +++ b/pandas/io/excel/_pyxlsb.py @@ -1,25 +1,31 @@ from typing import List -from pandas._typing import FilePathOrBuffer, Scalar +from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions from pandas.compat._optional import import_optional_dependency from pandas.io.excel._base import _BaseExcelReader class _PyxlsbReader(_BaseExcelReader): - def __init__(self, filepath_or_buffer: FilePathOrBuffer): + def __init__( + self, + filepath_or_buffer: FilePathOrBuffer, + storage_options: StorageOptions = None, + ): """ Reader using pyxlsb engine. Parameters ---------- - filepath_or_buffer: str, path object, or Workbook + filepath_or_buffer : str, path object, or Workbook Object to be parsed. + storage_options : StorageOptions + passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ import_optional_dependency("pyxlsb") # This will call load_workbook on the filepath or buffer # And set the result to the book-attribute - super().__init__(filepath_or_buffer) + super().__init__(filepath_or_buffer, storage_options=storage_options) @property def _workbook_class(self): diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py index af82c15fd6b66..ff1b3c8bdb964 100644 --- a/pandas/io/excel/_xlrd.py +++ b/pandas/io/excel/_xlrd.py @@ -2,13 +2,14 @@ import numpy as np +from pandas._typing import StorageOptions from pandas.compat._optional import import_optional_dependency from pandas.io.excel._base import _BaseExcelReader class _XlrdReader(_BaseExcelReader): - def __init__(self, filepath_or_buffer): + def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None): """ Reader using xlrd engine. 
@@ -16,10 +17,12 @@ def __init__(self, filepath_or_buffer): ---------- filepath_or_buffer : string, path object or Workbook Object to be parsed. + storage_options : StorageOptions + passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ err_msg = "Install xlrd >= 1.0.0 for Excel support" import_optional_dependency("xlrd", extra=err_msg) - super().__init__(filepath_or_buffer) + super().__init__(filepath_or_buffer, storage_options=storage_options) @property def _workbook_class(self): diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 2c664e73b9463..2d86fa44f22a4 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -1,5 +1,6 @@ """ feather-format compat """ +from pandas._typing import StorageOptions from pandas.compat._optional import import_optional_dependency from pandas import DataFrame, Int64Index, RangeIndex @@ -7,7 +8,7 @@ from pandas.io.common import get_filepath_or_buffer -def to_feather(df: DataFrame, path, storage_options=None, **kwargs): +def to_feather(df: DataFrame, path, storage_options: StorageOptions = None, **kwargs): """ Write a DataFrame to the binary Feather format. @@ -77,7 +78,9 @@ def to_feather(df: DataFrame, path, storage_options=None, **kwargs): feather.write_feather(df, path, **kwargs) -def read_feather(path, columns=None, use_threads: bool = True, storage_options=None): +def read_feather( + path, columns=None, use_threads: bool = True, storage_options: StorageOptions = None +): """ Load a feather-format object from the file path. diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 5d49757ce7d58..983aa56324083 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -20,7 +20,7 @@ import pandas._libs.parsers as parsers from pandas._libs.parsers import STR_NA_VALUES from pandas._libs.tslibs import parsing -from pandas._typing import FilePathOrBuffer, Union +from pandas._typing import FilePathOrBuffer, StorageOptions, Union from pandas.errors import ( AbstractMethodError, EmptyDataError, @@ -596,7 +596,7 @@ def read_csv( low_memory=_c_parser_defaults["low_memory"], memory_map=False, float_precision=None, - storage_options=None, + storage_options: StorageOptions = None, ): # gh-23761 # diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index fcee25c258efa..518f31d73efa9 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -1,4 +1,7 @@ import os +import shlex +import subprocess +import time import pytest @@ -31,10 +34,62 @@ def feather_file(datapath): @pytest.fixture -def s3_resource(tips_file, jsonl_file, feather_file): +def s3so(): + return dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}) + + +@pytest.fixture(scope="module") +def s3_base(): """ Fixture for mocking S3 interaction. 
+ Sets up moto server in separate process + """ + pytest.importorskip("s3fs") + pytest.importorskip("boto3") + requests = pytest.importorskip("requests") + + with tm.ensure_safe_environment_variables(): + # temporary workaround as moto fails for botocore >= 1.11 otherwise, + # see https://github.com/spulec/moto/issues/1924 & 1952 + os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key") + os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret") + + pytest.importorskip("moto", minversion="1.3.14") + pytest.importorskip("flask") # server mode needs flask too + + # Launching moto in server mode, i.e., as a separate process + # with an S3 endpoint on localhost + + endpoint_uri = "http://127.0.0.1:5555/" + + # pipe to null to avoid logging in terminal + proc = subprocess.Popen( + shlex.split("moto_server s3 -p 5555"), stdout=subprocess.DEVNULL + ) + + timeout = 5 + while timeout > 0: + try: + # OK to go once server is accepting connections + r = requests.get(endpoint_uri) + if r.ok: + break + except Exception: + pass + timeout -= 0.1 + time.sleep(0.1) + yield + + proc.terminate() + proc.wait() + + +@pytest.fixture() +def s3_resource(s3_base, tips_file, jsonl_file, feather_file): + """ + Sets up S3 bucket with contents + The primary bucket name is "pandas-test". The following datasets are loaded. @@ -46,45 +101,59 @@ def s3_resource(tips_file, jsonl_file, feather_file): A private bucket "cant_get_it" is also created. The boto3 s3 resource is yielded by the fixture. """ - s3fs = pytest.importorskip("s3fs") - boto3 = pytest.importorskip("boto3") - - with tm.ensure_safe_environment_variables(): - # temporary workaround as moto fails for botocore >= 1.11 otherwise, - # see https://github.com/spulec/moto/issues/1924 & 1952 - os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key") - os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret") - - moto = pytest.importorskip("moto") - - test_s3_files = [ - ("tips#1.csv", tips_file), - ("tips.csv", tips_file), - ("tips.csv.gz", tips_file + ".gz"), - ("tips.csv.bz2", tips_file + ".bz2"), - ("items.jsonl", jsonl_file), - ("simple_dataset.feather", feather_file), - ] - - def add_tips_files(bucket_name): - for s3_key, file_name in test_s3_files: - with open(file_name, "rb") as f: - conn.Bucket(bucket_name).put_object(Key=s3_key, Body=f) - - try: - s3 = moto.mock_s3() - s3.start() - - # see gh-16135 - bucket = "pandas-test" - conn = boto3.resource("s3", region_name="us-east-1") - - conn.create_bucket(Bucket=bucket) - add_tips_files(bucket) - - conn.create_bucket(Bucket="cant_get_it", ACL="private") - add_tips_files("cant_get_it") - s3fs.S3FileSystem.clear_instance_cache() - yield conn - finally: - s3.stop() + import boto3 + import s3fs + + test_s3_files = [ + ("tips#1.csv", tips_file), + ("tips.csv", tips_file), + ("tips.csv.gz", tips_file + ".gz"), + ("tips.csv.bz2", tips_file + ".bz2"), + ("items.jsonl", jsonl_file), + ("simple_dataset.feather", feather_file), + ] + + def add_tips_files(bucket_name): + for s3_key, file_name in test_s3_files: + with open(file_name, "rb") as f: + cli.put_object(Bucket=bucket_name, Key=s3_key, Body=f) + + bucket = "pandas-test" + endpoint_uri = "http://127.0.0.1:5555/" + conn = boto3.resource("s3", endpoint_url=endpoint_uri) + cli = boto3.client("s3", endpoint_url=endpoint_uri) + + try: + cli.create_bucket(Bucket=bucket) + except: # noqa + # OK is bucket already exists + pass + try: + cli.create_bucket(Bucket="cant_get_it", ACL="private") + except: # noqa + # OK is bucket already exists + pass + timeout = 2 + while 
not cli.list_buckets()["Buckets"] and timeout > 0: + time.sleep(0.1) + timeout -= 0.1 + + add_tips_files(bucket) + add_tips_files("cant_get_it") + s3fs.S3FileSystem.clear_instance_cache() + yield conn + + s3 = s3fs.S3FileSystem(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}) + + try: + s3.rm(bucket, recursive=True) + except: # noqa + pass + try: + s3.rm("cant_get_it", recursive=True) + except: # noqa + pass + timeout = 2 + while cli.list_buckets()["Buckets"] and timeout > 0: + time.sleep(0.1) + timeout -= 0.1 diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 51fbbf836a03f..431a50477fccc 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -606,13 +606,14 @@ def test_read_from_http_url(self, read_ext): tm.assert_frame_equal(url_table, local_table) @td.skip_if_not_us_locale - def test_read_from_s3_url(self, read_ext, s3_resource): + def test_read_from_s3_url(self, read_ext, s3_resource, s3so): # Bucket "pandas-test" created in tests/io/conftest.py with open("test1" + read_ext, "rb") as f: s3_resource.Bucket("pandas-test").put_object(Key="test1" + read_ext, Body=f) url = "s3://pandas-test/test1" + read_ext - url_table = pd.read_excel(url) + + url_table = pd.read_excel(url, storage_options=s3so) local_table = pd.read_excel("test1" + read_ext) tm.assert_frame_equal(url_table, local_table) diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index 182c21ed1d416..5bb205842269e 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -44,7 +44,11 @@ def test_with_s3_url(compression, s3_resource): with open(path, "rb") as f: s3_resource.Bucket("pandas-test").put_object(Key="test-1", Body=f) - roundtripped_df = pd.read_json("s3://pandas-test/test-1", compression=compression) + roundtripped_df = pd.read_json( + "s3://pandas-test/test-1", + compression=compression, + storage_options=dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}), + ) tm.assert_frame_equal(df, roundtripped_df) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 1280d0fd434d5..64a666079876f 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1213,10 +1213,12 @@ def test_read_inline_jsonl(self): tm.assert_frame_equal(result, expected) @td.skip_if_not_us_locale - def test_read_s3_jsonl(self, s3_resource): + def test_read_s3_jsonl(self, s3_resource, s3so): # GH17200 - result = read_json("s3n://pandas-test/items.jsonl", lines=True) + result = read_json( + "s3n://pandas-test/items.jsonl", lines=True, storage_options=s3so + ) expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) tm.assert_frame_equal(result, expected) @@ -1706,7 +1708,12 @@ def test_to_s3(self, s3_resource): # GH 28375 mock_bucket_name, target_file = "pandas-test", "test.json" df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]}) - df.to_json(f"s3://{mock_bucket_name}/{target_file}") + df.to_json( + f"s3://{mock_bucket_name}/{target_file}", + storage_options=dict( + client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"} + ), + ) timeout = 5 while True: if target_file in ( diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index b30a7b1ef34de..b8b03cbd14a1d 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -71,50 +71,62 @@ def tips_df(datapath): @td.skip_if_not_us_locale() class TestS3: 
@td.skip_if_no("s3fs") - def test_parse_public_s3_bucket(self, tips_df): + def test_parse_public_s3_bucket(self, tips_df, s3so): # more of an integration test due to the not-public contents portion # can probably mock this though. for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: - df = read_csv("s3://pandas-test/tips.csv" + ext, compression=comp) + df = read_csv( + "s3://pandas-test/tips.csv" + ext, + compression=comp, + storage_options=s3so, + ) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(df, tips_df) # Read public file from bucket with not-public contents - df = read_csv("s3://cant_get_it/tips.csv") + df = read_csv("s3://cant_get_it/tips.csv", storage_options=s3so) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(df, tips_df) - def test_parse_public_s3n_bucket(self, tips_df): + def test_parse_public_s3n_bucket(self, tips_df, s3so): # Read from AWS s3 as "s3n" URL - df = read_csv("s3n://pandas-test/tips.csv", nrows=10) + df = read_csv("s3n://pandas-test/tips.csv", nrows=10, storage_options=s3so) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3a_bucket(self, tips_df): + def test_parse_public_s3a_bucket(self, tips_df, s3so): # Read from AWS s3 as "s3a" URL - df = read_csv("s3a://pandas-test/tips.csv", nrows=10) + df = read_csv("s3a://pandas-test/tips.csv", nrows=10, storage_options=s3so) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3_bucket_nrows(self, tips_df): + def test_parse_public_s3_bucket_nrows(self, tips_df, s3so): for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: - df = read_csv("s3://pandas-test/tips.csv" + ext, nrows=10, compression=comp) + df = read_csv( + "s3://pandas-test/tips.csv" + ext, + nrows=10, + compression=comp, + storage_options=s3so, + ) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3_bucket_chunked(self, tips_df): + def test_parse_public_s3_bucket_chunked(self, tips_df, s3so): # Read with a chunksize chunksize = 5 for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: df_reader = read_csv( - "s3://pandas-test/tips.csv" + ext, chunksize=chunksize, compression=comp + "s3://pandas-test/tips.csv" + ext, + chunksize=chunksize, + compression=comp, + storage_options=s3so, ) assert df_reader.chunksize == chunksize for i_chunk in [0, 1, 2]: @@ -126,7 +138,7 @@ def test_parse_public_s3_bucket_chunked(self, tips_df): true_df = tips_df.iloc[chunksize * i_chunk : chunksize * (i_chunk + 1)] tm.assert_frame_equal(true_df, df) - def test_parse_public_s3_bucket_chunked_python(self, tips_df): + def test_parse_public_s3_bucket_chunked_python(self, tips_df, s3so): # Read with a chunksize using the Python parser chunksize = 5 for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: @@ -135,6 +147,7 @@ def test_parse_public_s3_bucket_chunked_python(self, tips_df): chunksize=chunksize, compression=comp, engine="python", + storage_options=s3so, ) assert df_reader.chunksize == chunksize for i_chunk in [0, 1, 2]: @@ -145,46 +158,53 @@ def test_parse_public_s3_bucket_chunked_python(self, tips_df): true_df = tips_df.iloc[chunksize * i_chunk : chunksize * (i_chunk + 1)] tm.assert_frame_equal(true_df, df) - def test_parse_public_s3_bucket_python(self, tips_df): + def test_parse_public_s3_bucket_python(self, tips_df, s3so): for ext, comp in [("", None), (".gz", 
"gzip"), (".bz2", "bz2")]: df = read_csv( - "s3://pandas-test/tips.csv" + ext, engine="python", compression=comp + "s3://pandas-test/tips.csv" + ext, + engine="python", + compression=comp, + storage_options=s3so, ) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(df, tips_df) - def test_infer_s3_compression(self, tips_df): + def test_infer_s3_compression(self, tips_df, s3so): for ext in ["", ".gz", ".bz2"]: df = read_csv( - "s3://pandas-test/tips.csv" + ext, engine="python", compression="infer" + "s3://pandas-test/tips.csv" + ext, + engine="python", + compression="infer", + storage_options=s3so, ) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(df, tips_df) - def test_parse_public_s3_bucket_nrows_python(self, tips_df): + def test_parse_public_s3_bucket_nrows_python(self, tips_df, s3so): for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: df = read_csv( "s3://pandas-test/tips.csv" + ext, engine="python", nrows=10, compression=comp, + storage_options=s3so, ) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_read_s3_fails(self): + def test_read_s3_fails(self, s3so): with pytest.raises(IOError): - read_csv("s3://nyqpug/asdf.csv") + read_csv("s3://nyqpug/asdf.csv", storage_options=s3so) # Receive a permission error when trying to read a private bucket. # It's irrelevant here that this isn't actually a table. with pytest.raises(IOError): read_csv("s3://cant_get_it/file.csv") - def test_write_s3_csv_fails(self, tips_df): + def test_write_s3_csv_fails(self, tips_df, s3so): # GH 32486 # Attempting to write to an invalid S3 path should raise import botocore @@ -195,10 +215,12 @@ def test_write_s3_csv_fails(self, tips_df): error = (FileNotFoundError, botocore.exceptions.ClientError) with pytest.raises(error, match="The specified bucket does not exist"): - tips_df.to_csv("s3://an_s3_bucket_data_doesnt_exit/not_real.csv") + tips_df.to_csv( + "s3://an_s3_bucket_data_doesnt_exit/not_real.csv", storage_options=s3so + ) @td.skip_if_no("pyarrow") - def test_write_s3_parquet_fails(self, tips_df): + def test_write_s3_parquet_fails(self, tips_df, s3so): # GH 27679 # Attempting to write to an invalid S3 path should raise import botocore @@ -209,7 +231,10 @@ def test_write_s3_parquet_fails(self, tips_df): error = (FileNotFoundError, botocore.exceptions.ClientError) with pytest.raises(error, match="The specified bucket does not exist"): - tips_df.to_parquet("s3://an_s3_bucket_data_doesnt_exit/not_real.parquet") + tips_df.to_parquet( + "s3://an_s3_bucket_data_doesnt_exit/not_real.parquet", + storage_options=s3so, + ) def test_read_csv_handles_boto_s3_object(self, s3_resource, tips_file): # see gh-16135 @@ -225,7 +250,7 @@ def test_read_csv_handles_boto_s3_object(self, s3_resource, tips_file): expected = read_csv(tips_file) tm.assert_frame_equal(result, expected) - def test_read_csv_chunked_download(self, s3_resource, caplog): + def test_read_csv_chunked_download(self, s3_resource, caplog, s3so): # 8 MB, S3FS usees 5MB chunks import s3fs @@ -245,18 +270,20 @@ def test_read_csv_chunked_download(self, s3_resource, caplog): s3fs.S3FileSystem.clear_instance_cache() with caplog.at_level(logging.DEBUG, logger="s3fs"): - read_csv("s3://pandas-test/large-file.csv", nrows=5) + read_csv("s3://pandas-test/large-file.csv", nrows=5, storage_options=s3so) # log of fetch_range (start, stop) assert (0, 5505024) in (x.args[-2:] for x in caplog.records) - def test_read_s3_with_hash_in_key(self, tips_df): + def 
test_read_s3_with_hash_in_key(self, tips_df, s3so): # GH 25945 - result = read_csv("s3://pandas-test/tips#1.csv") + result = read_csv("s3://pandas-test/tips#1.csv", storage_options=s3so) tm.assert_frame_equal(tips_df, result) @td.skip_if_no("pyarrow") - def test_read_feather_s3_file_path(self, feather_file): + def test_read_feather_s3_file_path(self, feather_file, s3so): # GH 29055 expected = read_feather(feather_file) - res = read_feather("s3://pandas-test/simple_dataset.feather") + res = read_feather( + "s3://pandas-test/simple_dataset.feather", storage_options=s3so + ) tm.assert_frame_equal(expected, res) diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py index 3e89f6ca4ae16..666da677d702e 100644 --- a/pandas/tests/io/test_fsspec.py +++ b/pandas/tests/io/test_fsspec.py @@ -131,27 +131,38 @@ def test_fastparquet_options(fsspectest): @td.skip_if_no("s3fs") -def test_from_s3_csv(s3_resource, tips_file): - tm.assert_equal(read_csv("s3://pandas-test/tips.csv"), read_csv(tips_file)) +def test_from_s3_csv(s3_resource, tips_file, s3so): + tm.assert_equal( + read_csv("s3://pandas-test/tips.csv", storage_options=s3so), read_csv(tips_file) + ) # the following are decompressed by pandas, not fsspec - tm.assert_equal(read_csv("s3://pandas-test/tips.csv.gz"), read_csv(tips_file)) - tm.assert_equal(read_csv("s3://pandas-test/tips.csv.bz2"), read_csv(tips_file)) + tm.assert_equal( + read_csv("s3://pandas-test/tips.csv.gz", storage_options=s3so), + read_csv(tips_file), + ) + tm.assert_equal( + read_csv("s3://pandas-test/tips.csv.bz2", storage_options=s3so), + read_csv(tips_file), + ) @pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"]) @td.skip_if_no("s3fs") -def test_s3_protocols(s3_resource, tips_file, protocol): +def test_s3_protocols(s3_resource, tips_file, protocol, s3so): tm.assert_equal( - read_csv("%s://pandas-test/tips.csv" % protocol), read_csv(tips_file) + read_csv("%s://pandas-test/tips.csv" % protocol, storage_options=s3so), + read_csv(tips_file), ) @td.skip_if_no("s3fs") @td.skip_if_no("fastparquet") -def test_s3_parquet(s3_resource): +def test_s3_parquet(s3_resource, s3so): fn = "s3://pandas-test/test.parquet" - df1.to_parquet(fn, index=False, engine="fastparquet", compression=None) - df2 = read_parquet(fn, engine="fastparquet") + df1.to_parquet( + fn, index=False, engine="fastparquet", compression=None, storage_options=s3so + ) + df2 = read_parquet(fn, engine="fastparquet", storage_options=s3so) tm.assert_equal(df1, df2) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 82157f3d722a9..3a3ba99484a3a 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -158,6 +158,10 @@ def check_round_trip( """ write_kwargs = write_kwargs or {"compression": None} read_kwargs = read_kwargs or {} + if isinstance(path, str) and "s3://" in path: + s3so = dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}) + read_kwargs["storage_options"] = s3so + write_kwargs["storage_options"] = s3so if expected is None: expected = df @@ -537,9 +541,11 @@ def test_categorical(self, pa): expected = df.astype(object) check_round_trip(df, pa, expected=expected) - def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa): + def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa, s3so): s3fs = pytest.importorskip("s3fs") - s3 = s3fs.S3FileSystem() + if LooseVersion(pyarrow.__version__) <= LooseVersion("0.17.0"): + pytest.skip() + s3 = s3fs.S3FileSystem(**s3so) kw = dict(filesystem=s3) 
check_round_trip( df_compat, @@ -550,6 +556,8 @@ def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa): ) def test_s3_roundtrip(self, df_compat, s3_resource, pa): + if LooseVersion(pyarrow.__version__) <= LooseVersion("0.17.0"): + pytest.skip() # GH #19134 check_round_trip(df_compat, pa, path="s3://pandas-test/pyarrow.parquet") @@ -560,10 +568,13 @@ def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col): # https://github.com/apache/arrow/blob/master/python/pyarrow/tests/test_parquet.py#L2716 # As per pyarrow partitioned columns become 'categorical' dtypes # and are added to back of dataframe on read + if partition_col and pd.compat.is_platform_windows(): + pytest.skip("pyarrow/win incompatibility #35791") expected_df = df_compat.copy() if partition_col: expected_df[partition_col] = expected_df[partition_col].astype("category") + check_round_trip( df_compat, pa, diff --git a/requirements-dev.txt b/requirements-dev.txt index deaed8ab9d5f1..2fbb20ddfd3bf 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -32,6 +32,7 @@ boto3 botocore>=1.11 hypothesis>=3.82 moto +flask pytest>=5.0.1 pytest-cov pytest-xdist>=1.21
Changes moto for the S3 tests from monkeypatched mocking to server mode. This allows aiobotocore exceptions to propagate correctly, which is needed for the tests to pass; the change is required by the upcoming async release of s3fs, but also works with the old sync (botocore) version. A release of s3fs without this change would break the pandas tests. Note: since the tests now need ``storage_options`` (giving the endpoint of the moto S3 server) passed to the various S3 calls, ``storage_options`` also had to be plumbed through the Excel IO, where it was previously missing, and the Excel tests updated accordingly. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
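A condensed sketch of the fixture logic (it assumes the "pandas-test" bucket has been created and populated, as done in ``tests/io/conftest.py``): the moto server runs in a separate process, and every S3-touching call is pointed at it via ``storage_options``:

```python
import shlex
import subprocess

import pandas as pd

# Launch moto in server mode instead of monkeypatching botocore
proc = subprocess.Popen(
    shlex.split("moto_server s3 -p 5555"), stdout=subprocess.DEVNULL
)

# Each S3 call now needs the endpoint of the local server
s3so = dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"})
df = pd.read_csv("s3://pandas-test/tips.csv", storage_options=s3so)

proc.terminate()
proc.wait()
```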
https://api.github.com/repos/pandas-dev/pandas/pulls/35655
2020-08-10T15:40:01Z
2020-08-21T20:14:03Z
2020-08-21T20:14:03Z
2020-08-22T19:26:28Z
BUG: GH-35558 merge_asof tolerance error
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index cdc244ca193b4..b37103910afab 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) +- Fixed regression where :meth:`DataFrame.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 27b331babe692..2349cb1dcc0c7 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1667,7 +1667,7 @@ def _get_merge_keys(self): msg = ( f"incompatible tolerance {self.tolerance}, must be compat " - f"with type {repr(lk.dtype)}" + f"with type {repr(lt.dtype)}" ) if needs_i8_conversion(lt): diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 9b09f0033715d..895de2b748c34 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -1339,3 +1339,25 @@ def test_merge_index_column_tz(self): index=pd.Index([0, 1, 2, 3, 4]), ) tm.assert_frame_equal(result, expected) + + def test_left_index_right_index_tolerance(self): + # https://github.com/pandas-dev/pandas/issues/35558 + dr1 = pd.date_range( + start="1/1/2020", end="1/20/2020", freq="2D" + ) + pd.Timedelta(seconds=0.4) + dr2 = pd.date_range(start="1/1/2020", end="2/1/2020") + + df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1)) + df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2)) + + expected = pd.DataFrame( + {"val1": "foo", "val2": "bar"}, index=pd.DatetimeIndex(dr1) + ) + result = pd.merge_asof( + df1, + df2, + left_index=True, + right_index=True, + tolerance=pd.Timedelta(seconds=0.5), + ) + tm.assert_frame_equal(result, expected)
- [x] closes #35558 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
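A minimal reproducer, mirroring the regression test; before the fix, building the tolerance error message referenced ``lk`` (undefined when merging on indexes) instead of ``lt``, raising ``UnboundLocalError``:

```python
import pandas as pd

dr1 = pd.date_range(start="1/1/2020", end="1/20/2020", freq="2D") + pd.Timedelta(seconds=0.4)
dr2 = pd.date_range(start="1/1/2020", end="2/1/2020")

df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1))
df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2))

# Previously raised UnboundLocalError; now merges within the tolerance
result = pd.merge_asof(
    df1,
    df2,
    left_index=True,
    right_index=True,
    tolerance=pd.Timedelta(seconds=0.5),
)
```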
https://api.github.com/repos/pandas-dev/pandas/pulls/35654
2020-08-10T15:07:53Z
2020-08-13T10:15:52Z
2020-08-13T10:15:52Z
2020-08-13T10:17:17Z
Backport PR #35522 on branch 1.1.x (BUG: Fix assert_equal when check_exact=True for non-numeric dtypes #3…)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index f0ad9d1ca3b0f..2fa4c12d24172 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -17,6 +17,7 @@ Fixed regressions - Fixed regression where :meth:`DataFrame.to_numpy` would raise a ``RuntimeError`` for mixed dtypes when converting to ``str`` (:issue:`35455`) - Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`). +- Fixed regression where :func:`pandas.testing.assert_series_equal` would raise an error when non-numeric dtypes were passed with ``check_exact=True`` (:issue:`35446`) - Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) diff --git a/pandas/_testing.py b/pandas/_testing.py index a020fbff3553a..713f29466f097 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -1339,10 +1339,8 @@ def assert_series_equal( else: assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}") - if check_exact: - if not is_numeric_dtype(left.dtype): - raise AssertionError("check_exact may only be used with numeric Series") - + if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype): + # Only check exact if dtype is numeric assert_numpy_array_equal( left._values, right._values, diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index 1284cc9d4f49b..a7b5aeac560e4 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -281,3 +281,18 @@ class MySeries(Series): with pytest.raises(AssertionError, match="Series classes are different"): tm.assert_series_equal(s3, s1, check_series_type=True) + + +def test_series_equal_exact_for_nonnumeric(): + # https://github.com/pandas-dev/pandas/issues/35446 + s1 = Series(["a", "b"]) + s2 = Series(["a", "b"]) + s3 = Series(["b", "a"]) + + tm.assert_series_equal(s1, s2, check_exact=True) + tm.assert_series_equal(s2, s1, check_exact=True) + + with pytest.raises(AssertionError): + tm.assert_series_equal(s1, s3, check_exact=True) + with pytest.raises(AssertionError): + tm.assert_series_equal(s3, s1, check_exact=True)
Backport PR #35522: BUG: Fix assert_equal when check_exact=True for non-numeric dtypes #3…
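In brief, per the test added here: ``check_exact=True`` no longer raises for non-numeric dtypes and instead falls through to the regular equality check:

```python
import pandas as pd
import pandas._testing as tm

s1 = pd.Series(["a", "b"])
s2 = pd.Series(["a", "b"])

# Previously: AssertionError("check_exact may only be used with numeric Series")
tm.assert_series_equal(s1, s2, check_exact=True)
```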
https://api.github.com/repos/pandas-dev/pandas/pulls/35652
2020-08-10T13:32:51Z
2020-08-10T14:53:32Z
2020-08-10T14:53:32Z
2020-08-10T14:54:15Z
Backport PR #35639 on branch 1.1.x (BUG: RollingGroupby with closed and column selection no longer raises ValueError)
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index f0ad9d1ca3b0f..7f5182e3eaa6f 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -51,6 +51,10 @@ Categorical - - +**Groupby/resample/rolling** + +- Bug in :class:`pandas.core.groupby.RollingGroupby` where passing ``closed`` with column selection would raise a ``ValueError`` (:issue:`35549`) + **Plotting** - diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index 58e7841d4dde5..51a067427e867 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -52,7 +52,7 @@ def __init__(self, obj, *args, **kwargs): kwargs.pop("parent", None) groupby = kwargs.pop("groupby", None) if groupby is None: - groupby, obj = obj, obj.obj + groupby, obj = obj, obj._selected_obj self._groupby = groupby self._groupby.mutated = True self._groupby.grouper.mutated = True diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 87bcaa7d9512f..ea03a7f2f8162 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2196,7 +2196,7 @@ def _apply( # Cannot use _wrap_outputs because we calculate the result all at once # Compose MultiIndex result from grouping levels then rolling level # Aggregate the MultiIndex data as tuples then the level names - grouped_object_index = self._groupby._selected_obj.index + grouped_object_index = self.obj.index grouped_index_name = [grouped_object_index.name] groupby_keys = [grouping.name for grouping in self._groupby.grouper._groupings] result_index_names = groupby_keys + grouped_index_name @@ -2220,10 +2220,6 @@ def _apply( def _constructor(self): return Rolling - @cache_readonly - def _selected_obj(self): - return self._groupby._selected_obj - def _create_blocks(self, obj: FrameOrSeries): """ Split data into blocks & return conformed data. 
@@ -2262,7 +2258,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: rolling_indexer: Union[Type[FixedWindowIndexer], Type[VariableWindowIndexer]] if self.is_freq_type: rolling_indexer = VariableWindowIndexer - index_array = self._groupby._selected_obj.index.asi8 + index_array = self.obj.index.asi8 else: rolling_indexer = FixedWindowIndexer index_array = None @@ -2279,7 +2275,7 @@ def _gotitem(self, key, ndim, subset=None): # here so our index is carried thru to the selected obj # when we do the splitting for the groupby if self.on is not None: - self._groupby.obj = self._groupby.obj.set_index(self._on) + self.obj = self.obj.set_index(self._on) self.on = None return super()._gotitem(key, ndim, subset=subset) diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index 5241b9548a442..e1dcac06c39cc 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -304,3 +304,54 @@ def test_groupby_subselect_rolling(self): name="b", ) tm.assert_series_equal(result, expected) + + def test_groupby_rolling_subset_with_closed(self): + # GH 35549 + df = pd.DataFrame( + { + "column1": range(6), + "column2": range(6), + "group": 3 * ["A", "B"], + "date": [pd.Timestamp("2019-01-01")] * 6, + } + ) + result = ( + df.groupby("group").rolling("1D", on="date", closed="left")["column1"].sum() + ) + expected = Series( + [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0], + index=pd.MultiIndex.from_tuples( + [("A", pd.Timestamp("2019-01-01"))] * 3 + + [("B", pd.Timestamp("2019-01-01"))] * 3, + names=["group", "date"], + ), + name="column1", + ) + tm.assert_series_equal(result, expected) + + def test_groupby_subset_rolling_subset_with_closed(self): + # GH 35549 + df = pd.DataFrame( + { + "column1": range(6), + "column2": range(6), + "group": 3 * ["A", "B"], + "date": [pd.Timestamp("2019-01-01")] * 6, + } + ) + + result = ( + df.groupby("group")[["column1", "date"]] + .rolling("1D", on="date", closed="left")["column1"] + .sum() + ) + expected = Series( + [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0], + index=pd.MultiIndex.from_tuples( + [("A", pd.Timestamp("2019-01-01"))] * 3 + + [("B", pd.Timestamp("2019-01-01"))] * 3, + names=["group", "date"], + ), + name="column1", + ) + tm.assert_series_equal(result, expected)
Backport PR #35639: BUG: RollingGroupby with closed and column selection no longer raises ValueError
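A condensed version of the new test; before the fix, combining ``closed`` with column selection raised a ``ValueError``:

```python
import pandas as pd

df = pd.DataFrame(
    {
        "column1": range(6),
        "group": 3 * ["A", "B"],
        "date": [pd.Timestamp("2019-01-01")] * 6,
    }
)

# Column selection together with `closed` no longer raises
result = (
    df.groupby("group")
    .rolling("1D", on="date", closed="left")["column1"]
    .sum()
)
```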
https://api.github.com/repos/pandas-dev/pandas/pulls/35651
2020-08-10T13:32:34Z
2020-08-10T14:48:34Z
2020-08-10T14:48:34Z
2020-08-10T14:48:34Z
Refactor tables latex
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index 5d6f0a08ef2b5..715b8bbdf5672 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -121,10 +121,7 @@ def pad_empties(x): else: column_format = self.column_format - if self.longtable: - self._write_longtable_begin(buf, column_format) - else: - self._write_tabular_begin(buf, column_format) + self._write_tabular_begin(buf, column_format) buf.write("\\toprule\n") @@ -190,10 +187,7 @@ def pad_empties(x): if self.multirow and i < len(strrows) - 1: self._print_cline(buf, i, len(strcols)) - if self.longtable: - self._write_longtable_end(buf) - else: - self._write_tabular_end(buf) + self._write_tabular_end(buf) def _format_multicolumn(self, row: List[str], ilevels: int) -> List[str]: r""" @@ -288,7 +282,7 @@ def _write_tabular_begin(self, buf, column_format: str): for 3 columns """ if self._table_float: - # then write output in a nested table/tabular environment + # then write output in a nested table/tabular or longtable environment if self.caption is None: caption_ = "" else: @@ -304,12 +298,27 @@ def _write_tabular_begin(self, buf, column_format: str): else: position_ = f"[{self.position}]" - buf.write(f"\\begin{{table}}{position_}\n\\centering{caption_}{label_}\n") + if self.longtable: + table_ = f"\\begin{{longtable}}{position_}{{{column_format}}}" + tabular_ = "\n" + else: + table_ = f"\\begin{{table}}{position_}\n\\centering" + tabular_ = f"\n\\begin{{tabular}}{{{column_format}}}\n" + + if self.longtable and (self.caption is not None or self.label is not None): + # a double-backslash is required at the end of the line + # as discussed here: + # https://tex.stackexchange.com/questions/219138 + backlash_ = "\\\\" + else: + backlash_ = "" + buf.write(f"{table_}{caption_}{label_}{backlash_}{tabular_}") else: - # then write output only in a tabular environment - pass - - buf.write(f"\\begin{{tabular}}{{{column_format}}}\n") + if self.longtable: + tabletype_ = "longtable" + else: + tabletype_ = "tabular" + buf.write(f"\\begin{{{tabletype_}}}{{{column_format}}}\n") def _write_tabular_end(self, buf): """ @@ -323,62 +332,12 @@ def _write_tabular_end(self, buf): a string. """ - buf.write("\\bottomrule\n") - buf.write("\\end{tabular}\n") - if self._table_float: - buf.write("\\end{table}\n") - else: - pass - - def _write_longtable_begin(self, buf, column_format: str): - """ - Write the beginning of a longtable environment including caption and - label if provided by user. - - Parameters - ---------- - buf : string or file handle - File path or object. If not specified, the result is returned as - a string. - column_format : str - The columns format as specified in `LaTeX table format - <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' - for 3 columns - """ - if self.caption is None: - caption_ = "" - else: - caption_ = f"\\caption{{{self.caption}}}" - - if self.label is None: - label_ = "" - else: - label_ = f"\\label{{{self.label}}}" - - if self.position is None: - position_ = "" + if self.longtable: + buf.write("\\end{longtable}\n") else: - position_ = f"[{self.position}]" - - buf.write( - f"\\begin{{longtable}}{position_}{{{column_format}}}\n{caption_}{label_}" - ) - if self.caption is not None or self.label is not None: - # a double-backslash is required at the end of the line - # as discussed here: - # https://tex.stackexchange.com/questions/219138 - buf.write("\\\\\n") - - @staticmethod - def _write_longtable_end(buf): - """ - Write the end of a longtable environment. 
- - Parameters - ---------- - buf : string or file handle - File path or object. If not specified, the result is returned as - a string. - - """ - buf.write("\\end{longtable}\n") + buf.write("\\bottomrule\n") + buf.write("\\end{tabular}\n") + if self._table_float: + buf.write("\\end{table}\n") + else: + pass diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 93ad3739e59c7..96a9ed2b86cf4 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -555,7 +555,8 @@ def test_to_latex_longtable_caption_label(self): result_cl = df.to_latex(longtable=True, caption=the_caption, label=the_label) expected_cl = r"""\begin{longtable}{lrl} -\caption{a table in a \texttt{longtable} environment}\label{tab:longtable}\\ +\caption{a table in a \texttt{longtable} environment} +\label{tab:longtable}\\ \toprule {} & a & b \\ \midrule
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

As suggested [here](https://github.com/pandas-dev/pandas/pull/35284#issuecomment-665273834), the utility functions that begin or end table / tabular / longtable environments could be merged; see the sketch below for the user-visible output.
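As a quick illustration of the effect (based on the updated test in this diff, not behavior beyond it), the longtable preamble now places the caption and label on separate lines:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
tex = df.to_latex(longtable=True, caption="a caption", label="tab:x")

# Print the preamble up to \toprule; per the updated test it now reads:
#   \begin{longtable}{lrl}
#   \caption{a caption}
#   \label{tab:x}\\
print(tex[: tex.index("\\toprule")])
```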
https://api.github.com/repos/pandas-dev/pandas/pulls/35649
2020-08-10T07:21:07Z
2020-08-13T18:17:39Z
2020-08-13T18:17:39Z
2020-08-14T06:13:01Z
BUG: Support custom BaseIndexers in groupby.rolling
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 415f9e508feb8..cdc244ca193b4 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) +- Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index bc36bdca982e8..7cbe34cdebf9f 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -1,6 +1,6 @@ """Indexer objects for computing start/end window bounds for rolling operations""" from datetime import timedelta -from typing import Dict, Optional, Tuple, Type, Union +from typing import Dict, Optional, Tuple, Type import numpy as np @@ -265,7 +265,8 @@ def __init__( index_array: Optional[np.ndarray], window_size: int, groupby_indicies: Dict, - rolling_indexer: Union[Type[FixedWindowIndexer], Type[VariableWindowIndexer]], + rolling_indexer: Type[BaseIndexer], + indexer_kwargs: Optional[Dict], **kwargs, ): """ @@ -276,7 +277,10 @@ def __init__( """ self.groupby_indicies = groupby_indicies self.rolling_indexer = rolling_indexer - super().__init__(index_array, window_size, **kwargs) + self.indexer_kwargs = indexer_kwargs or {} + super().__init__( + index_array, self.indexer_kwargs.pop("window_size", window_size), **kwargs + ) @Appender(get_window_bounds_doc) def get_window_bounds( @@ -298,7 +302,9 @@ def get_window_bounds( else: index_array = self.index_array indexer = self.rolling_indexer( - index_array=index_array, window_size=self.window_size, + index_array=index_array, + window_size=self.window_size, + **self.indexer_kwargs, ) start, end = indexer.get_window_bounds( len(indicies), min_periods, center, closed diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 7347d5686aabc..0306d4de2fc73 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -145,7 +145,7 @@ class _Window(PandasObject, ShallowMixin, SelectionMixin): def __init__( self, - obj, + obj: FrameOrSeries, window=None, min_periods: Optional[int] = None, center: bool = False, @@ -2271,10 +2271,16 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: ------- GroupbyRollingIndexer """ - rolling_indexer: Union[Type[FixedWindowIndexer], Type[VariableWindowIndexer]] - if self.is_freq_type: + rolling_indexer: Type[BaseIndexer] + indexer_kwargs: Optional[Dict] = None + index_array = self.obj.index.asi8 + if isinstance(self.window, BaseIndexer): + rolling_indexer = type(self.window) + indexer_kwargs = self.window.__dict__ + # We'll be using the index of each group later + indexer_kwargs.pop("index_array", None) + elif self.is_freq_type: rolling_indexer = VariableWindowIndexer - index_array = self.obj.index.asi8 else: rolling_indexer = FixedWindowIndexer index_array = None @@ -2283,6 +2289,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: window_size=window, groupby_indicies=self._groupby.indices, rolling_indexer=rolling_indexer, + 
indexer_kwargs=indexer_kwargs, ) return window_indexer diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index e1dcac06c39cc..a9590c7e1233a 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -305,6 +305,29 @@ def test_groupby_subselect_rolling(self): ) tm.assert_series_equal(result, expected) + def test_groupby_rolling_custom_indexer(self): + # GH 35557 + class SimpleIndexer(pd.api.indexers.BaseIndexer): + def get_window_bounds( + self, num_values=0, min_periods=None, center=None, closed=None + ): + min_periods = self.window_size if min_periods is None else 0 + end = np.arange(num_values, dtype=np.int64) + 1 + start = end.copy() - self.window_size + start[start < 0] = min_periods + return start, end + + df = pd.DataFrame( + {"a": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5 + ) + result = ( + df.groupby(df.index) + .rolling(SimpleIndexer(window_size=3), min_periods=1) + .sum() + ) + expected = df.groupby(df.index).rolling(window=3, min_periods=1).sum() + tm.assert_frame_equal(result, expected) + def test_groupby_rolling_subset_with_closed(self): # GH 35549 df = pd.DataFrame(
- [x] closes #35557
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
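For reference, a minimal sketch of the newly supported pattern, adapted from the test added in this PR (the indexer below is just a fixed-size trailing window, so it should agree with a plain integer window):

```python
import numpy as np
import pandas as pd
from pandas.api.indexers import BaseIndexer


class TrailingIndexer(BaseIndexer):
    # A fixed-size trailing window, equivalent to rolling(window=window_size).
    def get_window_bounds(self, num_values=0, min_periods=None, center=None, closed=None):
        end = np.arange(1, num_values + 1, dtype=np.int64)
        start = np.maximum(end - self.window_size, 0)
        return start, end


df = pd.DataFrame(
    {"a": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5
)

# Before this fix the custom indexer was silently ignored in groupby.rolling;
# now both results agree.
custom = df.groupby(df.index).rolling(TrailingIndexer(window_size=3), min_periods=1).sum()
fixed = df.groupby(df.index).rolling(window=3, min_periods=1).sum()
```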
https://api.github.com/repos/pandas-dev/pandas/pulls/35647
2020-08-10T07:01:01Z
2020-08-12T22:14:31Z
2020-08-12T22:14:31Z
2020-08-13T06:14:05Z
BUG/ENH: consistent gzip compression arguments
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 35403b5c8b66f..43030d76d945a 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -287,16 +287,19 @@ Quoting, compression, and file format compression : {``'infer'``, ``'gzip'``, ``'bz2'``, ``'zip'``, ``'xz'``, ``None``, ``dict``}, default ``'infer'`` For on-the-fly decompression of on-disk data. If 'infer', then use gzip, - bz2, zip, or xz if filepath_or_buffer is a string ending in '.gz', '.bz2', + bz2, zip, or xz if ``filepath_or_buffer`` is path-like ending in '.gz', '.bz2', '.zip', or '.xz', respectively, and no decompression otherwise. If using 'zip', the ZIP file must contain only one data file to be read in. Set to ``None`` for no decompression. Can also be a dict with key ``'method'`` - set to one of {``'zip'``, ``'gzip'``, ``'bz2'``}, and other keys set to - compression settings. As an example, the following could be passed for - faster compression: ``compression={'method': 'gzip', 'compresslevel': 1}``. + set to one of {``'zip'``, ``'gzip'``, ``'bz2'``} and other key-value pairs are + forwarded to ``zipfile.ZipFile``, ``gzip.GzipFile``, or ``bz2.BZ2File``. + As an example, the following could be passed for faster compression and to + create a reproducible gzip archive: + ``compression={'method': 'gzip', 'compresslevel': 1, 'mtime': 1}``. .. versionchanged:: 0.24.0 'infer' option added and set to default. .. versionchanged:: 1.1.0 dict option extended to support ``gzip`` and ``bz2``. + .. versionchanged:: 1.2.0 Previous versions forwarded dict entries for 'gzip' to `gzip.open`. thousands : str, default ``None`` Thousands separator. decimal : str, default ``'.'`` diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index deb5697053ea8..6612f741d925d 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -235,6 +235,7 @@ I/O - Bug in :meth:`to_csv` caused a ``ValueError`` when it was called with a filename in combination with ``mode`` containing a ``b`` (:issue:`35058`) - In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`) - :meth:`to_pickle` and :meth:`read_pickle` were closing user-provided file objects (:issue:`35679`) +- :meth:`to_csv` passes compression arguments for `'gzip'` always to `gzip.GzipFile` (:issue:`28103`) Plotting ^^^^^^^^ diff --git a/pandas/_typing.py b/pandas/_typing.py index 47a102ddc70e0..1b972030ef5a5 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -109,3 +109,8 @@ # for arbitrary kwargs passed during reading/writing files StorageOptions = Optional[Dict[str, Any]] + + +# compression keywords and compression +CompressionDict = Mapping[str, Optional[Union[str, int, bool]]] +CompressionOptions = Optional[Union[str, CompressionDict]] diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 11147bffa32c3..2219d54477d9e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -35,6 +35,7 @@ from pandas._libs.tslibs import Tick, Timestamp, to_offset from pandas._typing import ( Axis, + CompressionOptions, FilePathOrBuffer, FrameOrSeries, JSONSerializable, @@ -2058,7 +2059,7 @@ def to_json( date_unit: str = "ms", default_handler: Optional[Callable[[Any], JSONSerializable]] = None, lines: bool_t = False, - compression: Optional[str] = "infer", + compression: CompressionOptions = "infer", index: bool_t = True, indent: Optional[int] = None, storage_options: StorageOptions = None, @@ -2646,7 +2647,7 @@ def to_sql( def 
to_pickle( self, path, - compression: Optional[str] = "infer", + compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, ) -> None: @@ -3053,7 +3054,7 @@ def to_csv( index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None, mode: str = "w", encoding: Optional[str] = None, - compression: Optional[Union[str, Mapping[str, str]]] = "infer", + compression: CompressionOptions = "infer", quoting: Optional[int] = None, quotechar: str = '"', line_terminator: Optional[str] = None, @@ -3144,6 +3145,12 @@ def to_csv( Compression is supported for binary file objects. + .. versionchanged:: 1.2.0 + + Previous versions forwarded dict entries for 'gzip' to + `gzip.open` instead of `gzip.GzipFile` which prevented + setting `mtime`. + quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC diff --git a/pandas/io/common.py b/pandas/io/common.py index 9ac642e58b544..54f35e689aac8 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -18,7 +18,6 @@ Optional, Tuple, Type, - Union, ) from urllib.parse import ( urljoin, @@ -29,7 +28,12 @@ ) import zipfile -from pandas._typing import FilePathOrBuffer, StorageOptions +from pandas._typing import ( + CompressionDict, + CompressionOptions, + FilePathOrBuffer, + StorageOptions, +) from pandas.compat import _get_lzma_file, _import_lzma from pandas.compat._optional import import_optional_dependency @@ -160,7 +164,7 @@ def is_fsspec_url(url: FilePathOrBuffer) -> bool: def get_filepath_or_buffer( filepath_or_buffer: FilePathOrBuffer, encoding: Optional[str] = None, - compression: Optional[str] = None, + compression: CompressionOptions = None, mode: Optional[str] = None, storage_options: StorageOptions = None, ): @@ -188,7 +192,7 @@ def get_filepath_or_buffer( Returns ------- - Tuple[FilePathOrBuffer, str, str, bool] + Tuple[FilePathOrBuffer, str, CompressionOptions, bool] Tuple containing the filepath or buffer, the encoding, the compression and should_close. """ @@ -291,8 +295,8 @@ def file_path_to_url(path: str) -> str: def get_compression_method( - compression: Optional[Union[str, Mapping[str, Any]]] -) -> Tuple[Optional[str], Dict[str, Any]]: + compression: CompressionOptions, +) -> Tuple[Optional[str], CompressionDict]: """ Simplifies a compression argument to a compression method string and a mapping containing additional arguments. 
@@ -316,7 +320,7 @@ def get_compression_method( if isinstance(compression, Mapping): compression_args = dict(compression) try: - compression_method = compression_args.pop("method") + compression_method = compression_args.pop("method") # type: ignore except KeyError as err: raise ValueError("If mapping, compression must have key 'method'") from err else: @@ -383,7 +387,7 @@ def get_handle( path_or_buf, mode: str, encoding=None, - compression: Optional[Union[str, Mapping[str, Any]]] = None, + compression: CompressionOptions = None, memory_map: bool = False, is_text: bool = True, errors=None, @@ -464,16 +468,13 @@ def get_handle( # GZ Compression if compression == "gzip": if is_path: - f = gzip.open(path_or_buf, mode, **compression_args) + f = gzip.GzipFile(filename=path_or_buf, mode=mode, **compression_args) else: f = gzip.GzipFile(fileobj=path_or_buf, mode=mode, **compression_args) # BZ Compression elif compression == "bz2": - if is_path: - f = bz2.BZ2File(path_or_buf, mode, **compression_args) - else: - f = bz2.BZ2File(path_or_buf, mode=mode, **compression_args) + f = bz2.BZ2File(path_or_buf, mode=mode, **compression_args) # ZIP Compression elif compression == "zip": @@ -577,7 +578,9 @@ def __init__( if mode in ["wb", "rb"]: mode = mode.replace("b", "") self.archive_name = archive_name - super().__init__(file, mode, zipfile.ZIP_DEFLATED, **kwargs) + kwargs_zip: Dict[str, Any] = {"compression": zipfile.ZIP_DEFLATED} + kwargs_zip.update(kwargs) + super().__init__(file, mode, **kwargs_zip) def write(self, data): archive_name = self.filename diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 6eceb94387171..c462a96da7133 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -5,13 +5,13 @@ import csv as csvlib from io import StringIO, TextIOWrapper import os -from typing import Hashable, List, Mapping, Optional, Sequence, Union +from typing import Hashable, List, Optional, Sequence, Union import warnings import numpy as np from pandas._libs import writers as libwriters -from pandas._typing import FilePathOrBuffer, StorageOptions +from pandas._typing import CompressionOptions, FilePathOrBuffer, StorageOptions from pandas.core.dtypes.generic import ( ABCDatetimeIndex, @@ -44,7 +44,7 @@ def __init__( mode: str = "w", encoding: Optional[str] = None, errors: str = "strict", - compression: Union[str, Mapping[str, str], None] = "infer", + compression: CompressionOptions = "infer", quoting: Optional[int] = None, line_terminator="\n", chunksize: Optional[int] = None, diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 0d2b351926343..c2bd6302940bb 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -3,13 +3,13 @@ from io import BytesIO, StringIO from itertools import islice import os -from typing import Any, Callable, Optional, Type +from typing import IO, Any, Callable, List, Optional, Type import numpy as np import pandas._libs.json as json from pandas._libs.tslibs import iNaT -from pandas._typing import JSONSerializable, StorageOptions +from pandas._typing import CompressionOptions, JSONSerializable, StorageOptions from pandas.errors import AbstractMethodError from pandas.util._decorators import deprecate_kwarg, deprecate_nonkeyword_arguments @@ -19,7 +19,12 @@ from pandas.core.construction import create_series_with_explicit_dtype from pandas.core.reshape.concat import concat -from pandas.io.common import get_filepath_or_buffer, get_handle, infer_compression +from pandas.io.common import ( + get_compression_method, + 
get_filepath_or_buffer, + get_handle, + infer_compression, +) from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import build_table_schema, parse_table_schema from pandas.io.parsers import _validate_integer @@ -41,7 +46,7 @@ def to_json( date_unit: str = "ms", default_handler: Optional[Callable[[Any], JSONSerializable]] = None, lines: bool = False, - compression: Optional[str] = "infer", + compression: CompressionOptions = "infer", index: bool = True, indent: int = 0, storage_options: StorageOptions = None, @@ -369,7 +374,7 @@ def read_json( encoding=None, lines: bool = False, chunksize: Optional[int] = None, - compression="infer", + compression: CompressionOptions = "infer", nrows: Optional[int] = None, storage_options: StorageOptions = None, ): @@ -607,7 +612,9 @@ def read_json( if encoding is None: encoding = "utf-8" - compression = infer_compression(path_or_buf, compression) + compression_method, compression = get_compression_method(compression) + compression_method = infer_compression(path_or_buf, compression_method) + compression = dict(compression, method=compression_method) filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer( path_or_buf, encoding=encoding, @@ -667,10 +674,13 @@ def __init__( encoding, lines: bool, chunksize: Optional[int], - compression, + compression: CompressionOptions, nrows: Optional[int], ): + compression_method, compression = get_compression_method(compression) + compression = dict(compression, method=compression_method) + self.orient = orient self.typ = typ self.dtype = dtype @@ -687,6 +697,7 @@ def __init__( self.nrows_seen = 0 self.should_close = False self.nrows = nrows + self.file_handles: List[IO] = [] if self.chunksize is not None: self.chunksize = _validate_integer("chunksize", self.chunksize, 1) @@ -735,8 +746,8 @@ def _get_data_from_filepath(self, filepath_or_buffer): except (TypeError, ValueError): pass - if exists or self.compression is not None: - data, _ = get_handle( + if exists or self.compression["method"] is not None: + data, self.file_handles = get_handle( filepath_or_buffer, "r", encoding=self.encoding, @@ -816,6 +827,8 @@ def close(self): self.open_stream.close() except (IOError, AttributeError): pass + for file_handle in self.file_handles: + file_handle.close() def __next__(self): if self.nrows: diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index eee6ec7c9feca..fc1d2e385cf72 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -1,9 +1,9 @@ """ pickle compat """ import pickle -from typing import Any, Optional +from typing import Any import warnings -from pandas._typing import FilePathOrBuffer, StorageOptions +from pandas._typing import CompressionOptions, FilePathOrBuffer, StorageOptions from pandas.compat import pickle_compat as pc from pandas.io.common import get_filepath_or_buffer, get_handle @@ -12,7 +12,7 @@ def to_pickle( obj: Any, filepath_or_buffer: FilePathOrBuffer, - compression: Optional[str] = "infer", + compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, ): @@ -114,7 +114,7 @@ def to_pickle( def read_pickle( filepath_or_buffer: FilePathOrBuffer, - compression: Optional[str] = "infer", + compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ): """ diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 7a25617885839..ec3819f1673a8 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -35,7 +35,7 @@ from pandas._libs.lib import 
infer_dtype from pandas._libs.writers import max_len_string_array -from pandas._typing import FilePathOrBuffer, Label, StorageOptions +from pandas._typing import CompressionOptions, FilePathOrBuffer, Label, StorageOptions from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( @@ -1938,9 +1938,9 @@ def read_stata( def _open_file_binary_write( fname: FilePathOrBuffer, - compression: Union[str, Mapping[str, str], None], + compression: CompressionOptions, storage_options: StorageOptions = None, -) -> Tuple[BinaryIO, bool, Optional[Union[str, Mapping[str, str]]]]: +) -> Tuple[BinaryIO, bool, CompressionOptions]: """ Open a binary file or no-op if file-like. @@ -1978,17 +1978,10 @@ def _open_file_binary_write( # Extract compression mode as given, if dict compression_typ, compression_args = get_compression_method(compression) compression_typ = infer_compression(fname, compression_typ) - path_or_buf, _, compression_typ, _ = get_filepath_or_buffer( - fname, - mode="wb", - compression=compression_typ, - storage_options=storage_options, + compression = dict(compression_args, method=compression_typ) + path_or_buf, _, compression, _ = get_filepath_or_buffer( + fname, mode="wb", compression=compression, storage_options=storage_options, ) - if compression_typ is not None: - compression = compression_args - compression["method"] = compression_typ - else: - compression = None f, _ = get_handle(path_or_buf, "wb", compression=compression, is_text=False) return f, True, compression else: diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index 902a3d5d2a397..bc14b485f75e5 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -1,7 +1,10 @@ +import io import os +from pathlib import Path import subprocess import sys import textwrap +import time import pytest @@ -130,6 +133,46 @@ def test_compression_binary(compression_only): ) +def test_gzip_reproducibility_file_name(): + """ + Gzip should create reproducible archives with mtime. + + Note: Archives created with different filenames will still be different! + + GH 28103 + """ + df = tm.makeDataFrame() + compression_options = {"method": "gzip", "mtime": 1} + + # test for filename + with tm.ensure_clean() as path: + path = Path(path) + df.to_csv(path, compression=compression_options) + time.sleep(2) + output = path.read_bytes() + df.to_csv(path, compression=compression_options) + assert output == path.read_bytes() + + +def test_gzip_reproducibility_file_object(): + """ + Gzip should create reproducible archives with mtime. + + GH 28103 + """ + df = tm.makeDataFrame() + compression_options = {"method": "gzip", "mtime": 1} + + # test for file object + buffer = io.BytesIO() + df.to_csv(buffer, compression=compression_options, mode="wb") + output = buffer.getvalue() + time.sleep(2) + buffer = io.BytesIO() + df.to_csv(buffer, compression=compression_options, mode="wb") + assert output == buffer.getvalue() + + def test_with_missing_lzma(): """Tests if import pandas works when lzma is not present.""" # https://github.com/pandas-dev/pandas/issues/27575
- [x] closes #28103
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

`to_csv` lets the user set all keyword arguments for gzip. Depending on whether the user provides a filename or a file object, different keyword arguments could previously be set (`gzip.open` vs `gzip.GzipFile`). This PR always uses `gzip.GzipFile`. The additional keyword arguments valid for `gzip.open` but not for `gzip.GzipFile` (`encoding`, `errors`, and ~~`newline`~~) are still accessible: https://github.com/pandas-dev/pandas/blob/aefae55e1960a718561ae0369e83605e3038f292/pandas/io/common.py#L512

Using `gzip.GzipFile` also allows us to set `mtime` to create reproducible gzip archives, as shown in the sketch below.
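A minimal sketch of the reproducibility use case, mirroring the test added in this PR:

```python
import io

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
# 'mtime' is now forwarded to gzip.GzipFile; pinning it makes the archive
# independent of when it was written.
opts = {"method": "gzip", "mtime": 1}

buf1, buf2 = io.BytesIO(), io.BytesIO()
df.to_csv(buf1, compression=opts, mode="wb")
df.to_csv(buf2, compression=opts, mode="wb")
assert buf1.getvalue() == buf2.getvalue()  # byte-identical archives
```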
https://api.github.com/repos/pandas-dev/pandas/pulls/35645
2020-08-09T17:11:31Z
2020-08-13T22:04:50Z
2020-08-13T22:04:49Z
2020-08-13T22:04:56Z
ENH: Styler tooltips feature
diff --git a/doc/source/reference/style.rst b/doc/source/reference/style.rst index e80dc1b57ff80..3a8d912fa6ffe 100644 --- a/doc/source/reference/style.rst +++ b/doc/source/reference/style.rst @@ -39,6 +39,8 @@ Style application Styler.set_td_classes Styler.set_table_styles Styler.set_table_attributes + Styler.set_tooltips + Styler.set_tooltips_class Styler.set_caption Styler.set_properties Styler.set_uuid diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ab00b749d5725..6a85bfd852e19 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -52,6 +52,7 @@ Other enhancements - :meth:`DataFrame.apply` can now accept NumPy unary operators as strings, e.g. ``df.apply("sqrt")``, which was already the case for :meth:`Series.apply` (:issue:`39116`) - :meth:`DataFrame.apply` can now accept non-callable DataFrame properties as strings, e.g. ``df.apply("size")``, which was already the case for :meth:`Series.apply` (:issue:`39116`) - :meth:`Series.apply` can now accept list-like or dictionary-like arguments that aren't lists or dictionaries, e.g. ``ser.apply(np.array(["sum", "mean"]))``, which was already the case for :meth:`DataFrame.apply` (:issue:`39140`) +- :meth:`.Styler.set_tooltips` allows on hover tooltips to be added to styled HTML dataframes. .. --------------------------------------------------------------------------- diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index b6c1336ede597..49eb579f9bd99 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -182,6 +182,8 @@ def __init__( self.cell_ids = cell_ids self.na_rep = na_rep + self.tooltips: Optional[_Tooltips] = None + self.cell_context: Dict[str, Any] = {} # display_funcs maps (row, col) -> formatting function @@ -205,6 +207,117 @@ def _repr_html_(self) -> str: """ return self.render() + def _init_tooltips(self): + """ + Checks parameters compatible with tooltips and creates instance if necessary + """ + if not self.cell_ids: + # tooltips not optimised for individual cell check. requires reasonable + # redesign and more extensive code for a feature that might be rarely used. + raise NotImplementedError( + "Tooltips can only render with 'cell_ids' is True." + ) + if self.tooltips is None: + self.tooltips = _Tooltips() + + def set_tooltips(self, ttips: DataFrame) -> "Styler": + """ + Add string based tooltips that will appear in the `Styler` HTML result. These + tooltips are applicable only to`<td>` elements. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + ttips : DataFrame + DataFrame containing strings that will be translated to tooltips, mapped + by identical column and index values that must exist on the underlying + `Styler` data. None, NaN values, and empty strings will be ignored and + not affect the rendered HTML. + + Returns + ------- + self : Styler + + Notes + ----- + Tooltips are created by adding `<span class="pd-t"></span>` to each data cell + and then manipulating the table level CSS to attach pseudo hover and pseudo + after selectors to produce the required the results. For styling control + see `:meth:Styler.set_tooltips_class`. + Tooltips are not designed to be efficient, and can add large amounts of + additional HTML for larger tables, since they also require that `cell_ids` + is forced to `True`. + + Examples + -------- + >>> df = pd.DataFrame(data=[[0, 1], [2, 3]]) + >>> ttips = pd.DataFrame( + ... data=[["Min", ""], [np.nan, "Max"]], columns=df.columns, index=df.index + ... 
) + >>> s = df.style.set_tooltips(ttips).render() + """ + self._init_tooltips() + assert self.tooltips is not None # mypy requiremen + self.tooltips.tt_data = ttips + return self + + def set_tooltips_class( + self, + name: Optional[str] = None, + properties: Optional[Sequence[Tuple[str, Union[str, int, float]]]] = None, + ) -> "Styler": + """ + Manually configure the name and/or properties of the class for + creating tooltips on hover. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + name : str, default None + Name of the tooltip class used in CSS, should conform to HTML standards. + properties : list-like, default None + List of (attr, value) tuples; see example. + + Returns + ------- + self : Styler + + Notes + ----- + If arguments are `None` will not make any changes to the underlying ``Tooltips`` + existing values. + + The default properties for the tooltip CSS class are: + + - visibility: hidden + - position: absolute + - z-index: 1 + - background-color: black + - color: white + - transform: translate(-20px, -20px) + + The property ('visibility', 'hidden') is a key prerequisite to the hover + functionality, and should always be included in any manual properties + specification. + + Examples + -------- + >>> df = pd.DataFrame(np.random.randn(10, 4)) + >>> df.style.set_tooltips_class(name='tt-add', properties=[ + ... ('visibility', 'hidden'), + ... ('position', 'absolute'), + ... ('z-index', 1)]) + """ + self._init_tooltips() + assert self.tooltips is not None # mypy requirement + if properties: + self.tooltips.class_properties = properties + if name: + self.tooltips.class_name = name + return self + @doc( NDFrame.to_excel, klass="Styler", @@ -434,7 +547,7 @@ def format_attr(pair): else: table_attr += ' class="tex2jax_ignore"' - return { + d = { "head": head, "cellstyle": cellstyle, "body": body, @@ -444,6 +557,10 @@ def format_attr(pair): "caption": caption, "table_attributes": table_attr, } + if self.tooltips: + d = self.tooltips._translate(self.data, self.uuid, d) + + return d def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> "Styler": """ @@ -689,6 +806,7 @@ def clear(self) -> None: Returns None. """ self.ctx.clear() + self.tooltips = None self.cell_context = {} self._todo = [] @@ -1658,6 +1776,179 @@ def pipe(self, func: Callable, *args, **kwargs): return com.pipe(self, func, *args, **kwargs) +class _Tooltips: + """ + An extension to ``Styler`` that allows for and manipulates tooltips on hover + of table data-cells in the HTML result. + + Parameters + ---------- + css_name: str, default "pd-t" + Name of the CSS class that controls visualisation of tooltips. + css_props: list-like, default; see Notes + List of (attr, value) tuples defining properties of the CSS class. + tooltips: DataFrame, default empty + DataFrame of strings aligned with underlying ``Styler`` data for tooltip + display. + + Notes + ----- + The default properties for the tooltip CSS class are: + + - visibility: hidden + - position: absolute + - z-index: 1 + - background-color: black + - color: white + - transform: translate(-20px, -20px) + + Hidden visibility is a key prerequisite to the hover functionality, and should + always be included in any manual properties specification. 
+ """ + + def __init__( + self, + css_props: Sequence[Tuple[str, Union[str, int, float]]] = [ + ("visibility", "hidden"), + ("position", "absolute"), + ("z-index", 1), + ("background-color", "black"), + ("color", "white"), + ("transform", "translate(-20px, -20px)"), + ], + css_name: str = "pd-t", + tooltips: DataFrame = DataFrame(), + ): + self.class_name = css_name + self.class_properties = css_props + self.tt_data = tooltips + self.table_styles: List[Dict[str, Union[str, List[Tuple[str, str]]]]] = [] + + @property + def _class_styles(self): + """ + Combine the ``_Tooltips`` CSS class name and CSS properties to the format + required to extend the underlying ``Styler`` `table_styles` to allow + tooltips to render in HTML. + + Returns + ------- + styles : List + """ + return [{"selector": f".{self.class_name}", "props": self.class_properties}] + + def _pseudo_css(self, uuid: str, name: str, row: int, col: int, text: str): + """ + For every table data-cell that has a valid tooltip (not None, NaN or + empty string) must create two pseudo CSS entries for the specific + <td> element id which are added to overall table styles: + an on hover visibility change and a content change + dependent upon the user's chosen display string. + + For example: + [{"selector": "T__row1_col1:hover .pd-t", + "props": [("visibility", "visible")]}, + {"selector": "T__row1_col1 .pd-t::after", + "props": [("content", "Some Valid Text String")]}] + + Parameters + ---------- + uuid: str + The uuid of the Styler instance + name: str + The css-name of the class used for styling tooltips + row : int + The row index of the specified tooltip string data + col : int + The col index of the specified tooltip string data + text : str + The textual content of the tooltip to be displayed in HTML. + + Returns + ------- + pseudo_css : List + """ + return [ + { + "selector": "#T_" + + uuid + + "row" + + str(row) + + "_col" + + str(col) + + f":hover .{name}", + "props": [("visibility", "visible")], + }, + { + "selector": "#T_" + + uuid + + "row" + + str(row) + + "_col" + + str(col) + + f" .{name}::after", + "props": [("content", f'"{text}"')], + }, + ] + + def _translate(self, styler_data: FrameOrSeriesUnion, uuid: str, d: Dict): + """ + Mutate the render dictionary to allow for tooltips: + + - Add `<span>` HTML element to each data cells `display_value`. Ignores + headers. + - Add table level CSS styles to control pseudo classes. + + Parameters + ---------- + styler_data : DataFrame + Underlying ``Styler`` DataFrame used for reindexing. + uuid : str + The underlying ``Styler`` uuid for CSS id. 
+ d : dict + The dictionary prior to final render + + Returns + ------- + render_dict : Dict + """ + self.tt_data = ( + self.tt_data.reindex_like(styler_data) + .dropna(how="all", axis=0) + .dropna(how="all", axis=1) + ) + if self.tt_data.empty: + return d + + name = self.class_name + + mask = (self.tt_data.isna()) | (self.tt_data.eq("")) # empty string = no ttip + self.table_styles = [ + style + for sublist in [ + self._pseudo_css(uuid, name, i, j, str(self.tt_data.iloc[i, j])) + for i in range(len(self.tt_data.index)) + for j in range(len(self.tt_data.columns)) + if not mask.iloc[i, j] + ] + for style in sublist + ] + + if self.table_styles: + # add span class to every cell only if at least 1 non-empty tooltip + for row in d["body"]: + for item in row: + if item["type"] == "td": + item["display_value"] = ( + str(item["display_value"]) + + f'<span class="{self.class_name}"></span>' + ) + d["table_styles"].extend(self._class_styles) + d["table_styles"].extend(self.table_styles) + + return d + + def _is_visible(idx_row, idx_col, lengths) -> bool: """ Index -> {(idx_row, idx_col): bool}). diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index 0bb422658df25..c61d81d565459 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -1769,6 +1769,74 @@ def test_uuid_len_raises(self, len_): with pytest.raises(TypeError, match=msg): Styler(df, uuid_len=len_, cell_ids=False).render() + @pytest.mark.parametrize( + "ttips", + [ + DataFrame( + data=[["Min", "Max"], [np.nan, ""]], + columns=["A", "B"], + index=["a", "b"], + ), + DataFrame(data=[["Max", "Min"]], columns=["B", "A"], index=["a"]), + DataFrame( + data=[["Min", "Max", None]], columns=["A", "B", "C"], index=["a"] + ), + ], + ) + def test_tooltip_render(self, ttips): + # GH 21266 + df = DataFrame(data=[[0, 3], [1, 2]], columns=["A", "B"], index=["a", "b"]) + s = Styler(df, uuid_len=0).set_tooltips(ttips).render() + + # test tooltip table level class + assert "#T__ .pd-t {\n visibility: hidden;\n" in s + + # test 'Min' tooltip added + assert ( + "#T__ #T__row0_col0:hover .pd-t {\n visibility: visible;\n } " + + ' #T__ #T__row0_col0 .pd-t::after {\n content: "Min";\n }' + in s + ) + assert ( + '<td id="T__row0_col0" class="data row0 col0" >0<span class="pd-t">' + + "</span></td>" + in s + ) + + # test 'Max' tooltip added + assert ( + "#T__ #T__row0_col1:hover .pd-t {\n visibility: visible;\n } " + + ' #T__ #T__row0_col1 .pd-t::after {\n content: "Max";\n }' + in s + ) + assert ( + '<td id="T__row0_col1" class="data row0 col1" >3<span class="pd-t">' + + "</span></td>" + in s + ) + + def test_tooltip_ignored(self): + # GH 21266 + df = DataFrame(data=[[0, 1], [2, 3]]) + s = Styler(df).set_tooltips_class("pd-t").render() # no set_tooltips() + assert '<style type="text/css" >\n</style>' in s + assert '<span class="pd-t"></span>' not in s + + def test_tooltip_class(self): + # GH 21266 + df = DataFrame(data=[[0, 1], [2, 3]]) + s = ( + Styler(df, uuid_len=0) + .set_tooltips(DataFrame([["tooltip"]])) + .set_tooltips_class(name="other-class", properties=[("color", "green")]) + .render() + ) + assert "#T__ .other-class {\n color: green;\n" in s + assert ( + '#T__ #T__row0_col0 .other-class::after {\n content: "tooltip";\n' + in s + ) + @td.skip_if_no_mpl class TestStylerMatplotlibDep:
- [x] closes #21266
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

This is a draft PR to address #21266: tooltips for DataFrames in `Styler`. This PR has the following objectives:

1) Add functionality whilst being completely (almost) decoupled from the rest of the HTML rendering process.
2) Be fully backwards compatible and have no impact at all on previous Stylers that have never used tooltips.
3) Use pseudo CSS class rendering via `table_styles` to control the visualisation of the tooltips.

To address 1) the architecture was simple:

- If `tooltips` are detected then an additional HTML element is added to every data cell: `<span class="pd-t"></span>`. If `tooltips` are not detected (the default) nothing is done.
- Add generic table-level CSS for the tooltip class, hiding tooltips by default but positioning, sizing, and coloring them.
- Loop through a tooltips DataFrame and, based on the row and column index, add additional content to the `<span>` element using the `::after` pseudo-element targeting the exact cell id. This is table level also.

Rendering is performed as the last step. To address 2), default values ensure the functions make no changes unless `set_tooltips` has been called. A minimal usage sketch is shown below.

## Extensions

The very simple architecture requires a DataFrame of tooltips to be supplied. This requires the user to have constructed it privately (possibly using the regular `DataFrame.apply` and `DataFrame.applymap` methods). These could be incorporated.

It would also be better to only add extra HTML `<span>` elements to data cells that have tooltips. This requires conditional looping over the render dict, which I haven't included here to save on initial complexity; for small dataframes it is also irrelevant. However, given my time constraints I would rather leave this to a more available and more competent developer!

Comments welcome.
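A minimal usage sketch, lifted from the docstring added in this PR:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(data=[[0, 1], [2, 3]])
# Tooltip strings aligned with df by index/columns; None, NaN, and ""
# entries produce no tooltip for that cell.
ttips = pd.DataFrame(
    data=[["Min", ""], [np.nan, "Max"]], columns=df.columns, index=df.index
)
html = df.style.set_tooltips(ttips).render()
```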
https://api.github.com/repos/pandas-dev/pandas/pulls/35643
2020-08-09T12:41:11Z
2021-01-17T15:46:56Z
2021-01-17T15:46:56Z
2021-01-21T06:46:55Z
REF/PERF: Move MultiIndex._tuples to MultiIndex._cache
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 13927dede5542..448c2dfe4a29d 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -243,7 +243,6 @@ class MultiIndex(Index): _comparables = ["names"] rename = Index.set_names - _tuples = None sortorder: Optional[int] # -------------------------------------------------------------------- @@ -634,16 +633,9 @@ def from_frame(cls, df, sortorder=None, names=None): # -------------------------------------------------------------------- - @property + @cache_readonly def _values(self): # We override here, since our parent uses _data, which we don't use. - return self.values - - @property - def values(self): - if self._tuples is not None: - return self._tuples - values = [] for i in range(self.nlevels): @@ -657,8 +649,12 @@ def values(self): vals = np.array(vals, copy=False) values.append(vals) - self._tuples = lib.fast_zip(values) - return self._tuples + arr = lib.fast_zip(values) + return arr + + @property + def values(self): + return self._values @property def array(self): @@ -737,7 +733,6 @@ def _set_levels( if any(names): self._set_names(names) - self._tuples = None self._reset_cache() def set_levels(self, levels, level=None, inplace=None, verify_integrity=True): @@ -906,7 +901,6 @@ def _set_codes( self._codes = new_codes - self._tuples = None self._reset_cache() def set_codes(self, codes, level=None, inplace=None, verify_integrity=True): diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index aeb7b3e044794..2abc570a04de3 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -320,6 +320,10 @@ def read_hdf( mode : {'r', 'r+', 'a'}, default 'r' Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. + errors : str, default 'strict' + Specifies how encoding and decoding errors are to be handled. + See the errors argument for :func:`open` for a full list + of options. where : list, optional A list of Term (or convertible) objects. start : int, optional @@ -332,10 +336,6 @@ def read_hdf( Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. - errors : str, default 'strict' - Specifies how encoding and decoding errors are to be handled. - See the errors argument for :func:`open` for a full list - of options. **kwargs Additional keyword arguments passed to HDFStore. 
diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py index b2500efef9e03..72b5ed0edaa78 100644 --- a/pandas/tests/indexes/multi/test_compat.py +++ b/pandas/tests/indexes/multi/test_compat.py @@ -68,24 +68,33 @@ def test_inplace_mutation_resets_values(): mi1 = MultiIndex(levels=levels, codes=codes) mi2 = MultiIndex(levels=levels2, codes=codes) + + # instantiating MultiIndex should not access/cache _.values + assert "_values" not in mi1._cache + assert "_values" not in mi2._cache + vals = mi1.values.copy() vals2 = mi2.values.copy() - assert mi1._tuples is not None + # accessing .values should cache ._values + assert mi1._values is mi1._cache["_values"] + assert mi1.values is mi1._cache["_values"] + assert isinstance(mi1._cache["_values"], np.ndarray) # Make sure level setting works new_vals = mi1.set_levels(levels2).values tm.assert_almost_equal(vals2, new_vals) - # Non-inplace doesn't kill _tuples [implementation detail] - tm.assert_almost_equal(mi1._tuples, vals) + # Non-inplace doesn't drop _values from _cache [implementation detail] + tm.assert_almost_equal(mi1._cache["_values"], vals) # ...and values is still same too tm.assert_almost_equal(mi1.values, vals) - # Inplace should kill _tuples + # Inplace should drop _values from _cache with tm.assert_produces_warning(FutureWarning): mi1.set_levels(levels2, inplace=True) + assert "_values" not in mi1._cache tm.assert_almost_equal(mi1.values, vals2) # Make sure label setting works too @@ -95,18 +104,24 @@ def test_inplace_mutation_resets_values(): # Must be 1d array of tuples assert exp_values.shape == (6,) - new_values = mi2.set_codes(codes2).values + + new_mi = mi2.set_codes(codes2) + assert "_values" not in new_mi._cache + new_values = new_mi.values + assert "_values" in new_mi._cache # Not inplace shouldn't change - tm.assert_almost_equal(mi2._tuples, vals2) + tm.assert_almost_equal(mi2._cache["_values"], vals2) # Should have correct values tm.assert_almost_equal(exp_values, new_values) - # ...and again setting inplace should kill _tuples, etc + # ...and again setting inplace should drop _values from _cache, etc with tm.assert_produces_warning(FutureWarning): mi2.set_codes(codes2, inplace=True) + assert "_values" not in mi2._cache tm.assert_almost_equal(mi2.values, new_values) + assert "_values" in mi2._cache def test_ndarray_compat_properties(idx, compat_props):
Currently, the expensive-to-compute ``MultiIndex.values`` attribute is cached in ``MultiIndex._tuples``. It would be more idiomatic to store it in ``MultiIndex._cache`` IMO, which is what this PR does. This has the added benefit of ``._values`` getting copied over to new copies of the MultiIndex, so it also gives a performance boost in cases where copying is needed:

```python
>>> n = 100_000
>>> df = pd.DataFrame({'a': ['a', 'b'] * int(n / 2), 'b': range(n), 'c': range(20, n + 20)})
>>> mi = pd.MultiIndex.from_frame(df)
>>> mi.values  # also caches mi._values in mi._cache
array([('a', 0, 20), ('b', 1, 21), ('a', 2, 22), ...,
       ('b', 99997, 100017), ('a', 99998, 100018), ('b', 99999, 100019)],
      dtype=object)
>>> %timeit mi._shallow_copy().values
22.8 ms ± 3.43 ms per loop  # master
34.6 µs ± 997 ns per loop   # this PR
```
https://api.github.com/repos/pandas-dev/pandas/pulls/35641
2020-08-09T09:33:24Z
2020-08-10T22:52:08Z
2020-08-10T22:52:08Z
2020-08-10T23:00:12Z
DOC: Add specific Visual Studio Installer instructions
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 4ffd1d586a99a..e5c6f77eea3ef 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -204,6 +204,7 @@ You will need `Build Tools for Visual Studio 2017 You DO NOT need to install Visual Studio 2019. You only need "Build Tools for Visual Studio 2019" found by scrolling down to "All downloads" -> "Tools for Visual Studio 2019". + In the installer, select the "C++ build tools" workload. **Mac OS**
- [ ] further improves #28316

Currently the 'Contributing to pandas' > 'Installing a C compiler' section advises installing Build Tools for Visual Studio 2017 but doesn't specify which components to install.

Notes:

- The first time I installed the Build Tools it didn't seem to get everything needed, and I had to search around for a while before I could figure out what I'd missed.
- The advice at [python.org](https://wiki.python.org/moin/WindowsCompilers#Microsoft_Visual_C.2B-.2B-_14.2_standalone:_Build_Tools_for_Visual_Studio_2019_.28x86.2C_x64.2C_ARM.2C_ARM64.29) is more specific, so I have added that in here. I'm fairly sure that's the minimum required, correct?
https://api.github.com/repos/pandas-dev/pandas/pulls/35640
2020-08-09T09:12:42Z
2020-08-31T18:47:26Z
2020-08-31T18:47:26Z
2020-08-31T18:47:35Z
BUG: RollingGroupby with closed and column selection no longer raises ValueError
diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index f0ad9d1ca3b0f..7f5182e3eaa6f 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -51,6 +51,10 @@ Categorical - - +**Groupby/resample/rolling** + +- Bug in :class:`pandas.core.groupby.RollingGroupby` where passing ``closed`` with column selection would raise a ``ValueError`` (:issue:`35549`) + **Plotting** - diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index 58e7841d4dde5..51a067427e867 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -52,7 +52,7 @@ def __init__(self, obj, *args, **kwargs): kwargs.pop("parent", None) groupby = kwargs.pop("groupby", None) if groupby is None: - groupby, obj = obj, obj.obj + groupby, obj = obj, obj._selected_obj self._groupby = groupby self._groupby.mutated = True self._groupby.grouper.mutated = True diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index a04d68a6d6745..7347d5686aabc 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2212,7 +2212,7 @@ def _apply( # Cannot use _wrap_outputs because we calculate the result all at once # Compose MultiIndex result from grouping levels then rolling level # Aggregate the MultiIndex data as tuples then the level names - grouped_object_index = self._groupby._selected_obj.index + grouped_object_index = self.obj.index grouped_index_name = [grouped_object_index.name] groupby_keys = [grouping.name for grouping in self._groupby.grouper._groupings] result_index_names = groupby_keys + grouped_index_name @@ -2236,10 +2236,6 @@ def _apply( def _constructor(self): return Rolling - @cache_readonly - def _selected_obj(self): - return self._groupby._selected_obj - def _create_blocks(self, obj: FrameOrSeries): """ Split data into blocks & return conformed data. 
@@ -2278,7 +2274,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: rolling_indexer: Union[Type[FixedWindowIndexer], Type[VariableWindowIndexer]] if self.is_freq_type: rolling_indexer = VariableWindowIndexer - index_array = self._groupby._selected_obj.index.asi8 + index_array = self.obj.index.asi8 else: rolling_indexer = FixedWindowIndexer index_array = None @@ -2295,7 +2291,7 @@ def _gotitem(self, key, ndim, subset=None): # here so our index is carried thru to the selected obj # when we do the splitting for the groupby if self.on is not None: - self._groupby.obj = self._groupby.obj.set_index(self._on) + self.obj = self.obj.set_index(self._on) self.on = None return super()._gotitem(key, ndim, subset=subset) diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index 5241b9548a442..e1dcac06c39cc 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -304,3 +304,54 @@ def test_groupby_subselect_rolling(self): name="b", ) tm.assert_series_equal(result, expected) + + def test_groupby_rolling_subset_with_closed(self): + # GH 35549 + df = pd.DataFrame( + { + "column1": range(6), + "column2": range(6), + "group": 3 * ["A", "B"], + "date": [pd.Timestamp("2019-01-01")] * 6, + } + ) + result = ( + df.groupby("group").rolling("1D", on="date", closed="left")["column1"].sum() + ) + expected = Series( + [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0], + index=pd.MultiIndex.from_tuples( + [("A", pd.Timestamp("2019-01-01"))] * 3 + + [("B", pd.Timestamp("2019-01-01"))] * 3, + names=["group", "date"], + ), + name="column1", + ) + tm.assert_series_equal(result, expected) + + def test_groupby_subset_rolling_subset_with_closed(self): + # GH 35549 + df = pd.DataFrame( + { + "column1": range(6), + "column2": range(6), + "group": 3 * ["A", "B"], + "date": [pd.Timestamp("2019-01-01")] * 6, + } + ) + + result = ( + df.groupby("group")[["column1", "date"]] + .rolling("1D", on="date", closed="left")["column1"] + .sum() + ) + expected = Series( + [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0], + index=pd.MultiIndex.from_tuples( + [("A", pd.Timestamp("2019-01-01"))] * 3 + + [("B", pd.Timestamp("2019-01-01"))] * 3, + names=["group", "date"], + ), + name="column1", + ) + tm.assert_series_equal(result, expected)
- [x] closes #35549
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
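A minimal reproduction of the fixed case, taken from the test added in this PR; combining `closed` with column selection used to raise a `ValueError`:

```python
import pandas as pd

df = pd.DataFrame(
    {
        "column1": range(6),
        "column2": range(6),
        "group": 3 * ["A", "B"],
        "date": [pd.Timestamp("2019-01-01")] * 6,
    }
)

# Previously raised a ValueError; now returns the per-group rolling sum.
result = (
    df.groupby("group").rolling("1D", on="date", closed="left")["column1"].sum()
)
```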
https://api.github.com/repos/pandas-dev/pandas/pulls/35639
2020-08-09T06:21:50Z
2020-08-10T13:13:31Z
2020-08-10T13:13:30Z
2020-08-10T15:26:24Z
DOC: Mention NA for missing data in README
diff --git a/README.md b/README.md index a72e8402e68a0..a2f2f1c04442a 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ its way towards this goal. Here are just a few of the things that pandas does well: - Easy handling of [**missing data**][missing-data] (represented as - `NaN`) in floating point as well as non-floating point data + `NaN`, `NA`, or `NaT`) in floating point as well as non-floating point data - Size mutability: columns can be [**inserted and deleted**][insertion-deletion] from DataFrame and higher dimensional objects
https://api.github.com/repos/pandas-dev/pandas/pulls/35638
2020-08-08T19:35:27Z
2020-08-24T06:50:12Z
2020-08-24T06:50:12Z
2020-08-24T12:52:01Z
ENH: Make explode work for sets
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index d7d2e3cf876ca..ff9e803b4990a 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -103,7 +103,7 @@ Other enhancements - Added :meth:`~DataFrame.set_flags` for setting table-wide flags on a ``Series`` or ``DataFrame`` (:issue:`28394`) - :class:`Index` with object dtype supports division and multiplication (:issue:`34160`) -- +- :meth:`DataFrame.explode` and :meth:`Series.explode` now support exploding of sets (:issue:`35614`) - .. _whatsnew_120.api_breaking.python: diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index 5c6c15fb50fed..75dbb4b74aabd 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -124,7 +124,8 @@ def explode(ndarray[object] values): counts = np.zeros(n, dtype='int64') for i in range(n): v = values[i] - if c_is_list_like(v, False): + + if c_is_list_like(v, True): if len(v): counts[i] += len(v) else: @@ -138,8 +139,9 @@ def explode(ndarray[object] values): for i in range(n): v = values[i] - if c_is_list_like(v, False): + if c_is_list_like(v, True): if len(v): + v = list(v) for j in range(len(v)): result[count] = v[j] count += 1 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 29d6fb9aa7d56..150d6e24dbb86 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7091,10 +7091,11 @@ def explode( Notes ----- - This routine will explode list-likes including lists, tuples, + This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will - be object. Scalars will be returned unchanged. Empty list-likes will - result in a np.nan for that row. + be object. Scalars will be returned unchanged, and empty list-likes will + result in a np.nan for that row. In addition, the ordering of rows in the + output will be non-deterministic when exploding sets. Examples -------- diff --git a/pandas/core/series.py b/pandas/core/series.py index d8fdaa2a60252..6cbd93135a2ca 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3829,10 +3829,11 @@ def explode(self, ignore_index: bool = False) -> "Series": Notes ----- - This routine will explode list-likes including lists, tuples, + This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will - be object. Scalars will be returned unchanged. Empty list-likes will - result in a np.nan for that row. + be object. Scalars will be returned unchanged, and empty list-likes will + result in a np.nan for that row. In addition, the ordering of elements in + the output will be non-deterministic when exploding sets. 
Examples -------- diff --git a/pandas/tests/frame/methods/test_explode.py b/pandas/tests/frame/methods/test_explode.py index 2bbe8ac2d5b81..bd0901387eeed 100644 --- a/pandas/tests/frame/methods/test_explode.py +++ b/pandas/tests/frame/methods/test_explode.py @@ -172,3 +172,11 @@ def test_ignore_index(): {"id": [0, 0, 10, 10], "values": list("abcd")}, index=[0, 1, 2, 3] ) tm.assert_frame_equal(result, expected) + + +def test_explode_sets(): + # https://github.com/pandas-dev/pandas/issues/35614 + df = pd.DataFrame({"a": [{"x", "y"}], "b": [1]}, index=[1]) + result = df.explode(column="a").sort_values(by="a") + expected = pd.DataFrame({"a": ["x", "y"], "b": [1, 1]}, index=[1, 1]) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/methods/test_explode.py b/pandas/tests/series/methods/test_explode.py index 4b65e042f7b02..1f0fbd1cc5ecb 100644 --- a/pandas/tests/series/methods/test_explode.py +++ b/pandas/tests/series/methods/test_explode.py @@ -126,3 +126,11 @@ def test_ignore_index(): result = s.explode(ignore_index=True) expected = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3], dtype=object) tm.assert_series_equal(result, expected) + + +def test_explode_sets(): + # https://github.com/pandas-dev/pandas/issues/35614 + s = pd.Series([{"a", "b", "c"}], index=[1]) + result = s.explode().sort_values() + expected = pd.Series(["a", "b", "c"], index=[1, 1, 1]) + tm.assert_series_equal(result, expected)
- [x] closes #35614
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
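A short sketch of the new behavior, based on the tests added here (row order after exploding a set is non-deterministic because sets are unordered, hence the sort):

```python
import pandas as pd

s = pd.Series([{"a", "b", "c"}], index=[1])
result = s.explode().sort_values()
# 1    a
# 1    b
# 1    c
# dtype: object
```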
https://api.github.com/repos/pandas-dev/pandas/pulls/35637
2020-08-08T19:33:06Z
2020-09-05T22:36:43Z
2020-09-05T22:36:43Z
2020-09-05T22:36:46Z
TST: collect tests by method, split large tests
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e8793c364586b..bc2051a130079 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -589,12 +589,14 @@ def __init__( elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: + # i.e. numpy structured array data_columns = list(data.dtype.names) data = {k: data[k] for k in data_columns} if columns is None: columns = data_columns mgr = dict_to_mgr(data, index, columns, dtype=dtype) elif getattr(data, "name", None) is not None: + # i.e. Series/Index with non-None name mgr = dict_to_mgr({data.name: data}, index, columns, dtype=dtype) else: mgr = ndarray_to_mgr(data, index, columns, dtype=dtype, copy=copy) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index e6893a1aa9da1..873c58f976508 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1722,12 +1722,15 @@ def test_constructor_with_datetimes(self): ) tm.assert_series_equal(result, expected) + def test_constructor_with_datetimes1(self): + # GH 2809 ind = date_range(start="2000-01-01", freq="D", periods=10) datetimes = [ts.to_pydatetime() for ts in ind] datetime_s = Series(datetimes) assert datetime_s.dtype == "M8[ns]" + def test_constructor_with_datetimes2(self): # GH 2810 ind = date_range(start="2000-01-01", freq="D", periods=10) datetimes = [ts.to_pydatetime() for ts in ind] @@ -1741,6 +1744,7 @@ def test_constructor_with_datetimes(self): ) tm.assert_series_equal(result, expected) + def test_constructor_with_datetimes3(self): # GH 7594 # don't coerce tz-aware tz = pytz.timezone("US/Eastern") @@ -1758,6 +1762,7 @@ def test_constructor_with_datetimes(self): df.dtypes, Series({"End Date": "datetime64[ns, US/Eastern]"}) ) + def test_constructor_with_datetimes4(self): # tz-aware (UTC and other tz's) # GH 8411 dr = date_range("20130101", periods=3) @@ -1770,6 +1775,7 @@ def test_constructor_with_datetimes(self): df = DataFrame({"value": dr}) assert str(df.iat[0, 0].tz) == "US/Eastern" + def test_constructor_with_datetimes5(self): # GH 7822 # preserver an index with a tz on dict construction i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern") @@ -1782,7 +1788,9 @@ def test_constructor_with_datetimes(self): df = DataFrame({"a": i}) tm.assert_frame_equal(df, expected) + def test_constructor_with_datetimes6(self): # multiples + i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern") i_no_tz = date_range("1/1/2011", periods=5, freq="10s") df = DataFrame({"a": i, "b": i_no_tz}) expected = DataFrame({"a": i.to_series().reset_index(drop=True), "b": i_no_tz}) diff --git a/pandas/tests/indexes/categorical/test_append.py b/pandas/tests/indexes/categorical/test_append.py new file mode 100644 index 0000000000000..b48c3219f5111 --- /dev/null +++ b/pandas/tests/indexes/categorical/test_append.py @@ -0,0 +1,62 @@ +import pytest + +from pandas import ( + CategoricalIndex, + Index, +) +import pandas._testing as tm + + +class TestAppend: + @pytest.fixture + def ci(self): + categories = list("cab") + return CategoricalIndex(list("aabbca"), categories=categories, ordered=False) + + def test_append(self, ci): + # append cats with the same categories + result = ci[:3].append(ci[3:]) + tm.assert_index_equal(result, ci, exact=True) + + foos = [ci[:1], ci[1:3], ci[3:]] + result = foos[0].append(foos[1:]) + tm.assert_index_equal(result, ci, exact=True) + + def test_append_empty(self, ci): + # empty + result = ci.append([]) + tm.assert_index_equal(result, ci, 
exact=True) + + def test_append_mismatched_categories(self, ci): + # appending with different categories or reordered is not ok + msg = "all inputs must be Index" + with pytest.raises(TypeError, match=msg): + ci.append(ci.values.set_categories(list("abcd"))) + with pytest.raises(TypeError, match=msg): + ci.append(ci.values.reorder_categories(list("abc"))) + + def test_append_category_objects(self, ci): + # with objects + result = ci.append(Index(["c", "a"])) + expected = CategoricalIndex(list("aabbcaca"), categories=ci.categories) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_non_categories(self, ci): + # invalid objects -> cast to object via concat_compat + result = ci.append(Index(["a", "d"])) + expected = Index(["a", "a", "b", "b", "c", "a", "a", "d"]) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_object(self, ci): + # GH#14298 - if base object is not categorical -> coerce to object + result = Index(["c", "a"]).append(ci) + expected = Index(list("caaabbca")) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_to_another(self): + # hits Index._concat + fst = Index(["a", "b"]) + snd = CategoricalIndex(["d", "e"]) + result = fst.append(snd) + expected = Index(["a", "b", "d", "e"]) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py index 8c9caf2e59011..222ca692091ab 100644 --- a/pandas/tests/indexes/categorical/test_category.py +++ b/pandas/tests/indexes/categorical/test_category.py @@ -30,53 +30,6 @@ def test_can_hold_identifiers(self): key = idx[0] assert idx._can_hold_identifiers_and_holds_name(key) is True - def test_append(self): - - ci = self.create_index() - categories = ci.categories - - # append cats with the same categories - result = ci[:3].append(ci[3:]) - tm.assert_index_equal(result, ci, exact=True) - - foos = [ci[:1], ci[1:3], ci[3:]] - result = foos[0].append(foos[1:]) - tm.assert_index_equal(result, ci, exact=True) - - # empty - result = ci.append([]) - tm.assert_index_equal(result, ci, exact=True) - - # appending with different categories or reordered is not ok - msg = "all inputs must be Index" - with pytest.raises(TypeError, match=msg): - ci.append(ci.values.set_categories(list("abcd"))) - with pytest.raises(TypeError, match=msg): - ci.append(ci.values.reorder_categories(list("abc"))) - - # with objects - result = ci.append(Index(["c", "a"])) - expected = CategoricalIndex(list("aabbcaca"), categories=categories) - tm.assert_index_equal(result, expected, exact=True) - - # invalid objects -> cast to object via concat_compat - result = ci.append(Index(["a", "d"])) - expected = Index(["a", "a", "b", "b", "c", "a", "a", "d"]) - tm.assert_index_equal(result, expected, exact=True) - - # GH14298 - if base object is not categorical -> coerce to object - result = Index(["c", "a"]).append(ci) - expected = Index(list("caaabbca")) - tm.assert_index_equal(result, expected, exact=True) - - def test_append_to_another(self): - # hits Index._concat - fst = Index(["a", "b"]) - snd = CategoricalIndex(["d", "e"]) - result = fst.append(snd) - expected = Index(["a", "b", "d", "e"]) - tm.assert_index_equal(result, expected) - def test_insert(self): ci = self.create_index() @@ -326,12 +279,6 @@ def test_map_str(self): class TestCategoricalIndex2: # Tests that are not overriding a test in Base - def test_format_different_scalar_lengths(self): - # GH35439 - idx = CategoricalIndex(["aaaaaaaaa", "b"]) - expected = 
["aaaaaaaaa", "b"] - assert idx.format() == expected - @pytest.mark.parametrize( "dtype, engine_type", [ diff --git a/pandas/tests/indexes/categorical/test_formats.py b/pandas/tests/indexes/categorical/test_formats.py index 0f1cb55b9811c..2009d78e47c1c 100644 --- a/pandas/tests/indexes/categorical/test_formats.py +++ b/pandas/tests/indexes/categorical/test_formats.py @@ -7,6 +7,12 @@ class TestCategoricalIndexRepr: + def test_format_different_scalar_lengths(self): + # GH#35439 + idx = CategoricalIndex(["aaaaaaaaa", "b"]) + expected = ["aaaaaaaaa", "b"] + assert idx.format() == expected + def test_string_categorical_index_repr(self): # short idx = CategoricalIndex(["a", "bb", "ccc"]) diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index c65d9098a86a4..d29d4647f4753 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -551,6 +551,13 @@ def test_get_loc_reasonable_key_error(self): with pytest.raises(KeyError, match="2000"): index.get_loc("1/1/2000") + def test_get_loc_year_str(self): + rng = date_range("1/1/2000", "1/1/2010") + + result = rng.get_loc("2009") + expected = slice(3288, 3653) + assert result == expected + class TestContains: def test_dti_contains_with_duplicates(self): diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index d230aa43e43d1..eff87a2b3f275 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -37,6 +37,8 @@ def test_range_edges(self): ) tm.assert_index_equal(idx, exp) + def test_range_edges2(self): + idx = date_range( start=Timestamp("1970-01-01 00:00:00.000000004"), end=Timestamp("1970-01-01 00:00:00.000000001"), @@ -45,6 +47,8 @@ def test_range_edges(self): exp = DatetimeIndex([], freq="N") tm.assert_index_equal(idx, exp) + def test_range_edges3(self): + idx = date_range( start=Timestamp("1970-01-01 00:00:00.000000001"), end=Timestamp("1970-01-01 00:00:00.000000001"), @@ -53,6 +57,8 @@ def test_range_edges(self): exp = DatetimeIndex(["1970-01-01 00:00:00.000000001"], freq="N") tm.assert_index_equal(idx, exp) + def test_range_edges4(self): + idx = date_range( start=Timestamp("1970-01-01 00:00:00.000001"), end=Timestamp("1970-01-01 00:00:00.000004"), @@ -69,6 +75,8 @@ def test_range_edges(self): ) tm.assert_index_equal(idx, exp) + def test_range_edges5(self): + idx = date_range( start=Timestamp("1970-01-01 00:00:00.001"), end=Timestamp("1970-01-01 00:00:00.004"), @@ -85,6 +93,7 @@ def test_range_edges(self): ) tm.assert_index_equal(idx, exp) + def test_range_edges6(self): idx = date_range( start=Timestamp("1970-01-01 00:00:01"), end=Timestamp("1970-01-01 00:00:04"), @@ -101,6 +110,7 @@ def test_range_edges(self): ) tm.assert_index_equal(idx, exp) + def test_range_edges7(self): idx = date_range( start=Timestamp("1970-01-01 00:01"), end=Timestamp("1970-01-01 00:04"), @@ -117,6 +127,7 @@ def test_range_edges(self): ) tm.assert_index_equal(idx, exp) + def test_range_edges8(self): idx = date_range( start=Timestamp("1970-01-01 01:00"), end=Timestamp("1970-01-01 04:00"), @@ -133,6 +144,7 @@ def test_range_edges(self): ) tm.assert_index_equal(idx, exp) + def test_range_edges9(self): idx = date_range( start=Timestamp("1970-01-01"), end=Timestamp("1970-01-04"), freq="D" ) @@ -234,6 +246,7 @@ def test_datetimeindex_accessors(self): exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name") tm.assert_index_equal(res, exp) + def 
test_datetimeindex_accessors2(self): dti = date_range(freq="BQ-FEB", start=datetime(1998, 1, 1), periods=4) assert sum(dti.is_quarter_start) == 0 @@ -241,6 +254,7 @@ def test_datetimeindex_accessors(self): assert sum(dti.is_year_start) == 0 assert sum(dti.is_year_end) == 1 + def test_datetimeindex_accessors3(self): # Ensure is_start/end accessors throw ValueError for CustomBusinessDay, bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu") dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt) @@ -248,10 +262,12 @@ def test_datetimeindex_accessors(self): with pytest.raises(ValueError, match=msg): dti.is_month_start + def test_datetimeindex_accessors4(self): dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"]) assert dti.is_month_start[0] == 1 + def test_datetimeindex_accessors5(self): tests = [ (Timestamp("2013-06-01", freq="M").is_month_start, 1), (Timestamp("2013-06-01", freq="BM").is_month_start, 0), @@ -290,6 +306,7 @@ def test_datetimeindex_accessors(self): for ts, value in tests: assert ts == value + def test_datetimeindex_accessors6(self): # GH 6538: Check that DatetimeIndex and its TimeStamp elements # return the same weekofyear accessor close to new year w/ tz dates = ["2013/12/29", "2013/12/30", "2013/12/31"] diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index 05ee67eee0da5..882515799f943 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -55,12 +55,6 @@ def test_slice_year(self): expected = df[df.index.year == 2005] tm.assert_frame_equal(result, expected) - rng = date_range("1/1/2000", "1/1/2010") - - result = rng.get_loc("2009") - expected = slice(3288, 3653) - assert result == expected - @pytest.mark.parametrize( "partial_dtime", [ diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 03e78af0b2bdd..032b376f6d6a9 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -253,14 +253,6 @@ def test_is_(self): assert not index.is_(index - 2) assert not index.is_(index - 0) - def test_periods_number_check(self): - msg = ( - "Of the three parameters: start, end, and periods, exactly two " - "must be specified" - ) - with pytest.raises(ValueError, match=msg): - period_range("2011-1-1", "2012-1-1", "B") - def test_index_duplicate_periods(self): # monotonic idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN") @@ -348,13 +340,6 @@ def test_with_multi_index(self): assert isinstance(s.index.values[0][0], Period) - def test_convert_array_of_periods(self): - rng = period_range("1/1/2000", periods=20, freq="D") - periods = list(rng) - - result = Index(periods) - assert isinstance(result, PeriodIndex) - def test_pickle_freq(self): # GH2891 prng = period_range("1/1/2011", "1/1/2012", freq="M") diff --git a/pandas/tests/indexes/period/test_period_range.py b/pandas/tests/indexes/period/test_period_range.py index a5be19731b54a..c94ddf57c0ee1 100644 --- a/pandas/tests/indexes/period/test_period_range.py +++ b/pandas/tests/indexes/period/test_period_range.py @@ -12,6 +12,14 @@ class TestPeriodRange: + def test_required_arguments(self): + msg = ( + "Of the three parameters: start, end, and periods, exactly two " + "must be specified" + ) + with pytest.raises(ValueError, match=msg): + period_range("2011-1-1", "2012-1-1", "B") + @pytest.mark.parametrize("freq", ["D", "W", "M", "Q", "A"]) def 
test_construction_from_string(self, freq): # non-empty diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py index 4fba4b13835b3..5937f43102190 100644 --- a/pandas/tests/indexes/test_index_new.py +++ b/pandas/tests/indexes/test_index_new.py @@ -80,6 +80,13 @@ def test_constructor_infer_periodindex(self): tm.assert_index_equal(rs, xp) assert isinstance(rs, PeriodIndex) + def test_from_list_of_periods(self): + rng = period_range("1/1/2000", periods=20, freq="D") + periods = list(rng) + + result = Index(periods) + assert isinstance(result, PeriodIndex) + @pytest.mark.parametrize("pos", [0, 1]) @pytest.mark.parametrize( "klass,dtype,ctor", diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index 7acfb50fe944b..5f0101eb4478c 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -7,13 +7,15 @@ import numpy as np import pytest -import pandas as pd from pandas import ( Index, + NaT, Timedelta, TimedeltaIndex, + Timestamp, notna, timedelta_range, + to_timedelta, ) import pandas._testing as tm @@ -64,10 +66,10 @@ def test_getitem(self): @pytest.mark.parametrize( "key", [ - pd.Timestamp("1970-01-01"), - pd.Timestamp("1970-01-02"), + Timestamp("1970-01-01"), + Timestamp("1970-01-02"), datetime(1970, 1, 1), - pd.Timestamp("1970-01-03").to_datetime64(), + Timestamp("1970-01-03").to_datetime64(), # non-matching NA values np.datetime64("NaT"), ], @@ -81,7 +83,7 @@ def test_timestamp_invalid_key(self, key): class TestGetLoc: def test_get_loc(self): - idx = pd.to_timedelta(["0 days", "1 days", "2 days"]) + idx = to_timedelta(["0 days", "1 days", "2 days"]) for method in [None, "pad", "backfill", "nearest"]: assert idx.get_loc(idx[1], method) == 1 @@ -117,7 +119,7 @@ def test_get_loc(self): def test_get_loc_nat(self): tidx = TimedeltaIndex(["1 days 01:00:00", "NaT", "2 days 01:00:00"]) - assert tidx.get_loc(pd.NaT) == 1 + assert tidx.get_loc(NaT) == 1 assert tidx.get_loc(None) == 1 assert tidx.get_loc(float("nan")) == 1 assert tidx.get_loc(np.nan) == 1 @@ -125,12 +127,12 @@ def test_get_loc_nat(self): class TestGetIndexer: def test_get_indexer(self): - idx = pd.to_timedelta(["0 days", "1 days", "2 days"]) + idx = to_timedelta(["0 days", "1 days", "2 days"]) tm.assert_numpy_array_equal( idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp) ) - target = pd.to_timedelta(["-1 hour", "12 hours", "1 day 1 hour"]) + target = to_timedelta(["-1 hour", "12 hours", "1 day 1 hour"]) tm.assert_numpy_array_equal( idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp) ) @@ -158,25 +160,25 @@ def test_where_invalid_dtypes(self): tdi = timedelta_range("1 day", periods=3, freq="D", name="idx") tail = tdi[2:].tolist() - i2 = Index([pd.NaT, pd.NaT] + tail) + i2 = Index([NaT, NaT] + tail) mask = notna(i2) - expected = Index([pd.NaT.value, pd.NaT.value] + tail, dtype=object, name="idx") + expected = Index([NaT.value, NaT.value] + tail, dtype=object, name="idx") assert isinstance(expected[0], int) result = tdi.where(mask, i2.asi8) tm.assert_index_equal(result, expected) - ts = i2 + pd.Timestamp.now() + ts = i2 + Timestamp.now() expected = Index([ts[0], ts[1]] + tail, dtype=object, name="idx") result = tdi.where(mask, ts) tm.assert_index_equal(result, expected) - per = (i2 + pd.Timestamp.now()).to_period("D") + per = (i2 + Timestamp.now()).to_period("D") expected = Index([per[0], per[1]] + tail, dtype=object, name="idx") result = tdi.where(mask, per) 
tm.assert_index_equal(result, expected) - ts = pd.Timestamp.now() + ts = Timestamp.now() expected = Index([ts, ts] + tail, dtype=object, name="idx") result = tdi.where(mask, ts) tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py deleted file mode 100644 index cca211c1eb155..0000000000000 --- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py +++ /dev/null @@ -1,42 +0,0 @@ -import numpy as np - -from pandas import ( - Series, - timedelta_range, -) -import pandas._testing as tm - - -class TestSlicing: - def test_partial_slice(self): - rng = timedelta_range("1 day 10:11:12", freq="h", periods=500) - s = Series(np.arange(len(rng)), index=rng) - - result = s["5 day":"6 day"] - expected = s.iloc[86:134] - tm.assert_series_equal(result, expected) - - result = s["5 day":] - expected = s.iloc[86:] - tm.assert_series_equal(result, expected) - - result = s[:"6 day"] - expected = s.iloc[:134] - tm.assert_series_equal(result, expected) - - def test_partial_slice_high_reso(self): - - # higher reso - rng = timedelta_range("1 day 10:11:12", freq="us", periods=2000) - s = Series(np.arange(len(rng)), index=rng) - - result = s["1 day 10:11:12":] - expected = s.iloc[0:] - tm.assert_series_equal(result, expected) - - result = s["1 day 10:11:12.001":] - expected = s.iloc[1000:] - tm.assert_series_equal(result, expected) - - result = s["1 days, 10:11:12.001001"] - assert result == s.iloc[1001] diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py index e6dfafabbfec2..d57a4c271680b 100644 --- a/pandas/tests/series/indexing/test_getitem.py +++ b/pandas/tests/series/indexing/test_getitem.py @@ -202,6 +202,38 @@ def test_getitem_slice_strings_with_datetimeindex(self): expected = ts[1:4] tm.assert_series_equal(result, expected) + def test_getitem_partial_str_slice_with_timedeltaindex(self): + rng = timedelta_range("1 day 10:11:12", freq="h", periods=500) + ser = Series(np.arange(len(rng)), index=rng) + + result = ser["5 day":"6 day"] + expected = ser.iloc[86:134] + tm.assert_series_equal(result, expected) + + result = ser["5 day":] + expected = ser.iloc[86:] + tm.assert_series_equal(result, expected) + + result = ser[:"6 day"] + expected = ser.iloc[:134] + tm.assert_series_equal(result, expected) + + def test_getitem_partial_str_slice_high_reso_with_timedeltaindex(self): + # higher reso + rng = timedelta_range("1 day 10:11:12", freq="us", periods=2000) + ser = Series(np.arange(len(rng)), index=rng) + + result = ser["1 day 10:11:12":] + expected = ser.iloc[0:] + tm.assert_series_equal(result, expected) + + result = ser["1 day 10:11:12.001":] + expected = ser.iloc[1000:] + tm.assert_series_equal(result, expected) + + result = ser["1 days, 10:11:12.001001"] + assert result == ser.iloc[1001] + def test_getitem_slice_2d(self, datetime_series): # GH#30588 multi-dimensional indexing deprecated
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40133
2021-03-01T04:14:44Z
2021-03-01T13:51:02Z
2021-03-01T13:51:02Z
2021-03-01T15:45:26Z
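The two ``# i.e.`` comments added to ``DataFrame.__init__`` in the diff above annotate branches that can be exercised directly. Below is a minimal sketch of both constructor paths (my own illustration of ordinary pandas behavior, not code from the patch):

```python
import numpy as np
import pandas as pd

# Path 1: data.dtype.names is set, i.e. a numpy structured array;
# each field becomes one column.
arr = np.array([(1, 2.0), (3, 4.0)], dtype=[("a", "i8"), ("b", "f8")])
df1 = pd.DataFrame(arr)
assert list(df1.columns) == ["a", "b"]

# Path 2: a Series/Index with a non-None name; the name becomes the
# single column label.
ser = pd.Series([1, 2, 3], name="x")
df2 = pd.DataFrame(ser)
assert list(df2.columns) == ["x"]
```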
TST: share value_counts tests
diff --git a/pandas/tests/indexes/datetimelike_/test_value_counts.py b/pandas/tests/indexes/datetimelike_/test_value_counts.py new file mode 100644 index 0000000000000..f0df6dd678ef5 --- /dev/null +++ b/pandas/tests/indexes/datetimelike_/test_value_counts.py @@ -0,0 +1,103 @@ +import numpy as np + +from pandas import ( + DatetimeIndex, + NaT, + PeriodIndex, + Series, + TimedeltaIndex, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestValueCounts: + # GH#7735 + + def test_value_counts_unique_datetimeindex(self, tz_naive_fixture): + tz = tz_naive_fixture + orig = date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz) + self._check_value_counts_with_repeats(orig) + + def test_value_counts_unique_timedeltaindex(self): + orig = timedelta_range("1 days 09:00:00", freq="H", periods=10) + self._check_value_counts_with_repeats(orig) + + def test_value_counts_unique_periodindex(self): + orig = period_range("2011-01-01 09:00", freq="H", periods=10) + self._check_value_counts_with_repeats(orig) + + def _check_value_counts_with_repeats(self, orig): + # create repeated values, 'n'th element is repeated by n+1 times + idx = type(orig)( + np.repeat(orig._values, range(1, len(orig) + 1)), dtype=orig.dtype + ) + + exp_idx = orig[::-1] + if not isinstance(exp_idx, PeriodIndex): + exp_idx = exp_idx._with_freq(None) + expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64") + + for obj in [idx, Series(idx)]: + tm.assert_series_equal(obj.value_counts(), expected) + + tm.assert_index_equal(idx.unique(), orig) + + def test_value_counts_unique_datetimeindex2(self, tz_naive_fixture): + tz = tz_naive_fixture + idx = DatetimeIndex( + [ + "2013-01-01 09:00", + "2013-01-01 09:00", + "2013-01-01 09:00", + "2013-01-01 08:00", + "2013-01-01 08:00", + NaT, + ], + tz=tz, + ) + self._check_value_counts_dropna(idx) + + def test_value_counts_unique_timedeltaindex2(self): + idx = TimedeltaIndex( + [ + "1 days 09:00:00", + "1 days 09:00:00", + "1 days 09:00:00", + "1 days 08:00:00", + "1 days 08:00:00", + NaT, + ] + ) + self._check_value_counts_dropna(idx) + + def test_value_counts_unique_periodindex2(self): + idx = PeriodIndex( + [ + "2013-01-01 09:00", + "2013-01-01 09:00", + "2013-01-01 09:00", + "2013-01-01 08:00", + "2013-01-01 08:00", + NaT, + ], + freq="H", + ) + self._check_value_counts_dropna(idx) + + def _check_value_counts_dropna(self, idx): + exp_idx = idx[[2, 3]] + expected = Series([3, 2], index=exp_idx) + + for obj in [idx, Series(idx)]: + tm.assert_series_equal(obj.value_counts(), expected) + + exp_idx = idx[[2, 3, -1]] + expected = Series([3, 2, 1], index=exp_idx) + + for obj in [idx, Series(idx)]: + tm.assert_series_equal(obj.value_counts(dropna=False), expected) + + tm.assert_index_equal(idx.unique(), exp_idx) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 49288af89ee22..7df94b5820e5d 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -1,12 +1,10 @@ from datetime import datetime from dateutil.tz import tzlocal -import numpy as np import pytest from pandas.compat import IS64 -import pandas as pd from pandas import ( DateOffset, DatetimeIndex, @@ -69,51 +67,6 @@ def test_resolution(self, request, tz_naive_fixture, freq, expected): idx = date_range(start="2013-04-01", periods=30, freq=freq, tz=tz) assert idx.resolution == expected - def test_value_counts_unique(self, tz_naive_fixture): - tz = tz_naive_fixture - # GH 7735 - idx = 
date_range("2011-01-01 09:00", freq="H", periods=10) - # create repeated values, 'n'th element is repeated by n+1 times - idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz) - - exp_idx = date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz) - expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64") - expected.index = expected.index._with_freq(None) - - for obj in [idx, Series(idx)]: - - tm.assert_series_equal(obj.value_counts(), expected) - - expected = date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz) - expected = expected._with_freq(None) - tm.assert_index_equal(idx.unique(), expected) - - idx = DatetimeIndex( - [ - "2013-01-01 09:00", - "2013-01-01 09:00", - "2013-01-01 09:00", - "2013-01-01 08:00", - "2013-01-01 08:00", - pd.NaT, - ], - tz=tz, - ) - - exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz) - expected = Series([3, 2], index=exp_idx) - - for obj in [idx, Series(idx)]: - tm.assert_series_equal(obj.value_counts(), expected) - - exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", pd.NaT], tz=tz) - expected = Series([3, 2, 1], index=exp_idx) - - for obj in [idx, Series(idx)]: - tm.assert_series_equal(obj.value_counts(dropna=False), expected) - - tm.assert_index_equal(idx.unique(), exp_idx) - def test_infer_freq(self, freq_sample): # GH 11018 idx = date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10) diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index 4ca98f6bbcb75..9ebe44fb16c8d 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -1,12 +1,6 @@ -import numpy as np import pytest import pandas as pd -from pandas import ( - NaT, - PeriodIndex, - Series, -) import pandas._testing as tm @@ -29,61 +23,6 @@ def test_resolution(self, freq, expected): idx = pd.period_range(start="2013-04-01", periods=30, freq=freq) assert idx.resolution == expected - def test_value_counts_unique(self): - # GH 7735 - idx = pd.period_range("2011-01-01 09:00", freq="H", periods=10) - # create repeated values, 'n'th element is repeated by n+1 times - idx = PeriodIndex(np.repeat(idx._values, range(1, len(idx) + 1)), freq="H") - - exp_idx = PeriodIndex( - [ - "2011-01-01 18:00", - "2011-01-01 17:00", - "2011-01-01 16:00", - "2011-01-01 15:00", - "2011-01-01 14:00", - "2011-01-01 13:00", - "2011-01-01 12:00", - "2011-01-01 11:00", - "2011-01-01 10:00", - "2011-01-01 09:00", - ], - freq="H", - ) - expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64") - - for obj in [idx, Series(idx)]: - tm.assert_series_equal(obj.value_counts(), expected) - - expected = pd.period_range("2011-01-01 09:00", freq="H", periods=10) - tm.assert_index_equal(idx.unique(), expected) - - idx = PeriodIndex( - [ - "2013-01-01 09:00", - "2013-01-01 09:00", - "2013-01-01 09:00", - "2013-01-01 08:00", - "2013-01-01 08:00", - NaT, - ], - freq="H", - ) - - exp_idx = PeriodIndex(["2013-01-01 09:00", "2013-01-01 08:00"], freq="H") - expected = Series([3, 2], index=exp_idx) - - for obj in [idx, Series(idx)]: - tm.assert_series_equal(obj.value_counts(), expected) - - exp_idx = PeriodIndex(["2013-01-01 09:00", "2013-01-01 08:00", NaT], freq="H") - expected = Series([3, 2, 1], index=exp_idx) - - for obj in [idx, Series(idx)]: - tm.assert_series_equal(obj.value_counts(dropna=False), expected) - - tm.assert_index_equal(idx.unique(), exp_idx) - def test_freq_setter_deprecated(self): # GH 20678 idx = pd.period_range("2018Q1", periods=4, freq="Q") diff 
--git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 8bb86057e7084..2a5051b2982bb 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -2,7 +2,6 @@ import pytest from pandas import ( - NaT, Series, TimedeltaIndex, timedelta_range, @@ -17,50 +16,6 @@ class TestTimedeltaIndexOps: - def test_value_counts_unique(self): - # GH 7735 - idx = timedelta_range("1 days 09:00:00", freq="H", periods=10) - # create repeated values, 'n'th element is repeated by n+1 times - idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1))) - - exp_idx = timedelta_range("1 days 18:00:00", freq="-1H", periods=10) - exp_idx = exp_idx._with_freq(None) - expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64") - - obj = idx - tm.assert_series_equal(obj.value_counts(), expected) - - obj = Series(idx) - tm.assert_series_equal(obj.value_counts(), expected) - - expected = timedelta_range("1 days 09:00:00", freq="H", periods=10) - tm.assert_index_equal(idx.unique(), expected) - - idx = TimedeltaIndex( - [ - "1 days 09:00:00", - "1 days 09:00:00", - "1 days 09:00:00", - "1 days 08:00:00", - "1 days 08:00:00", - NaT, - ] - ) - - exp_idx = TimedeltaIndex(["1 days 09:00:00", "1 days 08:00:00"]) - expected = Series([3, 2], index=exp_idx) - - for obj in [idx, Series(idx)]: - tm.assert_series_equal(obj.value_counts(), expected) - - exp_idx = TimedeltaIndex(["1 days 09:00:00", "1 days 08:00:00", NaT]) - expected = Series([3, 2, 1], index=exp_idx) - - for obj in [idx, Series(idx)]: - tm.assert_series_equal(obj.value_counts(dropna=False), expected) - - tm.assert_index_equal(idx.unique(), exp_idx) - def test_nonunique_contains(self): # GH 9512 for idx in map(
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40132
2021-03-01T01:04:20Z
2021-03-01T13:44:59Z
2021-03-01T13:44:58Z
2021-03-01T15:47:23Z
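The shared ``_check_value_counts_with_repeats`` helper in the diff above encodes one pattern for all three datetime-like indexes: the ``n``-th element is repeated ``n + 1`` times, so ``value_counts`` should return the original values in reverse order with counts 10 down to 1. A minimal standalone sketch of that pattern for the ``DatetimeIndex`` case, mirroring the helper (including its use of the private ``_values`` and ``_with_freq`` APIs):

```python
import numpy as np
import pandas as pd
import pandas._testing as tm

orig = pd.date_range("2011-01-01 09:00", freq="H", periods=10)
# 'n'th element is repeated n+1 times, as in the helper
idx = pd.DatetimeIndex(np.repeat(orig._values, range(1, len(orig) + 1)))

# most frequent value first -> original values in reverse order
exp_idx = orig[::-1]._with_freq(None)
expected = pd.Series(range(10, 0, -1), index=exp_idx, dtype="int64")

tm.assert_series_equal(idx.value_counts(), expected)
tm.assert_index_equal(idx.unique(), orig)
```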
#39367 DOC: split contributing.rst
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index b4fa6b008be74..b9afbe387799e 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -140,266 +140,6 @@ Note that performing a shallow clone (with ``--depth==N``, for some ``N`` greater or equal to 1) might break some tests and features such as ``pd.show_versions()`` as the version number cannot be computed anymore. -.. _contributing.dev_env: - -Creating a development environment ----------------------------------- - -To test out code changes, you'll need to build pandas from source, which -requires a C/C++ compiler and Python environment. If you're making documentation -changes, you can skip to :ref:`contributing.documentation` but if you skip -creating the development environment you won't be able to build the documentation -locally before pushing your changes. - -Using a Docker container -~~~~~~~~~~~~~~~~~~~~~~~~ - -Instead of manually setting up a development environment, you can use `Docker <https://docs.docker.com/get-docker/>`_ to automatically create the environment with just a few -commands. pandas provides a ``DockerFile`` in the root directory to build a Docker image -with a full pandas development environment. - -**Docker Commands** - -Pass your GitHub username in the ``DockerFile`` to use your own fork:: - - # Build the image pandas-yourname-env - docker build --tag pandas-yourname-env . - # Run a container and bind your local forked repo, pandas-yourname, to the container - docker run -it --rm -v path-to-pandas-yourname:/home/pandas-yourname pandas-yourname-env - -Even easier, you can integrate Docker with the following IDEs: - -**Visual Studio Code** - -You can use the DockerFile to launch a remote session with Visual Studio Code, -a popular free IDE, using the ``.devcontainer.json`` file. -See https://code.visualstudio.com/docs/remote/containers for details. - -**PyCharm (Professional)** - -Enable Docker support and use the Services tool window to build and manage images as well as -run and interact with containers. -See https://www.jetbrains.com/help/pycharm/docker.html for details. - -Note that you might need to rebuild the C extensions if/when you merge with upstream/master using:: - - python setup.py build_ext -j 4 - -.. _contributing.dev_c: - -Installing a C compiler -~~~~~~~~~~~~~~~~~~~~~~~ - -pandas uses C extensions (mostly written using Cython) to speed up certain -operations. To install pandas from source, you need to compile these C -extensions, which means you need a C compiler. This process depends on which -platform you're using. - -If you have set up your environment using ``conda``, the packages ``c-compiler`` -and ``cxx-compiler`` will install a fitting compiler for your platform that is -compatible with the remaining conda packages. On Windows and macOS, you will -also need to install the SDKs as they have to be distributed separately. -These packages will be automatically installed by using ``pandas``'s -``environment.yml``. - -**Windows** - -You will need `Build Tools for Visual Studio 2019 -<https://visualstudio.microsoft.com/downloads/>`_. - -.. warning:: - You DO NOT need to install Visual Studio 2019. - You only need "Build Tools for Visual Studio 2019" found by - scrolling down to "All downloads" -> "Tools for Visual Studio 2019". - In the installer, select the "C++ build tools" workload.
- -You can install the necessary components on the commandline using -`vs_buildtools.exe <https://aka.ms/vs/16/release/vs_buildtools.exe>`_: - -.. code:: - - vs_buildtools.exe --quiet --wait --norestart --nocache ^ - --installPath C:\BuildTools ^ - --add "Microsoft.VisualStudio.Workload.VCTools;includeRecommended" ^ - --add Microsoft.VisualStudio.Component.VC.v141 ^ - --add Microsoft.VisualStudio.Component.VC.v141.x86.x64 ^ - --add Microsoft.VisualStudio.Component.Windows10SDK.17763 - -To set up the right paths on the commandline, call -``"C:\BuildTools\VC\Auxiliary\Build\vcvars64.bat" -vcvars_ver=14.16 10.0.17763.0``. - -**macOS** - -To use the ``conda``-based compilers, you will need to install the -Developer Tools using ``xcode-select --install``. Otherwise -information about compiler installation can be found here: -https://devguide.python.org/setup/#macos - -**Linux** - -For Linux-based ``conda`` installations, you won't have to install any -additional components outside of the conda environment. The instructions -below are only needed if your setup isn't based on conda environments. - -Some Linux distributions will come with a pre-installed C compiler. To find out -which compilers (and versions) are installed on your system:: - - # for Debian/Ubuntu: - dpkg --list | grep compiler - # for Red Hat/RHEL/CentOS/Fedora: - yum list installed | grep -i --color compiler - -`GCC (GNU Compiler Collection) <https://gcc.gnu.org/>`_ is a widely used -compiler which supports C and a number of other languages. If GCC is listed -as an installed compiler, nothing more is required. If no C compiler is -installed (or you wish to install a newer version) you can install a compiler -(GCC in the example code below) with:: - - # for recent Debian/Ubuntu: - sudo apt install build-essential - # for Red Hat/RHEL/CentOS/Fedora - yum groupinstall "Development Tools" - -For other Linux distributions, consult your favourite search engine for -compiler installation instructions. - -Let us know if you have any difficulties by opening an issue or reaching out on -`Gitter`_. - -.. _contributing.dev_python: - -Creating a Python environment -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Now create an isolated pandas development environment: - -* Install either `Anaconda <https://www.anaconda.com/download/>`_, `miniconda - <https://conda.io/miniconda.html>`_, or `miniforge <https://github.com/conda-forge/miniforge>`_ -* Make sure your conda is up to date (``conda update conda``) -* Make sure that you have :ref:`cloned the repository <contributing.forking>` -* ``cd`` to the pandas source directory - -We'll now kick off a three-step process: - -1. Install the build dependencies -2. Build and install pandas -3. Install the optional dependencies - -.. code-block:: none - - # Create and activate the build environment - conda env create -f environment.yml - conda activate pandas-dev - - # or with older versions of Anaconda: - source activate pandas-dev - - # Build and install pandas - python setup.py build_ext -j 4 - python -m pip install -e . --no-build-isolation --no-use-pep517 - -At this point you should be able to import pandas from your locally built version:: - - $ python # start an interpreter - >>> import pandas - >>> print(pandas.__version__) - 0.22.0.dev0+29.g4ad6d4d74 - -This will create the new environment, and not touch any of your existing environments, -nor any existing Python installation.
- -To view your environments:: - - conda info -e - -To return to your root environment:: - - conda deactivate - -See the full conda docs `here <https://conda.pydata.org/docs>`__. - -.. _contributing.pip: - -Creating a Python environment (pip) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you aren't using conda for your development environment, follow these instructions. -You'll need to have at least Python 3.7.0 installed on your system. If your Python version -is 3.8.0 (or later), you might need to update your ``setuptools`` to version 42.0.0 (or later) -in your development environment before installing the build dependencies:: - - pip install --upgrade setuptools - -**Unix**/**macOS with virtualenv** - -.. code-block:: bash - - # Create a virtual environment - # Use an ENV_DIR of your choice. We'll use ~/virtualenvs/pandas-dev - # Any parent directories should already exist - python3 -m venv ~/virtualenvs/pandas-dev - - # Activate the virtualenv - . ~/virtualenvs/pandas-dev/bin/activate - - # Install the build dependencies - python -m pip install -r requirements-dev.txt - - # Build and install pandas - python setup.py build_ext -j 4 - python -m pip install -e . --no-build-isolation --no-use-pep517 - -**Unix**/**macOS with pyenv** - -Consult the docs for setting up pyenv `here <https://github.com/pyenv/pyenv>`__. - -.. code-block:: bash - - # Create a virtual environment - # Use an ENV_DIR of your choice. We'll use ~/Users/<yourname>/.pyenv/versions/pandas-dev - - pyenv virtualenv <version> <name-to-give-it> - - # For instance: - pyenv virtualenv 3.7.6 pandas-dev - - # Activate the virtualenv - pyenv activate pandas-dev - - # Now install the build dependencies in the cloned pandas repo - python -m pip install -r requirements-dev.txt - - # Build and install pandas - python setup.py build_ext -j 4 - python -m pip install -e . --no-build-isolation --no-use-pep517 - -**Windows** - -Below is a brief overview on how to set-up a virtual environment with Powershell -under Windows. For details please refer to the -`official virtualenv user guide <https://virtualenv.pypa.io/en/stable/userguide/#activate-script>`__ - -Use an ENV_DIR of your choice. We'll use ~\\virtualenvs\\pandas-dev where -'~' is the folder pointed to by either $env:USERPROFILE (Powershell) or -%USERPROFILE% (cmd.exe) environment variable. Any parent directories -should already exist. - -.. code-block:: powershell - - # Create a virtual environment - python -m venv $env:USERPROFILE\virtualenvs\pandas-dev - - # Activate the virtualenv. Use activate.bat for cmd.exe - ~\virtualenvs\pandas-dev\Scripts\Activate.ps1 - - # Install the build dependencies - python -m pip install -r requirements-dev.txt - - # Build and install pandas - python setup.py build_ext -j 4 - python -m pip install -e . --no-build-isolation --no-use-pep517 - Creating a branch ----------------- @@ -429,1060 +169,6 @@ When you want to update the feature branch with changes in master after you created the branch, check the section on :ref:`updating a PR <contributing.update-pr>`. -.. _contributing.documentation: - -Contributing to the documentation -================================= - -Contributing to the documentation benefits everyone who uses pandas. -We encourage you to help us improve the documentation, and -you don't have to be an expert on pandas to do so! In fact, -there are sections of the docs that are worse off after being written by -experts. 
If something in the docs doesn't make sense to you, updating the -relevant section after you figure it out is a great way to ensure it will help -the next person. - -.. contents:: Documentation: - :local: - - -About the pandas documentation --------------------------------- - -The documentation is written in **reStructuredText**, which is almost like writing -in plain English, and built using `Sphinx <https://www.sphinx-doc.org/en/master/>`__. The -Sphinx Documentation has an excellent `introduction to reST -<https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`__. Review the Sphinx docs to perform more -complex changes to the documentation as well. - -Some other important things to know about the docs: - -* The pandas documentation consists of two parts: the docstrings in the code - itself and the docs in this folder ``doc/``. - - The docstrings provide a clear explanation of the usage of the individual - functions, while the documentation in this folder consists of tutorial-like - overviews per topic together with some other information (what's new, - installation, etc). - -* The docstrings follow a pandas convention, based on the **Numpy Docstring - Standard**. Follow the :ref:`pandas docstring guide <docstring>` for detailed - instructions on how to write a correct docstring. - - .. toctree:: - :maxdepth: 2 - - contributing_docstring.rst - -* The tutorials make heavy use of the `IPython directive - <https://matplotlib.org/sampledoc/ipython_directive.html>`_ sphinx extension. - This directive lets you put code in the documentation which will be run - during the doc build. For example:: - - .. ipython:: python - - x = 2 - x**3 - - will be rendered as:: - - In [1]: x = 2 - - In [2]: x**3 - Out[2]: 8 - - Almost all code examples in the docs are run (and the output saved) during the - doc build. This approach means that code examples will always be up to date, - but it does make the doc building a bit more complex. - -* Our API documentation files in ``doc/source/reference`` house the auto-generated - documentation from the docstrings. For classes, there are a few subtleties - around controlling which methods and attributes have pages auto-generated. - - We have two autosummary templates for classes. - - 1. ``_templates/autosummary/class.rst``. Use this when you want to - automatically generate a page for every public method and attribute on the - class. The ``Attributes`` and ``Methods`` sections will be automatically - added to the class' rendered documentation by numpydoc. See ``DataFrame`` - for an example. - - 2. ``_templates/autosummary/class_without_autosummary``. Use this when you - want to pick a subset of methods / attributes to auto-generate pages for. - When using this template, you should include an ``Attributes`` and - ``Methods`` section in the class docstring. See ``CategoricalIndex`` for an - example. - - Every method should be included in a ``toctree`` in one of the documentation files in - ``doc/source/reference``, else Sphinx - will emit a warning. - -.. note:: - - The ``.rst`` files are used to automatically generate Markdown and HTML versions - of the docs. For this reason, please do not edit ``CONTRIBUTING.md`` directly, - but instead make any changes to ``doc/source/development/contributing.rst``. 
Then, to - generate ``CONTRIBUTING.md``, use `pandoc <https://johnmacfarlane.net/pandoc/>`_ - with the following command:: - - pandoc doc/source/development/contributing.rst -t markdown_github > CONTRIBUTING.md - -The utility script ``scripts/validate_docstrings.py`` can be used to get a csv -summary of the API documentation. It can also validate common errors in the docstring -of a specific class, function or method. The summary also compares the list of -methods documented in the files in ``doc/source/reference`` (which is used to generate -the `API Reference <https://pandas.pydata.org/pandas-docs/stable/api.html>`_ page) -and the actual public methods. -This will identify methods documented in ``doc/source/reference`` that are not actually -class methods, and existing methods that are not documented in ``doc/source/reference``. - - -Updating a pandas docstring ------------------------------- - -When improving a single function or method's docstring, it is not necessarily -needed to build the full documentation (see next section). -However, there is a script that checks a docstring (for example for the ``DataFrame.mean`` method):: - - python scripts/validate_docstrings.py pandas.DataFrame.mean - -This script will indicate some formatting errors if present, and will also -run and test the examples included in the docstring. -Check the :ref:`pandas docstring guide <docstring>` for a detailed guide -on how to format the docstring. - -The examples in the docstring ('doctests') must be valid Python code -that returns the presented output in a deterministic way, and that can be -copied and run by users. This can be checked with the script above, and is -also tested on Travis. A failing doctest will be a blocker for merging a PR. -Check the :ref:`examples <docstring.examples>` section in the docstring guide -for some tips and tricks to get the doctests passing. - -When doing a PR with a docstring update, it is good to post the -output of the validation script in a comment on GitHub. - - -How to build the pandas documentation ----------------------------------------- - -Requirements -~~~~~~~~~~~~ - -First, you need to have a development environment to be able to build pandas -(see the docs on :ref:`creating a development environment above <contributing.dev_env>`). - -Building the documentation -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -So how do you build the docs? Navigate to your local -``doc/`` directory in the console and run:: - - python make.py html - -Then you can find the HTML output in the folder ``doc/build/html/``. - -The first time you build the docs, it will take quite a while because it has to run -all the code examples and build all the generated docstring pages. In subsequent -invocations, sphinx will try to only build the pages that have been modified. - -If you want to do a full clean build, do:: - - python make.py clean - python make.py html - -You can tell ``make.py`` to compile only a single section of the docs, greatly -reducing the turn-around time for checking your changes. - -:: - - # omit autosummary and API section - python make.py clean - python make.py --no-api - - # compile the docs with only a single section, relative to the "source" folder.
- # For example, compiling only this guide (doc/source/development/contributing.rst) - python make.py clean - python make.py --single development/contributing.rst - - # compile the reference docs for a single function - python make.py clean - python make.py --single pandas.DataFrame.join - - # compile whatsnew and API section (to resolve links in the whatsnew) - python make.py clean - python make.py --whatsnew - -For comparison, a full documentation build may take 15 minutes, but a single -section may take 15 seconds. Subsequent builds, which only process portions -you have changed, will be faster. - -The build will automatically use the number of cores available on your machine -to speed up the documentation build. You can override this:: - - python make.py html --num-jobs 4 - -Open the following file in a web browser to see the full documentation you -just built:: - - doc/build/html/index.html - -And you'll have the satisfaction of seeing your new and improved documentation! - -.. _contributing.dev_docs: - -Building master branch documentation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When pull requests are merged into the pandas ``master`` branch, the main parts of -the documentation are also built by Travis-CI. These docs are then hosted `here -<https://pandas.pydata.org/docs/dev/>`__, see also -the :ref:`Continuous Integration <contributing.ci>` section. - -Previewing changes ------------------- - -Once the pull request is submitted, GitHub Actions will automatically build the -documentation. To view the built site: - -#. Wait for the ``CI / Web and docs`` check to complete. -#. Click ``Details`` next to it. -#. From the ``Artifacts`` drop-down, click ``docs`` or ``website`` to download - the site as a ZIP file. - -.. _contributing.code: - -Contributing to the code base -============================= - -.. contents:: Code Base: - :local: - -Code standards --------------- - -Writing good code is not just about what you write. It is also about *how* you -write it. During :ref:`Continuous Integration <contributing.ci>` testing, several -tools will be run to check your code for stylistic errors. -Generating any warnings will cause the test to fail. -Thus, good style is a requirement for submitting code to pandas. - -There is a tool in pandas to help contributors verify their changes before -contributing them to the project:: - - ./ci/code_checks.sh - -The script verifies the linting of code files, looks for common mistake patterns -(like missing spaces around sphinx directives that prevent the documentation from -being rendered properly) and validates the doctests. It is possible to -run the checks independently by using the parameters ``lint``, ``patterns`` and -``doctests`` (e.g. ``./ci/code_checks.sh lint``). - -In addition, because a lot of people use our library, it is important that we -do not make sudden changes to the code that could break -a lot of user code; that is, we need it to be as *backwards compatible* -as possible to avoid mass breakages. - -In addition to ``./ci/code_checks.sh``, some extra checks are run by -``pre-commit`` - see :ref:`here <contributing.pre-commit>` for how to -run them. - -Additional standards are outlined on the :ref:`pandas code style guide <code_style>`. - -.. _contributing.pre-commit: - -Pre-commit ----------- - -You can run many of these styling checks manually as we have described above.
However, -we encourage you to use `pre-commit hooks <https://pre-commit.com/>`_ instead -to automatically run ``black``, ``flake8``, ``isort`` when you make a git commit. This -can be done by installing ``pre-commit``:: - - pip install pre-commit - -and then running:: - - pre-commit install - -from the root of the pandas repository. Now all of the styling checks will be -run each time you commit changes without your needing to run each one manually. -In addition, using ``pre-commit`` will also allow you to more easily -remain up-to-date with our code checks as they change. - -Note that if needed, you can skip these checks with ``git commit --no-verify``. - -If you don't want to use ``pre-commit`` as part of your workflow, you can still use it -to run its checks with:: - - pre-commit run --files <files you have modified> - -without needing to have done ``pre-commit install`` beforehand. - -If you want to run checks on all recently committed files on upstream/master you can use:: - - pre-commit run --from-ref=upstream/master --to-ref=HEAD --all-files - -without needing to have done ``pre-commit install`` beforehand. - -.. note:: - - If you have conflicting installations of ``virtualenv``, then you may get an - error - see `here <https://github.com/pypa/virtualenv/issues/1875>`_. - - Also, due to a `bug in virtualenv <https://github.com/pypa/virtualenv/issues/1986>`_, - you may run into issues if you're using conda. To solve this, you can downgrade - ``virtualenv`` to version ``20.0.33``. - -Optional dependencies ---------------------- - -Optional dependencies (e.g. matplotlib) should be imported with the private helper -``pandas.compat._optional.import_optional_dependency``. This ensures a -consistent error message when the dependency is not met. - -All methods using an optional dependency should include a test asserting that an -``ImportError`` is raised when the optional dependency is not found. This test -should be skipped if the library is present. - -All optional dependencies should be documented in -:ref:`install.optional_dependencies` and the minimum required version should be -set in the ``pandas.compat._optional.VERSIONS`` dict. - -C (cpplint) -~~~~~~~~~~~ - -pandas uses the `Google <https://google.github.io/styleguide/cppguide.html>`_ -standard. Google provides an open source style checker called ``cpplint``, but we -use a fork of it that can be found `here <https://github.com/cpplint/cpplint>`__. -Here are *some* of the more common ``cpplint`` issues: - -* we restrict line-length to 80 characters to promote readability -* every header file must include a header guard to avoid name collisions if re-included - -:ref:`Continuous Integration <contributing.ci>` will run the -`cpplint <https://pypi.org/project/cpplint>`_ tool -and report any stylistic errors in your code. Therefore, it is helpful before -submitting code to run the check yourself:: - - cpplint --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir modified-c-file - -You can also run this command on an entire directory if necessary:: - - cpplint --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir --recursive modified-c-directory - -To make your commits compliant with this standard, you can install the -`ClangFormat <https://clang.llvm.org/docs/ClangFormat.html>`_ tool, which can be -downloaded `here <https://llvm.org/builds/>`__. 
To configure, in your home directory, -run the following command:: - - clang-format -style=google -dump-config > .clang-format - -Then modify the file to ensure that any indentation width parameters are at least four. -Once configured, you can run the tool as follows:: - - clang-format modified-c-file - -This will output what your file will look like if the changes are made, and to apply -them, run the following command:: - - clang-format -i modified-c-file - -To run the tool on an entire directory, you can run the following analogous commands:: - - clang-format modified-c-directory/*.c modified-c-directory/*.h - clang-format -i modified-c-directory/*.c modified-c-directory/*.h - -Do note that this tool is best-effort, meaning that it will try to correct as -many errors as possible, but it may not correct *all* of them. Thus, it is -recommended that you run ``cpplint`` to double check and make any other style -fixes manually. - -.. _contributing.code-formatting: - -Python (PEP8 / black) -~~~~~~~~~~~~~~~~~~~~~ - -pandas follows the `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_ standard -and uses `Black <https://black.readthedocs.io/en/stable/>`_ and -`Flake8 <http://flake8.pycqa.org/en/latest/>`_ to ensure a consistent code -format throughout the project. We encourage you to use :ref:`pre-commit <contributing.pre-commit>`. - -:ref:`Continuous Integration <contributing.ci>` will run those tools and -report any stylistic errors in your code. Therefore, it is helpful before -submitting code to run the check yourself:: - - black pandas - git diff upstream/master -u -- "*.py" | flake8 --diff - -to auto-format your code. Additionally, many editors have plugins that will -apply ``black`` as you edit files. - -You should use ``black`` version 20.8b1, as previous versions are not compatible -with the pandas codebase. - -One caveat about ``git diff upstream/master -u -- "*.py" | flake8 --diff``: this -command will catch any stylistic errors in your changes specifically, but -beware that it may not catch all of them. For example, if you delete the only -usage of an imported function, it is stylistically incorrect to import an -unused function. However, style-checking the diff will not catch this because -the actual import is not part of the diff. Thus, for completeness, you should -run this command, though it may take longer:: - - git diff upstream/master --name-only -- "*.py" | xargs -r flake8 - -Note that on OSX, the ``-r`` flag is not available, so you have to omit it and -run this slightly modified command:: - - git diff upstream/master --name-only -- "*.py" | xargs flake8 - -Windows does not support the ``xargs`` command (unless installed for example -via the `MinGW <http://www.mingw.org/>`__ toolchain), but one can imitate the -behaviour as follows:: - - for /f %i in ('git diff upstream/master --name-only -- "*.py"') do flake8 %i - -This will get all the files being changed by the PR (and ending with ``.py``), -and run ``flake8`` on them, one after the other. - -Note that these commands can be run analogously with ``black``. - -.. _contributing.import-formatting: - -Import formatting -~~~~~~~~~~~~~~~~~ -pandas uses `isort <https://pypi.org/project/isort/>`__ to standardise import -formatting across the codebase. - -A guide to import layout as per PEP8 can be found `here <https://www.python.org/dev/peps/pep-0008/#imports/>`__.
- -A summary of our current import sections ( in order ): - -* Future -* Python Standard Library -* Third Party -* ``pandas._libs``, ``pandas.compat``, ``pandas.util._*``, ``pandas.errors`` (largely not dependent on ``pandas.core``) -* ``pandas.core.dtypes`` (largely not dependent on the rest of ``pandas.core``) -* Rest of ``pandas.core.*`` -* Non-core ``pandas.io``, ``pandas.plotting``, ``pandas.tseries`` -* Local application/library specific imports - -Imports are alphabetically sorted within these sections. - -As part of :ref:`Continuous Integration <contributing.ci>` checks we run:: - - isort --check-only pandas - -to check that imports are correctly formatted as per the ``setup.cfg``. - -If you see output like the below in :ref:`Continuous Integration <contributing.ci>` checks: - -.. code-block:: shell - - Check import format using isort - ERROR: /home/travis/build/pandas-dev/pandas/pandas/io/pytables.py Imports are incorrectly sorted - Check import format using isort DONE - The command "ci/code_checks.sh" exited with 1 - -You should run:: - - isort pandas/io/pytables.py - -to automatically format imports correctly. This will modify your local copy of the files. - -Alternatively, you can run a command similar to what was suggested for ``black`` and ``flake8`` :ref:`right above <contributing.code-formatting>`:: - - git diff upstream/master --name-only -- "*.py" | xargs -r isort - -Where similar caveats apply if you are on OSX or Windows. - -You can then verify the changes look ok, then git :ref:`commit <contributing.commit-code>` and :ref:`push <contributing.push-code>`. - -Backwards compatibility -~~~~~~~~~~~~~~~~~~~~~~~ - -Please try to maintain backward compatibility. pandas has lots of users with lots of -existing code, so don't break it if at all possible. If you think breakage is required, -clearly state why as part of the pull request. Also, be careful when changing method -signatures and add deprecation warnings where needed. Also, add the deprecated sphinx -directive to the deprecated functions or methods. - -If a function with the same arguments as the one being deprecated exist, you can use -the ``pandas.util._decorators.deprecate``: - -.. code-block:: python - - from pandas.util._decorators import deprecate - - deprecate('old_func', 'new_func', '1.1.0') - -Otherwise, you need to do it manually: - -.. code-block:: python - - import warnings - - - def old_func(): - """Summary of the function. - - .. deprecated:: 1.1.0 - Use new_func instead. - """ - warnings.warn('Use new_func instead.', FutureWarning, stacklevel=2) - new_func() - - - def new_func(): - pass - -You'll also need to - -1. Write a new test that asserts a warning is issued when calling with the deprecated argument -2. Update all of pandas existing tests and code to use the new argument - -See :ref:`contributing.warnings` for more. - -.. _contributing.type_hints: - -Type hints ----------- - -pandas strongly encourages the use of :pep:`484` style type hints. New development should contain type hints and pull requests to annotate existing code are accepted as well! - -Style guidelines -~~~~~~~~~~~~~~~~ - -Types imports should follow the ``from typing import ...`` convention. So rather than - -.. code-block:: python - - import typing - - primes: typing.List[int] = [] - -You should write - -.. code-block:: python - - from typing import List, Optional, Union - - primes: List[int] = [] - -``Optional`` should be used where applicable, so instead of - -.. 
code-block:: python - - maybe_primes: List[Union[int, None]] = [] - -You should write - -.. code-block:: python - - maybe_primes: List[Optional[int]] = [] - -In some cases in the code base classes may define class variables that shadow builtins. This causes an issue as described in `Mypy 1775 <https://github.com/python/mypy/issues/1775#issuecomment-310969854>`_. The defensive solution here is to create an unambiguous alias of the builtin and use that without your annotation. For example, if you come across a definition like - -.. code-block:: python - - class SomeClass1: - str = None - -The appropriate way to annotate this would be as follows - -.. code-block:: python - - str_type = str - - class SomeClass2: - str: str_type = None - -In some cases you may be tempted to use ``cast`` from the typing module when you know better than the analyzer. This occurs particularly when using custom inference functions. For example - -.. code-block:: python - - from typing import cast - - from pandas.core.dtypes.common import is_number - - def cannot_infer_bad(obj: Union[str, int, float]): - - if is_number(obj): - ... - else: # Reasonably only str objects would reach this but... - obj = cast(str, obj) # Mypy complains without this! - return obj.upper() - -The limitation here is that while a human can reasonably understand that ``is_number`` would catch the ``int`` and ``float`` types mypy cannot make that same inference just yet (see `mypy #5206 <https://github.com/python/mypy/issues/5206>`_. While the above works, the use of ``cast`` is **strongly discouraged**. Where applicable a refactor of the code to appease static analysis is preferable - -.. code-block:: python - - def cannot_infer_good(obj: Union[str, int, float]): - - if isinstance(obj, str): - return obj.upper() - else: - ... - -With custom types and inference this is not always possible so exceptions are made, but every effort should be exhausted to avoid ``cast`` before going down such paths. - -pandas-specific types -~~~~~~~~~~~~~~~~~~~~~ - -Commonly used types specific to pandas will appear in `pandas._typing <https://github.com/pandas-dev/pandas/blob/master/pandas/_typing.py>`_ and you should use these where applicable. This module is private for now but ultimately this should be exposed to third party libraries who want to implement type checking against pandas. - -For example, quite a few functions in pandas accept a ``dtype`` argument. This can be expressed as a string like ``"object"``, a ``numpy.dtype`` like ``np.int64`` or even a pandas ``ExtensionDtype`` like ``pd.CategoricalDtype``. Rather than burden the user with having to constantly annotate all of those options, this can simply be imported and reused from the pandas._typing module - -.. code-block:: python - - from pandas._typing import Dtype - - def as_type(dtype: Dtype) -> ...: - ... - -This module will ultimately house types for repeatedly used concepts like "path-like", "array-like", "numeric", etc... and can also hold aliases for commonly appearing parameters like ``axis``. Development of this module is active so be sure to refer to the source for the most up to date list of available types. - -Validating type hints -~~~~~~~~~~~~~~~~~~~~~ - -pandas uses `mypy <http://mypy-lang.org>`_ to statically analyze the code base and type hints. After making any change you can ensure your type hints are correct by running - -.. code-block:: shell - - mypy pandas - -.. 
_contributing.ci: - -Testing with continuous integration ------------------------------------ - -The pandas test suite will run automatically on `Travis-CI <https://travis-ci.org/>`__ and -`Azure Pipelines <https://azure.microsoft.com/en-us/services/devops/pipelines/>`__ -continuous integration services, once your pull request is submitted. -However, if you wish to run the test suite on a branch prior to submitting the pull request, -then the continuous integration services need to be hooked to your GitHub repository. Instructions are here -for `Travis-CI <http://about.travis-ci.org/docs/user/getting-started/>`__ and -`Azure Pipelines <https://docs.microsoft.com/en-us/azure/devops/pipelines/>`__. - -A pull-request will be considered for merging when you have an all 'green' build. If any tests are failing, -then you will get a red 'X', where you can click through to see the individual failed tests. -This is an example of a green build. - -.. image:: ../_static/ci.png - -.. note:: - - Each time you push to *your* fork, a *new* run of the tests will be triggered on the CI. - You can enable the auto-cancel feature, which removes any non-currently-running tests for that same pull-request, for - `Travis-CI here <https://docs.travis-ci.com/user/customizing-the-build/#Building-only-the-latest-commit>`__. - -.. _contributing.tdd: - - -Test-driven development/code writing ------------------------------------- - -pandas is serious about testing and strongly encourages contributors to embrace -`test-driven development (TDD) <https://en.wikipedia.org/wiki/Test-driven_development>`_. -This development process "relies on the repetition of a very short development cycle: -first the developer writes an (initially failing) automated test case that defines a desired -improvement or new function, then produces the minimum amount of code to pass that test." -So, before actually writing any code, you should write your tests. Often the test can be -taken from the original GitHub issue. However, it is always worth considering additional -use cases and writing corresponding tests. - -Adding tests is one of the most common requests after code is pushed to pandas. Therefore, -it is worth getting in the habit of writing tests ahead of time so this is never an issue. - -Like many packages, pandas uses `pytest -<https://docs.pytest.org/en/latest/>`_ and the convenient -extensions in `numpy.testing -<https://numpy.org/doc/stable/reference/routines.testing.html>`_. - -.. note:: - - The earliest supported pytest version is 5.0.1. - -Writing tests -~~~~~~~~~~~~~ - -All tests should go into the ``tests`` subdirectory of the specific package. -This folder contains many current examples of tests, and we suggest looking to these for -inspiration. If your test requires working with files or -network connectivity, there is more information on the `testing page -<https://github.com/pandas-dev/pandas/wiki/Testing>`_ of the wiki. - -The ``pandas._testing`` module has many special ``assert`` functions that -make it easier to make statements about whether Series or DataFrame objects are -equivalent. The easiest way to verify that your code is correct is to -explicitly construct the result you expect, then compare the actual result to -the expected correct result:: - - def test_pivot(self): - data = { - 'index' : ['A', 'B', 'C', 'C', 'B', 'A'], - 'columns' : ['One', 'One', 'One', 'Two', 'Two', 'Two'], - 'values' : [1., 2., 3., 3., 2., 1.] 
- } - - frame = DataFrame(data) - pivoted = frame.pivot(index='index', columns='columns', values='values') - - expected = DataFrame({ - 'One' : {'A' : 1., 'B' : 2., 'C' : 3.}, - 'Two' : {'A' : 1., 'B' : 2., 'C' : 3.} - }) - - assert_frame_equal(pivoted, expected) - -Please remember to add the Github Issue Number as a comment to a new test. -E.g. "# brief comment, see GH#28907" - -Transitioning to ``pytest`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -pandas existing test structure is *mostly* class-based, meaning that you will typically find tests wrapped in a class. - -.. code-block:: python - - class TestReallyCoolFeature: - pass - -Going forward, we are moving to a more *functional* style using the `pytest <https://docs.pytest.org/en/latest/>`__ framework, which offers a richer testing -framework that will facilitate testing and developing. Thus, instead of writing test classes, we will write test functions like this: - -.. code-block:: python - - def test_really_cool_feature(): - pass - -Using ``pytest`` -~~~~~~~~~~~~~~~~ - -Here is an example of a self-contained set of tests that illustrate multiple features that we like to use. - -* functional style: tests are like ``test_*`` and *only* take arguments that are either fixtures or parameters -* ``pytest.mark`` can be used to set metadata on test functions, e.g. ``skip`` or ``xfail``. -* using ``parametrize``: allow testing of multiple cases -* to set a mark on a parameter, ``pytest.param(..., marks=...)`` syntax should be used -* ``fixture``, code for object construction, on a per-test basis -* using bare ``assert`` for scalars and truth-testing -* ``tm.assert_series_equal`` (and its counter part ``tm.assert_frame_equal``), for pandas object comparisons. -* the typical pattern of constructing an ``expected`` and comparing versus the ``result`` - -We would name this file ``test_cool_feature.py`` and put in an appropriate place in the ``pandas/tests/`` structure. - -.. code-block:: python - - import pytest - import numpy as np - import pandas as pd - - - @pytest.mark.parametrize('dtype', ['int8', 'int16', 'int32', 'int64']) - def test_dtypes(dtype): - assert str(np.dtype(dtype)) == dtype - - - @pytest.mark.parametrize( - 'dtype', ['float32', pytest.param('int16', marks=pytest.mark.skip), - pytest.param('int32', marks=pytest.mark.xfail( - reason='to show how it works'))]) - def test_mark(dtype): - assert str(np.dtype(dtype)) == 'float32' - - - @pytest.fixture - def series(): - return pd.Series([1, 2, 3]) - - - @pytest.fixture(params=['int8', 'int16', 'int32', 'int64']) - def dtype(request): - return request.param - - - def test_series(series, dtype): - result = series.astype(dtype) - assert result.dtype == dtype - - expected = pd.Series([1, 2, 3], dtype=dtype) - tm.assert_series_equal(result, expected) - - -A test run of this yields - -.. 
code-block:: shell - - ((pandas) bash-3.2$ pytest test_cool_feature.py -v - =========================== test session starts =========================== - platform darwin -- Python 3.6.2, pytest-3.6.0, py-1.4.31, pluggy-0.4.0 - collected 11 items - - tester.py::test_dtypes[int8] PASSED - tester.py::test_dtypes[int16] PASSED - tester.py::test_dtypes[int32] PASSED - tester.py::test_dtypes[int64] PASSED - tester.py::test_mark[float32] PASSED - tester.py::test_mark[int16] SKIPPED - tester.py::test_mark[int32] xfail - tester.py::test_series[int8] PASSED - tester.py::test_series[int16] PASSED - tester.py::test_series[int32] PASSED - tester.py::test_series[int64] PASSED - -Tests that we have ``parametrized`` are now accessible via the test name, for example we could run these with ``-k int8`` to sub-select *only* those tests which match ``int8``. - - -.. code-block:: shell - - ((pandas) bash-3.2$ pytest test_cool_feature.py -v -k int8 - =========================== test session starts =========================== - platform darwin -- Python 3.6.2, pytest-3.6.0, py-1.4.31, pluggy-0.4.0 - collected 11 items - - test_cool_feature.py::test_dtypes[int8] PASSED - test_cool_feature.py::test_series[int8] PASSED - - -.. _using-hypothesis: - -Using ``hypothesis`` -~~~~~~~~~~~~~~~~~~~~ - -Hypothesis is a library for property-based testing. Instead of explicitly -parametrizing a test, you can describe *all* valid inputs and let Hypothesis -try to find a failing input. Even better, no matter how many random examples -it tries, Hypothesis always reports a single minimal counterexample to your -assertions - often an example that you would never have thought to test. - -See `Getting Started with Hypothesis <https://hypothesis.works/articles/getting-started-with-hypothesis/>`_ -for more of an introduction, then `refer to the Hypothesis documentation -for details <https://hypothesis.readthedocs.io/en/latest/index.html>`_. - -.. code-block:: python - - import json - from hypothesis import given, strategies as st - - any_json_value = st.deferred(lambda: st.one_of( - st.none(), st.booleans(), st.floats(allow_nan=False), st.text(), - st.lists(any_json_value), st.dictionaries(st.text(), any_json_value) - )) - - - @given(value=any_json_value) - def test_json_roundtrip(value): - result = json.loads(json.dumps(value)) - assert value == result - -This test shows off several useful features of Hypothesis, as well as -demonstrating a good use-case: checking properties that should hold over -a large or complicated domain of inputs. - -To keep the pandas test suite running quickly, parametrized tests are -preferred if the inputs or logic are simple, with Hypothesis tests reserved -for cases with complex logic or where there are too many combinations of -options or subtle interactions to test (or think of!) all of them. - -.. _contributing.warnings: - -Testing warnings -~~~~~~~~~~~~~~~~ - -By default, one of pandas CI workers will fail if any unhandled warnings are emitted. - -If your change involves checking that a warning is actually emitted, use -``tm.assert_produces_warning(ExpectedWarning)``. - - -.. code-block:: python - - import pandas._testing as tm - - - df = pd.DataFrame() - with tm.assert_produces_warning(FutureWarning): - df.some_operation() - -We prefer this to the ``pytest.warns`` context manager because ours checks that the warning's -stacklevel is set correctly. The stacklevel is what ensure the *user's* file name and line number -is printed in the warning, rather than something internal to pandas. 
It represents the number of -function calls from user code (e.g. ``df.some_operation()``) to the function that actually emits -the warning. Our linter will fail the build if you use ``pytest.warns`` in a test. - -If you have a test that would emit a warning, but you aren't actually testing the -warning itself (say because it's going to be removed in the future, or because we're -matching a 3rd-party library's behavior), then use ``pytest.mark.filterwarnings`` to -ignore the error. - -.. code-block:: python - - @pytest.mark.filterwarnings("ignore:msg:category") - def test_thing(self): - ... - -If the test generates a warning of class ``category`` whose message starts -with ``msg``, the warning will be ignored and the test will pass. - -If you need finer-grained control, you can use Python's usual -`warnings module <https://docs.python.org/3/library/warnings.html>`__ -to control whether a warning is ignored / raised at different places within -a single test. - -.. code-block:: python - - with warnings.catch_warnings(): - warnings.simplefilter("ignore", FutureWarning) - # Or use warnings.filterwarnings(...) - -Alternatively, consider breaking up the unit test. - - -Running the test suite ----------------------- - -The tests can then be run directly inside your Git clone (without having to -install pandas) by typing:: - - pytest pandas - -The tests suite is exhaustive and takes around 20 minutes to run. Often it is -worth running only a subset of tests first around your changes before running the -entire suite. - -The easiest way to do this is with:: - - pytest pandas/path/to/test.py -k regex_matching_test_name - -Or with one of the following constructs:: - - pytest pandas/tests/[test-module].py - pytest pandas/tests/[test-module].py::[TestClass] - pytest pandas/tests/[test-module].py::[TestClass]::[test_method] - -Using `pytest-xdist <https://pypi.org/project/pytest-xdist>`_, one can -speed up local testing on multicore machines. To use this feature, you will -need to install ``pytest-xdist`` via:: - - pip install pytest-xdist - -Two scripts are provided to assist with this. These scripts distribute -testing across 4 threads. - -On Unix variants, one can type:: - - test_fast.sh - -On Windows, one can type:: - - test_fast.bat - -This can significantly reduce the time it takes to locally run tests before -submitting a pull request. - -For more, see the `pytest <https://docs.pytest.org/en/latest/>`_ documentation. - -Furthermore one can run - -.. code-block:: python - - pd.test() - -with an imported pandas to run tests similarly. - -Running the performance test suite ----------------------------------- - -Performance matters and it is worth considering whether your code has introduced -performance regressions. pandas is in the process of migrating to -`asv benchmarks <https://github.com/spacetelescope/asv>`__ -to enable easy monitoring of the performance of critical pandas operations. -These benchmarks are all found in the ``pandas/asv_bench`` directory, and the -test results can be found `here <https://pandas.pydata.org/speed/pandas/#/>`__. - -To use all features of asv, you will need either ``conda`` or -``virtualenv``. For more details please check the `asv installation -webpage <https://asv.readthedocs.io/en/latest/installing.html>`_. 
- -To install asv:: - - pip install git+https://github.com/spacetelescope/asv - -If you need to run a benchmark, change your directory to ``asv_bench/`` and run:: - - asv continuous -f 1.1 upstream/master HEAD - -You can replace ``HEAD`` with the name of the branch you are working on, -and report benchmarks that changed by more than 10%. -The command uses ``conda`` by default for creating the benchmark -environments. If you want to use virtualenv instead, write:: - - asv continuous -f 1.1 -E virtualenv upstream/master HEAD - -The ``-E virtualenv`` option should be added to all ``asv`` commands -that run benchmarks. The default value is defined in ``asv.conf.json``. - -Running the full benchmark suite can be an all-day process, depending on your -hardware and its resource utilization. However, usually it is sufficient to paste -only a subset of the results into the pull request to show that the committed changes -do not cause unexpected performance regressions. You can run specific benchmarks -using the ``-b`` flag, which takes a regular expression. For example, this will -only run benchmarks from a ``pandas/asv_bench/benchmarks/groupby.py`` file:: - - asv continuous -f 1.1 upstream/master HEAD -b ^groupby - -If you want to only run a specific group of benchmarks from a file, you can do it -using ``.`` as a separator. For example:: - - asv continuous -f 1.1 upstream/master HEAD -b groupby.GroupByMethods - -will only run the ``GroupByMethods`` benchmark defined in ``groupby.py``. - -You can also run the benchmark suite using the version of ``pandas`` -already installed in your current Python environment. This can be -useful if you do not have virtualenv or conda, or are using the -``setup.py develop`` approach discussed above; for the in-place build -you need to set ``PYTHONPATH``, e.g. -``PYTHONPATH="$PWD/.." asv [remaining arguments]``. -You can run benchmarks using an existing Python -environment by:: - - asv run -e -E existing - -or, to use a specific Python interpreter,:: - - asv run -e -E existing:python3.6 - -This will display stderr from the benchmarks, and use your local -``python`` that comes from your ``$PATH``. - -Information on how to write a benchmark and how to use asv can be found in the -`asv documentation <https://asv.readthedocs.io/en/latest/writing_benchmarks.html>`_. - -Documenting your code ---------------------- - -Changes should be reflected in the release notes located in ``doc/source/whatsnew/vx.y.z.rst``. -This file contains an ongoing change log for each release. Add an entry to this file to -document your fix, enhancement or (unavoidable) breaking change. Make sure to include the -GitHub issue number when adding your entry (using ``:issue:`1234``` where ``1234`` is the -issue/pull request number). - -If your code is an enhancement, it is most likely necessary to add usage -examples to the existing documentation. This can be done following the section -regarding documentation :ref:`above <contributing.documentation>`. -Further, to let users know when this feature was added, the ``versionadded`` -directive is used. The sphinx syntax for that is: - -.. code-block:: rst - - .. versionadded:: 1.1.0 - -This will put the text *New in version 1.1.0* wherever you put the sphinx -directive. 
This should also be put in the docstring when adding a new function
-or method (`example <https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/frame.py#L1495>`__)
-or a new keyword argument (`example <https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/generic.py#L568>`__).
-
 Contributing your changes to pandas
 =====================================
@@ -1605,7 +291,7 @@ automatically updated. Pushing them to GitHub again is done by::
     git push origin shiny-new-feature

 This will automatically update your pull request with the latest code and restart the
-:ref:`Continuous Integration <contributing.ci>` tests.
+:any:`Continuous Integration <contributing.ci>` tests.

 Another reason you might need to update your pull request is to solve conflicts
 with changes that have been merged into the master branch since you opened your
diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst
new file mode 100644
index 0000000000000..d6ff48ed5fd39
--- /dev/null
+++ b/doc/source/development/contributing_codebase.rst
@@ -0,0 +1,836 @@
+.. _contributing_codebase:
+
+{{ header }}
+
+=============================
+Contributing to the code base
+=============================
+
+.. contents:: Table of Contents:
+   :local:
+
+Code standards
+--------------
+
+Writing good code is not just about what you write. It is also about *how* you
+write it. During :ref:`Continuous Integration <contributing.ci>` testing, several
+tools will be run to check your code for stylistic errors.
+Generating any warnings will cause the test to fail.
+Thus, good style is a requirement for submitting code to pandas.
+
+There is a tool in pandas to help contributors verify their changes before
+contributing them to the project::
+
+    ./ci/code_checks.sh
+
+The script verifies the linting of code files, looks for common mistake patterns
+(like missing spaces around sphinx directives that prevent the documentation from
+being rendered properly) and validates the doctests. It is possible to
+run the checks independently by using the parameters ``lint``, ``patterns`` and
+``doctests`` (e.g. ``./ci/code_checks.sh lint``).
+
+In addition, because a lot of people use our library, it is important that we
+do not make sudden changes to the code that could have the potential to break
+a lot of user code; that is, we need the code to be as *backwards compatible*
+as possible to avoid mass breakages.
+
+In addition to ``./ci/code_checks.sh``, some extra checks are run by
+``pre-commit`` - see :ref:`here <contributing.pre-commit>` for how to
+run them.
+
+Additional standards are outlined on the :ref:`pandas code style guide <code_style>`.
+
+.. _contributing.pre-commit:
+
+Pre-commit
+----------
+
+You can run many of these styling checks manually as we have described above. However,
+we encourage you to use `pre-commit hooks <https://pre-commit.com/>`_ instead
+to automatically run ``black``, ``flake8`` and ``isort`` when you make a git commit. This
+can be done by installing ``pre-commit``::
+
+    pip install pre-commit
+
+and then running::
+
+    pre-commit install
+
+from the root of the pandas repository. Now all of the styling checks will be
+run each time you commit changes without your needing to run each one manually.
+In addition, using ``pre-commit`` will also allow you to more easily
+remain up-to-date with our code checks as they change.
+
+Note that if needed, you can skip these checks with ``git commit --no-verify``.
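+
+If you only want to exercise one of the checks, ``pre-commit`` can also run a
+single hook by its id, for example (a sketch, assuming the ``black`` hook id
+from pandas' pre-commit configuration)::
+
+    pre-commit run black --all-files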
+
+If you don't want to use ``pre-commit`` as part of your workflow, you can still use it
+to run its checks with::
+
+    pre-commit run --files <files you have modified>
+
+without needing to have done ``pre-commit install`` beforehand.
+
+If you want to run checks on all recently committed files on upstream/master you can use::
+
+    pre-commit run --from-ref=upstream/master --to-ref=HEAD --all-files
+
+without needing to have done ``pre-commit install`` beforehand.
+
+.. note::
+
+    If you have conflicting installations of ``virtualenv``, then you may get an
+    error - see `here <https://github.com/pypa/virtualenv/issues/1875>`_.
+
+    Also, due to a `bug in virtualenv <https://github.com/pypa/virtualenv/issues/1986>`_,
+    you may run into issues if you're using conda. To solve this, you can downgrade
+    ``virtualenv`` to version ``20.0.33``.
+
+Optional dependencies
+---------------------
+
+Optional dependencies (e.g. matplotlib) should be imported with the private helper
+``pandas.compat._optional.import_optional_dependency``. This ensures a
+consistent error message when the dependency is not met.
+
+All methods using an optional dependency should include a test asserting that an
+``ImportError`` is raised when the optional dependency is not found. This test
+should be skipped if the library is present.
+
+All optional dependencies should be documented in
+:ref:`install.optional_dependencies` and the minimum required version should be
+set in the ``pandas.compat._optional.VERSIONS`` dict.
+
+C (cpplint)
+~~~~~~~~~~~
+
+pandas uses the `Google <https://google.github.io/styleguide/cppguide.html>`_
+standard. Google provides an open source style checker called ``cpplint``, but we
+use a fork of it that can be found `here <https://github.com/cpplint/cpplint>`__.
+Here are *some* of the more common ``cpplint`` issues:
+
+* we restrict line-length to 80 characters to promote readability
+* every header file must include a header guard to avoid name collisions if re-included
+
+:ref:`Continuous Integration <contributing.ci>` will run the
+`cpplint <https://pypi.org/project/cpplint>`_ tool
+and report any stylistic errors in your code. Therefore, it is helpful before
+submitting code to run the check yourself::
+
+    cpplint --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir modified-c-file
+
+You can also run this command on an entire directory if necessary::
+
+    cpplint --extensions=c,h --headers=h --filter=-readability/casting,-runtime/int,-build/include_subdir --recursive modified-c-directory
+
+To make your commits compliant with this standard, you can install the
+`ClangFormat <https://clang.llvm.org/docs/ClangFormat.html>`_ tool, which can be
+downloaded `here <https://llvm.org/builds/>`__. To configure, in your home directory,
+run the following command::
+
+    clang-format -style=google -dump-config > .clang-format
+
+Then modify the file to ensure that any indentation width parameters are at least four.
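+
+For instance, the relevant entries of the generated file might look like this
+(an illustrative excerpt; the exact contents depend on your clang-format
+version)::
+
+    Language: Cpp
+    IndentWidth: 4
+    TabWidth: 4
+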
+Once configured, you can run the tool as follows::
+
+    clang-format modified-c-file
+
+This will output what your file will look like if the changes are made, and to apply
+them, run the following command::
+
+    clang-format -i modified-c-file
+
+To run the tool on an entire directory, you can run the following analogous commands::
+
+    clang-format modified-c-directory/*.c modified-c-directory/*.h
+    clang-format -i modified-c-directory/*.c modified-c-directory/*.h
+
+Do note that this tool is best-effort, meaning that it will try to correct as
+many errors as possible, but it may not correct *all* of them. Thus, it is
+recommended that you run ``cpplint`` to double check and make any other style
+fixes manually.
+
+.. _contributing.code-formatting:
+
+Python (PEP8 / black)
+~~~~~~~~~~~~~~~~~~~~~
+
+pandas follows the `PEP8 <https://www.python.org/dev/peps/pep-0008/>`_ standard
+and uses `Black <https://black.readthedocs.io/en/stable/>`_ and
+`Flake8 <http://flake8.pycqa.org/en/latest/>`_ to ensure a consistent code
+format throughout the project. We encourage you to use :ref:`pre-commit <contributing.pre-commit>`.
+
+:ref:`Continuous Integration <contributing.ci>` will run those tools and
+report any stylistic errors in your code. Therefore, it is helpful before
+submitting code to run the check yourself::
+
+    black pandas
+    git diff upstream/master -u -- "*.py" | flake8 --diff
+
+to auto-format your code. Additionally, many editors have plugins that will
+apply ``black`` as you edit files.
+
+You should use ``black`` version 20.8b1, as previous versions are not compatible
+with the pandas codebase.
+
+One caveat about ``git diff upstream/master -u -- "*.py" | flake8 --diff``: this
+command will catch any stylistic errors in your changes specifically, but
+beware that it may not catch all of them. For example, if you delete the only
+usage of an imported function, it is stylistically incorrect to import an
+unused function. However, style-checking the diff will not catch this because
+the actual import is not part of the diff. Thus, for completeness, you should
+run this command, though it may take longer::
+
+    git diff upstream/master --name-only -- "*.py" | xargs -r flake8
+
+Note that on OSX, the ``-r`` flag is not available, so you have to omit it and
+run this slightly modified command::
+
+    git diff upstream/master --name-only -- "*.py" | xargs flake8
+
+Windows does not support the ``xargs`` command (unless installed for example
+via the `MinGW <http://www.mingw.org/>`__ toolchain), but one can imitate the
+behaviour as follows::
+
+    for /f %i in ('git diff upstream/master --name-only -- "*.py"') do flake8 %i
+
+This will get all the files being changed by the PR (and ending with ``.py``),
+and run ``flake8`` on them, one after the other.
+
+Note that these commands can be run analogously with ``black``.
+
+.. _contributing.import-formatting:
+
+Import formatting
+~~~~~~~~~~~~~~~~~
+pandas uses `isort <https://pypi.org/project/isort/>`__ to standardise import
+formatting across the codebase.
+
+A guide to import layout as per pep8 can be found `here <https://www.python.org/dev/peps/pep-0008/#imports/>`__.
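+
+For illustration, the import block of a hypothetical module inside
+``pandas.core`` might be grouped like this (a sketch following the section
+order summarized next):
+
+.. code-block:: python
+
+    from __future__ import annotations  # future
+
+    import datetime  # Python standard library
+
+    import numpy as np  # third party
+
+    from pandas._libs import lib  # pandas._libs / compat / util / errors
+
+    from pandas.core.dtypes.common import is_integer  # pandas.core.dtypes
+
+    from pandas.core.series import Series  # rest of pandas.core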
+
+A summary of our current import sections (in order):
+
+* Future
+* Python Standard Library
+* Third Party
+* ``pandas._libs``, ``pandas.compat``, ``pandas.util._*``, ``pandas.errors`` (largely not dependent on ``pandas.core``)
+* ``pandas.core.dtypes`` (largely not dependent on the rest of ``pandas.core``)
+* Rest of ``pandas.core.*``
+* Non-core ``pandas.io``, ``pandas.plotting``, ``pandas.tseries``
+* Local application/library specific imports
+
+Imports are alphabetically sorted within these sections.
+
+As part of :ref:`Continuous Integration <contributing.ci>` checks we run::
+
+    isort --check-only pandas
+
+to check that imports are correctly formatted as per the ``setup.cfg``.
+
+If you see output like the below in :ref:`Continuous Integration <contributing.ci>` checks:
+
+.. code-block:: shell
+
+    Check import format using isort
+    ERROR: /home/travis/build/pandas-dev/pandas/pandas/io/pytables.py Imports are incorrectly sorted
+    Check import format using isort DONE
+    The command "ci/code_checks.sh" exited with 1
+
+You should run::
+
+    isort pandas/io/pytables.py
+
+to automatically format imports correctly. This will modify your local copy of the files.
+
+Alternatively, you can run a command similar to what was suggested for ``black`` and ``flake8`` :ref:`right above <contributing.code-formatting>`::
+
+    git diff upstream/master --name-only -- "*.py" | xargs -r isort
+
+Similar caveats apply if you are on OSX or Windows.
+
+You can then verify the changes look ok, then git :any:`commit <contributing.commit-code>` and :any:`push <contributing.push-code>`.
+
+Backwards compatibility
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Please try to maintain backward compatibility. pandas has lots of users with lots of
+existing code, so don't break it if at all possible. If you think breakage is required,
+clearly state why as part of the pull request. Also, be careful when changing method
+signatures and add deprecation warnings where needed. Also, add the deprecated sphinx
+directive to the deprecated functions or methods.
+
+If a function with the same arguments as the one being deprecated exists, you can use
+``pandas.util._decorators.deprecate``:
+
+.. code-block:: python
+
+    from pandas.util._decorators import deprecate
+
+    deprecate('old_func', 'new_func', '1.1.0')
+
+Otherwise, you need to do it manually:
+
+.. code-block:: python
+
+    import warnings
+
+
+    def old_func():
+        """Summary of the function.
+
+        .. deprecated:: 1.1.0
+           Use new_func instead.
+        """
+        warnings.warn('Use new_func instead.', FutureWarning, stacklevel=2)
+        new_func()
+
+
+    def new_func():
+        pass
+
+You'll also need to
+
+1. Write a new test that asserts a warning is issued when calling with the deprecated argument
+2. Update all of pandas' existing tests and code to use the new argument
+
+See :ref:`contributing.warnings` for more.
+
+.. _contributing.type_hints:
+
+Type hints
+----------
+
+pandas strongly encourages the use of :pep:`484` style type hints. New development should contain type hints and pull requests to annotate existing code are accepted as well!
+
+Style guidelines
+~~~~~~~~~~~~~~~~
+
+Type imports should follow the ``from typing import ...`` convention. So rather than
+
+.. code-block:: python
+
+    import typing
+
+    primes: typing.List[int] = []
+
+You should write
+
+.. code-block:: python
+
+    from typing import List, Optional, Union
+
+    primes: List[int] = []
+
+``Optional`` should be used where applicable, so instead of
+
+.. code-block:: python
+
+    maybe_primes: List[Union[int, None]] = []
+
+You should write
+
+.. code-block:: python
+
+    maybe_primes: List[Optional[int]] = []
+
+In some cases in the code base, classes may define class variables that shadow builtins. This causes an issue as described in `Mypy 1775 <https://github.com/python/mypy/issues/1775#issuecomment-310969854>`_. The defensive solution here is to create an unambiguous alias of the builtin and use that within your annotation. For example, if you come across a definition like
+
+.. code-block:: python
+
+    class SomeClass1:
+        str = None
+
+The appropriate way to annotate this would be as follows
+
+.. code-block:: python
+
+    str_type = str
+
+    class SomeClass2:
+        str: str_type = None
+
+In some cases you may be tempted to use ``cast`` from the typing module when you know better than the analyzer. This occurs particularly when using custom inference functions. For example
+
+.. code-block:: python
+
+    from typing import cast
+
+    from pandas.core.dtypes.common import is_number
+
+    def cannot_infer_bad(obj: Union[str, int, float]):
+
+        if is_number(obj):
+            ...
+        else:  # Reasonably only str objects would reach this but...
+            obj = cast(str, obj)  # Mypy complains without this!
+        return obj.upper()
+
+The limitation here is that while a human can reasonably understand that ``is_number`` would catch the ``int`` and ``float`` types, mypy cannot make that same inference just yet (see `mypy #5206 <https://github.com/python/mypy/issues/5206>`_). While the above works, the use of ``cast`` is **strongly discouraged**. Where applicable, a refactor of the code to appease static analysis is preferable
+
+.. code-block:: python
+
+    def cannot_infer_good(obj: Union[str, int, float]):
+
+        if isinstance(obj, str):
+            return obj.upper()
+        else:
+            ...
+
+With custom types and inference this is not always possible so exceptions are made, but every effort should be exhausted to avoid ``cast`` before going down such paths.
+
+pandas-specific types
+~~~~~~~~~~~~~~~~~~~~~
+
+Commonly used types specific to pandas will appear in `pandas._typing <https://github.com/pandas-dev/pandas/blob/master/pandas/_typing.py>`_ and you should use these where applicable. This module is private for now but ultimately this should be exposed to third party libraries who want to implement type checking against pandas.
+
+For example, quite a few functions in pandas accept a ``dtype`` argument. This can be expressed as a string like ``"object"``, a ``numpy.dtype`` like ``np.int64`` or even a pandas ``ExtensionDtype`` like ``pd.CategoricalDtype``. Rather than burden the user with having to constantly annotate all of those options, this can simply be imported and reused from the pandas._typing module
+
+.. code-block:: python
+
+    from pandas._typing import Dtype
+
+    def as_type(dtype: Dtype) -> ...:
+        ...
+
+This module will ultimately house types for repeatedly used concepts like "path-like", "array-like", "numeric", etc... and can also hold aliases for commonly appearing parameters like ``axis``. Development of this module is active so be sure to refer to the source for the most up to date list of available types.
+
+Validating type hints
+~~~~~~~~~~~~~~~~~~~~~
+
+pandas uses `mypy <http://mypy-lang.org>`_ to statically analyze the code base and type hints. After making any change you can ensure your type hints are correct by running
+
+.. code-block:: shell
+
+    mypy pandas
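+
+For example, mypy would reject an annotation that does not match the value a
+function actually returns (an illustrative snippet, not taken from the code
+base):
+
+.. code-block:: python
+
+    def add_one(x: int) -> str:
+        # error: Incompatible return value type (got "int", expected "str")
+        return x + 1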
+
+.. _contributing.ci:
+
+Testing with continuous integration
+-----------------------------------
+
+The pandas test suite will run automatically on `Travis-CI <https://travis-ci.org/>`__ and
+`Azure Pipelines <https://azure.microsoft.com/en-us/services/devops/pipelines/>`__
+continuous integration services, once your pull request is submitted.
+However, if you wish to run the test suite on a branch prior to submitting the pull request,
+then the continuous integration services need to be hooked to your GitHub repository. Instructions are here
+for `Travis-CI <http://about.travis-ci.org/docs/user/getting-started/>`__ and
+`Azure Pipelines <https://docs.microsoft.com/en-us/azure/devops/pipelines/>`__.
+
+A pull-request will be considered for merging when you have an all 'green' build. If any tests are failing,
+then you will get a red 'X', where you can click through to see the individual failed tests.
+This is an example of a green build.
+
+.. image:: ../_static/ci.png
+
+.. note::
+
+   Each time you push to *your* fork, a *new* run of the tests will be triggered on the CI.
+   You can enable the auto-cancel feature, which removes any non-currently-running tests for that same pull-request, for
+   `Travis-CI here <https://docs.travis-ci.com/user/customizing-the-build/#Building-only-the-latest-commit>`__.
+
+.. _contributing.tdd:
+
+
+Test-driven development/code writing
+------------------------------------
+
+pandas is serious about testing and strongly encourages contributors to embrace
+`test-driven development (TDD) <https://en.wikipedia.org/wiki/Test-driven_development>`_.
+This development process "relies on the repetition of a very short development cycle:
+first the developer writes an (initially failing) automated test case that defines a desired
+improvement or new function, then produces the minimum amount of code to pass that test."
+So, before actually writing any code, you should write your tests. Often the test can be
+taken from the original GitHub issue. However, it is always worth considering additional
+use cases and writing corresponding tests.
+
+Adding tests is one of the most common requests after code is pushed to pandas. Therefore,
+it is worth getting in the habit of writing tests ahead of time so this is never an issue.
+
+Like many packages, pandas uses `pytest
+<https://docs.pytest.org/en/latest/>`_ and the convenient
+extensions in `numpy.testing
+<https://numpy.org/doc/stable/reference/routines.testing.html>`_.
+
+.. note::
+
+   The earliest supported pytest version is 5.0.1.
+
+Writing tests
+~~~~~~~~~~~~~
+
+All tests should go into the ``tests`` subdirectory of the specific package.
+This folder contains many current examples of tests, and we suggest looking to these for
+inspiration. If your test requires working with files or
+network connectivity, there is more information on the `testing page
+<https://github.com/pandas-dev/pandas/wiki/Testing>`_ of the wiki.
+
+The ``pandas._testing`` module has many special ``assert`` functions that
+make it easier to make statements about whether Series or DataFrame objects are
+equivalent. The easiest way to verify that your code is correct is to
+explicitly construct the result you expect, then compare the actual result to
+the expected correct result::
+
+    def test_pivot(self):
+        data = {
+            'index' : ['A', 'B', 'C', 'C', 'B', 'A'],
+            'columns' : ['One', 'One', 'One', 'Two', 'Two', 'Two'],
+            'values' : [1., 2., 3., 3., 2., 1.]
+        }
+
+        frame = DataFrame(data)
+        pivoted = frame.pivot(index='index', columns='columns', values='values')
+
+        expected = DataFrame({
+            'One' : {'A' : 1., 'B' : 2., 'C' : 3.},
+            'Two' : {'A' : 1., 'B' : 2., 'C' : 3.}
+        })
+
+        assert_frame_equal(pivoted, expected)
+
+Please remember to add the GitHub issue number as a comment to a new test.
+E.g. "# brief comment, see GH#28907"
+
+Transitioning to ``pytest``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+pandas' existing test structure is *mostly* class-based, meaning that you will typically find tests wrapped in a class.
+
+.. code-block:: python
+
+    class TestReallyCoolFeature:
+        pass
+
+Going forward, we are moving to a more *functional* style using the `pytest <https://docs.pytest.org/en/latest/>`__ framework, which offers a richer testing
+framework that will facilitate testing and development. Thus, instead of writing test classes, we will write test functions like this:
+
+.. code-block:: python
+
+    def test_really_cool_feature():
+        pass
+
+Using ``pytest``
+~~~~~~~~~~~~~~~~
+
+Here is an example of a self-contained set of tests that illustrate multiple features that we like to use.
+
+* functional style: tests are like ``test_*`` and *only* take arguments that are either fixtures or parameters
+* ``pytest.mark`` can be used to set metadata on test functions, e.g. ``skip`` or ``xfail``.
+* using ``parametrize``: allows testing of multiple cases
+* to set a mark on a parameter, ``pytest.param(..., marks=...)`` syntax should be used
+* ``fixture``, code for object construction, on a per-test basis
+* using bare ``assert`` for scalars and truth-testing
+* ``tm.assert_series_equal`` (and its counterpart ``tm.assert_frame_equal``), for pandas object comparisons.
+* the typical pattern of constructing an ``expected`` and comparing versus the ``result``
+
+We would name this file ``test_cool_feature.py`` and put it in an appropriate place in the ``pandas/tests/`` structure.
+
+.. code-block:: python
+
+    import pytest
+    import numpy as np
+    import pandas as pd
+    import pandas._testing as tm
+
+
+    @pytest.mark.parametrize('dtype', ['int8', 'int16', 'int32', 'int64'])
+    def test_dtypes(dtype):
+        assert str(np.dtype(dtype)) == dtype
+
+
+    @pytest.mark.parametrize(
+        'dtype', ['float32', pytest.param('int16', marks=pytest.mark.skip),
+                  pytest.param('int32', marks=pytest.mark.xfail(
+                      reason='to show how it works'))])
+    def test_mark(dtype):
+        assert str(np.dtype(dtype)) == 'float32'
+
+
+    @pytest.fixture
+    def series():
+        return pd.Series([1, 2, 3])
+
+
+    @pytest.fixture(params=['int8', 'int16', 'int32', 'int64'])
+    def dtype(request):
+        return request.param
+
+
+    def test_series(series, dtype):
+        result = series.astype(dtype)
+        assert result.dtype == dtype
+
+        expected = pd.Series([1, 2, 3], dtype=dtype)
+        tm.assert_series_equal(result, expected)
+
+
+A test run of this yields
+
+.. code-block:: shell
+
+    (pandas) bash-3.2$ pytest test_cool_feature.py -v
+    =========================== test session starts ===========================
+    platform darwin -- Python 3.6.2, pytest-3.6.0, py-1.4.31, pluggy-0.4.0
+    collected 11 items
+
+    test_cool_feature.py::test_dtypes[int8] PASSED
+    test_cool_feature.py::test_dtypes[int16] PASSED
+    test_cool_feature.py::test_dtypes[int32] PASSED
+    test_cool_feature.py::test_dtypes[int64] PASSED
+    test_cool_feature.py::test_mark[float32] PASSED
+    test_cool_feature.py::test_mark[int16] SKIPPED
+    test_cool_feature.py::test_mark[int32] xfail
+    test_cool_feature.py::test_series[int8] PASSED
+    test_cool_feature.py::test_series[int16] PASSED
+    test_cool_feature.py::test_series[int32] PASSED
+    test_cool_feature.py::test_series[int64] PASSED
+
+Tests that we have ``parametrized`` are now accessible via the test name; for example, we could run these with ``-k int8`` to sub-select *only* those tests which match ``int8``.
+
+
+.. code-block:: shell
+
+    (pandas) bash-3.2$ pytest test_cool_feature.py -v -k int8
+    =========================== test session starts ===========================
+    platform darwin -- Python 3.6.2, pytest-3.6.0, py-1.4.31, pluggy-0.4.0
+    collected 11 items
+
+    test_cool_feature.py::test_dtypes[int8] PASSED
+    test_cool_feature.py::test_series[int8] PASSED
+
+
+.. _using-hypothesis:
+
+Using ``hypothesis``
+~~~~~~~~~~~~~~~~~~~~
+
+Hypothesis is a library for property-based testing. Instead of explicitly
+parametrizing a test, you can describe *all* valid inputs and let Hypothesis
+try to find a failing input. Even better, no matter how many random examples
+it tries, Hypothesis always reports a single minimal counterexample to your
+assertions - often an example that you would never have thought to test.
+
+See `Getting Started with Hypothesis <https://hypothesis.works/articles/getting-started-with-hypothesis/>`_
+for more of an introduction, then `refer to the Hypothesis documentation
+for details <https://hypothesis.readthedocs.io/en/latest/index.html>`_.
+
+.. code-block:: python
+
+    import json
+    from hypothesis import given, strategies as st
+
+    any_json_value = st.deferred(lambda: st.one_of(
+        st.none(), st.booleans(), st.floats(allow_nan=False), st.text(),
+        st.lists(any_json_value), st.dictionaries(st.text(), any_json_value)
+    ))
+
+
+    @given(value=any_json_value)
+    def test_json_roundtrip(value):
+        result = json.loads(json.dumps(value))
+        assert value == result
+
+This test shows off several useful features of Hypothesis, as well as
+demonstrating a good use-case: checking properties that should hold over
+a large or complicated domain of inputs.
+
+To keep the pandas test suite running quickly, parametrized tests are
+preferred if the inputs or logic are simple, with Hypothesis tests reserved
+for cases with complex logic or where there are too many combinations of
+options or subtle interactions to test (or think of!) all of them.
+
+.. _contributing.warnings:
+
+Testing warnings
+~~~~~~~~~~~~~~~~
+
+By default, one of pandas' CI workers will fail if any unhandled warnings are emitted.
+
+If your change involves checking that a warning is actually emitted, use
+``tm.assert_produces_warning(ExpectedWarning)``.
+
+
+.. code-block:: python
+
+    import pandas._testing as tm
+
+
+    df = pd.DataFrame()
+    with tm.assert_produces_warning(FutureWarning):
+        df.some_operation()
+
+We prefer this to the ``pytest.warns`` context manager because ours checks that the warning's
+stacklevel is set correctly. The stacklevel is what ensures the *user's* file name and line number
+are printed in the warning, rather than something internal to pandas. It represents the number of
+function calls from user code (e.g. ``df.some_operation()``) to the function that actually emits
+the warning. Our linter will fail the build if you use ``pytest.warns`` in a test.
+
+If you have a test that would emit a warning, but you aren't actually testing the
+warning itself (say because it's going to be removed in the future, or because we're
+matching a 3rd-party library's behavior), then use ``pytest.mark.filterwarnings`` to
+ignore the warning.
+
+.. code-block:: python
+
+    @pytest.mark.filterwarnings("ignore:msg:category")
+    def test_thing(self):
+        ...
+
+If the test generates a warning of class ``category`` whose message starts
+with ``msg``, the warning will be ignored and the test will pass.
+
+If you need finer-grained control, you can use Python's usual
+`warnings module <https://docs.python.org/3/library/warnings.html>`__
+to control whether a warning is ignored / raised at different places within
+a single test.
+
+.. code-block:: python
+
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore", FutureWarning)
+        # Or use warnings.filterwarnings(...)
+
+Alternatively, consider breaking up the unit test.
+
+
+Running the test suite
+----------------------
+
+The tests can then be run directly inside your Git clone (without having to
+install pandas) by typing::
+
+    pytest pandas
+
+The test suite is exhaustive and takes around 20 minutes to run. Often it is
+worth running only a subset of tests first around your changes before running the
+entire suite.
+
+The easiest way to do this is with::
+
+    pytest pandas/path/to/test.py -k regex_matching_test_name
+
+Or with one of the following constructs::
+
+    pytest pandas/tests/[test-module].py
+    pytest pandas/tests/[test-module].py::[TestClass]
+    pytest pandas/tests/[test-module].py::[TestClass]::[test_method]
+
+Using `pytest-xdist <https://pypi.org/project/pytest-xdist>`_, one can
+speed up local testing on multicore machines. To use this feature, you will
+need to install ``pytest-xdist`` via::
+
+    pip install pytest-xdist
+
+Two scripts are provided to assist with this. These scripts distribute
+testing across 4 threads.
+
+On Unix variants, one can type::
+
+    test_fast.sh
+
+On Windows, one can type::
+
+    test_fast.bat
+
+This can significantly reduce the time it takes to locally run tests before
+submitting a pull request.
+
+For more, see the `pytest <https://docs.pytest.org/en/latest/>`_ documentation.
+
+Furthermore, one can run
+
+.. code-block:: python
+
+    pd.test()
+
+with an imported pandas to run tests similarly.
+
+Running the performance test suite
+----------------------------------
+
+Performance matters and it is worth considering whether your code has introduced
+performance regressions. pandas is in the process of migrating to
+`asv benchmarks <https://github.com/spacetelescope/asv>`__
+to enable easy monitoring of the performance of critical pandas operations.
+These benchmarks are all found in the ``pandas/asv_bench`` directory, and the
+test results can be found `here <https://pandas.pydata.org/speed/pandas/#/>`__.
+
+To use all features of asv, you will need either ``conda`` or
+``virtualenv``. For more details please check the `asv installation
+webpage <https://asv.readthedocs.io/en/latest/installing.html>`_.
+
+To install asv::
+
+    pip install git+https://github.com/spacetelescope/asv
+
+If you need to run a benchmark, change your directory to ``asv_bench/`` and run::
+
+    asv continuous -f 1.1 upstream/master HEAD
+
+You can replace ``HEAD`` with the name of the branch you are working on.
+The command reports benchmarks that changed by more than 10%.
+The command uses ``conda`` by default for creating the benchmark
+environments. If you want to use virtualenv instead, write::
+
+    asv continuous -f 1.1 -E virtualenv upstream/master HEAD
+
+The ``-E virtualenv`` option should be added to all ``asv`` commands
+that run benchmarks. The default value is defined in ``asv.conf.json``.
+
+Running the full benchmark suite can be an all-day process, depending on your
+hardware and its resource utilization. However, usually it is sufficient to paste
+only a subset of the results into the pull request to show that the committed changes
+do not cause unexpected performance regressions. You can run specific benchmarks
+using the ``-b`` flag, which takes a regular expression. For example, this will
+only run benchmarks from a ``pandas/asv_bench/benchmarks/groupby.py`` file::
+
+    asv continuous -f 1.1 upstream/master HEAD -b ^groupby
+
+If you want to only run a specific group of benchmarks from a file, you can do it
+using ``.`` as a separator. For example::
+
+    asv continuous -f 1.1 upstream/master HEAD -b groupby.GroupByMethods
+
+will only run the ``GroupByMethods`` benchmark defined in ``groupby.py``.
+
+You can also run the benchmark suite using the version of ``pandas``
+already installed in your current Python environment. This can be
+useful if you do not have virtualenv or conda, or are using the
+``setup.py develop`` approach discussed above; for the in-place build
+you need to set ``PYTHONPATH``, e.g.
+``PYTHONPATH="$PWD/.." asv [remaining arguments]``.
+You can run benchmarks using an existing Python
+environment by::
+
+    asv run -e -E existing
+
+or, to use a specific Python interpreter::
+
+    asv run -e -E existing:python3.6
+
+This will display stderr from the benchmarks, and use your local
+``python`` that comes from your ``$PATH``.
+
+Information on how to write a benchmark and how to use asv can be found in the
+`asv documentation <https://asv.readthedocs.io/en/latest/writing_benchmarks.html>`_.
+
+Documenting your code
+---------------------
+
+Changes should be reflected in the release notes located in ``doc/source/whatsnew/vx.y.z.rst``.
+This file contains an ongoing change log for each release. Add an entry to this file to
+document your fix, enhancement or (unavoidable) breaking change. Make sure to include the
+GitHub issue number when adding your entry (using ``:issue:`1234``` where ``1234`` is the
+issue/pull request number).
+
+If your code is an enhancement, it is most likely necessary to add usage
+examples to the existing documentation. This can be done following the section
+regarding :ref:`documentation <contributing_documentation>`.
+Further, to let users know when this feature was added, the ``versionadded``
+directive is used. The sphinx syntax for that is:
+
+.. code-block:: rst
+
+    .. versionadded:: 1.1.0
+
+This will put the text *New in version 1.1.0* wherever you put the sphinx
+directive. This should also be put in the docstring when adding a new function
+or method (`example <https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/frame.py#L1495>`__)
+or a new keyword argument (`example <https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/generic.py#L568>`__).
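+
+For a new keyword argument, for instance, the directive sits in the
+parameter's description. A sketch with hypothetical names, not actual pandas
+code:
+
+.. code-block:: python
+
+    def some_method(self, path, storage_options=None):
+        """
+        Do something useful with ``path``.
+
+        Parameters
+        ----------
+        storage_options : dict, optional
+            Extra options for a particular storage connection.
+
+            .. versionadded:: 1.1.0
+        """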
diff --git a/doc/source/development/contributing_documentation.rst b/doc/source/development/contributing_documentation.rst
new file mode 100644
index 0000000000000..a4a4f781d9dad
--- /dev/null
+++ b/doc/source/development/contributing_documentation.rst
@@ -0,0 +1,222 @@
+.. _contributing_documentation:
+
+{{ header }}
+
+=================================
+Contributing to the documentation
+=================================
+
+Contributing to the documentation benefits everyone who uses pandas.
+We encourage you to help us improve the documentation, and
+you don't have to be an expert on pandas to do so! In fact,
+there are sections of the docs that are worse off after being written by
+experts. If something in the docs doesn't make sense to you, updating the
+relevant section after you figure it out is a great way to ensure it will help
+the next person.
+
+.. contents:: Documentation:
+   :local:
+
+
+About the pandas documentation
+------------------------------
+
+The documentation is written in **reStructuredText**, which is almost like writing
+in plain English, and built using `Sphinx <https://www.sphinx-doc.org/en/master/>`__. The
+Sphinx Documentation has an excellent `introduction to reST
+<https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`__. Review the Sphinx docs to perform more
+complex changes to the documentation as well.
+
+Some other important things to know about the docs:
+
+* The pandas documentation consists of two parts: the docstrings in the code
+  itself and the docs in this folder ``doc/``.
+
+  The docstrings provide a clear explanation of the usage of the individual
+  functions, while the documentation in this folder consists of tutorial-like
+  overviews per topic together with some other information (what's new,
+  installation, etc).
+
+* The docstrings follow a pandas convention, based on the **Numpy Docstring
+  Standard**. Follow the :ref:`pandas docstring guide <docstring>` for detailed
+  instructions on how to write a correct docstring.
+
+  .. toctree::
+     :maxdepth: 2
+
+     contributing_docstring.rst
+
+* The tutorials make heavy use of the `IPython directive
+  <https://matplotlib.org/sampledoc/ipython_directive.html>`_ sphinx extension.
+  This directive lets you put code in the documentation which will be run
+  during the doc build. For example::
+
+      .. ipython:: python
+
+          x = 2
+          x**3
+
+  will be rendered as::
+
+      In [1]: x = 2
+
+      In [2]: x**3
+      Out[2]: 8
+
+  Almost all code examples in the docs are run (and the output saved) during the
+  doc build. This approach means that code examples will always be up to date,
+  but it does make the doc building a bit more complex.
+
+* Our API documentation files in ``doc/source/reference`` house the auto-generated
+  documentation from the docstrings. For classes, there are a few subtleties
+  around controlling which methods and attributes have pages auto-generated.
+
+  We have two autosummary templates for classes.
+
+  1. ``_templates/autosummary/class.rst``. Use this when you want to
+     automatically generate a page for every public method and attribute on the
+     class. The ``Attributes`` and ``Methods`` sections will be automatically
+     added to the class' rendered documentation by numpydoc. See ``DataFrame``
+     for an example.
+
+  2. ``_templates/autosummary/class_without_autosummary``. Use this when you
+     want to pick a subset of methods / attributes to auto-generate pages for.
+     When using this template, you should include an ``Attributes`` and
+     ``Methods`` section in the class docstring. See ``CategoricalIndex`` for an
+     example.
+
+  Every method should be included in a ``toctree`` in one of the documentation files in
+  ``doc/source/reference``, else Sphinx
+  will emit a warning.
+
+.. note::
+
+    The ``.rst`` files are used to automatically generate Markdown and HTML versions
+    of the docs. For this reason, please do not edit ``CONTRIBUTING.md`` directly,
+    but instead make any changes to ``doc/source/development/contributing.rst``. Then, to
+    generate ``CONTRIBUTING.md``, use `pandoc <https://johnmacfarlane.net/pandoc/>`_
+    with the following command::
+
+        pandoc doc/source/development/contributing.rst -t markdown_github > CONTRIBUTING.md
+
+The utility script ``scripts/validate_docstrings.py`` can be used to get a CSV
+summary of the API documentation, and also to validate common errors in the docstring
+of a specific class, function or method. The summary also compares the list of
+methods documented in the files in ``doc/source/reference`` (which is used to generate
+the `API Reference <https://pandas.pydata.org/pandas-docs/stable/api.html>`_ page)
+and the actual public methods.
+This will identify methods documented in ``doc/source/reference`` that are not actually
+class methods, and existing methods that are not documented in ``doc/source/reference``.
+
+
+Updating a pandas docstring
+---------------------------
+
+When improving a single function or method's docstring, it is not necessarily
+needed to build the full documentation (see next section).
+However, there is a script that checks a docstring (for example for the ``DataFrame.mean`` method)::
+
+    python scripts/validate_docstrings.py pandas.DataFrame.mean
+
+This script will indicate some formatting errors if present, and will also
+run and test the examples included in the docstring.
+Check the :ref:`pandas docstring guide <docstring>` for a detailed guide
+on how to format the docstring.
+
+The examples in the docstring ('doctests') must be valid Python code
+that deterministically returns the presented output, and that can be
+copied and run by users. This can be checked with the script above, and is
+also tested on Travis. A failing doctest will be a blocker for merging a PR.
+Check the :ref:`examples <docstring.examples>` section in the docstring guide
+for some tips and tricks to get the doctests passing.
+
+When doing a PR with a docstring update, it is good to post the
+output of the validation script in a comment on GitHub.
+
+
+How to build the pandas documentation
+-------------------------------------
+
+Requirements
+~~~~~~~~~~~~
+
+First, you need to have a development environment to be able to build pandas
+(see the docs on :ref:`creating a development environment <contributing_environment>`).
+
+Building the documentation
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+So how do you build the docs? Navigate to your local
+``doc/`` directory in the console and run::
+
+    python make.py html
+
+Then you can find the HTML output in the folder ``doc/build/html/``.
+
+The first time you build the docs, it will take quite a while because it has to run
+all the code examples and build all the generated docstring pages. In subsequent
+invocations, Sphinx will try to only build the pages that have been modified.
+
+If you want to do a full clean build, do::
+
+    python make.py clean
+    python make.py html
+
+You can tell ``make.py`` to compile only a single section of the docs, greatly
+reducing the turn-around time for checking your changes.
+ +:: + + # omit autosummary and API section + python make.py clean + python make.py --no-api + + # compile the docs with only a single section, relative to the "source" folder. + # For example, compiling only this guide (doc/source/development/contributing.rst) + python make.py clean + python make.py --single development/contributing.rst + + # compile the reference docs for a single function + python make.py clean + python make.py --single pandas.DataFrame.join + + # compile whatsnew and API section (to resolve links in the whatsnew) + python make.py clean + python make.py --whatsnew + +For comparison, a full documentation build may take 15 minutes, but a single +section may take 15 seconds. Subsequent builds, which only process portions +you have changed, will be faster. + +The build will automatically use the number of cores available on your machine +to speed up the documentation build. You can override this:: + + python make.py html --num-jobs 4 + +Open the following file in a web browser to see the full documentation you +just built:: + + doc/build/html/index.html + +And you'll have the satisfaction of seeing your new and improved documentation! + +.. _contributing.dev_docs: + +Building master branch documentation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When pull requests are merged into the pandas ``master`` branch, the main parts of +the documentation are also built by Travis-CI. These docs are then hosted `here +<https://pandas.pydata.org/docs/dev/>`__; see also +the :any:`Continuous Integration <contributing.ci>` section. + +Previewing changes +------------------ + +Once the pull request is submitted, GitHub Actions will automatically build the +documentation. To view the built site: + +#. Wait for the ``CI / Web and docs`` check to complete. +#. Click ``Details`` next to it. +#. From the ``Artifacts`` drop-down, click ``docs`` or ``website`` to download + the site as a ZIP file. diff --git a/doc/source/development/contributing_environment.rst b/doc/source/development/contributing_environment.rst new file mode 100644 index 0000000000000..8a457d2c210b5 --- /dev/null +++ b/doc/source/development/contributing_environment.rst @@ -0,0 +1,265 @@ +.. _contributing_environment: + +{{ header }} + +================================== +Creating a development environment +================================== + +To test out code changes, you'll need to build pandas from source, which +requires a C/C++ compiler and Python environment. If you're making documentation +changes, you can skip to :ref:`contributing to the documentation <contributing_documentation>`, but if you skip +creating the development environment you won't be able to build the documentation +locally before pushing your changes. + +.. contents:: Table of contents: + :local: + + +Creating an environment using Docker +-------------------------------------- + +Instead of manually setting up a development environment, you can use `Docker +<https://docs.docker.com/get-docker/>`_ to automatically create the environment with just a few +commands. pandas provides a ``DockerFile`` in the root directory to build a Docker image +with a full pandas development environment. + +**Docker Commands** + +Pass your GitHub username in the ``DockerFile`` to use your own fork:: + + # Build the image pandas-yourname-env + docker build --tag pandas-yourname-env .
+ # Run a container and bind your local forked repo, pandas-yourname, to the container + docker run -it --rm -v path-to-pandas-yourname:/home/pandas-yourname pandas-yourname-env + +Even easier, you can integrate Docker with the following IDEs: + +**Visual Studio Code** + +You can use the DockerFile to launch a remote session with Visual Studio Code, +a popular free IDE, using the ``.devcontainer.json`` file. +See https://code.visualstudio.com/docs/remote/containers for details. + +**PyCharm (Professional)** + +Enable Docker support and use the Services tool window to build and manage images as well as +run and interact with containers. +See https://www.jetbrains.com/help/pycharm/docker.html for details. + +Note that you might need to rebuild the C extensions if/when you merge with upstream/master using:: + + python setup.py build_ext -j 4 + + +Creating an environment without Docker +--------------------------------------- + +Installing a C compiler +~~~~~~~~~~~~~~~~~~~~~~~ + +pandas uses C extensions (mostly written using Cython) to speed up certain +operations. To install pandas from source, you need to compile these C +extensions, which means you need a C compiler. This process depends on which +platform you're using. + +If you have set up your environment using ``conda``, the packages ``c-compiler`` +and ``cxx-compiler`` will install a fitting compiler for your platform that is +compatible with the remaining conda packages. On Windows and macOS, you will +also need to install the SDKs as they have to be distributed separately. +These packages will automatically be installed by using the ``pandas`` +``environment.yml`` file. + +**Windows** + +You will need `Build Tools for Visual Studio 2017 +<https://visualstudio.microsoft.com/downloads/>`_. + +.. warning:: + You DO NOT need to install Visual Studio 2019. + You only need "Build Tools for Visual Studio 2019" found by + scrolling down to "All downloads" -> "Tools for Visual Studio 2019". + In the installer, select the "C++ build tools" workload. + +You can install the necessary components on the command line using +`vs_buildtools.exe <https://aka.ms/vs/16/release/vs_buildtools.exe>`_: + +.. code:: + + vs_buildtools.exe --quiet --wait --norestart --nocache ^ + --installPath C:\BuildTools ^ + --add "Microsoft.VisualStudio.Workload.VCTools;includeRecommended" ^ + --add Microsoft.VisualStudio.Component.VC.v141 ^ + --add Microsoft.VisualStudio.Component.VC.v141.x86.x64 ^ + --add Microsoft.VisualStudio.Component.Windows10SDK.17763 + +To set up the right paths on the command line, call +``"C:\BuildTools\VC\Auxiliary\Build\vcvars64.bat" -vcvars_ver=14.16 10.0.17763.0``. + +**macOS** + +To use the ``conda``-based compilers, you will need to install the +Developer Tools using ``xcode-select --install``. Otherwise, +information about compiler installation can be found here: +https://devguide.python.org/setup/#macos + +**Linux** + +For Linux-based ``conda`` installations, you won't have to install any +additional components outside of the conda environment. The instructions +below are only needed if your setup isn't based on conda environments. + +Some Linux distributions will come with a pre-installed C compiler.
To find out +which compilers (and versions) are installed on your system:: + + # for Debian/Ubuntu: + dpkg --list | grep compiler + # for Red Hat/RHEL/CentOS/Fedora: + yum list installed | grep -i --color compiler + +`GCC (GNU Compiler Collection) <https://gcc.gnu.org/>`_ is a widely used +compiler that supports C and a number of other languages. If GCC is listed +as an installed compiler, nothing more is required. If no C compiler is +installed (or you wish to install a newer version), you can install a compiler +(GCC in the example code below) with:: + + # for recent Debian/Ubuntu: + sudo apt install build-essential + # for Red Hat/RHEL/CentOS/Fedora + yum groupinstall "Development Tools" + +For other Linux distributions, consult your favorite search engine for +compiler installation instructions. + +Let us know if you have any difficulties by opening an issue or reaching out on `Gitter <https://gitter.im/pydata/pandas/>`_. + + +Creating a Python environment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Now create an isolated pandas development environment: + +* Install either `Anaconda <https://www.anaconda.com/download/>`_, `miniconda + <https://conda.io/miniconda.html>`_, or `miniforge <https://github.com/conda-forge/miniforge>`_ +* Make sure your conda is up to date (``conda update conda``) +* Make sure that you have :any:`cloned the repository <contributing.forking>` +* ``cd`` to the pandas source directory + +We'll now kick off a three-step process: + +1. Install the build dependencies +2. Build and install pandas +3. Install the optional dependencies + +.. code-block:: none + + # Create and activate the build environment + conda env create -f environment.yml + conda activate pandas-dev + + # or with older versions of Anaconda: + source activate pandas-dev + + # Build and install pandas + python setup.py build_ext -j 4 + python -m pip install -e . --no-build-isolation --no-use-pep517 + +At this point you should be able to import pandas from your locally built version:: + + $ python # start an interpreter + >>> import pandas + >>> print(pandas.__version__) + 0.22.0.dev0+29.g4ad6d4d74 + +This will create the new environment, and not touch any of your existing environments, +nor any existing Python installation. + +To view your environments:: + + conda info -e + +To return to your root environment:: + + conda deactivate + +See the full conda docs `here <https://conda.pydata.org/docs>`__. + + +Creating a Python environment (pip) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you aren't using conda for your development environment, follow these instructions. +You'll need to have at least Python 3.7.0 installed on your system. If your Python version +is 3.8.0 (or later), you might need to update your ``setuptools`` to version 42.0.0 (or later) +in your development environment before installing the build dependencies:: + + pip install --upgrade setuptools + +**Unix**/**macOS with virtualenv** + +.. code-block:: bash + + # Create a virtual environment + # Use an ENV_DIR of your choice. We'll use ~/virtualenvs/pandas-dev + # Any parent directories should already exist + python3 -m venv ~/virtualenvs/pandas-dev + + # Activate the virtualenv + . ~/virtualenvs/pandas-dev/bin/activate + + # Install the build dependencies + python -m pip install -r requirements-dev.txt + + # Build and install pandas + python setup.py build_ext -j 4 + python -m pip install -e .
--no-build-isolation --no-use-pep517 + +**Unix**/**macOS with pyenv** + +Consult the docs for setting up pyenv `here <https://github.com/pyenv/pyenv>`__. + +.. code-block:: bash + + # Create a virtual environment + # Use an ENV_DIR of your choice. We'll use ~/.pyenv/versions/pandas-dev + + pyenv virtualenv <version> <name-to-give-it> + + # For instance: + pyenv virtualenv 3.7.6 pandas-dev + + # Activate the virtualenv + pyenv activate pandas-dev + + # Now install the build dependencies in the cloned pandas repo + python -m pip install -r requirements-dev.txt + + # Build and install pandas + python setup.py build_ext -j 4 + python -m pip install -e . --no-build-isolation --no-use-pep517 + +**Windows** + +Below is a brief overview of how to set up a virtual environment with PowerShell +under Windows. For details, please refer to the +`official virtualenv user guide <https://virtualenv.pypa.io/en/stable/userguide/#activate-script>`__. + +Use an ENV_DIR of your choice. We'll use ~\\virtualenvs\\pandas-dev where +'~' is the folder pointed to by either the $env:USERPROFILE (PowerShell) or +%USERPROFILE% (cmd.exe) environment variable. Any parent directories +should already exist. + +.. code-block:: powershell + + # Create a virtual environment + python -m venv $env:USERPROFILE\virtualenvs\pandas-dev + + # Activate the virtualenv. Use activate.bat for cmd.exe + ~\virtualenvs\pandas-dev\Scripts\Activate.ps1 + + # Install the build dependencies + python -m pip install -r requirements-dev.txt + + # Build and install pandas + python setup.py build_ext -j 4 + python -m pip install -e . --no-build-isolation --no-use-pep517 diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst index abe2fc1409bfb..fb50a88c6637f 100644 --- a/doc/source/development/index.rst +++ b/doc/source/development/index.rst @@ -13,6 +13,9 @@ Development :maxdepth: 2 contributing + contributing_environment + contributing_documentation + contributing_codebase code_style maintaining internals diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index a9c3d637a41e3..1184c596648fc 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -184,7 +184,7 @@ You can find simple installation instructions for pandas in this document: ``ins Installing from source ~~~~~~~~~~~~~~~~~~~~~~ -See the :ref:`contributing guide <contributing>` for complete instructions on building from the git source tree. Further, see :ref:`creating a development environment <contributing.dev_env>` if you wish to create a pandas development environment. +See the :ref:`contributing guide <contributing>` for complete instructions on building from the git source tree. Further, see :ref:`creating a development environment <contributing_environment>` if you wish to create a pandas development environment. Running the test suite ----------------------
- [x] closes #39367

I created a new PR in an attempt to close #39367. Previously I had issues with environment setup, but everything seems to be working now thanks to Docker. I ran the linting tests prior to submitting this PR.
https://api.github.com/repos/pandas-dev/pandas/pulls/40130
2021-03-01T00:18:29Z
2021-04-04T17:07:42Z
2021-04-04T17:07:42Z
2021-04-04T17:07:46Z
REF: share recarray constructor code
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bc2051a130079..dd3818af9ea9c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -178,10 +178,10 @@ arrays_to_mgr, dataclasses_to_dicts, dict_to_mgr, - masked_rec_array_to_mgr, mgr_to_mgr, ndarray_to_mgr, nested_data_to_arrays, + rec_array_to_mgr, reorder_arrays, to_arrays, treat_as_nested, @@ -580,7 +580,7 @@ def __init__( # masked recarray if isinstance(data, mrecords.MaskedRecords): - mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy) + mgr = rec_array_to_mgr(data, index, columns, dtype, copy) # a masked array else: @@ -590,11 +590,7 @@ def __init__( elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: # i.e. numpy structured array - data_columns = list(data.dtype.names) - data = {k: data[k] for k in data_columns} - if columns is None: - columns = data_columns - mgr = dict_to_mgr(data, index, columns, dtype=dtype) + mgr = rec_array_to_mgr(data, index, columns, dtype, copy) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name mgr = dict_to_mgr({data.name: data}, index, columns, dtype=dtype) diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 73ef006caa7d9..8cb6d692e070c 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -117,8 +117,12 @@ def arrays_to_mgr( return create_block_manager_from_arrays(arrays, arr_names, axes) -def masked_rec_array_to_mgr( - data: MaskedRecords, index, columns, dtype: Optional[DtypeObj], copy: bool +def rec_array_to_mgr( + data: Union[MaskedRecords, np.recarray, np.ndarray], + index, + columns, + dtype: Optional[DtypeObj], + copy: bool, ): """ Extract from a masked rec array and create the manager. @@ -136,16 +140,10 @@ def masked_rec_array_to_mgr( arrays, arr_columns = to_arrays(fdata, columns) # fill if needed - new_arrays = [] - for col in arr_columns: - arr = data[col] - fv = arr.fill_value - - mask = ma.getmaskarray(arr) - if mask.any(): - arr, fv = maybe_upcast(arr, fill_value=fv, copy=True) - arr[mask] = fv - new_arrays.append(arr) + if isinstance(data, np.ma.MaskedArray): + new_arrays = fill_masked_arrays(data, arr_columns) + else: + new_arrays = arrays # create the manager arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns) @@ -159,6 +157,24 @@ def masked_rec_array_to_mgr( return mgr +def fill_masked_arrays(data: MaskedRecords, arr_columns: Index) -> List[np.ndarray]: + """ + Convert numpy MaskedRecords to ensure mask is softened. + """ + new_arrays = [] + + for col in arr_columns: + arr = data[col] + fv = arr.fill_value + + mask = ma.getmaskarray(arr) + if mask.any(): + arr, fv = maybe_upcast(arr, fill_value=fv, copy=True) + arr[mask] = fv + new_arrays.append(arr) + return new_arrays + + def mgr_to_mgr(mgr, typ: str): """ Convert to specific type of Manager. Does not copy if the type is already diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 873c58f976508..4f32cec001c5a 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -279,6 +279,7 @@ def test_constructor_rec(self, float_frame): tm.assert_index_equal(df2.columns, Index(rec.dtype.names)) tm.assert_index_equal(df2.index, index) + # case with columns != the ones we would infer from the data rng = np.arange(len(rec))[::-1] df3 = DataFrame(rec, index=rng, columns=["C", "B"]) expected = DataFrame(rec, index=rng).reindex(columns=["C", "B"])
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
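As context for the refactor, here is a minimal sketch (not part of the diff; the values are made up) of the two construction paths that now share `rec_array_to_mgr`:

```python
import numpy as np
from numpy.ma import mrecords
import pandas as pd

# Plain numpy structured array: previously went through dict_to_mgr,
# now routed through the shared rec_array_to_mgr path.
rec = np.array([(1, 2.0), (3, 4.0)], dtype=[("a", "i8"), ("b", "f8")])
print(pd.DataFrame(rec))

# Masked recarray: masked entries are filled (fill_masked_arrays)
# before the block manager is created.
mrec = mrecords.fromarrays(
    [np.ma.array([1, 2], mask=[False, True]), np.ma.array([3.0, 4.0])],
    names=["a", "b"],
)
print(pd.DataFrame(mrec))
```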
https://api.github.com/repos/pandas-dev/pandas/pulls/40129
2021-03-01T00:06:13Z
2021-03-01T19:27:52Z
2021-03-01T19:27:52Z
2021-03-01T20:50:03Z
CLN: remove conda recipe
diff --git a/conda.recipe/bld.bat b/conda.recipe/bld.bat deleted file mode 100644 index 284926fae8c04..0000000000000 --- a/conda.recipe/bld.bat +++ /dev/null @@ -1,2 +0,0 @@ -@echo off -%PYTHON% setup.py install diff --git a/conda.recipe/build.sh b/conda.recipe/build.sh deleted file mode 100644 index f341bce6fcf96..0000000000000 --- a/conda.recipe/build.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -$PYTHON setup.py install diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml deleted file mode 100644 index 53ee212360475..0000000000000 --- a/conda.recipe/meta.yaml +++ /dev/null @@ -1,40 +0,0 @@ -package: - name: pandas - version: {{ environ.get('GIT_DESCRIBE_TAG','').replace('v', '', 1) }} - -build: - number: {{ environ.get('GIT_DESCRIBE_NUMBER', 0) }} - {% if GIT_DESCRIBE_NUMBER|int == 0 %}string: np{{ CONDA_NPY }}py{{ CONDA_PY }}_0 - {% else %}string: np{{ CONDA_NPY }}py{{ CONDA_PY }}_{{ GIT_BUILD_STR }}{% endif %} - -source: - git_url: ../ - -requirements: - build: - - {{ compiler('c') }} - - {{ compiler('cxx') }} - host: - - python - - pip - - cython - - numpy - - setuptools >=38.6.0 - - python-dateutil >=2.7.3 - - pytz - run: - - python {{ python }} - - {{ pin_compatible('numpy') }} - - python-dateutil >=2.7.3 - - pytz - -test: - requires: - - pytest - commands: - - python -c "import pandas; pandas.test()" - - -about: - home: https://pandas.pydata.org - license: BSD
xref #38852; those files are not maintained.
https://api.github.com/repos/pandas-dev/pandas/pulls/40128
2021-02-28T21:30:16Z
2021-03-01T13:55:58Z
2021-03-01T13:55:57Z
2022-11-18T02:21:59Z
BUG: Set dtypes of new columns when stacking (#36991)
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index fb8eecdaa275e..326597763d1bc 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -585,7 +585,7 @@ Reshaping - Bug in :meth:`DataFrame.append` returning incorrect dtypes with combinations of ``datetime64`` and ``timedelta64`` dtypes (:issue:`39574`) - Bug in :meth:`DataFrame.pivot_table` returning a ``MultiIndex`` for a single value when operating on and empty ``DataFrame`` (:issue:`13483`) - Allow :class:`Index` to be passed to the :func:`numpy.all` function (:issue:`40180`) -- +- Bug in :meth:`DataFrame.stack` not preserving ``CategoricalDtype`` in a ``MultiIndex`` (:issue:`36991`) Sparse ^^^^^^ diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 271bb2ca8dd75..ff6ba3f8f4164 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -600,6 +600,33 @@ def stack_multiple(frame, level, dropna=True): return result +def _stack_multi_column_index(columns: MultiIndex) -> MultiIndex: + """Creates a MultiIndex from the first N-1 levels of this MultiIndex.""" + if len(columns.levels) <= 2: + return columns.levels[0]._rename(name=columns.names[0]) + + levs = [ + [lev[c] if c >= 0 else None for c in codes] + for lev, codes in zip(columns.levels[:-1], columns.codes[:-1]) + ] + + # Remove duplicate tuples in the MultiIndex. + tuples = zip(*levs) + unique_tuples = (key for key, _ in itertools.groupby(tuples)) + new_levs = zip(*unique_tuples) + + # The dtype of each level must be explicitly set to avoid inferring the wrong type. + # See GH-36991. + return MultiIndex.from_arrays( + [ + # Not all indices can accept None values. + Index(new_lev, dtype=lev.dtype) if None not in new_lev else new_lev + for new_lev, lev in zip(new_levs, columns.levels) + ], + names=columns.names[:-1], + ) + + def _stack_multi_columns(frame, level_num=-1, dropna=True): def _convert_level_number(level_num, columns): """ @@ -634,20 +661,7 @@ def _convert_level_number(level_num, columns): level_to_sort = _convert_level_number(0, this.columns) this = this.sort_index(level=level_to_sort, axis=1) - # tuple list excluding level for grouping columns - if len(frame.columns.levels) > 2: - levs = [] - for lev, level_codes in zip(this.columns.levels[:-1], this.columns.codes[:-1]): - if -1 in level_codes: - lev = np.append(lev, None) - levs.append(np.take(lev, level_codes)) - tuples = list(zip(*levs)) - unique_groups = [key for key, _ in itertools.groupby(tuples)] - new_names = this.columns.names[:-1] - new_columns = MultiIndex.from_tuples(unique_groups, names=new_names) - else: - new_columns = this.columns.levels[0]._rename(name=this.columns.names[0]) - unique_groups = new_columns + new_columns = _stack_multi_column_index(this.columns) # time to ravel the values new_data = {} @@ -658,7 +672,7 @@ def _convert_level_number(level_num, columns): level_vals_used = np.take(level_vals_nan, level_codes) levsize = len(level_codes) drop_cols = [] - for key in unique_groups: + for key in new_columns: try: loc = this.columns.get_loc(key) except KeyError: diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index fd23ea3a7621c..4082f21254e52 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -1065,6 +1065,27 @@ def test_stack_preserve_categorical_dtype(self, ordered, labels): tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("ordered", [False, True]) + 
@pytest.mark.parametrize( + "labels,data", + [ + (list("xyz"), [10, 11, 12, 13, 14, 15]), + (list("zyx"), [14, 15, 12, 13, 10, 11]), + ], + ) + def test_stack_multi_preserve_categorical_dtype(self, ordered, labels, data): + # GH-36991 + cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered) + cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered) + midx = MultiIndex.from_product([cidx, cidx2]) + df = DataFrame([sorted(data)], columns=midx) + result = df.stack([0, 1]) + + s_cidx = pd.CategoricalIndex(sorted(labels), ordered=ordered) + expected = Series(data, index=MultiIndex.from_product([[0], s_cidx, cidx2])) + + tm.assert_series_equal(result, expected) + def test_stack_preserve_categorical_dtype_values(self): # GH-23077 cat = pd.Categorical(["a", "a", "b", "c"])
- [x] closes #36991 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
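A minimal sketch of the fixed behavior, adapted from the new test (the printed dtypes are what the fix is asserting):

```python
import pandas as pd
from pandas import DataFrame, MultiIndex

# Columns form a MultiIndex built from two CategoricalIndexes.
cidx = pd.CategoricalIndex(list("xyz"), ordered=True)
cidx2 = pd.CategoricalIndex(["u", "v"], ordered=True)
midx = MultiIndex.from_product([cidx, cidx2])
df = DataFrame([[10, 11, 12, 13, 14, 15]], columns=midx)

result = df.stack([0, 1])
# With the fix, the stacked index levels keep their CategoricalDtype
# instead of being re-inferred as plain object levels.
print(result.index.get_level_values(1).dtype)  # category
print(result.index.get_level_values(2).dtype)  # category
```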
https://api.github.com/repos/pandas-dev/pandas/pulls/40127
2021-02-28T20:53:35Z
2021-03-10T13:55:50Z
2021-03-10T13:55:50Z
2021-03-13T07:15:55Z
STYLE: update pre-commit-config
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 47a9ae592f940..3966e8931162c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,13 +29,15 @@ repos: - id: pyupgrade args: [--py37-plus, --keep-runtime-typing] - repo: https://github.com/pre-commit/pygrep-hooks - rev: v1.7.1 + rev: v1.8.0 hooks: - id: rst-backticks - id: rst-directive-colons - types: [text] + types: [text] # overwrite types: [rst] + types_or: [python, rst] - id: rst-inline-touching-normal - types: [text] + types: [text] # overwrite types: [rst] + types_or: [python, rst] - repo: local hooks: - id: pip_to_conda @@ -212,8 +214,8 @@ repos: rev: v0.1.7 hooks: - id: no-string-hints -- repo: https://github.com/MarcoGorelli/abs-imports - rev: v0.1.2 +- repo: https://github.com/MarcoGorelli/absolufy-imports + rev: v0.2.1 hooks: - - id: abs-imports + - id: absolufy-imports files: ^pandas/
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them

I am doing this manually (instead of waiting for tomorrow's automated job) in order to fix up some minor things.
https://api.github.com/repos/pandas-dev/pandas/pulls/40125
2021-02-28T14:15:50Z
2021-03-01T14:37:35Z
2021-03-01T14:37:35Z
2021-03-01T14:38:30Z
Use tempfile for creating a file in the home directory for an IO test
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index db742fb69dd10..e1dcec56913f9 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -9,6 +9,7 @@ import mmap import os from pathlib import Path +import tempfile import pytest @@ -119,10 +120,11 @@ def test_infer_compression_from_path(self, extension, expected, path_type): @pytest.mark.parametrize("path_type", [str, CustomFSPath, Path]) def test_get_handle_with_path(self, path_type): # ignore LocalPath: it creates strange paths: /absolute/~/sometest - filename = path_type("~/sometest") - with icom.get_handle(filename, "w") as handles: - assert os.path.isabs(handles.handle.name) - assert os.path.expanduser(filename) == handles.handle.name + with tempfile.TemporaryDirectory(dir=Path.home()) as tmp: + filename = path_type("~/" + Path(tmp).name + "/sometest") + with icom.get_handle(filename, "w") as handles: + assert Path(handles.handle.name).is_absolute() + assert os.path.expanduser(filename) == handles.handle.name def test_get_handle_with_buffer(self): input_buffer = StringIO()
- [X] closes #40091 - [X] tests added / passed - [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [X] whatsnew entry
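For reference, a small sketch of the pattern the updated test uses, assuming a writable home directory:

```python
import os
import tempfile
from pathlib import Path

# Create a throwaway directory under the user's home so a "~/..." path
# can be exercised without leaving a stray "sometest" file behind.
with tempfile.TemporaryDirectory(dir=Path.home()) as tmp:
    filename = "~/" + Path(tmp).name + "/sometest"
    with open(os.path.expanduser(filename), "w") as handle:
        # the resolved handle path is absolute, as the test asserts
        assert Path(handle.name).is_absolute()
# the directory and everything in it is removed on exit
```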
https://api.github.com/repos/pandas-dev/pandas/pulls/40122
2021-02-28T09:06:49Z
2021-03-02T17:26:45Z
2021-03-02T17:26:44Z
2021-03-02T17:26:50Z
TYP: to_arrays, BUG: from_records empty dtypes
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 6878227f6ae9c..41db72612a66b 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -401,7 +401,7 @@ Conversion ^^^^^^^^^^ - Bug in :meth:`Series.to_dict` with ``orient='records'`` now returns python native types (:issue:`25969`) - Bug in :meth:`Series.view` and :meth:`Index.view` when converting between datetime-like (``datetime64[ns]``, ``datetime64[ns, tz]``, ``timedelta64``, ``period``) dtypes (:issue:`39788`) -- +- Bug in creating a :class:`DataFrame` from an empty ``np.recarray`` not retaining the original dtypes (:issue:`40121`) - Strings diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bc2051a130079..07b325194b374 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -609,6 +609,8 @@ def __init__( if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if treat_as_nested(data): + if columns is not None: + columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( data, columns, index, dtype ) diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 73ef006caa7d9..601661804df4e 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -586,7 +586,9 @@ def dataclasses_to_dicts(data): # Conversion of Inputs to Arrays -def to_arrays(data, columns: Optional[Index], dtype: Optional[DtypeObj] = None): +def to_arrays( + data, columns: Optional[Index], dtype: Optional[DtypeObj] = None +) -> Tuple[List[ArrayLike], Index]: """ Return list of arrays, columns. """ @@ -607,8 +609,10 @@ def to_arrays(data, columns: Optional[Index], dtype: Optional[DtypeObj] = None): if isinstance(data, np.ndarray): columns = data.dtype.names if columns is not None: - return [[]] * len(columns), columns - return [], [] # columns if columns is not None else [] + # i.e. 
numpy structured array + arrays = [data[name] for name in columns] + return arrays, ensure_index(columns) + return [], ensure_index([]) elif isinstance(data[0], Categorical): if columns is None: diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py index 0d36f3bd80e26..1cda4b1948c6a 100644 --- a/pandas/tests/frame/constructors/test_from_records.py +++ b/pandas/tests/frame/constructors/test_from_records.py @@ -11,6 +11,7 @@ CategoricalIndex, DataFrame, Index, + Int64Index, Interval, RangeIndex, Series, @@ -437,11 +438,11 @@ def test_from_records_empty(self): def test_from_records_empty_with_nonempty_fields_gh3682(self): a = np.array([(1, 2)], dtype=[("id", np.int64), ("value", np.int64)]) df = DataFrame.from_records(a, index="id") - tm.assert_index_equal(df.index, Index([1], name="id")) - assert df.index.name == "id" - tm.assert_index_equal(df.columns, Index(["value"])) - - b = np.array([], dtype=[("id", np.int64), ("value", np.int64)]) - df = DataFrame.from_records(b, index="id") - tm.assert_index_equal(df.index, Index([], name="id")) - assert df.index.name == "id" + + ex_index = Int64Index([1], name="id") + expected = DataFrame({"value": [2]}, index=ex_index, columns=["value"]) + tm.assert_frame_equal(df, expected) + + b = a[:0] + df2 = DataFrame.from_records(b, index="id") + tm.assert_frame_equal(df2, df.iloc[:0]) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 873c58f976508..83126d0c13cf7 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1169,7 +1169,8 @@ def test_constructor_unequal_length_nested_list_column(self): # GH 32173 arrays = [list("abcd"), list("cde")] - msg = "Length of columns passed for MultiIndex columns is different" + # exception raised inside MultiIndex constructor + msg = "all arrays must be same length" with pytest.raises(ValueError, match=msg): DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays)
1. The argument annotation for `columns` in `to_arrays` isn't quite right; this is fixed by adding an `ensure_index` call inside `DataFrame.__init__`.
2. Call `ensure_index` in the empty case in `to_arrays` so we can tighten the return annotation from `Tuple[Any, Any]` to `Tuple[Any, Index]`.
3. In the structured-array case in `to_arrays`, return a list of empty ndarrays instead of a list of empty lists, so we can further tighten to `Tuple[List[ArrayLike], Index]`.
4. This breaks `test_from_records_empty_with_nonempty_fields_gh3682`, at which point we decide that the new behavior is better, so we call it a bugfix; see the sketch below.
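A minimal sketch of the behavior change in point 4, mirroring the updated test:

```python
import numpy as np
from pandas import DataFrame

a = np.array([(1, 2)], dtype=[("id", np.int64), ("value", np.int64)])
empty = a[:0]  # a zero-length slice keeps the structured dtype

df = DataFrame.from_records(empty, index="id")
# With this change the empty frame retains the recarray's dtypes
# instead of producing empty object columns.
print(df.index.dtype)  # int64
print(df.dtypes)       # value    int64
```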
https://api.github.com/repos/pandas-dev/pandas/pulls/40121
2021-02-28T03:26:10Z
2021-03-01T19:28:37Z
2021-03-01T19:28:37Z
2021-03-01T20:50:40Z
REF: re-use objects_to_datetime64ns in maybe_infer_to_datetimelike
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index f7af1bb3da86b..27361b83ee88f 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -4,6 +4,7 @@ from typing import ( List, Optional, + Tuple, Union, ) @@ -907,7 +908,9 @@ def f(x): # Constructor Helpers -def sequence_to_td64ns(data, copy=False, unit=None, errors="raise"): +def sequence_to_td64ns( + data, copy=False, unit=None, errors="raise" +) -> Tuple[np.ndarray, Optional[Tick]]: """ Parameters ---------- diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index a93bf0c9211e1..8afd5ce179e50 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1416,31 +1416,43 @@ def maybe_infer_to_datetimelike(value: Union[np.ndarray, List]): return value def try_datetime(v: np.ndarray) -> ArrayLike: - # safe coerce to datetime64 - try: - # GH19671 - # tznaive only - v = tslib.array_to_datetime(v, require_iso8601=True, errors="raise")[0] - except ValueError: + # Coerce to datetime64, datetime64tz, or in corner cases + # object[datetimes] + from pandas.core.arrays.datetimes import ( + DatetimeArray, + objects_to_datetime64ns, + tz_to_dtype, + ) + try: + # GH#19671 we pass require_iso8601 to be relatively strict + # when parsing strings. + vals, tz = objects_to_datetime64ns( + v, + require_iso8601=True, + dayfirst=False, + yearfirst=False, + allow_object=True, + ) + except (ValueError, TypeError): + # e.g. <class 'numpy.timedelta64'> is not convertible to datetime + return v.reshape(shape) + else: # we might have a sequence of the same-datetimes with tz's # if so coerce to a DatetimeIndex; if they are not the same, - # then these stay as object dtype, xref GH19671 - from pandas import DatetimeIndex - - try: - - values, tz = conversion.datetime_to_datetime64(v) - except (ValueError, TypeError): - pass - else: - dti = DatetimeIndex(values).tz_localize("UTC").tz_convert(tz=tz) - return dti._data - except TypeError: - # e.g. <class 'numpy.timedelta64'> is not convertible to datetime - pass - - return v.reshape(shape) + # then these stay as object dtype, xref GH#19671 + + if vals.dtype == object: + # This is reachable bc allow_object=True, means we cast things + # to mixed-tz datetime objects (mostly). Only 1 test + # relies on this behavior, see GH#40111 + return vals.reshape(shape) + + dta = DatetimeArray._simple_new(vals.view("M8[ns]"), dtype=tz_to_dtype(tz)) + if dta.tz is None: + # TODO(EA2D): conditional reshape kludge unnecessary with 2D EAs + return dta._ndarray.reshape(shape) + return dta def try_timedelta(v: np.ndarray) -> np.ndarray: # safe coerce to timedelta64
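No description was provided; as a rough illustration (a sketch, not part of the diff), the refactored `try_datetime` path is what decides these inferences:

```python
import numpy as np
import pandas as pd

# Same-tz datetime objects: inferred to a tz-aware datetime dtype.
same_tz = np.array(
    [pd.Timestamp("2021-01-01", tz="US/Eastern"),
     pd.Timestamp("2021-01-02", tz="US/Eastern")],
    dtype=object,
)
print(pd.Series(same_tz).dtype)  # datetime64[ns, US/Eastern]

# Mixed-tz datetimes stay object dtype; this is the allow_object=True
# branch mentioned in the GH#40111 comment.
mixed_tz = np.array(
    [pd.Timestamp("2021-01-01", tz="US/Eastern"),
     pd.Timestamp("2021-01-02", tz="UTC")],
    dtype=object,
)
print(pd.Series(mixed_tz).dtype)  # object
```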
https://api.github.com/repos/pandas-dev/pandas/pulls/40120
2021-02-28T02:17:13Z
2021-03-01T15:14:35Z
2021-03-01T15:14:34Z
2021-03-01T15:41:11Z
CLN: Window aggregation typing
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index e7d3ebce1c404..fcd2acc3e025a 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -116,9 +116,10 @@ cdef inline void remove_sum(float64_t val, int64_t *nobs, float64_t *sum_x, def roll_sum(const float64_t[:] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp): cdef: + Py_ssize_t i, j float64_t sum_x = 0, compensation_add = 0, compensation_remove = 0 int64_t s, e - int64_t nobs = 0, i, j, N = len(values) + int64_t nobs = 0, N = len(values) ndarray[float64_t] output bint is_monotonic_increasing_bounds @@ -493,12 +494,13 @@ cdef inline void remove_skew(float64_t val, int64_t *nobs, def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp): cdef: + Py_ssize_t i, j float64_t val, prev, min_val, mean_val, sum_val = 0 float64_t compensation_xxx_add = 0, compensation_xxx_remove = 0 float64_t compensation_xx_add = 0, compensation_xx_remove = 0 float64_t compensation_x_add = 0, compensation_x_remove = 0 float64_t x = 0, xx = 0, xxx = 0 - int64_t nobs = 0, i, j, N = len(values), nobs_mean = 0 + int64_t nobs = 0, N = len(values), nobs_mean = 0 int64_t s, e ndarray[float64_t] output, mean_array, values_copy bint is_monotonic_increasing_bounds @@ -674,13 +676,14 @@ cdef inline void remove_kurt(float64_t val, int64_t *nobs, def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp): cdef: + Py_ssize_t i, j float64_t val, prev, mean_val, min_val, sum_val = 0 float64_t compensation_xxxx_add = 0, compensation_xxxx_remove = 0 float64_t compensation_xxx_remove = 0, compensation_xxx_add = 0 float64_t compensation_xx_remove = 0, compensation_xx_add = 0 float64_t compensation_x_remove = 0, compensation_x_add = 0 float64_t x = 0, xx = 0, xxx = 0, xxxx = 0 - int64_t nobs = 0, i, j, s, e, N = len(values), nobs_mean = 0 + int64_t nobs = 0, s, e, N = len(values), nobs_mean = 0 ndarray[float64_t] output, values_copy bint is_monotonic_increasing_bounds @@ -754,15 +757,13 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start, def roll_median_c(const float64_t[:] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp): cdef: - float64_t val, res, prev - bint err = False - int ret = 0 - skiplist_t *sl Py_ssize_t i, j + bint err = False, is_monotonic_increasing_bounds + int midpoint, ret = 0 int64_t nobs = 0, N = len(values), s, e, win - int midpoint + float64_t val, res, prev + skiplist_t *sl ndarray[float64_t] output - bint is_monotonic_increasing_bounds is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds( start, end @@ -933,8 +934,8 @@ cdef _roll_min_max(ndarray[numeric] values, bint is_max): cdef: numeric ai - int64_t i, k, curr_win_size, start - Py_ssize_t nobs = 0, N = len(values) + int64_t curr_win_size, start + Py_ssize_t i, k, nobs = 0, N = len(values) deque Q[int64_t] # min/max always the front deque W[int64_t] # track the whole window for nobs compute ndarray[float64_t, ndim=1] output @@ -1017,14 +1018,14 @@ def roll_quantile(const float64_t[:] values, ndarray[int64_t] start, O(N log(window)) implementation using skip list """ cdef: + Py_ssize_t i, j, s, e, N = len(values), idx + int ret = 0 + int64_t nobs = 0, win float64_t val, prev, midpoint, idx_with_fraction - skiplist_t *skiplist - int64_t nobs = 0, i, j, s, e, N = len(values), win - Py_ssize_t idx - ndarray[float64_t] output float64_t vlow, vhigh + skiplist_t *skiplist 
InterpolationType interpolation_type - int ret = 0 + ndarray[float64_t] output if quantile <= 0.0 or quantile >= 1.0: raise ValueError(f"quantile value {quantile} not in [0, 1]") @@ -1041,10 +1042,10 @@ def roll_quantile(const float64_t[:] values, ndarray[int64_t] start, # actual skiplist ops outweigh any window computation costs output = np.empty(N, dtype=float) - if (end - start).max() == 0: + win = (end - start).max() + if win == 0: output[:] = NaN return output - win = (end - start).max() skiplist = skiplist_init(<int>win) if skiplist == NULL: raise MemoryError("skiplist_init failed") @@ -1473,9 +1474,9 @@ def roll_weighted_var(const float64_t[:] values, const float64_t[:] weights, # ---------------------------------------------------------------------- # Exponentially weighted moving average -def ewma(float64_t[:] vals, int64_t[:] start, int64_t[:] end, int minp, - float64_t com, bint adjust, bint ignore_na, float64_t[:] times, - float64_t halflife): +def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end, + int minp, float64_t com, bint adjust, bint ignore_na, + const float64_t[:] times, float64_t halflife): """ Compute exponentially-weighted moving average using center-of-mass. @@ -1486,8 +1487,10 @@ def ewma(float64_t[:] vals, int64_t[:] start, int64_t[:] end, int minp, end: ndarray (int64 type) minp : int com : float64 - adjust : int + adjust : bool ignore_na : bool + times : ndarray (float64 type) + halflife : float64 Returns ------- @@ -1496,7 +1499,7 @@ def ewma(float64_t[:] vals, int64_t[:] start, int64_t[:] end, int minp, cdef: Py_ssize_t i, j, s, e, nobs, win_size, N = len(vals), M = len(start) - float64_t[:] sub_vals + const float64_t[:] sub_vals ndarray[float64_t] sub_output, output = np.empty(N, dtype=float) float64_t alpha, old_wt_factor, new_wt, weighted_avg, old_wt, cur, delta bint is_observation @@ -1555,8 +1558,9 @@ def ewma(float64_t[:] vals, int64_t[:] start, int64_t[:] end, int minp, # Exponentially weighted moving covariance -def ewmcov(float64_t[:] input_x, int64_t[:] start, int64_t[:] end, int minp, - float64_t[:] input_y, float64_t com, bint adjust, bint ignore_na, bint bias): +def ewmcov(const float64_t[:] input_x, const int64_t[:] start, const int64_t[:] end, + int minp, const float64_t[:] input_y, float64_t com, bint adjust, + bint ignore_na, bint bias): """ Compute exponentially-weighted moving variance using center-of-mass. @@ -1568,9 +1572,9 @@ def ewmcov(float64_t[:] input_x, int64_t[:] start, int64_t[:] end, int minp, minp : int input_y : ndarray (float64 type) com : float64 - adjust : int + adjust : bool ignore_na : bool - bias : int + bias : bool Returns ------- @@ -1583,7 +1587,7 @@ def ewmcov(float64_t[:] input_x, int64_t[:] start, int64_t[:] end, int minp, float64_t alpha, old_wt_factor, new_wt, mean_x, mean_y, cov float64_t sum_wt, sum_wt2, old_wt, cur_x, cur_y, old_mean_x, old_mean_y float64_t numerator, denominator - float64_t[:] sub_x_vals, sub_y_vals + const float64_t[:] sub_x_vals, sub_y_vals ndarray[float64_t] sub_out, output = np.empty(N, dtype=float) bint is_observation @@ -1594,6 +1598,8 @@ def ewmcov(float64_t[:] input_x, int64_t[:] start, int64_t[:] end, int minp, return output alpha = 1. / (1. + com) + old_wt_factor = 1. - alpha + new_wt = 1. if adjust else alpha for j in range(L): s = start[j] @@ -1603,9 +1609,6 @@ def ewmcov(float64_t[:] input_x, int64_t[:] start, int64_t[:] end, int minp, win_size = len(sub_x_vals) sub_out = np.empty(win_size, dtype=float) - old_wt_factor = 1. - alpha - new_wt = 1. 
if adjust else alpha - mean_x = sub_x_vals[0] mean_y = sub_y_vals[0] is_observation = (mean_x == mean_x) and (mean_y == mean_y)
* Use more `const`
* Change some indexing variables to `Py_ssize_t`
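These kernels are internal; a short sketch of the public rolling/ewm calls that exercise them (illustrative only):

```python
import numpy as np
import pandas as pd

s = pd.Series(np.arange(10, dtype="float64"))

# roll_sum / roll_quantile are reached through the Rolling API:
print(s.rolling(window=3, min_periods=1).sum())
print(s.rolling(window=3).quantile(0.5, interpolation="linear"))

# ewma / ewmcov back the exponentially weighted ops:
print(s.ewm(com=0.5, adjust=True, ignore_na=False).mean())
print(s.ewm(com=0.5).cov(s))
```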
https://api.github.com/repos/pandas-dev/pandas/pulls/40119
2021-02-28T00:35:44Z
2021-03-01T13:56:52Z
2021-03-01T13:56:52Z
2021-03-01T16:55:58Z
CI: skip array-manager test
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 8cbb9d2443cb2..b270539921c9c 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1196,6 +1196,7 @@ def convert_force_pure(x): assert isinstance(result[0], Decimal) +@td.skip_array_manager_not_yet_implemented def test_groupby_dtype_inference_empty(): # GH 6733 df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry

This should get the CI back to green.
https://api.github.com/repos/pandas-dev/pandas/pulls/40118
2021-02-28T00:23:48Z
2021-02-28T02:02:56Z
2021-02-28T02:02:56Z
2021-03-01T09:28:37Z
Backport PR #40090 on branch 1.2.x
diff --git a/doc/source/whatsnew/v1.2.3.rst b/doc/source/whatsnew/v1.2.3.rst index f72ee78bf243a..99e997189d7b8 100644 --- a/doc/source/whatsnew/v1.2.3.rst +++ b/doc/source/whatsnew/v1.2.3.rst @@ -24,6 +24,8 @@ Fixed regressions Passing ``ascending=None`` is still considered invalid, and the new error message suggests a proper usage (``ascending`` must be a boolean or a list-like boolean). +- Fixed regression in :meth:`DataFrame.transform` and :meth:`Series.transform` giving incorrect column labels when passed a dictionary with a mix of list and non-list values (:issue:`40018`) +- .. --------------------------------------------------------------------------- diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index 74f21bae39ba9..f5d4cedf7398d 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -491,6 +491,22 @@ def transform_dict_like( # GH 15931 - deprecation of renaming keys raise SpecificationError("nested renamer is not supported") + is_aggregator = lambda x: isinstance(x, (list, tuple, dict)) + + # if we have a dict of any non-scalars + # eg. {'A' : ['mean']}, normalize all to + # be list-likes + # Cannot use func.values() because arg may be a Series + if any(is_aggregator(x) for _, x in func.items()): + new_func: AggFuncTypeDict = {} + for k, v in func.items(): + if not is_aggregator(v): + # mypy can't realize v is not a list here + new_func[k] = [v] # type:ignore[list-item] + else: + new_func[k] = v + func = new_func + results: Dict[Label, FrameOrSeriesUnion] = {} for name, how in func.items(): colg = obj._gotitem(name, ndim=1) diff --git a/pandas/tests/frame/apply/test_frame_transform.py b/pandas/tests/frame/apply/test_frame_transform.py index d3a3b1482affd..c2ee2bbbc54e4 100644 --- a/pandas/tests/frame/apply/test_frame_transform.py +++ b/pandas/tests/frame/apply/test_frame_transform.py @@ -99,6 +99,17 @@ def test_transform_dictlike(axis, float_frame, box): tm.assert_frame_equal(result, expected) +def test_transform_dictlike_mixed(): + # GH 40018 - mix of lists and non-lists in values of a dictionary + df = DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]}) + result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"}) + expected = DataFrame( + [[1.0, 1, 1.0], [2.0, 4, 2.0]], + columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( "ops", [ diff --git a/pandas/tests/series/apply/test_series_transform.py b/pandas/tests/series/apply/test_series_transform.py index 992aaa540a65f..27d769c3bd5f3 100644 --- a/pandas/tests/series/apply/test_series_transform.py +++ b/pandas/tests/series/apply/test_series_transform.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from pandas import DataFrame, Series, concat +from pandas import DataFrame, MultiIndex, Series, concat import pandas._testing as tm from pandas.core.base import SpecificationError from pandas.core.groupby.base import transformation_kernels @@ -52,6 +52,17 @@ def test_transform_dictlike(string_series, box): tm.assert_frame_equal(result, expected) +def test_transform_dictlike_mixed(): + # GH 40018 - mix of lists and non-lists in values of a dictionary + df = Series([1, 4]) + result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"}) + expected = DataFrame( + [[1.0, 1, 1.0], [2.0, 4, 2.0]], + columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]), + ) + tm.assert_frame_equal(result, expected) + + def test_transform_wont_agg(string_series): # GH 35964 # we are trying to transform 
with an aggregator
Backport PR #40090
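A minimal illustration of the regression fixed by the backported change, adapted from the new test:

```python
from pandas import DataFrame

df = DataFrame({"b": [1, 4], "c": [1, 4]})
# A dict mixing list and non-list values: the scalar entry is now
# normalized to a one-element list so the column labels line up.
result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"})
print(result.columns.tolist())
# [('b', 'sqrt'), ('b', 'abs'), ('c', 'sqrt')]
```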
https://api.github.com/repos/pandas-dev/pandas/pulls/40117
2021-02-27T20:47:43Z
2021-02-28T14:07:08Z
2021-02-28T14:07:08Z
2021-02-28T14:08:19Z
PERF: DTA/TDA _simple_new disallow i8 values
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index e476c3566c10f..5245c736a2eea 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -465,15 +465,15 @@ def view(self, dtype: Optional[Dtype] = None) -> ArrayLike: dtype = pandas_dtype(dtype) if isinstance(dtype, (PeriodDtype, DatetimeTZDtype)): cls = dtype.construct_array_type() - return cls._simple_new(self.asi8, dtype=dtype) + return cls(self.asi8, dtype=dtype) elif dtype == "M8[ns]": from pandas.core.arrays import DatetimeArray - return DatetimeArray._simple_new(self.asi8, dtype=dtype) + return DatetimeArray(self.asi8, dtype=dtype) elif dtype == "m8[ns]": from pandas.core.arrays import TimedeltaArray - return TimedeltaArray._simple_new(self.asi8.view("m8[ns]"), dtype=dtype) + return TimedeltaArray(self.asi8, dtype=dtype) return self._ndarray.view(dtype=dtype) # ------------------------------------------------------------------ @@ -1102,10 +1102,10 @@ def _add_timedeltalike_scalar(self, other): return type(self)(new_values, dtype=self.dtype) inc = delta_to_nanoseconds(other) - new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view( - "i8" - ) + new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan) + new_values = new_values.view("i8") new_values = self._maybe_mask_results(new_values) + new_values = new_values.view(self._ndarray.dtype) new_freq = None if isinstance(self.freq, Tick) or is_period_dtype(self.dtype): @@ -1700,6 +1700,7 @@ def _round(self, freq, mode, ambiguous, nonexistent): nanos = to_offset(freq).nanos result = round_nsint64(values, mode, nanos) result = self._maybe_mask_results(result, fill_value=iNaT) + result = result.view(self._ndarray.dtype) return self._simple_new(result, dtype=self.dtype) @Appender((_round_doc + _round_example).format(op="round")) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 28e469547fe62..e28a1a2326d17 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -315,9 +315,7 @@ def _simple_new( cls, values, freq: Optional[BaseOffset] = None, dtype=DT64NS_DTYPE ) -> DatetimeArray: assert isinstance(values, np.ndarray) - if values.dtype != DT64NS_DTYPE: - assert values.dtype == "i8" - values = values.view(DT64NS_DTYPE) + assert values.dtype == DT64NS_DTYPE result = object.__new__(cls) result._ndarray = values @@ -439,6 +437,7 @@ def _generate_range( values = np.array([x.value for x in xdr], dtype=np.int64) _tz = start.tz if start is not None else end.tz + values = values.view("M8[ns]") index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz)) if tz is not None and index.tz is None: @@ -464,9 +463,8 @@ def _generate_range( + start.value ) dtype = tz_to_dtype(tz) - index = cls._simple_new( - arr.astype("M8[ns]", copy=False), freq=None, dtype=dtype - ) + arr = arr.astype("M8[ns]", copy=False) + index = cls._simple_new(arr, freq=None, dtype=dtype) if not left_closed and len(index) and index[0] == start: # TODO: overload DatetimeLikeArrayMixin.__getitem__ @@ -476,7 +474,7 @@ def _generate_range( index = cast(DatetimeArray, index[:-1]) dtype = tz_to_dtype(tz) - return cls._simple_new(index.asi8, freq=freq, dtype=dtype) + return cls._simple_new(index._ndarray, freq=freq, dtype=dtype) # ----------------------------------------------------------------- # DatetimeLike Interface @@ -710,7 +708,7 @@ def _add_offset(self, offset): values = self.tz_localize(None) else: values = self - result = offset._apply_array(values) + 
result = offset._apply_array(values).view("M8[ns]") result = DatetimeArray._simple_new(result) result = result.tz_localize(self.tz) @@ -833,7 +831,7 @@ def tz_convert(self, tz): # No conversion since timestamps are all UTC to begin with dtype = tz_to_dtype(tz) - return self._simple_new(self.asi8, dtype=dtype, freq=self.freq) + return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq) @dtl.ravel_compat def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"): diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index f7af1bb3da86b..2b6319054e245 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -229,13 +229,11 @@ def _simple_new( ) -> TimedeltaArray: assert dtype == TD64NS_DTYPE, dtype assert isinstance(values, np.ndarray), type(values) - if values.dtype != TD64NS_DTYPE: - assert values.dtype == "i8" - values = values.view(TD64NS_DTYPE) + assert values.dtype == TD64NS_DTYPE result = object.__new__(cls) result._ndarray = values - result._freq = to_offset(freq) + result._freq = freq result._dtype = TD64NS_DTYPE return result @@ -317,7 +315,7 @@ def _generate_range(cls, start, end, periods, freq, closed=None): if not right_closed: index = index[:-1] - return cls._simple_new(index, freq=freq) + return cls._simple_new(index.view("m8[ns]"), freq=freq) # ---------------------------------------------------------------- # DatetimeLike Interface diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index a93bf0c9211e1..f873b32dfb805 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -288,7 +288,8 @@ def maybe_downcast_to_dtype( i8values = result.astype("i8", copy=False) cls = dtype.construct_array_type() # equiv: DatetimeArray(i8values).tz_localize("UTC").tz_convert(dtype.tz) - result = cls._simple_new(i8values, dtype=dtype) + dt64values = i8values.view("M8[ns]") + result = cls._simple_new(dt64values, dtype=dtype) else: result = result.astype(dtype) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 6187dffc2bfda..05d75bdda5131 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -542,7 +542,7 @@ def _ea_wrap_cython_operation( return res_values res_values = res_values.astype("i8", copy=False) - result = type(orig_values)._simple_new(res_values, dtype=orig_values.dtype) + result = type(orig_values)(res_values, dtype=orig_values.dtype) return result elif is_integer_dtype(values.dtype) or is_bool_dtype(values.dtype): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 41a769094dbe9..30190ef950af5 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -820,7 +820,7 @@ def view(self, cls=None): arr = self._data.view("i8") idx_cls = self._dtype_to_subclass(dtype) arr_cls = idx_cls._data_cls - arr = arr_cls._simple_new(self._data.view("i8"), dtype=dtype) + arr = arr_cls(self._data.view("i8"), dtype=dtype) return idx_cls._simple_new(arr, name=self.name) result = self._data.view(cls) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 29df20c609a4f..47adc13f49499 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -100,6 +100,7 @@ def wrapper(left, right): join_index = orig_left._from_backing_data(join_index) return join_index, left_indexer, right_indexer + return results return wrapper @@ -645,7 +646,8 @@ def _get_join_freq(self, other): def _wrap_joined_index(self, joined: np.ndarray, other): assert 
other.dtype == self.dtype, (other.dtype, self.dtype) - + assert joined.dtype == "i8" or joined.dtype == self.dtype, joined.dtype + joined = joined.view(self._data._ndarray.dtype) result = super()._wrap_joined_index(joined, other) result._data._freq = self._get_join_freq(other) return result diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 24e75a2bbeff2..a0dfb1c83a70b 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1743,8 +1743,9 @@ def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike: result = result.view(orig_dtype) else: # DatetimeArray + # TODO: have this case go through a DTA method? result = type(values)._simple_new( # type: ignore[attr-defined] - result, dtype=orig_dtype + result.view("M8[ns]"), dtype=orig_dtype ) elif skipna and not issubclass(values.dtype.type, (np.integer, np.bool_)): diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 070dec307f527..87a095e1003c4 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -85,12 +85,10 @@ def arr1d(self): arr = self.array_cls(data, freq="D") return arr - def test_compare_len1_raises(self): + def test_compare_len1_raises(self, arr1d): # make sure we raise when comparing with different lengths, specific # to the case where one has length-1, which numpy would broadcast - data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9 - - arr = self.array_cls._simple_new(data, freq="D") + arr = arr1d idx = self.index_cls(arr) with pytest.raises(ValueError, match="Lengths must match"): @@ -153,7 +151,9 @@ def test_take(self): data = np.arange(100, dtype="i8") * 24 * 3600 * 10 ** 9 np.random.shuffle(data) - arr = self.array_cls._simple_new(data, freq="D") + freq = None if self.array_cls is not PeriodArray else "D" + + arr = self.array_cls(data, freq=freq) idx = self.index_cls._simple_new(arr) takers = [1, 4, 94] @@ -172,7 +172,7 @@ def test_take(self): def test_take_fill_raises(self, fill_value): data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9 - arr = self.array_cls._simple_new(data, freq="D") + arr = self.array_cls(data, freq="D") msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. 
Got" with pytest.raises(TypeError, match=msg): @@ -181,7 +181,7 @@ def test_take_fill_raises(self, fill_value): def test_take_fill(self): data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9 - arr = self.array_cls._simple_new(data, freq="D") + arr = self.array_cls(data, freq="D") result = arr.take([-1, 1], allow_fill=True, fill_value=None) assert result[0] is pd.NaT @@ -202,10 +202,8 @@ def test_take_fill_str(self, arr1d): with pytest.raises(TypeError, match=msg): arr1d.take([-1, 1], allow_fill=True, fill_value="foo") - def test_concat_same_type(self): - data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9 - - arr = self.array_cls._simple_new(data, freq="D") + def test_concat_same_type(self, arr1d): + arr = arr1d idx = self.index_cls(arr) idx = idx.insert(0, pd.NaT) arr = self.array_cls(idx) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index 97fe35bb7f2c9..5cf0134795b74 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -175,7 +175,7 @@ def test_get_unique_index(self, index_flat): vals = index[[0] * 5]._data vals[0] = pd.NaT elif needs_i8_conversion(index.dtype): - vals = index.asi8[[0] * 5] + vals = index._data._ndarray[[0] * 5] vals[0] = iNaT else: vals = index.values[[0] * 5] @@ -184,7 +184,7 @@ def test_get_unique_index(self, index_flat): vals_unique = vals[:2] if index.dtype.kind in ["m", "M"]: # i.e. needs_i8_conversion but not period_dtype, as above - vals = type(index._data)._simple_new(vals, dtype=index.dtype) + vals = type(index._data)(vals, dtype=index.dtype) vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype) idx_nan = index._shallow_copy(vals) idx_unique_nan = index._shallow_copy(vals_unique)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/40116
2021-02-27T19:55:51Z
2021-03-01T15:42:01Z
2021-03-01T15:42:01Z
2021-03-11T13:02:16Z
CLN/TST: normalize test_frame_apply
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py index 3532040a2fd7b..eb4aeea5e424a 100644 --- a/pandas/tests/apply/test_frame_apply.py +++ b/pandas/tests/apply/test_frame_apply.py @@ -38,17 +38,20 @@ def int_frame_const_col(): def test_apply(float_frame): with np.errstate(all="ignore"): # ufunc - applied = float_frame.apply(np.sqrt) - tm.assert_series_equal(np.sqrt(float_frame["A"]), applied["A"]) + result = np.sqrt(float_frame["A"]) + expected = float_frame.apply(np.sqrt)["A"] + tm.assert_series_equal(result, expected) # aggregator - applied = float_frame.apply(np.mean) - assert applied["A"] == np.mean(float_frame["A"]) + result = float_frame.apply(np.mean)["A"] + expected = np.mean(float_frame["A"]) + assert result == expected d = float_frame.index[0] - applied = float_frame.apply(np.mean, axis=1) - assert applied[d] == np.mean(float_frame.xs(d)) - assert applied.index is float_frame.index # want this + result = float_frame.apply(np.mean, axis=1) + expected = np.mean(float_frame.xs(d)) + assert result[d] == expected + assert result.index is float_frame.index # invalid axis df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"]) @@ -58,42 +61,42 @@ def test_apply(float_frame): # GH 9573 df = DataFrame({"c0": ["A", "A", "B", "B"], "c1": ["C", "C", "D", "D"]}) - df = df.apply(lambda ts: ts.astype("category")) + result = df.apply(lambda ts: ts.astype("category")) - assert df.shape == (4, 2) - assert isinstance(df["c0"].dtype, CategoricalDtype) - assert isinstance(df["c1"].dtype, CategoricalDtype) + assert result.shape == (4, 2) + assert isinstance(result["c0"].dtype, CategoricalDtype) + assert isinstance(result["c1"].dtype, CategoricalDtype) def test_apply_axis1_with_ea(): # GH#36785 - df = DataFrame({"A": [Timestamp("2013-01-01", tz="UTC")]}) - result = df.apply(lambda x: x, axis=1) - tm.assert_frame_equal(result, df) + expected = DataFrame({"A": [Timestamp("2013-01-01", tz="UTC")]}) + result = expected.apply(lambda x: x, axis=1) + tm.assert_frame_equal(result, expected) def test_apply_mixed_datetimelike(): # mixed datetimelike # GH 7778 - df = DataFrame( + expected = DataFrame( { "A": date_range("20130101", periods=3), "B": pd.to_timedelta(np.arange(3), unit="s"), } ) - result = df.apply(lambda x: x, axis=1) - tm.assert_frame_equal(result, df) + result = expected.apply(lambda x: x, axis=1) + tm.assert_frame_equal(result, expected) def test_apply_empty(float_frame): # empty empty_frame = DataFrame() - applied = empty_frame.apply(np.sqrt) - assert applied.empty + result = empty_frame.apply(np.sqrt) + assert result.empty - applied = empty_frame.apply(np.mean) - assert applied.empty + result = empty_frame.apply(np.mean) + assert result.empty no_rows = float_frame[:0] result = no_rows.apply(lambda x: x.mean()) @@ -108,7 +111,7 @@ def test_apply_empty(float_frame): # GH 2476 expected = DataFrame(index=["a"]) result = expected.apply(lambda x: x["a"], axis=1) - tm.assert_frame_equal(expected, result) + tm.assert_frame_equal(result, expected) def test_apply_with_reduce_empty(): @@ -285,14 +288,13 @@ def _assert_raw(x): float_frame.apply(_assert_raw, raw=True) float_frame.apply(_assert_raw, axis=1, raw=True) - result0 = float_frame.apply(np.mean, raw=True) - result1 = float_frame.apply(np.mean, axis=1, raw=True) - - expected0 = float_frame.apply(lambda x: x.values.mean()) - expected1 = float_frame.apply(lambda x: x.values.mean(), axis=1) + result = float_frame.apply(np.mean, raw=True) + expected = float_frame.apply(lambda x: 
x.values.mean()) + tm.assert_series_equal(result, expected) - tm.assert_series_equal(result0, expected0) - tm.assert_series_equal(result1, expected1) + result = float_frame.apply(np.mean, axis=1, raw=True) + expected = float_frame.apply(lambda x: x.values.mean(), axis=1) + tm.assert_series_equal(result, expected) # no reduction result = float_frame.apply(lambda x: x * 2, raw=True) @@ -306,8 +308,9 @@ def _assert_raw(x): def test_apply_axis1(float_frame): d = float_frame.index[0] - tapplied = float_frame.apply(np.mean, axis=1) - assert tapplied[d] == np.mean(float_frame.xs(d)) + result = float_frame.apply(np.mean, axis=1)[d] + expected = np.mean(float_frame.xs(d)) + assert result == expected def test_apply_mixed_dtype_corner(): @@ -401,27 +404,25 @@ def test_apply_reduce_to_dict(): # GH 25196 37544 data = DataFrame([[1, 2], [3, 4]], columns=["c0", "c1"], index=["i0", "i1"]) - result0 = data.apply(dict, axis=0) - expected0 = Series([{"i0": 1, "i1": 3}, {"i0": 2, "i1": 4}], index=data.columns) - tm.assert_series_equal(result0, expected0) + result = data.apply(dict, axis=0) + expected = Series([{"i0": 1, "i1": 3}, {"i0": 2, "i1": 4}], index=data.columns) + tm.assert_series_equal(result, expected) - result1 = data.apply(dict, axis=1) - expected1 = Series([{"c0": 1, "c1": 2}, {"c0": 3, "c1": 4}], index=data.index) - tm.assert_series_equal(result1, expected1) + result = data.apply(dict, axis=1) + expected = Series([{"c0": 1, "c1": 2}, {"c0": 3, "c1": 4}], index=data.index) + tm.assert_series_equal(result, expected) def test_apply_differently_indexed(): df = DataFrame(np.random.randn(20, 10)) - result0 = df.apply(Series.describe, axis=0) - expected0 = DataFrame({i: v.describe() for i, v in df.items()}, columns=df.columns) - tm.assert_frame_equal(result0, expected0) + result = df.apply(Series.describe, axis=0) + expected = DataFrame({i: v.describe() for i, v in df.items()}, columns=df.columns) + tm.assert_frame_equal(result, expected) - result1 = df.apply(Series.describe, axis=1) - expected1 = DataFrame( - {i: v.describe() for i, v in df.T.items()}, columns=df.index - ).T - tm.assert_frame_equal(result1, expected1) + result = df.apply(Series.describe, axis=1) + expected = DataFrame({i: v.describe() for i, v in df.T.items()}, columns=df.index).T + tm.assert_frame_equal(result, expected) def test_apply_modify_traceback(): @@ -525,7 +526,7 @@ def f(r): def test_apply_convert_objects(): - data = DataFrame( + expected = DataFrame( { "A": [ "foo", @@ -572,8 +573,8 @@ def test_apply_convert_objects(): } ) - result = data.apply(lambda x: x, axis=1) - tm.assert_frame_equal(result._convert(datetime=True), data) + result = expected.apply(lambda x: x, axis=1)._convert(datetime=True) + tm.assert_frame_equal(result, expected) def test_apply_attach_name(float_frame): @@ -635,17 +636,17 @@ def test_applymap(float_frame): float_frame.applymap(type) # GH 465: function returning tuples - result = float_frame.applymap(lambda x: (x, x)) - assert isinstance(result["A"][0], tuple) + result = float_frame.applymap(lambda x: (x, x))["A"][0] + assert isinstance(result, tuple) # GH 2909: object conversion to float in constructor? 
df = DataFrame(data=[1, "a"]) - result = df.applymap(lambda x: x) - assert result.dtypes[0] == object + result = df.applymap(lambda x: x).dtypes[0] + assert result == object df = DataFrame(data=[1.0, "a"]) - result = df.applymap(lambda x: x) - assert result.dtypes[0] == object + result = df.applymap(lambda x: x).dtypes[0] + assert result == object # GH 2786 df = DataFrame(np.random.random((3, 4))) @@ -672,10 +673,10 @@ def test_applymap(float_frame): DataFrame(index=list("ABC")), DataFrame({"A": [], "B": [], "C": []}), ] - for frame in empty_frames: + for expected in empty_frames: for func in [round, lambda x: x]: - result = frame.applymap(func) - tm.assert_frame_equal(result, frame) + result = expected.applymap(func) + tm.assert_frame_equal(result, expected) def test_applymap_na_ignore(float_frame): @@ -743,7 +744,8 @@ def test_frame_apply_dont_convert_datetime64(): df = df.applymap(lambda x: x + BDay()) df = df.applymap(lambda x: x + BDay()) - assert df.x1.dtype == "M8[ns]" + result = df.x1.dtype + assert result == "M8[ns]" def test_apply_non_numpy_dtype(): @@ -787,11 +789,13 @@ def apply_list(row): def test_apply_noreduction_tzaware_object(): # https://github.com/pandas-dev/pandas/issues/31505 - df = DataFrame({"foo": [Timestamp("2020", tz="UTC")]}, dtype="datetime64[ns, UTC]") - result = df.apply(lambda x: x) - tm.assert_frame_equal(result, df) - result = df.apply(lambda x: x.copy()) - tm.assert_frame_equal(result, df) + expected = DataFrame( + {"foo": [Timestamp("2020", tz="UTC")]}, dtype="datetime64[ns, UTC]" + ) + result = expected.apply(lambda x: x) + tm.assert_frame_equal(result, expected) + result = expected.apply(lambda x: x.copy()) + tm.assert_frame_equal(result, expected) def test_apply_function_runs_once(): @@ -885,11 +889,11 @@ def test_infer_row_shape(): # GH 17437 # if row shape is changing, infer it df = DataFrame(np.random.rand(10, 2)) - result = df.apply(np.fft.fft, axis=0) - assert result.shape == (10, 2) + result = df.apply(np.fft.fft, axis=0).shape + assert result == (10, 2) - result = df.apply(np.fft.rfft, axis=0) - assert result.shape == (6, 2) + result = df.apply(np.fft.rfft, axis=0).shape + assert result == (6, 2) def test_with_dictlike_columns():
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them Some minor normalizations before breaking up or parametrizing these tests
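As a standalone sketch of the convention this cleanup enforces (the DataFrame here is illustrative, not one of the fixtures from the test file): compute `result` first, build `expected` second, then compare with a `tm.assert_*` helper.

```python
# Minimal sketch of the result/expected pattern applied across
# test_frame_apply.py in this PR; the frame below is illustrative.
import numpy as np

import pandas._testing as tm
from pandas import DataFrame

df = DataFrame({"A": [1.0, 4.0, 9.0]})
result = df.apply(np.sqrt)                    # value under test
expected = DataFrame({"A": [1.0, 2.0, 3.0]})  # expectation built separately
tm.assert_frame_equal(result, expected)
```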
https://api.github.com/repos/pandas-dev/pandas/pulls/40113
2021-02-27T17:49:11Z
2021-03-03T03:06:36Z
2021-03-03T03:06:36Z
2021-03-03T03:11:13Z
CLN: remove never-hit branch from maybe_infer_to_datetimelike
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index e06c781cb27a4..4e04425436af4 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1562,7 +1562,7 @@ def infer_datetimelike_array(arr: ndarray[object]) -> str: seen_tz_aware = True if seen_tz_naive and seen_tz_aware: - return 'mixed' + return "mixed" elif util.is_datetime64_object(v): # np.datetime64 seen_datetime = True diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 0917cf1787d5b..b991e3c7102ae 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -698,7 +698,7 @@ def maybe_promote(dtype: np.dtype, fill_value=np.nan): return dtype, fill_value -def _ensure_dtype_type(value, dtype: DtypeObj): +def _ensure_dtype_type(value, dtype: np.dtype): """ Ensure that the given value is an instance of the given dtype. @@ -708,21 +708,17 @@ def _ensure_dtype_type(value, dtype: DtypeObj): Parameters ---------- value : object - dtype : np.dtype or ExtensionDtype + dtype : np.dtype Returns ------- object """ # Start with exceptions in which we do _not_ cast to numpy types - if is_extension_array_dtype(dtype): - return value - elif dtype == np.object_: - return value - elif isna(value): - # e.g. keep np.nan rather than try to cast to np.float32(np.nan) + if dtype == np.object_: return value + # Note: before we get here we have already excluded isna(value) return dtype.type(value) @@ -1139,7 +1135,7 @@ def astype_nansafe( if isinstance(dtype, ExtensionDtype): return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy) - elif not isinstance(dtype, np.dtype): + elif not isinstance(dtype, np.dtype): # pragma: no cover raise ValueError("dtype must be np.dtype or ExtensionDtype") if arr.dtype.kind in ["m", "M"] and ( @@ -1389,9 +1385,7 @@ def maybe_castable(dtype: np.dtype) -> bool: return dtype.name not in POSSIBLY_CAST_DTYPES -def maybe_infer_to_datetimelike( - value: Union[np.ndarray, List], convert_dates: bool = False -): +def maybe_infer_to_datetimelike(value: Union[np.ndarray, List]): """ we might have a array (or single object) that is datetime like, and no dtype is passed don't change the value unless we find a @@ -1403,13 +1397,10 @@ def maybe_infer_to_datetimelike( Parameters ---------- value : np.ndarray or list - convert_dates : bool, default False - if True try really hard to convert dates (such as datetime.date), other - leave inferred dtype 'date' alone """ if not isinstance(value, (np.ndarray, list)): - raise TypeError(type(value)) + raise TypeError(type(value)) # pragma: no cover v = np.array(value, copy=False) @@ -1466,9 +1457,7 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: inferred_type = lib.infer_datetimelike_array(ensure_object(v)) - if inferred_type == "date" and convert_dates: - value = try_datetime(v) - elif inferred_type == "datetime": + if inferred_type == "datetime": value = try_datetime(v) elif inferred_type == "timedelta": value = try_timedelta(v) diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 18f9ece3e3812..9bfe852083390 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -248,13 +248,13 @@ def _convert_and_box_cache( return _box_as_indexlike(result, utc=None, name=name) -def _return_parsed_timezone_results(result, timezones, tz, name): +def _return_parsed_timezone_results(result: np.ndarray, timezones, tz, name) -> Index: """ Return results from array_strptime if a %z or %Z directive was passed. 
Parameters ---------- - result : ndarray + result : ndarray[int64] int64 date representations of the dates timezones : ndarray pytz timezone objects @@ -287,7 +287,7 @@ def _convert_listlike_datetimes( infer_datetime_format: Optional[bool] = None, dayfirst: Optional[bool] = None, yearfirst: Optional[bool] = None, - exact: Optional[bool] = None, + exact: bool = True, ): """ Helper function for to_datetime. Performs the conversions of 1D listlike @@ -311,7 +311,7 @@ def _convert_listlike_datetimes( dayfirst parsing behavior from to_datetime yearfirst : boolean yearfirst parsing behavior from to_datetime - exact : boolean + exact : bool, default True exact format matching behavior from to_datetime Returns
Also adds some type annotations and "pragma: no cover" markers for branches that are already checked by mypy.
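For readers unfamiliar with the marker, a minimal sketch of the pattern (`ensure_np_dtype` is a hypothetical helper, not part of this diff):

```python
# Hypothetical helper illustrating the "pragma: no cover" pattern from this
# PR: mypy's annotations already guarantee callers pass np.dtype, so the
# defensive branch is unreachable in type-checked code and is excluded
# from coverage reporting.
import numpy as np

def ensure_np_dtype(dtype: np.dtype) -> np.dtype:
    if not isinstance(dtype, np.dtype):  # pragma: no cover
        raise TypeError("dtype must be np.dtype")
    return dtype
```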
https://api.github.com/repos/pandas-dev/pandas/pulls/40109
2021-02-27T16:00:55Z
2021-02-27T18:33:53Z
2021-02-27T18:33:53Z
2021-11-20T23:23:32Z
REF: Remove series_apply
diff --git a/pandas/core/apply.py b/pandas/core/apply.py index db4203e5158ef..4e7fbcb9efeb4 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -98,22 +98,6 @@ def frame_apply( ) -def series_apply( - obj: Series, - func: AggFuncType, - convert_dtype: bool = True, - args=None, - kwargs=None, -) -> SeriesApply: - return SeriesApply( - obj, - func, - convert_dtype, - args, - kwargs, - ) - - class Apply(metaclass=abc.ABCMeta): axis: int diff --git a/pandas/core/series.py b/pandas/core/series.py index 5fece72ccddca..e4b4826e4561c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -96,7 +96,7 @@ ops, ) from pandas.core.accessor import CachedAccessor -from pandas.core.apply import series_apply +from pandas.core.apply import SeriesApply from pandas.core.arrays import ExtensionArray from pandas.core.arrays.categorical import CategoricalAccessor from pandas.core.arrays.sparse import SparseAccessor @@ -4003,7 +4003,7 @@ def aggregate(self, func=None, axis=0, *args, **kwargs): if func is None: func = dict(kwargs.items()) - op = series_apply(self, func, args=args, kwargs=kwargs) + op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result @@ -4019,7 +4019,9 @@ def transform( ) -> FrameOrSeriesUnion: # Validate axis argument self._get_axis_number(axis) - result = series_apply(self, func=func, args=args, kwargs=kwargs).transform() + result = SeriesApply( + self, func=func, convert_dtype=True, args=args, kwargs=kwargs + ).transform() return result def apply( @@ -4131,7 +4133,7 @@ def apply( Helsinki 2.484907 dtype: float64 """ - return series_apply(self, func, convert_dtype, args, kwargs).apply() + return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self,
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
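A sketch of what call sites look like after the wrapper is removed (internal pandas API as of this PR, signature taken from the diff above; subject to change):

```python
# Constructing SeriesApply directly, as Series.apply now does internally.
from pandas import Series
from pandas.core.apply import SeriesApply

ser = Series([1, 2, 3])
op = SeriesApply(ser, func=lambda x: x + 1, convert_dtype=True, args=(), kwargs={})
result = op.apply()  # Series([2, 3, 4])
```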
https://api.github.com/repos/pandas-dev/pandas/pulls/40108
2021-02-27T14:35:28Z
2021-02-27T18:14:21Z
2021-02-27T18:14:21Z
2021-02-27T20:08:28Z
BLD: updated mypy version to 0.812
diff --git a/environment.yml b/environment.yml index 113780ed0264a..f54bf41c14c75 100644 --- a/environment.yml +++ b/environment.yml @@ -23,7 +23,7 @@ dependencies: - flake8 - flake8-comprehensions>=3.1.0 # used by flake8, linting of unnecessary comprehensions - isort>=5.2.1 # check that imports are in the right order - - mypy=0.800 + - mypy=0.812 - pre-commit>=2.9.2 - pycodestyle # used by flake8 - pyupgrade diff --git a/requirements-dev.txt b/requirements-dev.txt index be60c90aef8aa..37adbbb8e671f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -11,7 +11,7 @@ cpplint flake8 flake8-comprehensions>=3.1.0 isort>=5.2.1 -mypy==0.800 +mypy==0.812 pre-commit>=2.9.2 pycodestyle pyupgrade
- [x] closes #40106 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry After making the necessary changes: ```bash mypy pandas ``` Output: ``` Success: no issues found in 1237 source files ``` ![Screenshot from 2021-02-27 19-13-10](https://user-images.githubusercontent.com/31338369/109389161-5157d080-7931-11eb-9ead-82a0c8ca0d91.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/40107
2021-02-27T13:54:11Z
2021-02-27T18:31:25Z
2021-02-27T18:31:25Z
2021-02-27T18:31:47Z
REF: don't call libalgos.groupsort_indexer directly for simple argsort usage
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index e633d6b28a8c5..9b0c85abc7ebd 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -148,6 +148,7 @@ from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( ensure_key_mapped, + get_group_index_sorter, nargsort, ) from pandas.core.strings import StringMethods @@ -4098,9 +4099,7 @@ def _get_leaf_sorter(labels): return np.empty(0, dtype="int64") if len(labels) == 1: - lab = ensure_int64(labels[0]) - sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max()) - return sorter + return get_group_index_sorter(labels[0]) # find indexers of beginning of each set of # same-key labels w.r.t all but last level diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 543bf44e61216..271bb2ca8dd75 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -9,7 +9,6 @@ import numpy as np -import pandas._libs.algos as libalgos import pandas._libs.reshape as libreshape from pandas._libs.sparse import IntIndex from pandas._typing import Dtype @@ -42,6 +41,7 @@ decons_obs_group_ids, get_compressed_ids, get_group_index, + get_group_index_sorter, ) @@ -139,8 +139,7 @@ def _indexer_and_to_sort(self): comp_index, obs_ids = get_compressed_ids(to_sort, sizes) ngroups = len(obs_ids) - indexer = libalgos.groupsort_indexer(comp_index, ngroups)[0] - indexer = ensure_platform_int(indexer) + indexer = get_group_index_sorter(comp_index, ngroups) return indexer, to_sort diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 0195969de1f17..973fed2c1436f 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -567,7 +567,9 @@ def get_indexer_dict( # sorting levels...cleverly? -def get_group_index_sorter(group_index, ngroups: int): +def get_group_index_sorter( + group_index: np.ndarray, ngroups: int | None = None +) -> np.ndarray: """ algos.groupsort_indexer implements `counting sort` and it is at least O(ngroups), where @@ -581,6 +583,8 @@ def get_group_index_sorter(group_index, ngroups: int): groupby operations. e.g. consider: df.groupby(key)[col].transform('first') """ + if ngroups is None: + ngroups = 1 + group_index.max() count = len(group_index) alpha = 0.0 # taking complexities literally; there may be beta = 1.0 # some room for fine-tuning these parameters
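A sketch of the new default this PR adds (internal helper, subject to change): when `ngroups` is omitted, `get_group_index_sorter` infers it as `1 + group_index.max()`, matching what the removed call sites computed by hand.

```python
# ngroups may now be omitted; both calls below are equivalent.
import numpy as np

from pandas.core.sorting import get_group_index_sorter

codes = np.array([2, 0, 1, 0, 2], dtype=np.int64)
inferred = get_group_index_sorter(codes)             # ngroups inferred
explicit = get_group_index_sorter(codes, ngroups=3)  # 1 + codes.max()
assert (inferred == explicit).all()
```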
https://api.github.com/repos/pandas-dev/pandas/pulls/40105
2021-02-27T09:55:57Z
2021-02-27T18:28:05Z
2021-02-27T18:28:05Z
2021-02-28T08:41:31Z
BUG: Series([Timestamp, int], dtype=m8ns) dropping nanoseconds
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 605e2135edc9f..337e131f0a2c9 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -376,7 +376,8 @@ cpdef array_to_datetime( bint dayfirst=False, bint yearfirst=False, bint utc=False, - bint require_iso8601=False + bint require_iso8601=False, + bint allow_mixed=False, ): """ Converts a 1D array of date-like values to a numpy array of either: @@ -405,6 +406,8 @@ cpdef array_to_datetime( indicator whether the dates should be UTC require_iso8601 : bool, default False indicator whether the datetime string should be iso8601 + allow_mixed : bool, default False + Whether to allow mixed datetimes and integers. Returns ------- @@ -597,7 +600,7 @@ cpdef array_to_datetime( return ignore_errors_out_of_bounds_fallback(values), tz_out except TypeError: - return array_to_datetime_object(values, errors, dayfirst, yearfirst) + return _array_to_datetime_object(values, errors, dayfirst, yearfirst) if seen_datetime and seen_integer: # we have mixed datetimes & integers @@ -609,10 +612,12 @@ cpdef array_to_datetime( val = values[i] if is_integer_object(val) or is_float_object(val): result[i] = NPY_NAT + elif allow_mixed: + pass elif is_raise: raise ValueError("mixed datetimes and integers in passed array") else: - return array_to_datetime_object(values, errors, dayfirst, yearfirst) + return _array_to_datetime_object(values, errors, dayfirst, yearfirst) if seen_datetime_offset and not utc_convert: # GH#17697 @@ -623,7 +628,7 @@ cpdef array_to_datetime( # (with individual dateutil.tzoffsets) are returned is_same_offsets = len(out_tzoffset_vals) == 1 if not is_same_offsets: - return array_to_datetime_object(values, errors, dayfirst, yearfirst) + return _array_to_datetime_object(values, errors, dayfirst, yearfirst) else: tz_offset = out_tzoffset_vals.pop() tz_out = pytz.FixedOffset(tz_offset / 60.) 
@@ -670,7 +675,7 @@ cdef ignore_errors_out_of_bounds_fallback(ndarray[object] values): @cython.wraparound(False) @cython.boundscheck(False) -cdef array_to_datetime_object( +cdef _array_to_datetime_object( ndarray[object] values, str errors, bint dayfirst=False, diff --git a/pandas/core/construction.py b/pandas/core/construction.py index f5f49e0e5fc20..d0fe5b5ab0c19 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -669,6 +669,8 @@ def _try_cast( subarr = arr else: subarr = maybe_cast_to_datetime(arr, dtype) + if dtype is not None and dtype.kind == "M": + return subarr if not isinstance(subarr, ABCExtensionArray): subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 0917cf1787d5b..a6b4befb8ea5c 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1583,10 +1583,24 @@ def maybe_cast_to_datetime( value = to_timedelta(value, errors="raise")._values except OutOfBoundsDatetime: raise - except ValueError: + except ValueError as err: # TODO(GH#40048): only catch dateutil's ParserError # once we can reliably import it in all supported versions - pass + if "mixed datetimes and integers in passed array" in str(err): + # We need to catch this in array_to_datetime, otherwise + # we end up going through numpy which will lose nanoseconds + # from Timestamps + try: + i8vals, tz = tslib.array_to_datetime( + value, allow_mixed=True + ) + except ValueError: + pass + else: + from pandas.core.arrays import DatetimeArray + + dta = DatetimeArray(i8vals).tz_localize(tz) + value = dta # coerce datetimelike to object elif is_datetime64_dtype( diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 20536c7a94695..fa6c9a7a5b7b7 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -232,15 +232,26 @@ def ndarray_to_mgr(values, index, columns, dtype: Optional[DtypeObj], copy: bool values = _prep_ndarray(values, copy=copy) if dtype is not None and not is_dtype_equal(values.dtype, dtype): - try: - values = construct_1d_ndarray_preserving_na( - values.ravel(), dtype=dtype, copy=False - ).reshape(values.shape) - except Exception as orig: - # e.g. ValueError when trying to cast object dtype to float64 - raise ValueError( - f"failed to cast to '{dtype}' (Exception was: {orig})" - ) from orig + shape = values.shape + flat = values.ravel() + + if not is_integer_dtype(dtype): + # TODO: skipping integer_dtype is needed to keep the tests passing, + # not clear it is correct + # Note: we really only need _try_cast, but keeping to exposed funcs + values = sanitize_array( + flat, None, dtype=dtype, copy=copy, raise_cast_failure=True + ) + else: + try: + values = construct_1d_ndarray_preserving_na( + flat, dtype=dtype, copy=False + ) + except Exception as err: + # e.g. 
ValueError when trying to cast object dtype to float64 + msg = f"failed to cast to '{dtype}' (Exception was: {err})" + raise ValueError(msg) from err + values = values.reshape(shape) # _prep_ndarray ensures that values.ndim == 2 at this point index, columns = _get_axes( diff --git a/pandas/tests/base/test_constructors.py b/pandas/tests/base/test_constructors.py index b042e29986c80..ceb882ff9c963 100644 --- a/pandas/tests/base/test_constructors.py +++ b/pandas/tests/base/test_constructors.py @@ -124,9 +124,7 @@ class TestConstruction: [ Series, lambda x, **kwargs: DataFrame({"a": x}, **kwargs)["a"], - pytest.param( - lambda x, **kwargs: DataFrame(x, **kwargs)[0], marks=pytest.mark.xfail - ), + lambda x, **kwargs: DataFrame(x, **kwargs)[0], Index, ], ) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index afc7ccb516c7f..5fb805ecd77f6 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1100,7 +1100,8 @@ def test_constructor_more(self, float_frame): # can't cast mat = np.array(["foo", "bar"], dtype=object).reshape(2, 1) - with pytest.raises(ValueError, match="cast"): + msg = "could not convert string to float: 'foo'" + with pytest.raises(ValueError, match=msg): DataFrame(mat, index=[0, 1], columns=[0], dtype=float) dm = DataFrame(DataFrame(float_frame._series)) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index c2d0bf5975059..63c9b4d899622 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -760,6 +760,14 @@ def test_constructor_datelike_coercion(self): result = df.loc["216"] assert result.dtype == object + def test_constructor_mixed_int_and_timestamp(self, frame_or_series): + # specifically Timestamp with nanos, not datetimes + objs = [Timestamp(9), 10, NaT.value] + result = frame_or_series(objs, dtype="M8[ns]") + + expected = frame_or_series([Timestamp(9), Timestamp(10), NaT]) + tm.assert_equal(result, expected) + def test_constructor_datetimes_with_nulls(self): # gh-15869 for arr in [
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
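Condensed from the regression test added above (`test_constructor_mixed_int_and_timestamp`), showing the fixed behavior:

```python
# Nanosecond-precision Timestamps mixed with integers now survive
# construction with dtype="M8[ns]" instead of losing their nanoseconds.
import pandas._testing as tm
from pandas import NaT, Series, Timestamp

result = Series([Timestamp(9), 10, NaT.value], dtype="M8[ns]")
expected = Series([Timestamp(9), Timestamp(10), NaT])
tm.assert_series_equal(result, expected)
```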
https://api.github.com/repos/pandas-dev/pandas/pulls/40100
2021-02-27T04:06:36Z
2021-02-27T19:15:57Z
2021-02-27T19:15:57Z
2021-02-27T19:31:26Z
REF: unify casting logic in Categorical.__init__
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index bdc64d5f14a27..9db21800d2499 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -48,8 +48,6 @@ from pandas.core.dtypes.cast import ( coerce_indexer_dtype, maybe_cast_to_extension_array, - maybe_infer_to_datetimelike, - sanitize_to_nanoseconds, ) from pandas.core.dtypes.common import ( ensure_int64, @@ -396,24 +394,27 @@ def __init__( if dtype.categories is None: dtype = CategoricalDtype(values.categories, dtype.ordered) elif not isinstance(values, (ABCIndex, ABCSeries, ExtensionArray)): - # sanitize_array coerces np.nan to a string under certain versions - # of numpy - if not isinstance(values, (np.ndarray, list)): - # convert e.g. range, tuple to allow for stronger typing - # of maybe_infer_to_datetimelike - values = list(values) - values = maybe_infer_to_datetimelike(values) - if isinstance(values, np.ndarray): - values = sanitize_to_nanoseconds(values) - elif not isinstance(values, ExtensionArray): - values = com.convert_to_list_like(values) - + values = com.convert_to_list_like(values) + if isinstance(values, list) and len(values) == 0: # By convention, empty lists result in object dtype: - sanitize_dtype = np.dtype("O") if len(values) == 0 else None - null_mask = isna(values) + values = np.array([], dtype=object) + elif isinstance(values, np.ndarray): + if values.ndim > 1: + # preempt sanitize_array from raising ValueError + raise NotImplementedError( + "> 1 ndim Categorical are not supported at this time" + ) + values = sanitize_array(values, None) + else: + # i.e. must be a list + arr = sanitize_array(values, None) + null_mask = isna(arr) if null_mask.any(): - values = [values[idx] for idx in np.where(~null_mask)[0]] - values = sanitize_array(values, None, dtype=sanitize_dtype) + # We remove null values here, then below will re-insert + # them, grep "full_codes" + arr = [values[idx] for idx in np.where(~null_mask)[0]] + arr = sanitize_array(arr, None) + values = arr if dtype.categories is None: try: diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index 7c144c390a128..93ba16c5fda22 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -42,6 +42,12 @@ def test_categorical_scalar_deprecated(self): with tm.assert_produces_warning(FutureWarning): Categorical("A", categories=["A", "B"]) + def test_categorical_1d_only(self): + # ndim > 1 + msg = "> 1 ndim Categorical are not supported at this time" + with pytest.raises(NotImplementedError, match=msg): + Categorical(np.array([list("abcd")])) + def test_validate_ordered(self): # see gh-14058 exp_msg = "'ordered' must either be 'True' or 'False'" diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index afc7ccb516c7f..280d1da4070d9 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2062,13 +2062,6 @@ def test_construct_from_listlikes_mismatched_lengths(self): with pytest.raises(ValueError, match=msg): DataFrame([Categorical(list("abc")), Categorical(list("abdefg"))]) - def test_categorical_1d_only(self): - # TODO: belongs in Categorical tests - # ndim > 1 - msg = "> 1 ndim Categorical are not supported at this time" - with pytest.raises(NotImplementedError, match=msg): - Categorical(np.array([list("abcd")])) - def test_constructor_categorical_series(self): items = [1, 2, 3, 1]
Big picture, trying to call maybe_infer_to_datetimelike in fewer places so we can simplify scattered datetimelike casting.
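From the test this PR relocates into the Categorical constructor tests, the user-visible edge of the refactor:

```python
# Categorical.__init__ now raises NotImplementedError itself for
# >1-dimensional input, preempting the ValueError from sanitize_array.
import numpy as np
import pytest

from pandas import Categorical

with pytest.raises(NotImplementedError, match="> 1 ndim Categorical"):
    Categorical(np.array([list("abcd")]))  # shape (1, 4)
```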
https://api.github.com/repos/pandas-dev/pandas/pulls/40097
2021-02-27T02:32:48Z
2021-02-27T18:24:20Z
2021-02-27T18:24:20Z
2021-11-20T23:23:32Z
BUG: df.loc setitem-with-expansion with duplicate index
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 41db72612a66b..51ea52b591096 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -442,7 +442,7 @@ Indexing - Bug in :meth:`RangeIndex.append` where a single object of length 1 was concatenated incorrectly (:issue:`39401`) - Bug in setting ``numpy.timedelta64`` values into an object-dtype :class:`Series` using a boolean indexer (:issue:`39488`) - Bug in setting numeric values into a into a boolean-dtypes :class:`Series` using ``at`` or ``iat`` failing to cast to object-dtype (:issue:`39582`) -- +- Bug in :meth:`DataFrame.loc.__setitem__` when setting-with-expansion incorrectly raising when the index in the expanding axis contains duplicates (:issue:`40096`) Missing ^^^^^^^ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 47adc13f49499..f7e37b10ef74c 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -614,13 +614,10 @@ def delete(self: _T, loc) -> _T: @doc(NDArrayBackedExtensionIndex.insert) def insert(self, loc: int, item): - try: - result = super().insert(loc, item) - except (ValueError, TypeError): - # i.e. self._data._validate_scalar raised - return self.astype(object).insert(loc, item) - - result._data._freq = self._get_insert_freq(loc, item) + result = super().insert(loc, item) + if isinstance(result, type(self)): + # i.e. parent class method did not cast + result._data._freq = self._get_insert_freq(loc, item) return result # -------------------------------------------------------------------- diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index f1418869713d6..ac70200c0c404 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -16,6 +16,10 @@ doc, ) +from pandas.core.dtypes.cast import ( + find_common_type, + infer_dtype_from, +) from pandas.core.dtypes.common import ( is_dtype_equal, is_object_dtype, @@ -370,11 +374,19 @@ def insert(self: _T, loc: int, item) -> _T: ValueError if the item is not valid for this dtype. """ arr = self._data - code = arr._validate_scalar(item) - - new_vals = np.concatenate((arr._ndarray[:loc], [code], arr._ndarray[loc:])) - new_arr = arr._from_backing_data(new_vals) - return type(self)._simple_new(new_arr, name=self.name) + try: + code = arr._validate_scalar(item) + except (ValueError, TypeError): + # e.g. trying to insert an integer into a DatetimeIndex + # We cannot keep the same dtype, so cast to the (often object) + # minimal shared dtype before doing the insert. + dtype, _ = infer_dtype_from(item, pandas_dtype=True) + dtype = find_common_type([self.dtype, dtype]) + return self.astype(dtype).insert(loc, item) + else: + new_vals = np.concatenate((arr._ndarray[:loc], [code], arr._ndarray[loc:])) + new_arr = arr._from_backing_data(new_vals) + return type(self)._simple_new(new_arr, name=self.name) def putmask(self, mask, value): res_values = self._data.copy() diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 1889821c79756..88b92c7b304ae 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3719,12 +3719,7 @@ def insert(self, loc: int, item) -> MultiIndex: # must insert at end otherwise you have to recompute all the # other codes lev_loc = len(level) - try: - level = level.insert(lev_loc, k) - except TypeError: - # TODO: Should this be done inside insert? - # TODO: smarter casting rules? 
- level = level.astype(object).insert(lev_loc, k) + level = level.insert(lev_loc, k) else: lev_loc = level.get_loc(k) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index cfe16627d5c64..411480a1b1201 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1641,7 +1641,17 @@ def _setitem_with_indexer(self, indexer, value, name="iloc"): # so the object is the same index = self.obj._get_axis(i) labels = index.insert(len(index), key) - self.obj._mgr = self.obj.reindex(labels, axis=i)._mgr + + # We are expanding the Series/DataFrame values to match + # the length of thenew index `labels`. GH#40096 ensure + # this is valid even if the index has duplicates. + taker = np.arange(len(index) + 1, dtype=np.intp) + taker[-1] = -1 + reindexers = {i: (labels, taker)} + new_obj = self.obj._reindex_with_indexers( + reindexers, allow_dups=True + ) + self.obj._mgr = new_obj._mgr self.obj._maybe_update_cacher(clear=True) self.obj._is_copy = None diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py index 222ca692091ab..d3c9b02b3ba23 100644 --- a/pandas/tests/indexes/categorical/test_category.py +++ b/pandas/tests/indexes/categorical/test_category.py @@ -50,10 +50,10 @@ def test_insert(self): expected = CategoricalIndex(["a"], categories=categories) tm.assert_index_equal(result, expected, exact=True) - # invalid - msg = "'fill_value=d' is not present in this Categorical's categories" - with pytest.raises(TypeError, match=msg): - ci.insert(0, "d") + # invalid -> cast to object + expected = ci.astype(object).insert(0, "d") + result = ci.insert(0, "d") + tm.assert_index_equal(result, expected, exact=True) # GH 18295 (test missing) expected = CategoricalIndex(["a", np.nan, "a", "b", "c", "b"]) @@ -63,9 +63,9 @@ def test_insert(self): def test_insert_na_mismatched_dtype(self): ci = CategoricalIndex([0, 1, 1]) - msg = "'fill_value=NaT' is not present in this Categorical's categories" - with pytest.raises(TypeError, match=msg): - ci.insert(0, pd.NaT) + result = ci.insert(0, pd.NaT) + expected = Index([pd.NaT, 0, 1, 1], dtype=object) + tm.assert_index_equal(result, expected) def test_delete(self): diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 68ae1a0dd6f3d..f104587ebbded 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -37,20 +37,24 @@ def setup_method(self, method): ) def test_loc_scalar(self): + dtype = CDT(list("cab")) result = self.df.loc["a"] - expected = DataFrame( - {"A": [0, 1, 5], "B": (Series(list("aaa")).astype(CDT(list("cab"))))} - ).set_index("B") + bidx = Series(list("aaa"), name="B").astype(dtype) + assert bidx.dtype == dtype + + expected = DataFrame({"A": [0, 1, 5]}, index=Index(bidx)) tm.assert_frame_equal(result, expected) df = self.df.copy() df.loc["a"] = 20 + bidx2 = Series(list("aabbca"), name="B").astype(dtype) + assert bidx2.dtype == dtype expected = DataFrame( { "A": [20, 20, 2, 3, 4, 20], - "B": (Series(list("aabbca")).astype(CDT(list("cab")))), - } - ).set_index("B") + }, + index=Index(bidx2), + ) tm.assert_frame_equal(df, expected) # value not in the categories @@ -64,14 +68,38 @@ def test_loc_scalar(self): df2.loc["d"] = 10 tm.assert_frame_equal(df2, expected) - msg = "'fill_value=d' is not present in this Categorical's categories" - with pytest.raises(TypeError, match=msg): - df.loc["d", "A"] = 10 - with pytest.raises(TypeError, match=msg): - df.loc["d", "C"] = 10 + def 
test_loc_setitem_with_expansion_non_category(self): + # Setting-with-expansion with a new key "d" that is not among caegories + df = self.df + df.loc["a"] = 20 + + # Setting a new row on an existing column + df3 = df.copy() + df3.loc["d", "A"] = 10 + bidx3 = Index(list("aabbcad"), name="B") + expected3 = DataFrame( + { + "A": [20, 20, 2, 3, 4, 20, 10.0], + }, + index=Index(bidx3), + ) + tm.assert_frame_equal(df3, expected3) + + # Settig a new row _and_ new column + df4 = df.copy() + df4.loc["d", "C"] = 10 + expected3 = DataFrame( + { + "A": [20, 20, 2, 3, 4, 20, np.nan], + "C": [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 10], + }, + index=Index(bidx3), + ) + tm.assert_frame_equal(df4, expected3) + def test_loc_getitem_scalar_non_category(self): with pytest.raises(KeyError, match="^1$"): - df.loc[1] + self.df.loc[1] def test_slicing(self): cat = Series(Categorical([1, 2, 3, 4])) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 7c9b11650f05a..5b6c042a11332 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -23,6 +23,7 @@ DatetimeIndex, Index, IndexSlice, + IntervalIndex, MultiIndex, Period, Series, @@ -1657,6 +1658,55 @@ def test_loc_setitem_with_expansion_inf_upcast_empty(self): expected = pd.Float64Index([0, 1, np.inf]) tm.assert_index_equal(result, expected) + @pytest.mark.filterwarnings("ignore:indexing past lexsort depth") + def test_loc_setitem_with_expansion_nonunique_index(self, index, request): + # GH#40096 + if not len(index): + return + if isinstance(index, IntervalIndex): + mark = pytest.mark.xfail(reason="IntervalIndex raises") + request.node.add_marker(mark) + + index = index.repeat(2) # ensure non-unique + N = len(index) + arr = np.arange(N).astype(np.int64) + + orig = DataFrame(arr, index=index, columns=[0]) + + # key that will requiring object-dtype casting in the index + key = "kapow" + assert key not in index # otherwise test is invalid + # TODO: using a tuple key breaks here in many cases + + exp_index = index.insert(len(index), key) + if isinstance(index, MultiIndex): + assert exp_index[-1][0] == key + else: + assert exp_index[-1] == key + exp_data = np.arange(N + 1).astype(np.float64) + expected = DataFrame(exp_data, index=exp_index, columns=[0]) + + # Add new row, but no new columns + df = orig.copy() + df.loc[key, 0] = N + tm.assert_frame_equal(df, expected) + + # add new row on a Series + ser = orig.copy()[0] + ser.loc[key] = N + # the series machinery lets us preserve int dtype instead of float + expected = expected[0].astype(np.int64) + tm.assert_series_equal(ser, expected) + + # add new row and new column + df = orig.copy() + df.loc[key, 1] = N + expected = DataFrame( + {0: list(arr) + [np.nan], 1: [np.nan] * N + [float(N)]}, + index=exp_index, + ) + tm.assert_frame_equal(df, expected) + class TestLocCallable: def test_frame_loc_getitem_callable(self):
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Also changes CategoricalIndex.insert: values that are not valid categories now cast the result to object dtype instead of raising.
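Condensed from `test_loc_setitem_with_expansion_nonunique_index` above (a sketch; `"kapow"` is just a key guaranteed not to be in the index):

```python
# Setitem-with-expansion on a non-unique index no longer raises; the new
# key is appended as an extra row.
import numpy as np

from pandas import DataFrame, Index

idx = Index(["a", "a", "b", "b"])  # deliberately non-unique
df = DataFrame(np.arange(4, dtype=np.int64), index=idx, columns=[0])
df.loc["kapow", 0] = 4  # previously raised because of the duplicates
assert df.index[-1] == "kapow"
```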
https://api.github.com/repos/pandas-dev/pandas/pulls/40096
2021-02-27T02:25:44Z
2021-03-02T21:14:35Z
2021-03-02T21:14:35Z
2021-03-02T22:16:21Z
TST: Fix pandas.io.stata imports in test_stata.py
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index e63a32aff9546..05a6b3c360c61 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -31,6 +31,7 @@ StataMissingValue, StataReader, StataWriterUTF8, + ValueLabelTypeMismatch, read_stata, ) @@ -435,7 +436,7 @@ def test_read_write_dta11(self): formatted = formatted.astype(np.int32) with tm.ensure_clean() as path: - with tm.assert_produces_warning(io.stata.InvalidColumnName): + with tm.assert_produces_warning(InvalidColumnName): original.to_stata(path, None) written_and_read_again = self.read_dta(path) @@ -1022,7 +1023,7 @@ def test_categorical_warnings_and_errors(self): [original[col].astype("category") for col in original], axis=1 ) - with tm.assert_produces_warning(io.stata.ValueLabelTypeMismatch): + with tm.assert_produces_warning(ValueLabelTypeMismatch): original.to_stata(path) # should get a warning for mixed content @@ -1652,7 +1653,7 @@ def test_convert_strl_name_swap(self): ) original.index.name = "index" - with tm.assert_produces_warning(io.stata.InvalidColumnName): + with tm.assert_produces_warning(InvalidColumnName): with tm.ensure_clean() as path: original.to_stata(path, convert_strl=["long", 1], version=117) reread = self.read_dta(path)
- [x] Fixes test failures introduced in #40069 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
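The import style the fix standardizes on, mirroring the import block at the top of the diff:

```python
# Warning/exception classes are imported straight from pandas.io.stata
# rather than reached through the io module attribute at each call site.
from pandas.io.stata import (
    InvalidColumnName,
    StataMissingValue,
    StataReader,
    StataWriterUTF8,
    ValueLabelTypeMismatch,
    read_stata,
)
```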
https://api.github.com/repos/pandas-dev/pandas/pulls/40094
2021-02-26T22:33:39Z
2021-02-26T23:53:33Z
2021-02-26T23:53:33Z
2021-02-27T09:22:30Z
TST/REF: Consolidate tests in apply.test_invalid_arg
diff --git a/pandas/tests/apply/conftest.py b/pandas/tests/apply/conftest.py new file mode 100644 index 0000000000000..b68c6235cb0b8 --- /dev/null +++ b/pandas/tests/apply/conftest.py @@ -0,0 +1,18 @@ +import numpy as np +import pytest + +from pandas import DataFrame + + +@pytest.fixture +def int_frame_const_col(): + """ + Fixture for DataFrame of ints which are constant per column + + Columns are ['A', 'B', 'C'], with values (per column): [1, 2, 3] + """ + df = DataFrame( + np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1, + columns=["A", "B", "C"], + ) + return df diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py index 3532040a2fd7b..9406a15d9dc5c 100644 --- a/pandas/tests/apply/test_frame_apply.py +++ b/pandas/tests/apply/test_frame_apply.py @@ -14,27 +14,11 @@ Series, Timestamp, date_range, - notna, ) import pandas._testing as tm -from pandas.core.base import SpecificationError from pandas.tests.frame.common import zip_frames -@pytest.fixture -def int_frame_const_col(): - """ - Fixture for DataFrame of ints which are constant per column - - Columns are ['A', 'B', 'C'], with values (per column): [1, 2, 3] - """ - df = DataFrame( - np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1, - columns=["A", "B", "C"], - ) - return df - - def test_apply(float_frame): with np.errstate(all="ignore"): # ufunc @@ -192,17 +176,6 @@ def test_apply_with_string_funcs(request, float_frame, func, args, kwds, how): tm.assert_series_equal(result, expected) -@pytest.mark.parametrize( - "how, args", [("pct_change", ()), ("nsmallest", (1, ["a", "b"])), ("tail", 1)] -) -def test_apply_str_axis_1_raises(how, args): - # GH 39211 - some ops don't support axis=1 - df = DataFrame({"a": [1, 2], "b": [3, 4]}) - msg = f"Operation {how} does not support axis=1" - with pytest.raises(ValueError, match=msg): - df.apply(how, axis=1, args=args) - - def test_apply_broadcast(float_frame, int_frame_const_col): # scalars @@ -256,27 +229,6 @@ def test_apply_broadcast(float_frame, int_frame_const_col): tm.assert_frame_equal(result, expected) -def test_apply_broadcast_error(int_frame_const_col): - df = int_frame_const_col - - # > 1 ndim - msg = "too many dims to broadcast" - with pytest.raises(ValueError, match=msg): - df.apply( - lambda x: np.array([1, 2]).reshape(-1, 2), - axis=1, - result_type="broadcast", - ) - - # cannot broadcast - msg = "cannot broadcast result" - with pytest.raises(ValueError, match=msg): - df.apply(lambda x: [1, 2], axis=1, result_type="broadcast") - - with pytest.raises(ValueError, match=msg): - df.apply(lambda x: Series([1, 2]), axis=1, result_type="broadcast") - - def test_apply_raw(float_frame, mixed_type_frame): def _assert_raw(x): assert isinstance(x, np.ndarray) @@ -424,71 +376,6 @@ def test_apply_differently_indexed(): tm.assert_frame_equal(result1, expected1) -def test_apply_modify_traceback(): - data = DataFrame( - { - "A": [ - "foo", - "foo", - "foo", - "foo", - "bar", - "bar", - "bar", - "bar", - "foo", - "foo", - "foo", - ], - "B": [ - "one", - "one", - "one", - "two", - "one", - "one", - "one", - "two", - "two", - "two", - "one", - ], - "C": [ - "dull", - "dull", - "shiny", - "dull", - "dull", - "shiny", - "shiny", - "dull", - "shiny", - "shiny", - "shiny", - ], - "D": np.random.randn(11), - "E": np.random.randn(11), - "F": np.random.randn(11), - } - ) - - data.loc[4, "C"] = np.nan - - def transform(row): - if row["C"].startswith("shin") and row["A"] == "foo": - row["D"] = 7 - return row - - def transform2(row): - if notna(row["C"]) 
and row["C"].startswith("shin") and row["A"] == "foo": - row["D"] = 7 - return row - - msg = "'float' object has no attribute 'startswith'" - with pytest.raises(AttributeError, match=msg): - data.apply(transform, axis=1) - - def test_apply_bug(): # GH 6125 @@ -1101,19 +988,6 @@ def test_result_type(int_frame_const_col): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("result_type", ["foo", 1]) -def test_result_type_error(result_type, int_frame_const_col): - # allowed result_type - df = int_frame_const_col - - msg = ( - "invalid value for result_type, must be one of " - "{None, 'reduce', 'broadcast', 'expand'}" - ) - with pytest.raises(ValueError, match=msg): - df.apply(lambda x: [1, 2, 3], axis=1, result_type=result_type) - - @pytest.mark.parametrize( "box", [lambda x: list(x), lambda x: tuple(x), lambda x: np.array(x, dtype="int64")], @@ -1170,20 +1044,6 @@ def test_agg_transform(axis, float_frame): tm.assert_frame_equal(result, expected) -def test_transform_and_agg_err(axis, float_frame): - # cannot both transform and agg - msg = "cannot combine transform and aggregation operations" - with pytest.raises(ValueError, match=msg): - with np.errstate(all="ignore"): - float_frame.agg(["max", "sqrt"], axis=axis) - - df = DataFrame({"A": range(5), "B": 5}) - - def f(): - with np.errstate(all="ignore"): - df.agg({"A": ["abs", "sum"], "B": ["mean", "max"]}, axis=axis) - - def test_demo(): # demonstration tests df = DataFrame({"A": range(5), "B": 5}) @@ -1254,16 +1114,6 @@ def test_agg_multiple_mixed_no_warning(): tm.assert_frame_equal(result, expected) -def test_agg_dict_nested_renaming_depr(): - - df = DataFrame({"A": range(5), "B": 5}) - - # nested renaming - msg = r"nested renamer is not supported" - with pytest.raises(SpecificationError, match=msg): - df.agg({"A": {"foo": "min"}, "B": {"bar": "max"}}) - - def test_agg_reduce(axis, float_frame): other_axis = 1 if axis in {0, "index"} else 0 name1, name2 = float_frame.axes[other_axis].unique()[:2].sort_values() @@ -1516,19 +1366,6 @@ def test_agg_cython_table_transform(df, func, expected, axis): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize( - "df, func, expected", - tm.get_cython_table_params( - DataFrame([["a", "b"], ["b", "a"]]), [["cumprod", TypeError]] - ), -) -def test_agg_cython_table_raises(df, func, expected, axis): - # GH 21224 - msg = "can't multiply sequence by non-int of type 'str'" - with pytest.raises(expected, match=msg): - df.agg(func, axis=axis) - - @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.parametrize( "args, kwargs", diff --git a/pandas/tests/apply/test_frame_apply_relabeling.py b/pandas/tests/apply/test_frame_apply_relabeling.py index 732aff24428ac..2da4a78991f5a 100644 --- a/pandas/tests/apply/test_frame_apply_relabeling.py +++ b/pandas/tests/apply/test_frame_apply_relabeling.py @@ -1,5 +1,4 @@ import numpy as np -import pytest import pandas as pd import pandas._testing as tm @@ -96,12 +95,3 @@ def test_agg_namedtuple(): index=pd.Index(["foo", "bar", "cat"]), ) tm.assert_frame_equal(result, expected) - - -def test_agg_raises(): - # GH 26513 - df = pd.DataFrame({"A": [0, 1], "B": [1, 2]}) - msg = "Must provide" - - with pytest.raises(TypeError, match=msg): - df.agg() diff --git a/pandas/tests/apply/test_frame_transform.py b/pandas/tests/apply/test_frame_transform.py index 47bc69656a597..5dc828dea9e35 100644 --- a/pandas/tests/apply/test_frame_transform.py +++ b/pandas/tests/apply/test_frame_transform.py @@ -1,5 +1,4 @@ import operator -import re import numpy as np import 
pytest @@ -10,7 +9,6 @@ Series, ) import pandas._testing as tm -from pandas.core.base import SpecificationError from pandas.core.groupby.base import transformation_kernels from pandas.tests.frame.common import zip_frames @@ -159,47 +157,6 @@ def test_transform_method_name(method): tm.assert_frame_equal(result, expected) -def test_transform_and_agg_err(axis, float_frame): - # GH 35964 - # cannot both transform and agg - msg = "Function did not transform" - with pytest.raises(ValueError, match=msg): - float_frame.transform(["max", "min"], axis=axis) - - msg = "Function did not transform" - with pytest.raises(ValueError, match=msg): - float_frame.transform(["max", "sqrt"], axis=axis) - - -def test_agg_dict_nested_renaming_depr(): - df = DataFrame({"A": range(5), "B": 5}) - - # nested renaming - msg = r"nested renamer is not supported" - with pytest.raises(SpecificationError, match=msg): - # mypy identifies the argument as an invalid type - df.transform({"A": {"foo": "min"}, "B": {"bar": "max"}}) - - -def test_transform_reducer_raises(all_reductions, frame_or_series): - # GH 35964 - op = all_reductions - - obj = DataFrame({"A": [1, 2, 3]}) - if frame_or_series is not DataFrame: - obj = obj["A"] - - msg = "Function did not transform" - with pytest.raises(ValueError, match=msg): - obj.transform(op) - with pytest.raises(ValueError, match=msg): - obj.transform([op]) - with pytest.raises(ValueError, match=msg): - obj.transform({"A": op}) - with pytest.raises(ValueError, match=msg): - obj.transform({"A": [op]}) - - wont_fail = ["ffill", "bfill", "fillna", "pad", "backfill", "shift"] frame_kernels_raise = [x for x in frame_kernels if x not in wont_fail] @@ -267,30 +224,6 @@ def f(x, a, b, c): frame_or_series([1]).transform(f, 0, *expected_args, **expected_kwargs) -def test_transform_missing_columns(axis): - # GH#35964 - df = DataFrame({"A": [1, 2], "B": [3, 4]}) - match = re.escape("Column(s) ['C'] do not exist") - with pytest.raises(KeyError, match=match): - df.transform({"C": "cumsum"}) - - -def test_transform_none_to_type(): - # GH#34377 - df = DataFrame({"a": [None]}) - msg = "Transform function failed" - with pytest.raises(ValueError, match=msg): - df.transform({"a": int}) - - -def test_transform_mixed_column_name_dtypes(): - # GH39025 - df = DataFrame({"a": ["1"]}) - msg = r"Column\(s\) \[1, 'b'\] do not exist" - with pytest.raises(KeyError, match=msg): - df.transform({"a": int, 1: str, "b": int}) - - def test_transform_empty_dataframe(): # https://github.com/pandas-dev/pandas/issues/39636 df = DataFrame([], columns=["col1", "col2"]) diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py index c67259d3c8194..5ad5390ab3e16 100644 --- a/pandas/tests/apply/test_invalid_arg.py +++ b/pandas/tests/apply/test_invalid_arg.py @@ -1,15 +1,48 @@ # Tests specifically aimed at detecting bad arguments. +# This file is organized by reason for exception. +# 1. always invalid argument values +# 2. missing column(s) +# 3. incompatible ops/dtype/args/kwargs +# 4. invalid result shape/type +# If your test does not fit into one of these categories, add to this list. 
+ import re +import numpy as np import pytest from pandas import ( DataFrame, Series, + date_range, + notna, ) +import pandas._testing as tm from pandas.core.base import SpecificationError +@pytest.mark.parametrize("result_type", ["foo", 1]) +def test_result_type_error(result_type, int_frame_const_col): + # allowed result_type + df = int_frame_const_col + + msg = ( + "invalid value for result_type, must be one of " + "{None, 'reduce', 'broadcast', 'expand'}" + ) + with pytest.raises(ValueError, match=msg): + df.apply(lambda x: [1, 2, 3], axis=1, result_type=result_type) + + +def test_agg_raises(): + # GH 26513 + df = DataFrame({"A": [0, 1], "B": [1, 2]}) + msg = "Must provide" + + with pytest.raises(TypeError, match=msg): + df.agg() + + @pytest.mark.parametrize("box", [DataFrame, Series]) @pytest.mark.parametrize("method", ["apply", "agg", "transform"]) @pytest.mark.parametrize("func", [{"A": {"B": "sum"}}, {"A": {"B": ["sum"]}}]) @@ -21,6 +54,45 @@ def test_nested_renamer(box, method, func): getattr(obj, method)(func) +def test_transform_nested_renamer(): + # GH 35964 + match = "nested renamer is not supported" + with pytest.raises(SpecificationError, match=match): + Series([1]).transform({"A": {"B": ["sum"]}}) + + +def test_agg_dict_nested_renaming_depr_agg(): + + df = DataFrame({"A": range(5), "B": 5}) + + # nested renaming + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + df.agg({"A": {"foo": "min"}, "B": {"bar": "max"}}) + + +def test_agg_dict_nested_renaming_depr_transform(): + df = DataFrame({"A": range(5), "B": 5}) + + # nested renaming + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + # mypy identifies the argument as an invalid type + df.transform({"A": {"foo": "min"}, "B": {"bar": "max"}}) + + +def test_apply_dict_depr(): + + tsdf = DataFrame( + np.random.randn(10, 3), + columns=["A", "B", "C"], + index=date_range("1/1/2000", periods=10), + ) + msg = "nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + tsdf.A.agg({"foo": ["sum", "mean"]}) + + @pytest.mark.parametrize("method", ["apply", "agg", "transform"]) @pytest.mark.parametrize("func", [{"B": "sum"}, {"B": ["sum"]}]) def test_missing_column(method, func): @@ -29,3 +101,215 @@ def test_missing_column(method, func): match = re.escape("Column(s) ['B'] do not exist") with pytest.raises(KeyError, match=match): getattr(obj, method)(func) + + +def test_transform_missing_columns(axis): + # GH#35964 + df = DataFrame({"A": [1, 2], "B": [3, 4]}) + match = re.escape("Column(s) ['C'] do not exist") + with pytest.raises(KeyError, match=match): + df.transform({"C": "cumsum"}) + + +def test_transform_mixed_column_name_dtypes(): + # GH39025 + df = DataFrame({"a": ["1"]}) + msg = r"Column\(s\) \[1, 'b'\] do not exist" + with pytest.raises(KeyError, match=msg): + df.transform({"a": int, 1: str, "b": int}) + + +@pytest.mark.parametrize( + "how, args", [("pct_change", ()), ("nsmallest", (1, ["a", "b"])), ("tail", 1)] +) +def test_apply_str_axis_1_raises(how, args): + # GH 39211 - some ops don't support axis=1 + df = DataFrame({"a": [1, 2], "b": [3, 4]}) + msg = f"Operation {how} does not support axis=1" + with pytest.raises(ValueError, match=msg): + df.apply(how, axis=1, args=args) + + +def test_transform_axis_1_raises(): + # GH 35964 + msg = "No axis named 1 for object type Series" + with pytest.raises(ValueError, match=msg): + Series([1]).transform("sum", axis=1) + + +def test_apply_modify_traceback(): + data = 
DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": [ + "dull", + "dull", + "shiny", + "dull", + "dull", + "shiny", + "shiny", + "dull", + "shiny", + "shiny", + "shiny", + ], + "D": np.random.randn(11), + "E": np.random.randn(11), + "F": np.random.randn(11), + } + ) + + data.loc[4, "C"] = np.nan + + def transform(row): + if row["C"].startswith("shin") and row["A"] == "foo": + row["D"] = 7 + return row + + def transform2(row): + if notna(row["C"]) and row["C"].startswith("shin") and row["A"] == "foo": + row["D"] = 7 + return row + + msg = "'float' object has no attribute 'startswith'" + with pytest.raises(AttributeError, match=msg): + data.apply(transform, axis=1) + + +@pytest.mark.parametrize( + "df, func, expected", + tm.get_cython_table_params( + DataFrame([["a", "b"], ["b", "a"]]), [["cumprod", TypeError]] + ), +) +def test_agg_cython_table_raises(df, func, expected, axis): + # GH 21224 + msg = "can't multiply sequence by non-int of type 'str'" + with pytest.raises(expected, match=msg): + df.agg(func, axis=axis) + + +def test_transform_none_to_type(): + # GH#34377 + df = DataFrame({"a": [None]}) + msg = "Transform function failed" + with pytest.raises(ValueError, match=msg): + df.transform({"a": int}) + + +def test_apply_broadcast_error(int_frame_const_col): + df = int_frame_const_col + + # > 1 ndim + msg = "too many dims to broadcast" + with pytest.raises(ValueError, match=msg): + df.apply( + lambda x: np.array([1, 2]).reshape(-1, 2), + axis=1, + result_type="broadcast", + ) + + # cannot broadcast + msg = "cannot broadcast result" + with pytest.raises(ValueError, match=msg): + df.apply(lambda x: [1, 2], axis=1, result_type="broadcast") + + with pytest.raises(ValueError, match=msg): + df.apply(lambda x: Series([1, 2]), axis=1, result_type="broadcast") + + +def test_transform_and_agg_err_agg(axis, float_frame): + # cannot both transform and agg + msg = "cannot combine transform and aggregation operations" + with pytest.raises(ValueError, match=msg): + with np.errstate(all="ignore"): + float_frame.agg(["max", "sqrt"], axis=axis) + + df = DataFrame({"A": range(5), "B": 5}) + + def f(): + with np.errstate(all="ignore"): + df.agg({"A": ["abs", "sum"], "B": ["mean", "max"]}, axis=axis) + + +def test_transform_and_agg_error_agg(string_series): + # we are trying to transform with an aggregator + msg = "cannot combine transform and aggregation" + with pytest.raises(ValueError, match=msg): + with np.errstate(all="ignore"): + string_series.agg(["sqrt", "max"]) + + msg = "cannot perform both aggregation and transformation" + with pytest.raises(ValueError, match=msg): + with np.errstate(all="ignore"): + string_series.agg({"foo": np.sqrt, "bar": "sum"}) + + +def test_transform_and_agg_err_transform(axis, float_frame): + # GH 35964 + # cannot both transform and agg + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + float_frame.transform(["max", "min"], axis=axis) + + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + float_frame.transform(["max", "sqrt"], axis=axis) + + +def test_transform_reducer_raises(all_reductions, frame_or_series): + # GH 35964 + op = all_reductions + + obj = DataFrame({"A": [1, 2, 3]}) + if frame_or_series is not DataFrame: + obj = obj["A"] + + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): 
+ obj.transform(op) + with pytest.raises(ValueError, match=msg): + obj.transform([op]) + with pytest.raises(ValueError, match=msg): + obj.transform({"A": op}) + with pytest.raises(ValueError, match=msg): + obj.transform({"A": [op]}) + + +def test_transform_wont_agg(string_series): + # GH 35964 + # we are trying to transform with an aggregator + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + string_series.transform(["min", "max"]) + + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + with np.errstate(all="ignore"): + string_series.transform(["sqrt", "max"]) diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py index 19e6cda4ebd22..5d4a2e489e172 100644 --- a/pandas/tests/apply/test_series_apply.py +++ b/pandas/tests/apply/test_series_apply.py @@ -182,18 +182,6 @@ def f(x): tm.assert_series_equal(result, exp) -def test_apply_dict_depr(): - - tsdf = DataFrame( - np.random.randn(10, 3), - columns=["A", "B", "C"], - index=pd.date_range("1/1/2000", periods=10), - ) - msg = "nested renamer is not supported" - with pytest.raises(SpecificationError, match=msg): - tsdf.A.agg({"foo": ["sum", "mean"]}) - - def test_apply_categorical(): values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True) ser = Series(values, name="XX", index=list("abcdefg")) @@ -269,19 +257,6 @@ def test_transform(string_series): tm.assert_series_equal(result.reindex_like(expected), expected) -def test_transform_and_agg_error(string_series): - # we are trying to transform with an aggregator - msg = "cannot combine transform and aggregation" - with pytest.raises(ValueError, match=msg): - with np.errstate(all="ignore"): - string_series.agg(["sqrt", "max"]) - - msg = "cannot perform both aggregation and transformation" - with pytest.raises(ValueError, match=msg): - with np.errstate(all="ignore"): - string_series.agg({"foo": np.sqrt, "bar": "sum"}) - - def test_demo(): # demonstration tests s = Series(range(6), dtype="int64", name="series") diff --git a/pandas/tests/apply/test_series_transform.py b/pandas/tests/apply/test_series_transform.py index 24d619cb2bbb1..90065d20e1a59 100644 --- a/pandas/tests/apply/test_series_transform.py +++ b/pandas/tests/apply/test_series_transform.py @@ -8,7 +8,6 @@ concat, ) import pandas._testing as tm -from pandas.core.base import SpecificationError from pandas.core.groupby.base import transformation_kernels # tshift only works on time index and is deprecated @@ -66,30 +65,3 @@ def test_transform_dictlike_mixed(): columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]), ) tm.assert_frame_equal(result, expected) - - -def test_transform_wont_agg(string_series): - # GH 35964 - # we are trying to transform with an aggregator - msg = "Function did not transform" - with pytest.raises(ValueError, match=msg): - string_series.transform(["min", "max"]) - - msg = "Function did not transform" - with pytest.raises(ValueError, match=msg): - with np.errstate(all="ignore"): - string_series.transform(["sqrt", "max"]) - - -def test_transform_axis_1_raises(): - # GH 35964 - msg = "No axis named 1 for object type Series" - with pytest.raises(ValueError, match=msg): - Series([1]).transform("sum", axis=1) - - -def test_transform_nested_renamer(): - # GH 35964 - match = "nested renamer is not supported" - with pytest.raises(SpecificationError, match=match): - Series([1]).transform({"A": {"B": ["sum"]}})
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them

Moves the relevant tests into test_invalid_arg. Two tests had to be renamed because they shared a name, but there are no other changes. I organized the file by the reason for the exception and indicated this at the top of the file. Certainly open to any other ways of organizing.
https://api.github.com/repos/pandas-dev/pandas/pulls/40092
2021-02-26T21:49:49Z
2021-03-03T03:07:31Z
2021-03-03T03:07:31Z
2021-03-03T03:10:36Z
REG: DataFrame/Series.transform with list and non-list dict values
diff --git a/doc/source/whatsnew/v1.2.3.rst b/doc/source/whatsnew/v1.2.3.rst index f72ee78bf243a..99e997189d7b8 100644 --- a/doc/source/whatsnew/v1.2.3.rst +++ b/doc/source/whatsnew/v1.2.3.rst @@ -24,6 +24,8 @@ Fixed regressions Passing ``ascending=None`` is still considered invalid, and the new error message suggests a proper usage (``ascending`` must be a boolean or a list-like boolean). +- Fixed regression in :meth:`DataFrame.transform` and :meth:`Series.transform` giving incorrect column labels when passed a dictionary with a mix of list and non-list values (:issue:`40018`) +- .. --------------------------------------------------------------------------- diff --git a/pandas/core/apply.py b/pandas/core/apply.py index db4203e5158ef..970629f4abfe9 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -280,7 +280,7 @@ def transform_dict_like(self, func): if len(func) == 0: raise ValueError("No transform functions were provided") - self.validate_dictlike_arg("transform", obj, func) + func = self.normalize_dictlike_arg("transform", obj, func) results: Dict[Hashable, FrameOrSeriesUnion] = {} for name, how in func.items(): @@ -421,32 +421,17 @@ def agg_dict_like(self, _axis: int) -> FrameOrSeriesUnion: ------- Result of aggregation. """ + from pandas.core.reshape.concat import concat + obj = self.obj arg = cast(AggFuncTypeDict, self.f) - is_aggregator = lambda x: isinstance(x, (list, tuple, dict)) - if _axis != 0: # pragma: no cover raise ValueError("Can only pass dict with axis=0") selected_obj = obj._selected_obj - self.validate_dictlike_arg("agg", selected_obj, arg) - - # if we have a dict of any non-scalars - # eg. {'A' : ['mean']}, normalize all to - # be list-likes - # Cannot use arg.values() because arg may be a Series - if any(is_aggregator(x) for _, x in arg.items()): - new_arg: AggFuncTypeDict = {} - for k, v in arg.items(): - if not isinstance(v, (tuple, list, dict)): - new_arg[k] = [v] - else: - new_arg[k] = v - arg = new_arg - - from pandas.core.reshape.concat import concat + arg = self.normalize_dictlike_arg("agg", selected_obj, arg) if selected_obj.ndim == 1: # key only used for output @@ -540,14 +525,15 @@ def maybe_apply_multiple(self) -> Optional[FrameOrSeriesUnion]: return None return self.obj.aggregate(self.f, self.axis, *self.args, **self.kwargs) - def validate_dictlike_arg( + def normalize_dictlike_arg( self, how: str, obj: FrameOrSeriesUnion, func: AggFuncTypeDict - ) -> None: + ) -> AggFuncTypeDict: """ - Raise if dict-like argument is invalid. + Handler for dict-like argument. Ensures that necessary columns exist if obj is a DataFrame, and - that a nested renamer is not passed. + that a nested renamer is not passed. Also normalizes to all lists + when values consists of a mix of list and non-lists. """ assert how in ("apply", "agg", "transform") @@ -567,6 +553,23 @@ def validate_dictlike_arg( cols_sorted = list(safe_sort(list(cols))) raise KeyError(f"Column(s) {cols_sorted} do not exist") + is_aggregator = lambda x: isinstance(x, (list, tuple, dict)) + + # if we have a dict of any non-scalars + # eg. 
{'A' : ['mean']}, normalize all to + # be list-likes + # Cannot use func.values() because arg may be a Series + if any(is_aggregator(x) for _, x in func.items()): + new_func: AggFuncTypeDict = {} + for k, v in func.items(): + if not is_aggregator(v): + # mypy can't realize v is not a list here + new_func[k] = [v] # type:ignore[list-item] + else: + new_func[k] = v + func = new_func + return func + class FrameApply(Apply): obj: DataFrame diff --git a/pandas/tests/apply/test_frame_transform.py b/pandas/tests/apply/test_frame_transform.py index 1888ddd8ec4aa..47bc69656a597 100644 --- a/pandas/tests/apply/test_frame_transform.py +++ b/pandas/tests/apply/test_frame_transform.py @@ -103,6 +103,17 @@ def test_transform_dictlike(axis, float_frame, box): tm.assert_frame_equal(result, expected) +def test_transform_dictlike_mixed(): + # GH 40018 - mix of lists and non-lists in values of a dictionary + df = DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]}) + result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"}) + expected = DataFrame( + [[1.0, 1, 1.0], [2.0, 4, 2.0]], + columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( "ops", [ diff --git a/pandas/tests/apply/test_series_transform.py b/pandas/tests/apply/test_series_transform.py index e67ea4f14e4ac..24d619cb2bbb1 100644 --- a/pandas/tests/apply/test_series_transform.py +++ b/pandas/tests/apply/test_series_transform.py @@ -2,6 +2,8 @@ import pytest from pandas import ( + DataFrame, + MultiIndex, Series, concat, ) @@ -55,6 +57,17 @@ def test_transform_dictlike(string_series, box): tm.assert_frame_equal(result, expected) +def test_transform_dictlike_mixed(): + # GH 40018 - mix of lists and non-lists in values of a dictionary + df = Series([1, 4]) + result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"}) + expected = DataFrame( + [[1.0, 1, 1.0], [2.0, 4, 2.0]], + columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]), + ) + tm.assert_frame_equal(result, expected) + + def test_transform_wont_agg(string_series): # GH 35964 # we are trying to transform with an aggregator
- [x] closes #40018 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry

Currently on master, `agg` does some preprocessing on dict-like arguments that `transform` does not, leading to incorrect behavior in `transform`. This PR moves `agg`'s preprocessing into `validate_dictlike_arg` (renamed `normalize_dictlike_arg` in the process), which is used by both `agg` and `transform`, fixing the issue. As identified [in #40018](https://github.com/pandas-dev/pandas/issues/40018#issuecomment-785678614), this is a regression from 1.1 -> 1.2 (almost certainly from one of my PRs, but I haven't checked which). Backporting this to 1.2 will probably require a bit of a different fix.
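For reference, a minimal reproduction of the fixed behavior, mirroring the `test_transform_dictlike_mixed` cases added in the diff:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]})

# A dict mixing a list ("b") and a single function ("c"): after
# normalization both values are treated as list-likes, so the result
# carries correct MultiIndex column labels instead of the incorrect
# labels reported in #40018.
result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"})
assert list(result.columns) == [("b", "sqrt"), ("b", "abs"), ("c", "sqrt")]
```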
https://api.github.com/repos/pandas-dev/pandas/pulls/40090
2021-02-26T20:56:54Z
2021-02-27T18:18:33Z
2021-02-27T18:18:33Z
2021-02-28T14:08:34Z
REF: remove sanitize_index
diff --git a/pandas/core/common.py b/pandas/core/common.py index 8625c5063382f..871f5ac651cce 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -3,6 +3,7 @@ Note: pandas.core.common is *not* part of the public API. """ +from __future__ import annotations from collections import ( abc, @@ -12,6 +13,7 @@ from functools import partial import inspect from typing import ( + TYPE_CHECKING, Any, Callable, Collection, @@ -51,6 +53,9 @@ from pandas.core.dtypes.inference import iterable_not_string from pandas.core.dtypes.missing import isna +if TYPE_CHECKING: + from pandas import Index + class SettingWithCopyError(ValueError): pass @@ -512,3 +517,16 @@ def temp_setattr(obj, attr: str, value) -> Iterator[None]: setattr(obj, attr, value) yield obj setattr(obj, attr, old_value) + + +def require_length_match(data, index: Index): + """ + Check the length of data matches the length of the index. + """ + if len(data) != len(index): + raise ValueError( + "Length of values " + f"({len(data)}) " + "does not match length of index " + f"({len(index)})" + ) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 469783913dc42..cf985ba621cfb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -183,7 +183,6 @@ ndarray_to_mgr, nested_data_to_arrays, reorder_arrays, - sanitize_index, to_arrays, treat_as_nested, ) @@ -4024,15 +4023,14 @@ def _sanitize_column(self, value) -> ArrayLike: value = _reindex_for_setitem(value, self.index) elif isinstance(value, ExtensionArray): - # Explicitly copy here, instead of in sanitize_index, - # as sanitize_index won't copy an EA, even with copy=True + # Explicitly copy here value = value.copy() - value = sanitize_index(value, self.index) + com.require_length_match(value, self.index) elif is_sequence(value): + com.require_length_match(value, self.index) # turn me into an ndarray - value = sanitize_index(value, self.index) if not isinstance(value, (np.ndarray, Index)): if isinstance(value, list) and len(value) > 0: value = maybe_convert_platform(value) diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 20536c7a94695..0f1fe77c6bb3a 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -36,7 +36,6 @@ maybe_convert_platform, maybe_infer_to_datetimelike, maybe_upcast, - sanitize_to_nanoseconds, ) from pandas.core.dtypes.common import ( is_datetime64tz_dtype, @@ -810,28 +809,3 @@ def convert(arr): arrays = [convert(arr) for arr in content] return arrays - - -# --------------------------------------------------------------------- -# Series-Based - - -def sanitize_index(data, index: Index): - """ - Sanitize an index type to return an ndarray of the underlying, pass - through a non-Index. 
- """ - if len(data) != len(index): - raise ValueError( - "Length of values " - f"({len(data)}) " - "does not match length of index " - f"({len(index)})" - ) - - if isinstance(data, np.ndarray): - - # coerce datetimelike types to ns - data = sanitize_to_nanoseconds(data) - - return data diff --git a/pandas/core/series.py b/pandas/core/series.py index 5fece72ccddca..58c9f116e011a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -126,7 +126,6 @@ from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexing import check_bool_indexer from pandas.core.internals import SingleBlockManager -from pandas.core.internals.construction import sanitize_index from pandas.core.shared_docs import _shared_docs from pandas.core.sorting import ( ensure_key_mapped, @@ -388,7 +387,7 @@ def __init__( data = [data] index = ibase.default_index(len(data)) elif is_list_like(data): - sanitize_index(data, index) + com.require_length_match(data, index) # create/copy the manager if isinstance(data, SingleBlockManager):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
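The length check that used to live in `sanitize_index` survives as `com.require_length_match`; a quick sketch of the user-visible error it backs (message taken from the helper in the diff):

```python
import pandas as pd

# Length validation now funnels through com.require_length_match; the
# user-facing error is unchanged:
pd.Series([1, 2, 3], index=[0, 1])
# ValueError: Length of values (3) does not match length of index (2)
```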
https://api.github.com/repos/pandas-dev/pandas/pulls/40089
2021-02-26T19:50:24Z
2021-02-27T18:23:08Z
2021-02-27T18:23:08Z
2021-02-27T18:35:22Z
CI: typo fixup for ResourceWarning
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 06e71c18c55cc..0917cf1787d5b 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -94,6 +94,7 @@ PeriodDtype, ) from pandas.core.dtypes.generic import ( + ABCDataFrame, ABCExtensionArray, ABCSeries, ) @@ -238,6 +239,9 @@ def maybe_downcast_to_dtype( try to cast to the specified dtype (e.g. convert back to bool/int or could be an astype of float64->float32 """ + if isinstance(result, ABCDataFrame): + # see test_pivot_table_doctest_case + return result do_round = False if isinstance(dtype, str): @@ -307,7 +311,7 @@ def maybe_downcast_numeric( ------- ndarray or ExtensionArray """ - if not isinstance(dtype, np.dtype): + if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype): # e.g. SparseDtype has no itemsize attr return result diff --git a/pandas/tests/io/parser/common/test_chunksize.py b/pandas/tests/io/parser/common/test_chunksize.py index 8e1e9fb6e458f..4bc3f3c38f506 100644 --- a/pandas/tests/io/parser/common/test_chunksize.py +++ b/pandas/tests/io/parser/common/test_chunksize.py @@ -193,7 +193,7 @@ def test_warn_if_chunks_have_mismatched_type(all_parsers, request): # 2021-02-21 this occasionally fails on the CI with an unexpected # ResourceWarning that we have been unable to track down, # see GH#38630 - if "ResourceError" not in str(err) or parser.engine != "python": + if "ResourceWarning" not in str(err) or parser.engine != "python": raise # Check the main assertion of the test before re-raising diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 8d2b4f2b325c2..131fb14d0bf85 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -2063,6 +2063,55 @@ def agg(arr): with pytest.raises(KeyError, match="notpresent"): foo.pivot_table("notpresent", "X", "Y", aggfunc=agg) + def test_pivot_table_doctest_case(self): + # TODO: better name. the relevant characteristic is that + # the call to maybe_downcast_to_dtype(agged[v], data[v].dtype) in + # __internal_pivot_table has `agged[v]` a DataFrame instead of Series, + # i.e agged.columns is not unique + df = DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], + "C": [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], + } + ) + + table = pivot_table( + df, + values=["D", "E"], + index=["A", "C"], + aggfunc={"D": np.mean, "E": [min, max, np.mean]}, + ) + cols = MultiIndex.from_tuples( + [("D", "mean"), ("E", "max"), ("E", "mean"), ("E", "min")] + ) + index = MultiIndex.from_tuples( + [("bar", "large"), ("bar", "small"), ("foo", "large"), ("foo", "small")], + names=["A", "C"], + ) + vals = np.array( + [ + [5.5, 9.0, 7.5, 6.0], + [5.5, 9.0, 8.5, 8.0], + [2.0, 5.0, 4.5, 4.0], + [2.33333333, 6.0, 4.33333333, 2.0], + ] + ) + expected = DataFrame(vals, columns=cols, index=index) + tm.assert_frame_equal(table, expected) + class TestPivot: def test_pivot(self):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
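A condensed sketch of the pivot_table case the new test guards. The frame below is a made-up, smaller stand-in for the doctest-sized one in the diff; what matters is the same characteristic, an `aggfunc` dict mixing a single function and a list:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {
        "A": ["foo", "foo", "bar", "bar"],
        "C": ["small", "large", "small", "large"],
        "D": [1, 2, 3, 4],
        "E": [2, 4, 6, 8],
    }
)

# Because "E" maps to a list, agged.columns is not unique and agged[v] is a
# DataFrame rather than a Series; maybe_downcast_to_dtype now returns such
# input unchanged instead of choking on it.
table = pd.pivot_table(
    df,
    values=["D", "E"],
    index=["A", "C"],
    aggfunc={"D": np.mean, "E": [min, max, np.mean]},
)
```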
https://api.github.com/repos/pandas-dev/pandas/pulls/40088
2021-02-26T19:05:06Z
2021-02-27T03:58:42Z
2021-02-27T03:58:42Z
2021-02-27T03:59:43Z
REF: tighten types in maybe_downcast_numeric, maybe_downcast_to_dtype
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index b30dbe32eec4b..8e5f386ae8d43 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -94,7 +94,6 @@ PeriodDtype, ) from pandas.core.dtypes.generic import ( - ABCDataFrame, ABCExtensionArray, ABCIndex, ABCSeries, @@ -233,19 +232,15 @@ def _disallow_mismatched_datetimelike(value, dtype: DtypeObj): raise TypeError(f"Cannot cast {repr(value)} to {dtype}") -def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]): +def maybe_downcast_to_dtype( + result: ArrayLike, dtype: Union[str, np.dtype] +) -> ArrayLike: """ try to cast to the specified dtype (e.g. convert back to bool/int or could be an astype of float64->float32 """ do_round = False - if is_scalar(result): - return result - elif isinstance(result, ABCDataFrame): - # occurs in pivot_table doctest - return result - if isinstance(dtype, str): if dtype == "infer": inferred_type = lib.infer_dtype(ensure_object(result), skipna=False) @@ -265,6 +260,7 @@ def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]): do_round = True else: + # TODO: complex? what if result is already non-object? dtype = "object" dtype = np.dtype(dtype) @@ -296,7 +292,9 @@ def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]): return result -def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False): +def maybe_downcast_numeric( + result: ArrayLike, dtype: DtypeObj, do_round: bool = False +) -> ArrayLike: """ Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
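For orientation, a sketch of the numeric path that remains after the scalar and DataFrame special cases were removed (this is internal API, so subject to change):

```python
import numpy as np

from pandas.core.dtypes.cast import maybe_downcast_to_dtype

# Callers are now expected to pass an ArrayLike; integral floats still
# downcast to the requested integer dtype as before.
arr = np.array([1.0, 2.0, 3.0])
maybe_downcast_to_dtype(arr, "int64")  # -> array([1, 2, 3])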
https://api.github.com/repos/pandas-dev/pandas/pulls/40087
2021-02-26T17:36:36Z
2021-02-27T00:00:45Z
2021-02-27T00:00:45Z
2021-02-27T02:35:47Z
REF: tighter types in maybe_infer_to_datetimelike
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 29a172dcdd2c7..bdc64d5f14a27 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -398,6 +398,10 @@ def __init__( elif not isinstance(values, (ABCIndex, ABCSeries, ExtensionArray)): # sanitize_array coerces np.nan to a string under certain versions # of numpy + if not isinstance(values, (np.ndarray, list)): + # convert e.g. range, tuple to allow for stronger typing + # of maybe_infer_to_datetimelike + values = list(values) values = maybe_infer_to_datetimelike(values) if isinstance(values, np.ndarray): values = sanitize_to_nanoseconds(values) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 0c0084f2492d3..f5f49e0e5fc20 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -670,7 +670,7 @@ def _try_cast( else: subarr = maybe_cast_to_datetime(arr, dtype) - if not isinstance(subarr, (ABCExtensionArray, ABCIndex)): + if not isinstance(subarr, ABCExtensionArray): subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy) except OutOfBoundsDatetime: # in case of out of bound datetime64 -> always raise diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index b30dbe32eec4b..8d25fc2e4aded 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -96,7 +96,6 @@ from pandas.core.dtypes.generic import ( ABCDataFrame, ABCExtensionArray, - ABCIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like @@ -1389,7 +1388,7 @@ def maybe_castable(dtype: np.dtype) -> bool: def maybe_infer_to_datetimelike( - value: Union[ArrayLike, Scalar], convert_dates: bool = False + value: Union[np.ndarray, List], convert_dates: bool = False ): """ we might have a array (or single object) that is datetime like, @@ -1401,21 +1400,16 @@ def maybe_infer_to_datetimelike( Parameters ---------- - value : np.array / Series / Index / list-like + value : np.ndarray or list convert_dates : bool, default False if True try really hard to convert dates (such as datetime.date), other leave inferred dtype 'date' alone """ - if isinstance(value, (ABCIndex, ABCExtensionArray)): - if not is_object_dtype(value.dtype): - raise ValueError("array-like value must be object-dtype") + if not isinstance(value, (np.ndarray, list)): + raise TypeError(type(value)) - v = value - - if not is_list_like(v): - v = [v] - v = np.array(v, copy=False) + v = np.array(value, copy=False) # we only care about object dtypes if not is_object_dtype(v.dtype): @@ -1616,7 +1610,7 @@ def maybe_cast_to_datetime( elif value.dtype == object: value = maybe_infer_to_datetimelike(value) - else: + elif not isinstance(value, ABCExtensionArray): # only do this if we have an array and the dtype of the array is not # setup already we are not an integer/object, so don't bother with this # conversion
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
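A sketch of the tightened contract (again internal API; behavior per the diff above):

```python
from datetime import datetime

import numpy as np

from pandas.core.dtypes.cast import maybe_infer_to_datetimelike

# Object-dtype ndarrays (or lists) are still inferred as before...
arr = np.array([datetime(2021, 1, 1), datetime(2021, 1, 2)], dtype=object)
maybe_infer_to_datetimelike(arr)  # datetime64[ns]-backed result

# ...but anything else now raises up front instead of being wrapped:
maybe_infer_to_datetimelike((1, 2))  # TypeError: <class 'tuple'>
```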
https://api.github.com/repos/pandas-dev/pandas/pulls/40086
2021-02-26T17:32:03Z
2021-02-26T22:52:19Z
2021-02-26T22:52:19Z
2021-02-26T22:55:19Z
[ArrayManager] TST: resample tests
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b63811e08e182..0a7ac2325740a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -158,6 +158,7 @@ jobs: pytest pandas/tests/generic/test_generic.py --array-manager pytest pandas/tests/arithmetic/ --array-manager pytest pandas/tests/groupby/ --array-manager + pytest pandas/tests/resample/ --array-manager pytest pandas/tests/reshape/merge --array-manager # indexing subset (temporary since other tests don't pass yet) diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index bf3e6d822ab19..733a8c0aa58ec 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -3,6 +3,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas import ( DataFrame, NaT, @@ -245,6 +247,7 @@ def test_resampler_is_iterable(series): tm.assert_series_equal(rv, gv) +@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) quantile @all_ts def test_resample_quantile(series): # GH 15023
xref https://github.com/pandas-dev/pandas/issues/39146/. This will only pass once https://github.com/pandas-dev/pandas/pull/40050 is merged.
https://api.github.com/repos/pandas-dev/pandas/pulls/40085
2021-02-26T16:56:21Z
2021-03-01T13:51:23Z
2021-03-01T13:51:23Z
2021-03-01T13:51:26Z
API: dont-special-case datetimelike in setting new column
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 469783913dc42..9aa5180133bd4 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -99,7 +99,6 @@ maybe_box_native, maybe_convert_platform, maybe_downcast_to_dtype, - maybe_infer_to_datetimelike, validate_numeric_casting, ) from pandas.core.dtypes.common import ( @@ -147,6 +146,7 @@ from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.construction import ( extract_array, + sanitize_array, sanitize_masked_array, ) from pandas.core.generic import ( @@ -4045,7 +4045,7 @@ def _sanitize_column(self, value) -> ArrayLike: # possibly infer to datetimelike if is_object_dtype(value.dtype): - value = maybe_infer_to_datetimelike(value) + value = sanitize_array(value, None) else: value = construct_1d_arraylike_from_scalar(value, len(self), dtype=None) diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index f2edfed019bdb..9d61be5887b7e 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -421,19 +421,26 @@ def test_setitem_intervals(self): # B & D end up as Categoricals # the remainer are converted to in-line objects - # contining an IntervalIndex.values + # containing an IntervalIndex.values df["B"] = ser df["C"] = np.array(ser) df["D"] = ser.values df["E"] = np.array(ser.values) + df["F"] = ser.astype(object) assert is_categorical_dtype(df["B"].dtype) assert is_interval_dtype(df["B"].cat.categories) assert is_categorical_dtype(df["D"].dtype) assert is_interval_dtype(df["D"].cat.categories) - assert is_object_dtype(df["C"]) - assert is_object_dtype(df["E"]) + # Thes goes through the Series constructor and so get inferred back + # to IntervalDtype + assert is_interval_dtype(df["C"]) + assert is_interval_dtype(df["E"]) + + # But the Series constructor doesn't do inference on Series objects, + # so setting df["F"] doesnt get cast back to IntervalDtype + assert is_object_dtype(df["F"]) # they compare equal as Index # when converted to numpy objects
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
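The updated test in the diff pins down the behavior change; in short:

```python
import numpy as np
import pandas as pd

ser = pd.Series(pd.interval_range(0, periods=2))
df = pd.DataFrame(index=ser.index)

# An object ndarray of Interval objects now goes through sanitize_array,
# so it is inferred back to IntervalDtype...
df["C"] = np.array(ser)
assert str(df["C"].dtype).startswith("interval")

# ...while an object-dtype Series is not re-inferred by the Series
# constructor and stays object:
df["F"] = ser.astype(object)
assert df["F"].dtype == object
```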
https://api.github.com/repos/pandas-dev/pandas/pulls/40084
2021-02-26T16:52:09Z
2021-02-27T18:34:22Z
2021-02-27T18:34:22Z
2021-03-31T22:22:40Z
Backport PR #39451: BUG: raise when sort_index with ascending=None
diff --git a/doc/source/whatsnew/v1.2.3.rst b/doc/source/whatsnew/v1.2.3.rst index b1b51e64ae997..f72ee78bf243a 100644 --- a/doc/source/whatsnew/v1.2.3.rst +++ b/doc/source/whatsnew/v1.2.3.rst @@ -19,6 +19,11 @@ Fixed regressions - Fixed regression in nullable integer unary ops propagating mask on assignment (:issue:`39943`) - Fixed regression in :meth:`DataFrame.__setitem__` not aligning :class:`DataFrame` on right-hand side for boolean indexer (:issue:`39931`) - Fixed regression in :meth:`~DataFrame.to_json` failing to use ``compression`` with URL-like paths that are internally opened in binary mode or with user-provided file objects that are opened in binary mode (:issue:`39985`) +- Fixed regression in :meth:`~Series.sort_index` and :meth:`~DataFrame.sort_index`, + which exited with an ungraceful error when having kwarg ``ascending=None`` passed (:issue:`39434`). + Passing ``ascending=None`` is still considered invalid, + and the new error message suggests a proper usage + (``ascending`` must be a boolean or a list-like boolean). .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d1855774e5692..0094ebc744a34 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5482,7 +5482,7 @@ def sort_index( self, axis=0, level=None, - ascending: bool = True, + ascending: Union[Union[bool, int], Sequence[Union[bool, int]]] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", @@ -5503,7 +5503,7 @@ def sort_index( and 1 identifies the columns. level : int or level name or list of ints or list of level names If not None, sort on values in specified index level(s). - ascending : bool or list of bools, default True + ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False diff --git a/pandas/core/generic.py b/pandas/core/generic.py index dc0d25848886f..4350cd60c99d7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -56,6 +56,7 @@ from pandas.errors import AbstractMethodError, InvalidIndexError from pandas.util._decorators import doc, rewrite_axis_style_signature from pandas.util._validators import ( + validate_ascending, validate_bool_kwarg, validate_fillna_kwargs, validate_percentile, @@ -4518,7 +4519,7 @@ def sort_index( self, axis=0, level=None, - ascending: bool_t = True, + ascending: Union[Union[bool_t, int], Sequence[Union[bool_t, int]]] = True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", @@ -4529,6 +4530,8 @@ def sort_index( inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) + ascending = validate_ascending(ascending) + target = self._get_axis(axis) indexer = get_indexer_indexer( diff --git a/pandas/core/series.py b/pandas/core/series.py index 4b0d5f0b407be..1f209e68e5acf 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -12,9 +12,11 @@ Iterable, List, Optional, + Sequence, Tuple, Type, Union, + cast, ) import warnings @@ -3065,7 +3067,7 @@ def update(self, other) -> None: def sort_values( self, axis=0, - ascending=True, + ascending: Union[Union[bool, int], Sequence[Union[bool, int]]] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", @@ -3083,7 +3085,7 @@ def sort_values( axis : {0 or 'index'}, default 0 Axis to direct sorting. 
The value 'index' is accepted for compatibility with DataFrame.sort_values. - ascending : bool, default True + ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. @@ -3243,6 +3245,7 @@ def sort_values( ) if is_list_like(ascending): + ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" @@ -3257,7 +3260,7 @@ def sort_values( # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values - sorted_index = nargsort(values_to_sort, kind, ascending, na_position) + sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index] @@ -3275,7 +3278,7 @@ def sort_index( self, axis=0, level=None, - ascending: bool = True, + ascending: Union[Union[bool, int], Sequence[Union[bool, int]]] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", @@ -3295,7 +3298,7 @@ def sort_index( Axis to direct sorting. This can only be 0 for Series. level : int, optional If not None, sort on values in specified index level(s). - ascending : bool or list of bools, default True + ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 0a1cbc6de1cda..fdbe5dfc38524 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -8,6 +8,7 @@ Iterable, List, Optional, + Sequence, Tuple, Union, ) @@ -39,7 +40,7 @@ def get_indexer_indexer( target: "Index", level: Union[str, int, List[str], List[int]], - ascending: bool, + ascending: Union[Sequence[Union[bool, int]], Union[bool, int]], kind: str, na_position: str, sort_remaining: bool, diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index de847c12723b2..0fcf98d9b677a 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -765,6 +765,23 @@ def test_sort_index_with_categories(self, categories): ) tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize( + "ascending", + [ + None, + [True, None], + [False, "True"], + ], + ) + def test_sort_index_ascending_bad_value_raises(self, ascending): + # GH 39434 + df = DataFrame(np.arange(64)) + length = len(df.index) + df.index = [(i - length / 2) % length for i in range(length)] + match = 'For argument "ascending" expected type bool' + with pytest.raises(ValueError, match=match): + df.sort_index(axis=0, ascending=ascending, na_position="first") + class TestDataFrameSortIndexKey: def test_sort_multi_index_key(self): diff --git a/pandas/tests/series/methods/test_sort_index.py b/pandas/tests/series/methods/test_sort_index.py index 6c6be1506255a..422eedd431b11 100644 --- a/pandas/tests/series/methods/test_sort_index.py +++ b/pandas/tests/series/methods/test_sort_index.py @@ -199,6 +199,20 @@ def test_sort_index_ascending_list(self): expected = ser.iloc[[0, 4, 1, 5, 2, 6, 3, 7]] tm.assert_series_equal(result, expected) + @pytest.mark.parametrize( + "ascending", + [ + None, + (True, None), + (False, "True"), + ], + ) + def test_sort_index_ascending_bad_value_raises(self, 
ascending): + ser = Series(range(10), index=[0, 3, 2, 1, 4, 5, 7, 6, 8, 9]) + match = 'For argument "ascending" expected type bool' + with pytest.raises(ValueError, match=match): + ser.sort_index(ascending=ascending) + class TestSeriesSortIndexKey: def test_sort_index_multiindex_key(self): diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index fa7201a5188a5..289ea63be62e2 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -2,7 +2,7 @@ Module that contains many useful utilities for validating data or function arguments """ -from typing import Iterable, Union +from typing import Iterable, Sequence, Union import warnings import numpy as np @@ -205,9 +205,39 @@ def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_ar validate_kwargs(fname, kwargs, compat_args) -def validate_bool_kwarg(value, arg_name): - """ Ensures that argument passed in arg_name is of type bool. """ - if not (is_bool(value) or value is None): +def validate_bool_kwarg(value, arg_name, none_allowed=True, int_allowed=False): + """ + Ensure that argument passed in arg_name can be interpreted as boolean. + + Parameters + ---------- + value : bool + Value to be validated. + arg_name : str + Name of the argument. To be reflected in the error message. + none_allowed : bool, default True + Whether to consider None to be a valid boolean. + int_allowed : bool, default False + Whether to consider integer value to be a valid boolean. + + Returns + ------- + value + The same value as input. + + Raises + ------ + ValueError + If the value is not a valid boolean. + """ + good_value = is_bool(value) + if none_allowed: + good_value = good_value or value is None + + if int_allowed: + good_value = good_value or isinstance(value, int) + + if not good_value: raise ValueError( f'For argument "{arg_name}" expected type bool, received ' f"type {type(value).__name__}." @@ -381,3 +411,14 @@ def validate_percentile(q: Union[float, Iterable[float]]) -> np.ndarray: if not all(0 <= qs <= 1 for qs in q_arr): raise ValueError(msg.format(q_arr / 100.0)) return q_arr + + +def validate_ascending( + ascending: Union[Union[bool, int], Sequence[Union[bool, int]]] = True, +): + """Validate ``ascending`` kwargs for ``sort_index`` method.""" + kwargs = {"none_allowed": False, "int_allowed": True} + if not isinstance(ascending, (list, tuple)): + return validate_bool_kwarg(ascending, "ascending", **kwargs) + + return [validate_bool_kwarg(item, "ascending", **kwargs) for item in ascending]
Backport PR #39451
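The backported validation in action (error message per the new `validate_bool_kwarg`):

```python
import pandas as pd

ser = pd.Series(range(3), index=[2, 0, 1])

# ascending must be a boolean or list-like of booleans; None now raises a
# readable ValueError instead of failing deep inside the sort machinery:
ser.sort_index(ascending=None)
# ValueError: For argument "ascending" expected type bool,
# received type NoneType.
```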
https://api.github.com/repos/pandas-dev/pandas/pulls/40083
2021-02-26T16:37:32Z
2021-02-26T18:26:30Z
2021-02-26T18:26:30Z
2021-02-26T18:26:34Z
REF: avoid object-dtype casting in Block.replace
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 689a067e1c211..b65043be6fda6 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -796,7 +796,6 @@ def replace( It is used in ObjectBlocks. It is here for API compatibility. """ inplace = validate_bool_kwarg(inplace, "inplace") - original_to_replace = to_replace if not self._can_hold_element(to_replace): # We cannot hold `to_replace`, so we know immediately that @@ -814,9 +813,20 @@ def replace( return [self] if inplace else [self.copy()] if not self._can_hold_element(value): - blk = self.astype(object) + if self.ndim == 2 and self.shape[0] > 1: + # split so that we only upcast where necessary + nbs = self._split() + res_blocks = extend_blocks( + [ + blk.replace(to_replace, value, inplace=inplace, regex=regex) + for blk in nbs + ] + ) + return res_blocks + + blk = self.coerce_to_target_dtype(value) return blk.replace( - to_replace=original_to_replace, + to_replace=to_replace, value=value, inplace=True, regex=regex, @@ -824,7 +834,7 @@ def replace( blk = self if inplace else self.copy() putmask_inplace(blk.values, mask, value) - blocks = blk.convert(numeric=False, copy=not inplace) + blocks = blk.convert(numeric=False, copy=False) return blocks @final @@ -867,11 +877,7 @@ def _replace_regex( replace_regex(new_values, rx, value, mask) block = self.make_block(new_values) - if convert: - nbs = block.convert(numeric=False) - else: - nbs = [block] - return nbs + return [block] @final def _replace_list( diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py index 58016be82c405..564481d01abc8 100644 --- a/pandas/tests/frame/methods/test_fillna.py +++ b/pandas/tests/frame/methods/test_fillna.py @@ -265,12 +265,13 @@ def test_fillna_dtype_conversion(self): expected = DataFrame("nan", index=range(3), columns=["A", "B"]) tm.assert_frame_equal(result, expected) - # equiv of replace + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) object upcasting + @pytest.mark.parametrize("val", ["", 1, np.nan, 1.0]) + def test_fillna_dtype_conversion_equiv_replace(self, val): df = DataFrame({"A": [1, np.nan], "B": [1.0, 2.0]}) - for v in ["", 1, np.nan, 1.0]: - expected = df.replace(np.nan, v) - result = df.fillna(v) - tm.assert_frame_equal(result, expected) + expected = df.replace(np.nan, val) + result = df.fillna(val) + tm.assert_frame_equal(result, expected) @td.skip_array_manager_invalid_test def test_fillna_datetime_columns(self): diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 9ae5bb151b685..6d1e90e2f9646 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -783,6 +783,8 @@ def test_replace_mixed(self, float_string_frame): tm.assert_frame_equal(result, expected) tm.assert_frame_equal(result.replace(-1e8, np.nan), float_string_frame) + def test_replace_mixed_int_block_upcasting(self): + # int block upcasting df = DataFrame( { @@ -803,6 +805,8 @@ def test_replace_mixed(self, float_string_frame): assert return_value is None tm.assert_frame_equal(df, expected) + def test_replace_mixed_int_block_splitting(self): + # int block splitting df = DataFrame( { @@ -821,6 +825,8 @@ def test_replace_mixed(self, float_string_frame): result = df.replace(0, 0.5) tm.assert_frame_equal(result, expected) + def test_replace_mixed2(self): + # to object block upcasting df = DataFrame( { @@ -846,6 +852,7 @@ def test_replace_mixed(self, 
float_string_frame): result = df.replace([1, 2], ["foo", "bar"]) tm.assert_frame_equal(result, expected) + def test_replace_mixed3(self): # test case from df = DataFrame( {"A": Series([3, 0], dtype="int64"), "B": Series([0, 3], dtype="int64")}
ATM we do `self.astype(object)`, where the idiomatic thing to do is `self.coerce_to_target_dtype(value)`, which in some cases means we don't need to cast on the back end.
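A small illustration of the difference, based on the replace tests in the diff (the simplified frame here is mine):

```python
import pandas as pd

df = pd.DataFrame({"A": [3, 0], "B": [0.0, 3.0]})

# "A" (int64) cannot hold 0.5. Instead of casting the whole block to object
# and re-inferring afterwards, the block is split (if multi-column) and only
# the columns that need it are coerced to the target dtype directly.
result = df.replace(0, 0.5)
print(result.dtypes)
# A    float64
# B    float64
```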
https://api.github.com/repos/pandas-dev/pandas/pulls/40082
2021-02-26T15:36:06Z
2021-02-26T23:09:25Z
2021-02-26T23:09:25Z
2021-02-26T23:10:46Z
[ArrayManager] Remove apply_with_block usage for diff()
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index e0447378c4542..8585dc5ebcdd4 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -452,7 +452,13 @@ def putmask(self, mask, new, align: bool = True): ) def diff(self, n: int, axis: int) -> ArrayManager: - return self.apply_with_block("diff", n=n, axis=axis) + axis = self._normalize_axis(axis) + if axis == 1: + # DataFrame only calls this for n=0, in which case performing it + # with axis=0 is equivalent + assert n == 0 + axis = 0 + return self.apply(algos.diff, n=n, axis=axis) def interpolate(self, **kwargs) -> ArrayManager: return self.apply_with_block("interpolate", **kwargs)
xref https://github.com/pandas-dev/pandas/issues/39146/. This turned out to be an easy one.
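The equivalence the new assertion relies on can be checked directly at the user level (a zero-period diff is all zeros regardless of axis):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# DataFrame only hits the manager's axis=1 path with n=0, where diffing
# along columns and along rows gives the same all-zero result, so the
# ArrayManager can rewrite it to axis=0 and apply algos.diff column-wise.
assert df.diff(periods=0, axis=1).equals(df.diff(periods=0, axis=0))
```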
https://api.github.com/repos/pandas-dev/pandas/pulls/40079
2021-02-26T14:08:21Z
2021-02-27T19:04:30Z
2021-02-27T19:04:30Z
2021-03-01T10:01:11Z
REF, TYP: factor out _mark_right from _add_legend_handle
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index b355cba6354da..37d9ed45d4050 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -579,13 +579,24 @@ def legend_title(self) -> Optional[str]: stringified = map(pprint_thing, self.data.columns.names) return ",".join(stringified) - def _add_legend_handle(self, handle, label, index=None): - if label is not None: - if self.mark_right and index is not None: - if self.on_right(index): - label = label + " (right)" - self.legend_handles.append(handle) - self.legend_labels.append(label) + def _mark_right_label(self, label: str, index: int) -> str: + """ + Append ``(right)`` to the label of a line if it's plotted on the right axis. + + Note that ``(right)`` is only appended when ``subplots=False``. + """ + if not self.subplots and self.mark_right and self.on_right(index): + label += " (right)" + return label + + def _append_legend_handles_labels(self, handle: Artist, label: str) -> None: + """ + Append current handle and label to ``legend_handles`` and ``legend_labels``. + + These will be used to make the legend. + """ + self.legend_handles.append(handle) + self.legend_labels.append(label) def _make_legend(self): ax, leg, handle = self._get_ax_legend_handle(self.axes[0]) @@ -1078,7 +1089,7 @@ def _make_plot(self): cbar.ax.set_yticklabels(self.data[c].cat.categories) if label is not None: - self._add_legend_handle(scatter, label) + self._append_legend_handles_labels(scatter, label) else: self.legend = False @@ -1170,6 +1181,7 @@ def _make_plot(self): kwds = dict(kwds, **errors) label = pprint_thing(label) # .encode('utf-8') + label = self._mark_right_label(label, index=i) kwds["label"] = label newlines = plotf( @@ -1182,7 +1194,7 @@ def _make_plot(self): is_errorbar=is_errorbar, **kwds, ) - self._add_legend_handle(newlines[0], label, index=i) + self._append_legend_handles_labels(newlines[0], label) if self._is_ts_plot(): @@ -1458,6 +1470,7 @@ def _make_plot(self): kwds = dict(kwds, **errors) label = pprint_thing(label) + label = self._mark_right_label(label, index=i) if (("yerr" in kwds) or ("xerr" in kwds)) and (kwds.get("ecolor") is None): kwds["ecolor"] = mpl.rcParams["xtick.color"] @@ -1508,7 +1521,7 @@ def _make_plot(self): log=self.log, **kwds, ) - self._add_legend_handle(rect, label, index=i) + self._append_legend_handles_labels(rect, label) def _post_plot_logic(self, ax: Axes, data): if self.use_index: @@ -1620,4 +1633,4 @@ def blank_labeler(label, value): # leglabels is used for legend labels leglabels = labels if labels is not None else idx for p, l in zip(patches, leglabels): - self._add_legend_handle(p, l) + self._append_legend_handles_labels(p, l) diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index 3de467c77d289..a02d9a2b9dc8d 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -89,6 +89,7 @@ def _make_plot(self): kwds = self.kwds.copy() label = pprint_thing(label) + label = self._mark_right_label(label, index=i) kwds["label"] = label style, kwds = self._apply_style_colors(colors, kwds, i, label) @@ -105,7 +106,7 @@ def _make_plot(self): kwds["weights"] = weights[:, i] artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds) - self._add_legend_handle(artists[0], label, index=i) + self._append_legend_handles_labels(artists[0], label) def _make_plot_keywords(self, kwds, y): """merge BoxPlot/KdePlot properties to passed kwds"""
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them

This PR factors out adding ` (right)` to labels which are meant to go on the right y-axis. This does not change anything user-facing, and is simply a precursor to fixing #40044 and #39522. The change: on `master`, only `leg.get_texts()` shows `b (right)` while `ax.right_ax.get_legend_handles_labels()` shows `b`; the former is what is used for constructing the user-facing legend. The reason this is needed: I believe that to address #40044 and #39522 we need to use `.get_legend_handles_labels()` for both handles and labels, while currently `.get_legend_handles_labels` is used for handles and `leg.get_texts()` is used for labels. Putting a breakpoint after https://github.com/pandas-dev/pandas/blob/ab687aec38668cdb0139ca02d6e37e8c8386ae7d/pandas/plotting/_matplotlib/core.py#L591 gives, on this branch:

```python
(Pdb) ax.right_ax.get_legend_handles_labels()
([<matplotlib.lines.Line2D object at 0x7fc94adf1dc0>], ['b (right)'])
```

and on master:

```python
(Pdb) ax.right_ax.get_legend_handles_labels()
([<matplotlib.lines.Line2D object at 0x7ff2892abbe0>], ['b'])
```
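And the user-facing behavior this refactor preserves, for context (requires matplotlib):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# With mark_right (the default) and subplots=False, the secondary-axis
# series is shown in the legend as "b (right)"; the label is now built by
# the factored-out _mark_right_label instead of inside _add_legend_handle.
ax = df.plot(secondary_y=["b"])
```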
https://api.github.com/repos/pandas-dev/pandas/pulls/40078
2021-02-26T12:44:46Z
2021-03-27T07:42:45Z
2021-03-27T07:42:45Z
2021-03-27T11:11:01Z