title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
TST/CI: xfail test_round_sanity for 32 bit
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index b6559385e1597..0dd3a88670ece 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -14,6 +14,7 @@ iNaT, ) from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas.compat import IS64 from pandas.errors import OutOfBoundsTimedelta import pandas as pd @@ -690,6 +691,7 @@ def test_round_implementation_bounds(self): with pytest.raises(OverflowError, match=msg): Timedelta.max.ceil("s") + @pytest.mark.xfail(not IS64, reason="Failing on 32 bit build", strict=False) @given(val=st.integers(min_value=iNaT + 1, max_value=lib.i8max)) @pytest.mark.parametrize( "method", [Timedelta.round, Timedelta.floor, Timedelta.ceil] diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py index 2146e32a437a9..cc11037660ad2 100644 --- a/pandas/tests/scalar/timestamp/test_unary_ops.py +++ b/pandas/tests/scalar/timestamp/test_unary_ops.py @@ -21,6 +21,7 @@ ) from pandas._libs.tslibs.dtypes import NpyDatetimeUnit from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG +from pandas.compat import IS64 import pandas.util._test_decorators as td import pandas._testing as tm @@ -297,6 +298,7 @@ def test_round_implementation_bounds(self): with pytest.raises(OverflowError, match=msg): Timestamp.max.ceil("s") + @pytest.mark.xfail(not IS64, reason="Failing on 32 bit build", strict=False) @given(val=st.integers(iNaT + 1, lib.i8max)) @pytest.mark.parametrize( "method", [Timestamp.round, Timestamp.floor, Timestamp.ceil]
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). xfailing just to get the CI to green. Might be addressable in the future cc @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/47803
2022-07-20T16:32:48Z
2022-07-22T19:38:10Z
2022-07-22T19:38:10Z
2022-07-23T11:06:04Z
TST: Test series construct of dtype timedelta64 #35465
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 4e4ee4fd12d5f..de9a682acdfd6 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1879,6 +1879,28 @@ def test_constructor_bool_dtype_missing_values(self): expected = Series(True, index=[0], dtype="bool") tm.assert_series_equal(result, expected) + def test_constructor_dtype_timedelta_alternative_construct(self): + # GH#35465 + result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]") + expected = Series(pd.to_timedelta([1000000, 200000, 3000000], unit="ns")) + tm.assert_series_equal(result, expected) + + def test_constructor_dtype_timedelta_ns_s(self): + # GH#35465 + result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]") + expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]") + tm.assert_series_equal(result, expected) + + def test_constructor_dtype_timedelta_ns_s_astype_int64(self): + # GH#35465 + result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]").astype( + "int64" + ) + expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]").astype( + "int64" + ) + tm.assert_series_equal(result, expected) + @pytest.mark.filterwarnings( "ignore:elementwise comparison failed:DeprecationWarning" )
- [x] closes #35465 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/47801
2022-07-20T15:25:24Z
2022-07-27T16:58:17Z
2022-07-27T16:58:17Z
2022-07-27T16:58:23Z
DEPS: drop np19 support
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index 2e1ffe6d0d17e..1a06ea31ccbb8 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -79,9 +79,9 @@ jobs: run: | case "${{matrix.python-version}}" in 3.8) - pip install numpy==1.19.5 ;; + pip install numpy==1.20.3 ;; 3.9) - pip install numpy==1.19.5 ;; + pip install numpy==1.20.3 ;; 3.10) pip install numpy==1.21.2 ;; esac diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml index 3ab27830060b2..89ebabbbc7469 100644 --- a/ci/deps/actions-38-minimum_versions.yaml +++ b/ci/deps/actions-38-minimum_versions.yaml @@ -17,7 +17,7 @@ dependencies: # required dependencies - python-dateutil=2.8.1 - - numpy=1.19.5 + - numpy=1.20.3 - pytz=2020.1 # optional dependencies diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 5d9bfd97030b5..605a69b26a646 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -235,7 +235,7 @@ Dependencies ================================================================ ========================== Package Minimum supported version ================================================================ ========================== -`NumPy <https://numpy.org>`__ 1.19.5 +`NumPy <https://numpy.org>`__ 1.20.3 `python-dateutil <https://dateutil.readthedocs.io/en/stable/>`__ 2.8.1 `pytz <https://pypi.org/project/pytz/>`__ 2020.1 ================================================================ ========================== diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 4674f28744f7e..1572fdebe2643 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -416,7 +416,7 @@ If installed, we now require: +-----------------+-----------------+----------+---------+ | Package | Minimum Version | Required | Changed | +=================+=================+==========+=========+ -| numpy | 
1.19.5 | X | X | +| numpy | 1.20.3 | X | X | +-----------------+-----------------+----------+---------+ | mypy (dev) | 0.971 | | X | +-----------------+-----------------+----------+---------+ diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 147134afd70c3..91d05ea66402b 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -17,7 +17,7 @@ from pandas._typing import F from pandas.compat.numpy import ( is_numpy_dev, - np_version_under1p20, + np_version_under1p21, ) from pandas.compat.pyarrow import ( pa_version_under1p01, @@ -152,7 +152,7 @@ def get_lzma_file() -> type[lzma.LZMAFile]: __all__ = [ "is_numpy_dev", - "np_version_under1p20", + "np_version_under1p21", "pa_version_under1p01", "pa_version_under2p0", "pa_version_under3p0", diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index 803f495b311b9..60ec74553a207 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -6,12 +6,11 @@ # numpy versioning _np_version = np.__version__ _nlv = Version(_np_version) -np_version_under1p20 = _nlv < Version("1.20") +np_version_under1p21 = _nlv < Version("1.21") np_version_under1p22 = _nlv < Version("1.22") np_version_gte1p22 = _nlv >= Version("1.22") is_numpy_dev = _nlv.dev is not None -_min_numpy_ver = "1.19.5" -is_numpy_min = _nlv == Version(_min_numpy_ver) +_min_numpy_ver = "1.20.3" if is_numpy_dev or not np_version_under1p22: np_percentile_argname = "method" diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py index 84160344437b5..17622e78d1b12 100644 --- a/pandas/core/array_algos/putmask.py +++ b/pandas/core/array_algos/putmask.py @@ -12,7 +12,7 @@ ArrayLike, npt, ) -from pandas.compat import np_version_under1p20 +from pandas.compat import np_version_under1p21 from pandas.core.dtypes.cast import infer_dtype_from from pandas.core.dtypes.common import is_list_like @@ -66,7 +66,7 @@ def putmask_without_repeat( mask : np.ndarray[bool] new : Any 
""" - if np_version_under1p20: + if np_version_under1p21: new = setitem_datetimelike_compat(values, mask.sum(), new) if getattr(new, "ndim", 0) >= 1: @@ -78,7 +78,6 @@ def putmask_without_repeat( shape = np.shape(new) # np.shape compat for if setitem_datetimelike_compat # changed arraylike to list e.g. test_where_dt64_2d - if nlocs == shape[-1]: # GH#30567 # If length of ``new`` is less than the length of ``values``, diff --git a/pandas/tests/arrays/floating/test_construction.py b/pandas/tests/arrays/floating/test_construction.py index ebce80cba237d..2dcd54f443029 100644 --- a/pandas/tests/arrays/floating/test_construction.py +++ b/pandas/tests/arrays/floating/test_construction.py @@ -1,8 +1,6 @@ import numpy as np import pytest -from pandas.compat import np_version_under1p20 - import pandas as pd import pandas._testing as tm from pandas.core.arrays import FloatingArray @@ -54,11 +52,6 @@ def test_floating_array_disallows_float16(): def test_floating_array_disallows_Float16_dtype(request): # GH#44715 - if np_version_under1p20: - # https://github.com/numpy/numpy/issues/20512 - mark = pytest.mark.xfail(reason="numpy does not raise on np.dtype('Float16')") - request.node.add_marker(mark) - with pytest.raises(TypeError, match="data type 'Float16' not understood"): pd.array([1.0, 2.0], dtype="Float16") diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py index 9593152735ed6..1a32c995f4afa 100644 --- a/pandas/tests/arrays/sparse/test_arithmetics.py +++ b/pandas/tests/arrays/sparse/test_arithmetics.py @@ -3,11 +3,8 @@ import numpy as np import pytest -from pandas.compat import np_version_under1p20 - import pandas as pd import pandas._testing as tm -from pandas.core import ops from pandas.core.arrays.sparse import ( SparseArray, SparseDtype, @@ -121,19 +118,7 @@ def test_float_scalar( self, kind, mix, all_arithmetic_functions, fill_value, scalar, request ): op = all_arithmetic_functions - - if np_version_under1p20: - 
if op in [operator.floordiv, ops.rfloordiv]: - if op is operator.floordiv and scalar != 0: - pass - elif op is ops.rfloordiv and scalar == 0: - pass - else: - mark = pytest.mark.xfail(raises=AssertionError, reason="GH#38172") - request.node.add_marker(mark) - values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) - a = SparseArray(values, kind=kind, fill_value=fill_value) self._check_numeric_ops(a, scalar, values, scalar, mix, op) @@ -171,14 +156,6 @@ def test_float_same_index_with_nans( ): # when sp_index are the same op = all_arithmetic_functions - - if ( - np_version_under1p20 - and op is ops.rfloordiv - and not (mix and kind == "block") - ): - mark = pytest.mark.xfail(raises=AssertionError, reason="GH#38172") - request.node.add_marker(mark) values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) @@ -353,13 +330,7 @@ def test_bool_array_logical(self, kind, fill_value): def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions, request): op = all_arithmetic_functions - - if np_version_under1p20 and op in [operator.floordiv, ops.rfloordiv] and mix: - mark = pytest.mark.xfail(raises=AssertionError, reason="GH#38172") - request.node.add_marker(mark) - rdtype = "int64" - values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype) diff --git a/pandas/tests/extension/base/casting.py b/pandas/tests/extension/base/casting.py index 4987751f31dac..0eb8123e6bdb8 100644 --- a/pandas/tests/extension/base/casting.py +++ b/pandas/tests/extension/base/casting.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from pandas.compat import np_version_under1p20 +from pandas.compat import np_version_under1p21 import pandas.util._test_decorators as td import pandas as pd @@ -32,8 +32,7 @@ def test_astype_object_frame(self, all_data): assert result._mgr.arrays[0].dtype == np.dtype(object) # earlier numpy raises 
TypeError on e.g. np.dtype(np.int64) == "Int64" - # instead of returning False - if not np_version_under1p20: + if not np_version_under1p21: # check that we can compare the dtypes comp = result.dtypes == df.dtypes assert not comp.any() diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index d13f6dab1cc9b..60eef0d8097e4 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -17,7 +17,6 @@ import numpy as np import pytest -from pandas.compat import np_version_under1p20 from pandas.errors import PerformanceWarning from pandas.core.dtypes.common import is_object_dtype @@ -415,12 +414,9 @@ def test_astype_object_frame(self, all_data): result = df.astype(object) assert is_object_dtype(result._mgr.arrays[0].dtype) - # earlier numpy raises TypeError on e.g. np.dtype(np.int64) == "Int64" - # instead of returning False - if not np_version_under1p20: - # check that we can compare the dtypes - comp = result.dtypes == df.dtypes - assert not comp.any() + # check that we can compare the dtypes + comp = result.dtypes == df.dtypes + assert not comp.any() def test_astype_str(self, data): with tm.assert_produces_warning(FutureWarning, match="astype from Sparse"): diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index 5b9883f3866e7..aa55a7c91d0e6 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -4,8 +4,6 @@ import numpy as np import pytest -from pandas.compat import np_version_under1p20 - from pandas.core.dtypes.common import is_scalar import pandas as pd @@ -1006,7 +1004,6 @@ def _check_where_equivalences(df, mask, other, expected): tm.assert_frame_equal(df, expected) -@pytest.mark.xfail(np_version_under1p20, reason="failed on Numpy 1.19.5") def test_where_dt64_2d(): dti = date_range("2016-01-01", periods=6) dta = dti._data.reshape(3, 2) diff --git a/pandas/tests/frame/methods/test_quantile.py 
b/pandas/tests/frame/methods/test_quantile.py index 798212f957e3c..16b82727fd069 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -1,7 +1,10 @@ import numpy as np import pytest -from pandas.compat.numpy import np_percentile_argname +from pandas.compat.numpy import ( + np_percentile_argname, + np_version_under1p21, +) import pandas as pd from pandas import ( @@ -655,7 +658,7 @@ def compute_quantile(self, obj, qs): result = obj.quantile(qs, numeric_only=False) return result - def test_quantile_ea(self, obj, index): + def test_quantile_ea(self, request, obj, index): # result should be invariant to shuffling indexer = np.arange(len(index), dtype=np.intp) @@ -665,6 +668,11 @@ def test_quantile_ea(self, obj, index): qs = [0.5, 0, 1] result = self.compute_quantile(obj, qs) + if np_version_under1p21 and index.dtype == "timedelta64[ns]": + msg = "failed on Numpy 1.20.3; TypeError: data type 'Int64' not understood" + mark = pytest.mark.xfail(reason=msg, raises=TypeError) + request.node.add_marker(mark) + exp_dtype = index.dtype if index.dtype == "Int64": # match non-nullable casting behavior @@ -700,7 +708,7 @@ def test_quantile_ea_with_na(self, obj, index): # TODO(GH#39763): filtering can be removed after GH#39763 is fixed @pytest.mark.filterwarnings("ignore:Using .astype to convert:FutureWarning") - def test_quantile_ea_all_na(self, obj, index): + def test_quantile_ea_all_na(self, request, obj, index): obj.iloc[:] = index._na_value # TODO(ArrayManager): this casting should be unnecessary after GH#39763 is fixed @@ -715,6 +723,11 @@ def test_quantile_ea_all_na(self, obj, index): qs = [0.5, 0, 1] result = self.compute_quantile(obj, qs) + if np_version_under1p21 and index.dtype == "timedelta64[ns]": + msg = "failed on Numpy 1.20.3; TypeError: data type 'Int64' not understood" + mark = pytest.mark.xfail(reason=msg, raises=TypeError) + request.node.add_marker(mark) + expected = index.take([-1, -1, -1], 
allow_fill=True, fill_value=index._na_value) expected = Series(expected, index=qs, name="A") if expected.dtype == "Int64": @@ -722,7 +735,7 @@ def test_quantile_ea_all_na(self, obj, index): expected = type(obj)(expected) tm.assert_equal(result, expected) - def test_quantile_ea_scalar(self, obj, index): + def test_quantile_ea_scalar(self, request, obj, index): # scalar qs # result should be invariant to shuffling @@ -733,6 +746,11 @@ def test_quantile_ea_scalar(self, obj, index): qs = 0.5 result = self.compute_quantile(obj, qs) + if np_version_under1p21 and index.dtype == "timedelta64[ns]": + msg = "failed on Numpy 1.20.3; TypeError: data type 'Int64' not understood" + mark = pytest.mark.xfail(reason=msg, raises=TypeError) + request.node.add_marker(mark) + exp_dtype = index.dtype if index.dtype == "Int64": exp_dtype = "Float64" diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index f7504e9173bf5..555d24e747f44 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -6,8 +6,6 @@ import numpy as np import pytest -from pandas.compat import np_version_under1p20 - import pandas as pd from pandas import ( DataFrame, @@ -1316,12 +1314,6 @@ def test_replace_commutative(self, df, to_replace, exp): ) def test_replace_replacer_dtype(self, request, replacer): # GH26632 - if np.isscalar(replacer) and replacer.dtype.itemsize < 8: - request.node.add_marker( - pytest.mark.xfail( - np_version_under1p20, reason="np.putmask doesn't coerce dtype" - ) - ) df = DataFrame(["a"]) result = df.replace({"a": replacer, "b": replacer}) expected = DataFrame([replacer]) diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 2a116c992231b..fdf741040407f 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -10,7 +10,6 @@ import numpy as np import pytest -from pandas.compat.numpy import is_numpy_min from pandas.errors 
import IndexingError import pandas.util._test_decorators as td @@ -1199,7 +1198,6 @@ def test_iloc_getitem_int_single_ea_block_view(self): arr[2] = arr[-1] assert ser[0] == arr[-1] - @pytest.mark.xfail(is_numpy_min, reason="Column A gets coerced to integer type") def test_iloc_setitem_multicolumn_to_datetime(self, using_array_manager): # GH#20511 diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index de9a682acdfd6..fc17f0b942d09 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -13,7 +13,6 @@ iNaT, lib, ) -from pandas.compat.numpy import np_version_under1p20 import pandas.util._test_decorators as td from pandas.core.dtypes.common import ( @@ -1904,9 +1903,6 @@ def test_constructor_dtype_timedelta_ns_s_astype_int64(self): @pytest.mark.filterwarnings( "ignore:elementwise comparison failed:DeprecationWarning" ) - @pytest.mark.xfail( - np_version_under1p20, reason="np.array([td64nat, float, float]) raises" - ) @pytest.mark.parametrize("func", [Series, DataFrame, Index, pd.array]) def test_constructor_mismatched_null_nullable_dtype( self, func, any_numeric_ea_dtype diff --git a/setup.cfg b/setup.cfg index b191930acf4c5..8f7cfc288ecdb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -31,9 +31,7 @@ project_urls = [options] packages = find: install_requires = - numpy>=1.18.5; platform_machine!='aarch64' and platform_machine!='arm64' and python_version<'3.10' - numpy>=1.19.2; platform_machine=='aarch64' and python_version<'3.10' - numpy>=1.20.0; platform_machine=='arm64' and python_version<'3.10' + numpy>=1.20.3; python_version<'3.10' numpy>=1.21.0; python_version>='3.10' python-dateutil>=2.8.1 pytz>=2020.1
null
https://api.github.com/repos/pandas-dev/pandas/pulls/47796
2022-07-20T05:59:17Z
2022-08-01T23:31:34Z
2022-08-01T23:31:34Z
2022-11-18T02:18:01Z
Backport PR #47792 on branch 1.4.x (DOC: Fix versionadded for callable in on_bad_lines)
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 3e792786b863a..d3a8b01e1da7b 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -371,6 +371,8 @@ .. versionadded:: 1.3.0 + .. versionadded:: 1.4.0 + - callable, function with signature ``(bad_line: list[str]) -> list[str] | None`` that will process a single bad line. ``bad_line`` is a list of strings split by the ``sep``. @@ -379,8 +381,6 @@ expected, a ``ParserWarning`` will be emitted while dropping extra elements. Only supported when ``engine="python"`` - .. versionadded:: 1.4.0 - delim_whitespace : bool, default False Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
Backport PR #47792
https://api.github.com/repos/pandas-dev/pandas/pulls/47795
2022-07-20T00:38:36Z
2022-07-20T13:36:35Z
2022-07-20T13:36:35Z
2022-08-01T18:08:46Z
BUG: fixed OutOfBoundsDatetime exception when errors=coerce #45319
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index a0d33cb513722..3a9c24251b12f 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -897,6 +897,7 @@ Datetimelike - Bug in :meth:`DatetimeIndex.resolution` incorrectly returning "day" instead of "nanosecond" for nanosecond-resolution indexes (:issue:`46903`) - Bug in :class:`Timestamp` with an integer or float value and ``unit="Y"`` or ``unit="M"`` giving slightly-wrong results (:issue:`47266`) - Bug in :class:`.DatetimeArray` construction when passed another :class:`.DatetimeArray` and ``freq=None`` incorrectly inferring the freq from the given array (:issue:`47296`) +- Bug in :func:`to_datetime` where ``OutOfBoundsDatetime`` would be thrown even if ``errors=coerce`` if there were more than 50 rows (:issue:`45319`) - Bug when adding a :class:`DateOffset` to a :class:`Series` would not add the ``nanoseconds`` field (:issue:`47856`) - diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 7ec4bc1016a9d..4739fb49e0d07 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -227,7 +227,11 @@ def _maybe_cache( unique_dates = unique(arg) if len(unique_dates) < len(arg): cache_dates = convert_listlike(unique_dates, format) - cache_array = Series(cache_dates, index=unique_dates) + # GH#45319 + try: + cache_array = Series(cache_dates, index=unique_dates) + except OutOfBoundsDatetime: + return cache_array # GH#39882 and GH#35888 in case of None and NaT we get duplicates if not cache_array.index.is_unique: cache_array = cache_array[~cache_array.index.duplicated()] diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index b128838318ac0..aa5c3324fe0f0 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -2777,3 +2777,34 @@ def test_to_datetime_monotonic_increasing_index(cache): result = to_datetime(times.iloc[:, 0], 
cache=cache) expected = times.iloc[:, 0] tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "series_length", + [40, start_caching_at, (start_caching_at + 1), (start_caching_at + 5)], +) +def test_to_datetime_cache_coerce_50_lines_outofbounds(series_length): + # GH#45319 + s = Series( + [datetime.fromisoformat("1446-04-12 00:00:00+00:00")] + + ([datetime.fromisoformat("1991-10-20 00:00:00+00:00")] * series_length) + ) + result1 = to_datetime(s, errors="coerce", utc=True) + + expected1 = Series( + [NaT] + ([Timestamp("1991-10-20 00:00:00+00:00")] * series_length) + ) + + tm.assert_series_equal(result1, expected1) + + result2 = to_datetime(s, errors="ignore", utc=True) + + expected2 = Series( + [datetime.fromisoformat("1446-04-12 00:00:00+00:00")] + + ([datetime.fromisoformat("1991-10-20 00:00:00+00:00")] * series_length) + ) + + tm.assert_series_equal(result2, expected2) + + with pytest.raises(OutOfBoundsDatetime, match="Out of bounds nanosecond timestamp"): + to_datetime(s, errors="raise", utc=True)
- [ ] closes #45319 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47794
2022-07-19T23:56:13Z
2022-08-15T16:49:43Z
2022-08-15T16:49:43Z
2022-08-15T16:49:51Z
TST: test for inconsistency due to dtype=string #46512
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 6a17a56a47cbc..5b4d586e69c1b 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -611,3 +611,11 @@ def test_setitem_scalar_with_mask_validation(dtype): msg = "Scalar must be NA or str" with pytest.raises(ValueError, match=msg): ser[mask] = 1 + + +def test_consitency_inplace(): + expected = pd.DataFrame({"M": [""]}, dtype="string") + df = pd.DataFrame({"M": [""]}, dtype="string") + df.where(df != "", np.nan, inplace=True) + expected = expected.where(expected != "", np.nan) + tm.assert_frame_equal(expected, df)
- [x] closes #46512 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] Added an entry in the latest `pandas/core/generic.py` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47793
2022-07-19T23:49:48Z
2022-10-04T18:30:57Z
null
2022-10-04T18:30:58Z
DOC: Fix versionadded for callable in on_bad_lines
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 35227dcf6a82d..4858d56d71c42 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -381,6 +381,8 @@ .. versionadded:: 1.3.0 + .. versionadded:: 1.4.0 + - callable, function with signature ``(bad_line: list[str]) -> list[str] | None`` that will process a single bad line. ``bad_line`` is a list of strings split by the ``sep``. @@ -389,8 +391,6 @@ expected, a ``ParserWarning`` will be emitted while dropping extra elements. Only supported when ``engine="python"`` - .. versionadded:: 1.4.0 - delim_whitespace : bool, default False Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
- [x] closes #47788 (Replace xxxx with the Github issue number) - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). would backport this
https://api.github.com/repos/pandas-dev/pandas/pulls/47792
2022-07-19T18:34:43Z
2022-07-20T00:35:30Z
2022-07-20T00:35:29Z
2022-07-20T00:39:09Z
Backport PR #47763 on branch 1.4.x (BUG: fix regression in Series[string] setitem setting a scalar with a mask)
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst index 6ee140f59e096..6bd7378e05404 100644 --- a/doc/source/whatsnew/v1.4.4.rst +++ b/doc/source/whatsnew/v1.4.4.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`) +- Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 919b882f22ecb..655ccb3a474ae 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -17,6 +17,7 @@ from pandas._typing import ( Dtype, Scalar, + npt, type_t, ) from pandas.compat import pa_version_under1p01 @@ -413,6 +414,12 @@ def __setitem__(self, key, value): super().__setitem__(key, value) + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: + # the super() method NDArrayBackedExtensionArray._putmask uses + # np.putmask which doesn't properly handle None/pd.NA, so using the + # base class implementation that uses __setitem__ + ExtensionArray._putmask(self, mask, value) + def astype(self, dtype, copy: bool = True): dtype = pandas_dtype(dtype) diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index b5b4007798135..24bb9df296a03 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -553,3 +553,23 @@ def test_isin(dtype, request, fixed_now_ts): result = s.isin(["a", fixed_now_ts]) expected = pd.Series([True, False, False]) tm.assert_series_equal(result, expected) + + +def test_setitem_scalar_with_mask_validation(dtype): + # https://github.com/pandas-dev/pandas/issues/47628 + # setting None with a boolean mask (through _putmaks) 
should still result + # in pd.NA values in the underlying array + ser = pd.Series(["a", "b", "c"], dtype=dtype) + mask = np.array([False, True, False]) + + ser[mask] = None + assert ser.array[1] is pd.NA + + # for other non-string we should also raise an error + ser = pd.Series(["a", "b", "c"], dtype=dtype) + if type(ser.array) is pd.arrays.StringArray: + msg = "Cannot set non-string value" + else: + msg = "Scalar must be NA or str" + with pytest.raises(ValueError, match=msg): + ser[mask] = 1
Backport PR #47763: BUG: fix regression in Series[string] setitem setting a scalar with a mask
https://api.github.com/repos/pandas-dev/pandas/pulls/47784
2022-07-18T22:29:38Z
2022-07-19T14:10:03Z
2022-07-19T14:10:03Z
2022-07-19T14:10:03Z
BUG: PeriodIndex + TimedeltaArray-with-NaT
diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd index 6e41a55f30929..352680143113d 100644 --- a/pandas/_libs/tslibs/dtypes.pxd +++ b/pandas/_libs/tslibs/dtypes.pxd @@ -3,7 +3,7 @@ from numpy cimport int64_t from pandas._libs.tslibs.np_datetime cimport NPY_DATETIMEUNIT -cdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit) +cpdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit) cdef NPY_DATETIMEUNIT abbrev_to_npy_unit(str abbrev) cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) nogil cpdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=*) except? -1 diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi index 041c51533d8da..82f62e16c4205 100644 --- a/pandas/_libs/tslibs/dtypes.pyi +++ b/pandas/_libs/tslibs/dtypes.pyi @@ -8,6 +8,7 @@ _period_code_map: dict[str, int] def periods_per_day(reso: int) -> int: ... def periods_per_second(reso: int) -> int: ... def is_supported_unit(reso: int) -> bool: ... +def npy_unit_to_abbrev(reso: int) -> str: ... 
class PeriodDtypeBase: _dtype_code: int # PeriodDtypeCode diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx index a0a7ab90ebb30..c09ac2a686d5c 100644 --- a/pandas/_libs/tslibs/dtypes.pyx +++ b/pandas/_libs/tslibs/dtypes.pyx @@ -289,7 +289,7 @@ def is_supported_unit(NPY_DATETIMEUNIT reso): ) -cdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit): +cpdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit): if unit == NPY_DATETIMEUNIT.NPY_FR_ns or unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC: # generic -> default to nanoseconds return "ns" diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd index 420d83909a78d..bfccedba9431e 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -101,6 +101,7 @@ cpdef cnp.ndarray astype_overflowsafe( cnp.ndarray values, # ndarray[datetime64[anyunit]] cnp.dtype dtype, # ndarray[datetime64[anyunit]] bint copy=*, + bint round_ok=*, ) cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1 diff --git a/pandas/_libs/tslibs/np_datetime.pyi b/pandas/_libs/tslibs/np_datetime.pyi index 757165fbad268..d80d26375412b 100644 --- a/pandas/_libs/tslibs/np_datetime.pyi +++ b/pandas/_libs/tslibs/np_datetime.pyi @@ -9,7 +9,10 @@ class OutOfBoundsTimedelta(ValueError): ... def py_get_unit_from_dtype(dtype: np.dtype): ... def py_td64_to_tdstruct(td64: int, unit: int) -> dict: ... def astype_overflowsafe( - arr: np.ndarray, dtype: np.dtype, copy: bool = ... + arr: np.ndarray, + dtype: np.dtype, + copy: bool = ..., + round_ok: bool = ..., ) -> np.ndarray: ... def is_unitless(dtype: np.dtype) -> bool: ... 
def compare_mismatched_resolutions( diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 494eb5da7e107..5f8bad8398076 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -282,6 +282,7 @@ cpdef ndarray astype_overflowsafe( ndarray values, cnp.dtype dtype, bint copy=True, + bint round_ok=True, ): """ Convert an ndarray with datetime64[X] to datetime64[Y] @@ -314,10 +315,6 @@ cpdef ndarray astype_overflowsafe( "datetime64/timedelta64 values and dtype must have a unit specified" ) - if (<object>values).dtype.byteorder == ">": - # GH#29684 we incorrectly get OutOfBoundsDatetime if we dont swap - values = values.astype(values.dtype.newbyteorder("<")) - if from_unit == to_unit: # Check this before allocating result for perf, might save some memory if copy: @@ -325,9 +322,17 @@ cpdef ndarray astype_overflowsafe( return values elif from_unit > to_unit: - # e.g. ns -> us, so there is no risk of overflow, so we can use - # numpy's astype safely. Note there _is_ risk of truncation. - return values.astype(dtype) + if round_ok: + # e.g. ns -> us, so there is no risk of overflow, so we can use + # numpy's astype safely. Note there _is_ risk of truncation. 
+ return values.astype(dtype) + else: + iresult2 = astype_round_check(values.view("i8"), from_unit, to_unit) + return iresult2.view(dtype) + + if (<object>values).dtype.byteorder == ">": + # GH#29684 we incorrectly get OutOfBoundsDatetime if we dont swap + values = values.astype(values.dtype.newbyteorder("<")) cdef: ndarray i8values = values.view("i8") @@ -356,10 +361,11 @@ cpdef ndarray astype_overflowsafe( check_dts_bounds(&dts, to_unit) except OutOfBoundsDatetime as err: if is_td: - tdval = np.timedelta64(value).view(values.dtype) + from_abbrev = np.datetime_data(values.dtype)[0] + np_val = np.timedelta64(value, from_abbrev) msg = ( - "Cannot convert {tdval} to {dtype} without overflow" - .format(tdval=str(tdval), dtype=str(dtype)) + "Cannot convert {np_val} to {dtype} without overflow" + .format(np_val=str(np_val), dtype=str(dtype)) ) raise OutOfBoundsTimedelta(msg) from err else: @@ -453,6 +459,52 @@ cdef int op_to_op_code(op): return Py_GT +cdef ndarray astype_round_check( + ndarray i8values, + NPY_DATETIMEUNIT from_unit, + NPY_DATETIMEUNIT to_unit +): + # cases with from_unit > to_unit, e.g. ns->us, raise if the conversion + # involves truncation, e.g. 1500ns->1us + cdef: + Py_ssize_t i, N = i8values.size + + # equiv: iresult = np.empty((<object>i8values).shape, dtype="i8") + ndarray iresult = cnp.PyArray_EMPTY( + i8values.ndim, i8values.shape, cnp.NPY_INT64, 0 + ) + cnp.broadcast mi = cnp.PyArray_MultiIterNew2(iresult, i8values) + + # Note the arguments to_unit, from unit are swapped vs how they + # are passed when going to a higher-frequency reso. 
+ int64_t mult = get_conversion_factor(to_unit, from_unit) + int64_t value, mod + + for i in range(N): + # Analogous to: item = i8values[i] + value = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 1))[0] + + if value == NPY_DATETIME_NAT: + new_value = NPY_DATETIME_NAT + else: + new_value, mod = divmod(value, mult) + if mod != 0: + # TODO: avoid runtime import + from pandas._libs.tslibs.dtypes import npy_unit_to_abbrev + from_abbrev = npy_unit_to_abbrev(from_unit) + to_abbrev = npy_unit_to_abbrev(to_unit) + raise ValueError( + f"Cannot losslessly cast '{value} {from_abbrev}' to {to_abbrev}" + ) + + # Analogous to: iresult[i] = new_value + (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = new_value + + cnp.PyArray_MultiIter_NEXT(mi) + + return iresult + + @cython.overflowcheck(True) cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1: """ diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi index ed6ddf7b02be1..c885b869f983a 100644 --- a/pandas/_libs/tslibs/offsets.pyi +++ b/pandas/_libs/tslibs/offsets.pyi @@ -111,6 +111,8 @@ def to_offset(freq: timedelta | str) -> BaseOffset: ... class Tick(SingleConstructorOffset): _reso: int + _prefix: str + _td64_unit: str def __init__(self, n: int = ..., normalize: bool = ...) -> None: ... @property def delta(self) -> Timedelta: ... 
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 81b59db6f0e18..5f4f6b998a60a 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -796,6 +796,7 @@ cdef class SingleConstructorOffset(BaseOffset): cdef class Tick(SingleConstructorOffset): _adjust_dst = False _prefix = "undefined" + _td64_unit = "undefined" _attributes = tuple(["n", "normalize"]) def __init__(self, n=1, normalize=False): @@ -968,6 +969,7 @@ cdef class Tick(SingleConstructorOffset): cdef class Day(Tick): _nanos_inc = 24 * 3600 * 1_000_000_000 _prefix = "D" + _td64_unit = "D" _period_dtype_code = PeriodDtypeCode.D _reso = NPY_DATETIMEUNIT.NPY_FR_D @@ -975,6 +977,7 @@ cdef class Day(Tick): cdef class Hour(Tick): _nanos_inc = 3600 * 1_000_000_000 _prefix = "H" + _td64_unit = "h" _period_dtype_code = PeriodDtypeCode.H _reso = NPY_DATETIMEUNIT.NPY_FR_h @@ -982,6 +985,7 @@ cdef class Hour(Tick): cdef class Minute(Tick): _nanos_inc = 60 * 1_000_000_000 _prefix = "T" + _td64_unit = "m" _period_dtype_code = PeriodDtypeCode.T _reso = NPY_DATETIMEUNIT.NPY_FR_m @@ -989,6 +993,7 @@ cdef class Minute(Tick): cdef class Second(Tick): _nanos_inc = 1_000_000_000 _prefix = "S" + _td64_unit = "s" _period_dtype_code = PeriodDtypeCode.S _reso = NPY_DATETIMEUNIT.NPY_FR_s @@ -996,6 +1001,7 @@ cdef class Second(Tick): cdef class Milli(Tick): _nanos_inc = 1_000_000 _prefix = "L" + _td64_unit = "ms" _period_dtype_code = PeriodDtypeCode.L _reso = NPY_DATETIMEUNIT.NPY_FR_ms @@ -1003,6 +1009,7 @@ cdef class Milli(Tick): cdef class Micro(Tick): _nanos_inc = 1000 _prefix = "U" + _td64_unit = "us" _period_dtype_code = PeriodDtypeCode.U _reso = NPY_DATETIMEUNIT.NPY_FR_us @@ -1010,6 +1017,7 @@ cdef class Micro(Tick): cdef class Nano(Tick): _nanos_inc = 1 _prefix = "N" + _td64_unit = "ns" _period_dtype_code = PeriodDtypeCode.N _reso = NPY_DATETIMEUNIT.NPY_FR_ns diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 6e6de8399cc38..2d676f94c6a64 
100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -72,10 +72,7 @@ ABCSeries, ABCTimedeltaArray, ) -from pandas.core.dtypes.missing import ( - isna, - notna, -) +from pandas.core.dtypes.missing import notna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl @@ -792,20 +789,30 @@ def _add_timedelta_arraylike( ------- result : ndarray[int64] """ - if not isinstance(self.freq, Tick): + freq = self.freq + if not isinstance(freq, Tick): # We cannot add timedelta-like to non-tick PeriodArray raise TypeError( f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}" ) - if not np.all(isna(other)): - delta = self._check_timedeltalike_freq_compat(other) - else: - # all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT - return self + np.timedelta64("NaT") + dtype = np.dtype(f"m8[{freq._td64_unit}]") + + try: + delta = astype_overflowsafe( + np.asarray(other), dtype=dtype, copy=False, round_ok=False + ) + except ValueError as err: + # TODO: not actually a great exception message in this case + raise raise_on_incompatible(self, other) from err + + b_mask = np.isnat(delta) - ordinals = self._addsub_int_array_or_scalar(delta, operator.add).asi8 - return type(self)(ordinals, dtype=self.dtype) + res_values = algos.checked_add_with_arr( + self.asi8, delta.view("i8"), arr_mask=self._isnan, b_mask=b_mask + ) + np.putmask(res_values, self._isnan | b_mask, iNaT) + return type(self)(res_values, freq=self.freq) def _check_timedeltalike_freq_compat(self, other): """ diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index 7adc407fd5de1..50f5ab8aee9dd 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -1243,6 +1243,21 @@ def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other): with pytest.raises(TypeError, match=msg): other - obj + # some but not *all* NaT + other = other.copy() + other[0] = 
np.timedelta64(0, "ns") + expected = PeriodIndex([pi[0]] + ["NaT"] * 8, freq="19D") + expected = tm.box_expected(expected, box_with_array) + + result = obj + other + tm.assert_equal(result, expected) + result = other + obj + tm.assert_equal(result, expected) + result = obj - other + tm.assert_equal(result, expected) + with pytest.raises(TypeError, match=msg): + other - obj + # --------------------------------------------------------------- # Unsorted diff --git a/pandas/tests/tslibs/test_np_datetime.py b/pandas/tests/tslibs/test_np_datetime.py index cc09f0fc77039..02edf1a093877 100644 --- a/pandas/tests/tslibs/test_np_datetime.py +++ b/pandas/tests/tslibs/test_np_datetime.py @@ -208,3 +208,15 @@ def test_astype_overflowsafe_td64(self): result = astype_overflowsafe(arr, dtype2) expected = arr.astype(dtype2) tm.assert_numpy_array_equal(result, expected) + + def test_astype_overflowsafe_disallow_rounding(self): + arr = np.array([-1500, 1500], dtype="M8[ns]") + dtype = np.dtype("M8[us]") + + msg = "Cannot losslessly cast '-1500 ns' to us" + with pytest.raises(ValueError, match=msg): + astype_overflowsafe(arr, dtype, round_ok=False) + + result = astype_overflowsafe(arr, dtype, round_ok=True) + expected = arr.astype(dtype) + tm.assert_numpy_array_equal(result, expected)
Implements the same round_ok behavior in astype_overflowsafe that we use in Timestamp/Timedelta _as_reso, i.e. this will lead to more code-sharing in follow-ups.
https://api.github.com/repos/pandas-dev/pandas/pulls/47783
2022-07-18T21:23:11Z
2022-07-20T00:10:24Z
2022-07-20T00:10:24Z
2022-07-20T16:43:19Z
Concatenating unordered ``CategoricalIndex`` overrides indices - GH24845
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index c1ae3cb1b16ea..baf13b2f62417 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -571,6 +571,26 @@ def map(self, mapper): def _concat(self, to_concat: list[Index], name: Hashable) -> Index: # if calling index is category, don't check dtype of others + + permutations = True + + dummy_cat1 = CategoricalIndex([], categories=to_concat[0].categories, + ordered=False + ) + for i in to_concat[1:]: + + dummy_cat2 = CategoricalIndex([], categories=i.categories, + ordered=False + ) + if not dummy_cat1.equals(dummy_cat2): + permutations = False + + cat_dummy = CategoricalIndex([], categories=to_concat[0].categories, + ordered=True + ) + if permutations: + to_concat.append(cat_dummy) + try: codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat]) except TypeError: diff --git a/pandas/tests/reshape/concat/test_categorical.py b/pandas/tests/reshape/concat/test_categorical.py index 5bafd2e8e8503..0aadf3d531a83 100644 --- a/pandas/tests/reshape/concat/test_categorical.py +++ b/pandas/tests/reshape/concat/test_categorical.py @@ -184,6 +184,28 @@ def test_concat_categorical_unchanged(self): ) tm.assert_equal(result, expected) + def test_categorical_concat_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/24845 + + c1 = CategoricalIndex(["a", "a"], categories=["a", "b"], + ordered=False + ) + c2 = CategoricalIndex(["b", "b"], categories=["b", "a"], + ordered=False + ) + c3 = CategoricalIndex(["a", "a", "b", "b"], + categories=["a", "b"], ordered=False + ) + + df1 = DataFrame({"A": [1, 2]}, index=c1) + df2 = DataFrame({"A": [3, 4]}, index=c2) + + result = pd.concat([df1, df2]) + + expected = DataFrame({"A": [1, 2, 3, 4]}, index=c3) + + tm.assert_frame_equal(result, expected) + def test_categorical_concat_gh7864(self): # GH 7864 # make sure ordering is preserved @@ -237,4 +259,4 @@ def 
test_categorical_missing_from_one_frame(self): }, index=[0, 1, 2, 0, 1, 2], ) - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected) \ No newline at end of file
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47782
2022-07-18T20:08:02Z
2022-07-24T22:38:02Z
null
2022-07-24T22:38:02Z
PERF: Bypass chunking/validation logic in StringDtype__from_arrow__
diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst index f6a6c81bfe25d..3b24310014ff8 100644 --- a/doc/source/whatsnew/v2.1.0.rst +++ b/doc/source/whatsnew/v2.1.0.rst @@ -102,6 +102,7 @@ Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Performance improvement in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` for extension array dtypes (:issue:`51549`) - Performance improvement in :meth:`DataFrame.clip` and :meth:`Series.clip` (:issue:`51472`) +- Performance improvement in :func:`read_parquet` on string columns when using ``use_nullable_dtypes=True`` (:issue:`47345`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index d3a64055f6c10..30b18bac7b73b 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -203,16 +203,19 @@ def __from_arrow__( # pyarrow.ChunkedArray chunks = array.chunks - results = [] - for arr in chunks: - # using _from_sequence to ensure None is converted to NA - str_arr = StringArray._from_sequence(np.array(arr)) - results.append(str_arr) - - if results: - return StringArray._concat_same_type(results) + if len(chunks) == 0: + arr = np.array([], dtype=object) else: - return StringArray(np.array([], dtype="object")) + arr = pyarrow.concat_arrays(chunks).to_numpy(zero_copy_only=False) + arr = lib.convert_nans_to_NA(arr) + # Bypass validation inside StringArray constructor, see GH#47781 + new_string_array = StringArray.__new__(StringArray) + NDArrayBacked.__init__( + new_string_array, + arr, + StringDtype(storage="python"), + ) + return new_string_array class BaseStringArray(ExtensionArray):
Instead of converting each chunk to a StringArray after casting to array and then concatenating, instead use pyarrow to concatenate chunks and convert to numpy. Finally, bypass validation logic (unneeded as validated on parquet write) by initializing NDArrayBacked instead of StringArray. This removes most of the performance overhead seen in #47345. There is still a slight overhead when comparing to `object` string arrays because of None -> NA conversion. I found that leaving that out still results in NA types in the example I gave (and would actually improve performance over the `object` case), but this is not consistent and thus conversion is left in. - [x] closes #47345 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47781
2022-07-18T19:25:46Z
2023-02-24T18:08:10Z
2023-02-24T18:08:10Z
2023-02-24T18:08:24Z
BUG: PeriodIndex fails to handle NA, rather than putting NaT in its place (#46673)
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 7f07187e34c78..b9f2bf00355f0 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -908,6 +908,7 @@ Indexing - Bug in :meth:`NDFrame.xs`, :meth:`DataFrame.iterrows`, :meth:`DataFrame.loc` and :meth:`DataFrame.iloc` not always propagating metadata (:issue:`28283`) - Bug in :meth:`DataFrame.sum` min_count changes dtype if input contains NaNs (:issue:`46947`) - Bug in :class:`IntervalTree` that lead to an infinite recursion. (:issue:`46658`) +- Bug in :class:`PeriodIndex` raising ``AttributeError`` when indexing on ``NA``, rather than putting ``NaT`` in its place. (:issue:`46673`) - Missing diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 3332628627739..9b01bbc433b3b 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -43,6 +43,7 @@ from libc.time cimport ( import_datetime() cimport pandas._libs.tslibs.util as util +from pandas._libs.missing cimport C_NA from pandas._libs.tslibs.np_datetime cimport ( NPY_DATETIMEUNIT, NPY_FR_D, @@ -1470,7 +1471,7 @@ cdef inline int64_t _extract_ordinal(object item, str freqstr, freq) except? 
-1: cdef: int64_t ordinal - if checknull_with_nat(item): + if checknull_with_nat(item) or item is C_NA: ordinal = NPY_NAT elif util.is_integer_object(item): if item == NPY_NAT: diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py index 73ac51c258a94..940aa5ffff040 100644 --- a/pandas/tests/arrays/categorical/test_indexing.py +++ b/pandas/tests/arrays/categorical/test_indexing.py @@ -1,12 +1,16 @@ +import math + import numpy as np import pytest from pandas import ( + NA, Categorical, CategoricalIndex, Index, Interval, IntervalIndex, + NaT, PeriodIndex, Series, Timedelta, @@ -194,6 +198,17 @@ def test_categories_assignments(self): tm.assert_numpy_array_equal(cat.__array__(), exp) tm.assert_index_equal(cat.categories, Index([1, 2, 3])) + @pytest.mark.parametrize( + "null_val", + [None, np.nan, NaT, NA, math.nan, "NaT", "nat", "NAT", "nan", "NaN", "NAN"], + ) + def test_periodindex_on_null_types(self, null_val): + # GH 46673 + result = PeriodIndex(["2022-04-06", "2022-04-07", null_val], freq="D") + expected = PeriodIndex(["2022-04-06", "2022-04-07", "NaT"], dtype="period[D]") + assert result[2] is NaT + tm.assert_index_equal(result, expected) + @pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]]) def test_categories_assignments_wrong_length_raises(self, new_categories): cat = Categorical(["a", "b", "c", "a"]) diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py index 6bdf93c43c986..47d6cad0e1743 100644 --- a/pandas/tests/series/methods/test_astype.py +++ b/pandas/tests/series/methods/test_astype.py @@ -446,7 +446,7 @@ def test_astype_string_to_extension_dtype_roundtrip( self, data, dtype, request, nullable_string_dtype ): if dtype == "boolean" or ( - dtype in ("period[M]", "datetime64[ns]", "timedelta64[ns]") and NaT in data + dtype in ("datetime64[ns]", "timedelta64[ns]") and NaT in data ): mark = pytest.mark.xfail( reason="TODO 
StringArray.astype() with missing values #GH40566"
- [x] closes #46673 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47780
2022-07-18T18:21:57Z
2022-08-01T20:08:54Z
2022-08-01T20:08:53Z
2022-08-01T20:09:02Z
PERF: efficient argmax/argmin for SparseArray
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index f313b49cd198d..7a90d96926475 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -802,6 +802,7 @@ Performance improvements - Performance improvement in datetime arrays string formatting when one of the default strftime formats ``"%Y-%m-%d %H:%M:%S"`` or ``"%Y-%m-%d %H:%M:%S.%f"`` is used. (:issue:`44764`) - Performance improvement in :meth:`Series.to_sql` and :meth:`DataFrame.to_sql` (:class:`SQLiteTable`) when processing time arrays. (:issue:`44764`) - Performance improvements to :func:`read_sas` (:issue:`47403`, :issue:`47404`, :issue:`47405`) +- Performance improvement in ``argmax`` and ``argmin`` for :class:`arrays.SparseArray` (:issue:`34197`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 5653d87a4570b..bf65da0412642 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -42,7 +42,10 @@ from pandas.compat.numpy import function as nv from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level -from pandas.util._validators import validate_insert_loc +from pandas.util._validators import ( + validate_bool_kwarg, + validate_insert_loc, +) from pandas.core.dtypes.astype import astype_nansafe from pandas.core.dtypes.cast import ( @@ -1636,6 +1639,45 @@ def _min_max(self, kind: Literal["min", "max"], skipna: bool) -> Scalar: else: return na_value_for_dtype(self.dtype.subtype, compat=False) + def _argmin_argmax(self, kind: Literal["argmin", "argmax"]) -> int: + + values = self._sparse_values + index = self._sparse_index.indices + mask = np.asarray(isna(values)) + func = np.argmax if kind == "argmax" else np.argmin + + idx = np.arange(values.shape[0]) + non_nans = values[~mask] + non_nan_idx = idx[~mask] + + _candidate = non_nan_idx[func(non_nans)] + candidate 
= index[_candidate] + + if isna(self.fill_value): + return candidate + if kind == "argmin" and self[candidate] < self.fill_value: + return candidate + if kind == "argmax" and self[candidate] > self.fill_value: + return candidate + _loc = self._first_fill_value_loc() + if _loc == -1: + # fill_value doesn't exist + return candidate + else: + return _loc + + def argmax(self, skipna: bool = True) -> int: + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return self._argmin_argmax("argmax") + + def argmin(self, skipna: bool = True) -> int: + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return self._argmin_argmax("argmin") + # ------------------------------------------------------------------------ # Ufuncs # ------------------------------------------------------------------------ diff --git a/pandas/tests/arrays/sparse/test_reductions.py b/pandas/tests/arrays/sparse/test_reductions.py index a33a282bb4869..2dd80c52f1419 100644 --- a/pandas/tests/arrays/sparse/test_reductions.py +++ b/pandas/tests/arrays/sparse/test_reductions.py @@ -268,3 +268,41 @@ def test_na_value_if_no_valid_values(self, func, data, dtype, expected): assert result is NaT or np.isnat(result) else: assert np.isnan(result) + + +class TestArgmaxArgmin: + @pytest.mark.parametrize( + "arr,argmax_expected,argmin_expected", + [ + (SparseArray([1, 2, 0, 1, 2]), 1, 2), + (SparseArray([-1, -2, 0, -1, -2]), 2, 1), + (SparseArray([np.nan, 1, 0, 0, np.nan, -1]), 1, 5), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2]), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=-1), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=0), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=1), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=2), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=3), 5, 2), + (SparseArray([0] * 10 + [-1], fill_value=0), 0, 10), + 
(SparseArray([0] * 10 + [-1], fill_value=-1), 0, 10), + (SparseArray([0] * 10 + [-1], fill_value=1), 0, 10), + (SparseArray([-1] + [0] * 10, fill_value=0), 1, 0), + (SparseArray([1] + [0] * 10, fill_value=0), 0, 1), + (SparseArray([-1] + [0] * 10, fill_value=-1), 1, 0), + (SparseArray([1] + [0] * 10, fill_value=1), 0, 1), + ], + ) + def test_argmax_argmin(self, arr, argmax_expected, argmin_expected): + argmax_result = arr.argmax() + argmin_result = arr.argmin() + assert argmax_result == argmax_expected + assert argmin_result == argmin_expected + + @pytest.mark.parametrize( + "arr,method", + [(SparseArray([]), "argmax"), (SparseArray([]), "argmin")], + ) + def test_empty_array(self, arr, method): + msg = f"attempt to get {method} of an empty sequence" + with pytest.raises(ValueError, match=msg): + arr.argmax() if method == "argmax" else arr.argmin()
- [ ] partially closes #34197 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Thanks for the review. Currently, only argmax/argmin are implemented since argsort has many annoying corner cases and I have to spent more time on the correctness and take the follow-up in a separate PR. Simple benchmark: ``` >>> val = np.random.rand(1000000) >>> mask = val < 0.99 >>> val[mask] = np.nan >>> arr = SparseArray(val) >>> %timeit arr.argmax() 8.6 ms ± 343 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) <- master 44.3 µs ± 934 ns per loop (mean ± std. dev. of 7 runs, 10,000 loops each) <- this pr ```
https://api.github.com/repos/pandas-dev/pandas/pulls/47779
2022-07-18T18:04:22Z
2022-07-27T16:40:20Z
2022-07-27T16:40:20Z
2022-07-27T17:03:36Z
Specify that both ``by`` and ``level`` should not be specified in ``groupby`` - GH40378
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index 5d8ef7ce02097..34244a8edcbfa 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -345,6 +345,17 @@ Index level names may be supplied as keys. More on the ``sum`` function and aggregation later. +When using ``.groupby()`` on a DatFrame with a MultiIndex, do not specify both ``by`` and ``level``. +The argument validation should be done in ``.groupby()``, using the name of the specific index. + +.. ipython:: python + + df = pd.DataFrame({"col1": ["a", "b", "c"]}) + df.index = pd.MultiIndex.from_arrays([["a", "a", "b"], + [1, 2, 1]], + names=["x", "y"]) + df.groupby(["col1", "x"]) + Grouping DataFrame with Index levels and columns ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A DataFrame may be grouped by a combination of columns and index levels by
- [ ] closes #40378 (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47778
2022-07-18T14:07:41Z
2022-07-18T16:59:08Z
2022-07-18T16:59:07Z
2022-07-22T20:50:36Z
BUG: PeriodIndex fails to handle NA, rather than putting NaT in its place (#46673)
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 9651269963803..c2038eeb42631 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -906,6 +906,7 @@ Indexing - Bug in :meth:`NDFrame.xs`, :meth:`DataFrame.iterrows`, :meth:`DataFrame.loc` and :meth:`DataFrame.iloc` not always propagating metadata (:issue:`28283`) - Bug in :meth:`DataFrame.sum` min_count changes dtype if input contains NaNs (:issue:`46947`) - Bug in :class:`IntervalTree` that lead to an infinite recursion. (:issue:`46658`) +- Bug in :class:`PeriodIndex` raising ``AttributeError`` when indexing on ``NA``, rather than putting ``NaT`` in its place. (:issue:`46673`) - Missing diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 93687abdf9153..c2bda8d97d1b4 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -28,6 +28,7 @@ from numpy cimport int64_t cnp.import_array() cimport pandas._libs.tslibs.util as util +from pandas._libs.missing cimport C_NA from pandas._libs.tslibs.np_datetime cimport ( get_datetime64_value, get_timedelta64_value, @@ -1217,7 +1218,7 @@ cdef inline bint checknull_with_nat(object val): """ Utility to check if a value is a nat or not. 
""" - return val is None or util.is_nan(val) or val is c_NaT + return val is None or util.is_nan(val) or val is c_NaT or val is C_NA cdef inline bint is_dt64nat(object val): diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py index 73ac51c258a94..09a69e65d4b47 100644 --- a/pandas/tests/arrays/categorical/test_indexing.py +++ b/pandas/tests/arrays/categorical/test_indexing.py @@ -1,12 +1,16 @@ +import math + import numpy as np import pytest from pandas import ( + NA, Categorical, CategoricalIndex, Index, Interval, IntervalIndex, + NaT, PeriodIndex, Series, Timedelta, @@ -194,6 +198,17 @@ def test_categories_assignments(self): tm.assert_numpy_array_equal(cat.__array__(), exp) tm.assert_index_equal(cat.categories, Index([1, 2, 3])) + @pytest.mark.parametrize( + "null_val", + [None, np.nan, NaT, NA, math.nan, "NaT", "nat", "NAT", "nan", "NaN", "NAN"], + ) + def test_periodindex_on_null_types(self, null_val): + # GH 46673 + result = PeriodIndex(["2022-04-06", "2022-04-07", null_val], freq="D") + expected = PeriodIndex(["2022-04-06", "2022-04-07", "NaT"], dtype="period[D]") + assert type(result[2]) == type(NaT) + tm.assert_index_equal(result, expected) + @pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]]) def test_categories_assignments_wrong_length_raises(self, new_categories): cat = Categorical(["a", "b", "c", "a"])
- [x] closes #46673 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47777
2022-07-18T08:00:47Z
2022-07-18T16:25:49Z
null
2022-07-18T16:25:49Z
Cln tests interval wrt inclusive
diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index f7f015cbe4a23..79e73fec706f1 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -298,7 +298,7 @@ def test_array_inference(data, expected): [ # mix of frequencies [pd.Period("2000", "D"), pd.Period("2001", "A")], - # mix of closed + # mix of inclusive [pd.Interval(0, 1, "left"), pd.Interval(1, 2, "right")], # Mix of timezones [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000", tz="UTC")], diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 695b06690b358..64849c4223486 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -593,13 +593,13 @@ def test_construction_string_regex(self, subtype): @pytest.mark.parametrize( "subtype", ["interval[int64]", "Interval[int64]", "int64", np.dtype("int64")] ) - def test_construction_allows_closed_none(self, subtype): + def test_construction_allows_inclusive_none(self, subtype): # GH#38394 dtype = IntervalDtype(subtype) assert dtype.inclusive is None - def test_closed_mismatch(self): + def test_inclusive_mismatch(self): msg = "'inclusive' keyword does not match value specified in dtype string" with pytest.raises(ValueError, match=msg): IntervalDtype("interval[int64, left]", "right") @@ -638,7 +638,7 @@ def test_construction_errors(self, subtype): with pytest.raises(TypeError, match=msg): IntervalDtype(subtype) - def test_closed_must_match(self): + def test_inclusive_must_match(self): # GH#37933 dtype = IntervalDtype(np.float64, "left") @@ -646,7 +646,7 @@ def test_closed_must_match(self): with pytest.raises(ValueError, match=msg): IntervalDtype(dtype, inclusive="both") - def test_closed_invalid(self): + def test_inclusive_invalid(self): with pytest.raises(ValueError, match="inclusive must be one of"): IntervalDtype(np.float64, "foo") @@ -822,7 +822,7 @@ def test_not_string(self): # GH30568: though IntervalDtype has object kind, 
it cannot be string assert not is_string_dtype(IntervalDtype()) - def test_unpickling_without_closed(self): + def test_unpickling_without_inclusive(self): # GH#38394 dtype = IntervalDtype("interval") diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py index 8c8998a8e4be9..a23f66d241cd9 100644 --- a/pandas/tests/indexes/interval/test_constructors.py +++ b/pandas/tests/indexes/interval/test_constructors.py @@ -104,8 +104,8 @@ def test_constructor_dtype(self, constructor, breaks, subtype): timedelta_range("1 day", periods=5), ], ) - def test_constructor_pass_closed(self, constructor, breaks): - # not passing closed to IntervalDtype, but to IntervalArray constructor + def test_constructor_pass_inclusive(self, constructor, breaks): + # not passing inclusive to IntervalDtype, but to IntervalArray constructor warn = None if isinstance(constructor, partial) and constructor.func is Index: # passing kwargs to Index is deprecated @@ -193,7 +193,7 @@ def test_generic_errors(self, constructor): # filler input data to be used when supplying invalid kwargs filler = self.get_kwargs_from_breaks(range(10)) - # invalid closed + # invalid inclusive msg = "inclusive must be one of 'right', 'left', 'both', 'neither'" with pytest.raises(ValueError, match=msg): constructor(inclusive="invalid", **filler) @@ -399,7 +399,7 @@ def test_constructor_string(self): pass def test_constructor_errors(self, constructor): - # mismatched closed within intervals with no constructor override + # mismatched inclusive within intervals with no constructor override ivs = [Interval(0, 1, inclusive="right"), Interval(2, 3, inclusive="left")] msg = "intervals must all be inclusive on the same side" with pytest.raises(ValueError, match=msg): @@ -420,7 +420,7 @@ def test_constructor_errors(self, constructor): @pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning") @pytest.mark.parametrize( - "data, closed", + "data, inclusive", [ 
([], "both"), ([np.nan, np.nan], "neither"), @@ -438,14 +438,14 @@ def test_constructor_errors(self, constructor): (IntervalIndex.from_breaks(range(5), inclusive="both"), "right"), ], ) - def test_override_inferred_closed(self, constructor, data, closed): + def test_override_inferred_inclusive(self, constructor, data, inclusive): # GH 19370 if isinstance(data, IntervalIndex): tuples = data.to_tuples() else: tuples = [(iv.left, iv.right) if notna(iv) else iv for iv in data] - expected = IntervalIndex.from_tuples(tuples, inclusive=closed) - result = constructor(data, inclusive=closed) + expected = IntervalIndex.from_tuples(tuples, inclusive=inclusive) + result = constructor(data, inclusive=inclusive) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( @@ -460,7 +460,7 @@ def test_index_object_dtype(self, values_constructor): assert type(result) is Index tm.assert_numpy_array_equal(result.values, np.array(values)) - def test_index_mixed_closed(self): + def test_index_mixed_inclusive(self): # GH27172 intervals = [ Interval(0, 1, inclusive="left"), @@ -473,8 +473,8 @@ def test_index_mixed_closed(self): tm.assert_index_equal(result, expected) -def test_dtype_closed_mismatch(): - # GH#38394 closed specified in both dtype and IntervalIndex constructor +def test_dtype_inclusive_mismatch(): + # GH#38394 dtype = IntervalDtype(np.int64, "left") diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py index 4cf754a7e52e0..e05cb73cfe446 100644 --- a/pandas/tests/indexes/interval/test_indexing.py +++ b/pandas/tests/indexes/interval/test_indexing.py @@ -76,12 +76,12 @@ def test_get_loc_length_one_scalar(self, scalar, closed): with pytest.raises(KeyError, match=str(scalar)): index.get_loc(scalar) - @pytest.mark.parametrize("other_closed", ["left", "right", "both", "neither"]) + @pytest.mark.parametrize("other_inclusive", ["left", "right", "both", "neither"]) @pytest.mark.parametrize("left, right", [(0, 5), (-1, 4), 
(-1, 6), (6, 7)]) - def test_get_loc_length_one_interval(self, left, right, closed, other_closed): + def test_get_loc_length_one_interval(self, left, right, closed, other_inclusive): # GH 20921 index = IntervalIndex.from_tuples([(0, 5)], inclusive=closed) - interval = Interval(left, right, inclusive=other_closed) + interval = Interval(left, right, inclusive=other_inclusive) if interval == index[0]: result = index.get_loc(interval) assert result == 0 @@ -89,7 +89,7 @@ def test_get_loc_length_one_interval(self, left, right, closed, other_closed): with pytest.raises( KeyError, match=re.escape( - f"Interval({left}, {right}, inclusive='{other_closed}')" + f"Interval({left}, {right}, inclusive='{other_inclusive}')" ), ): index.get_loc(interval) diff --git a/pandas/tests/indexes/interval/test_pickle.py b/pandas/tests/indexes/interval/test_pickle.py index 7f5784b6d76b9..ef6db9c8a0513 100644 --- a/pandas/tests/indexes/interval/test_pickle.py +++ b/pandas/tests/indexes/interval/test_pickle.py @@ -1,13 +1,10 @@ -import pytest - from pandas import IntervalIndex import pandas._testing as tm class TestPickle: - @pytest.mark.parametrize("inclusive", ["left", "right", "both"]) - def test_pickle_round_trip_closed(self, inclusive): + def test_pickle_round_trip_inclusive(self, closed): # https://github.com/pandas-dev/pandas/issues/35658 - idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], inclusive=inclusive) + idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], inclusive=closed) result = tm.round_trip_pickle(idx) tm.assert_index_equal(result, idx) diff --git a/pandas/tests/indexes/interval/test_setops.py b/pandas/tests/indexes/interval/test_setops.py index 5933961cc0f9d..2e1f6f7925374 100644 --- a/pandas/tests/indexes/interval/test_setops.py +++ b/pandas/tests/indexes/interval/test_setops.py @@ -10,22 +10,22 @@ import pandas._testing as tm -def monotonic_index(start, end, dtype="int64", closed="right"): +def monotonic_index(start, end, dtype="int64", inclusive="right"): return 
IntervalIndex.from_breaks( - np.arange(start, end, dtype=dtype), inclusive=closed + np.arange(start, end, dtype=dtype), inclusive=inclusive ) -def empty_index(dtype="int64", closed="right"): - return IntervalIndex(np.array([], dtype=dtype), inclusive=closed) +def empty_index(dtype="int64", inclusive="right"): + return IntervalIndex(np.array([], dtype=dtype), inclusive=inclusive) class TestIntervalIndex: def test_union(self, closed, sort): - index = monotonic_index(0, 11, closed=closed) - other = monotonic_index(5, 13, closed=closed) + index = monotonic_index(0, 11, inclusive=closed) + other = monotonic_index(5, 13, inclusive=closed) - expected = monotonic_index(0, 13, closed=closed) + expected = monotonic_index(0, 13, inclusive=closed) result = index[::-1].union(other, sort=sort) if sort is None: tm.assert_index_equal(result, expected) @@ -41,12 +41,12 @@ def test_union(self, closed, sort): def test_union_empty_result(self, closed, sort): # GH 19101: empty result, same dtype - index = empty_index(dtype="int64", closed=closed) + index = empty_index(dtype="int64", inclusive=closed) result = index.union(index, sort=sort) tm.assert_index_equal(result, index) # GH 19101: empty result, different numeric dtypes -> common dtype is f8 - other = empty_index(dtype="float64", closed=closed) + other = empty_index(dtype="float64", inclusive=closed) result = index.union(other, sort=sort) expected = other tm.assert_index_equal(result, expected) @@ -54,7 +54,7 @@ def test_union_empty_result(self, closed, sort): other = index.union(index, sort=sort) tm.assert_index_equal(result, expected) - other = empty_index(dtype="uint64", closed=closed) + other = empty_index(dtype="uint64", inclusive=closed) result = index.union(other, sort=sort) tm.assert_index_equal(result, expected) @@ -62,10 +62,10 @@ def test_union_empty_result(self, closed, sort): tm.assert_index_equal(result, expected) def test_intersection(self, closed, sort): - index = monotonic_index(0, 11, closed=closed) - other = 
monotonic_index(5, 13, closed=closed) + index = monotonic_index(0, 11, inclusive=closed) + other = monotonic_index(5, 13, inclusive=closed) - expected = monotonic_index(5, 11, closed=closed) + expected = monotonic_index(5, 11, inclusive=closed) result = index[::-1].intersection(other, sort=sort) if sort is None: tm.assert_index_equal(result, expected) @@ -100,21 +100,21 @@ def test_intersection(self, closed, sort): tm.assert_index_equal(result, expected) def test_intersection_empty_result(self, closed, sort): - index = monotonic_index(0, 11, closed=closed) + index = monotonic_index(0, 11, inclusive=closed) # GH 19101: empty result, same dtype - other = monotonic_index(300, 314, closed=closed) - expected = empty_index(dtype="int64", closed=closed) + other = monotonic_index(300, 314, inclusive=closed) + expected = empty_index(dtype="int64", inclusive=closed) result = index.intersection(other, sort=sort) tm.assert_index_equal(result, expected) # GH 19101: empty result, different numeric dtypes -> common dtype is float64 - other = monotonic_index(300, 314, dtype="float64", closed=closed) + other = monotonic_index(300, 314, dtype="float64", inclusive=closed) result = index.intersection(other, sort=sort) expected = other[:0] tm.assert_index_equal(result, expected) - other = monotonic_index(300, 314, dtype="uint64", closed=closed) + other = monotonic_index(300, 314, dtype="uint64", inclusive=closed) result = index.intersection(other, sort=sort) tm.assert_index_equal(result, expected) @@ -136,7 +136,7 @@ def test_difference(self, closed, sort): # GH 19101: empty result, same dtype result = index.difference(index, sort=sort) - expected = empty_index(dtype="int64", closed=closed) + expected = empty_index(dtype="int64", inclusive=closed) tm.assert_index_equal(result, expected) # GH 19101: empty result, different dtypes @@ -147,7 +147,7 @@ def test_difference(self, closed, sort): tm.assert_index_equal(result, expected) def test_symmetric_difference(self, closed, sort): - index 
= monotonic_index(0, 11, closed=closed) + index = monotonic_index(0, 11, inclusive=closed) result = index[1:].symmetric_difference(index[:-1], sort=sort) expected = IntervalIndex([index[0], index[-1]]) if sort is None: @@ -156,7 +156,7 @@ def test_symmetric_difference(self, closed, sort): # GH 19101: empty result, same dtype result = index.symmetric_difference(index, sort=sort) - expected = empty_index(dtype="int64", closed=closed) + expected = empty_index(dtype="int64", inclusive=closed) if sort is None: tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) @@ -166,7 +166,7 @@ def test_symmetric_difference(self, closed, sort): index.left.astype("float64"), index.right, inclusive=closed ) result = index.symmetric_difference(other, sort=sort) - expected = empty_index(dtype="float64", closed=closed) + expected = empty_index(dtype="float64", inclusive=closed) tm.assert_index_equal(result, expected) @pytest.mark.filterwarnings("ignore:'<' not supported between:RuntimeWarning") @@ -174,7 +174,7 @@ def test_symmetric_difference(self, closed, sort): "op_name", ["union", "intersection", "difference", "symmetric_difference"] ) def test_set_incompatible_types(self, closed, op_name, sort): - index = monotonic_index(0, 11, closed=closed) + index = monotonic_index(0, 11, inclusive=closed) set_op = getattr(index, op_name) # TODO: standardize return type of non-union setops type(self vs other) @@ -187,8 +187,8 @@ def test_set_incompatible_types(self, closed, op_name, sort): tm.assert_index_equal(result, expected) # mixed closed -> cast to object - for other_closed in {"right", "left", "both", "neither"} - {closed}: - other = monotonic_index(0, 11, closed=other_closed) + for other_inclusive in {"right", "left", "both", "neither"} - {closed}: + other = monotonic_index(0, 11, inclusive=other_inclusive) expected = getattr(index.astype(object), op_name)(other, sort=sort) if op_name == "difference": expected = index diff --git 
a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index cec06d054d766..4e4ee4fd12d5f 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1200,8 +1200,8 @@ def test_constructor_infer_interval(self, data_constructor): @pytest.mark.parametrize( "data_constructor", [list, np.array], ids=["list", "ndarray[object]"] ) - def test_constructor_interval_mixed_closed(self, data_constructor): - # GH 23563: mixed closed results in object dtype (not interval dtype) + def test_constructor_interval_mixed_inclusive(self, data_constructor): + # GH 23563: mixed inclusive results in object dtype (not interval dtype) data = [Interval(0, 1, inclusive="both"), Interval(0, 2, inclusive="neither")] result = Series(data_constructor(data)) assert result.dtype == object
I think this is the last one of those
https://api.github.com/repos/pandas-dev/pandas/pulls/47775
2022-07-18T00:48:10Z
2022-07-18T17:00:23Z
2022-07-18T17:00:23Z
2022-07-20T00:39:38Z
BUG: Fix fillna on multi indexed Dataframe doesn't work
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst index dbdeebad06af0..3d0a6e01826f8 100644 --- a/doc/source/whatsnew/v1.4.4.rst +++ b/doc/source/whatsnew/v1.4.4.rst @@ -14,6 +14,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ +- Fixed regression in :meth:`DataFrame.fillna` not working :class:`DataFrame` with :class:`MultiIndex` (:issue:`47649`) - Fixed regression in taking NULL :class:`objects` from a :class:`DataFrame` causing a segmentation violation. These NULL values are created by :meth:`numpy.empty_like` (:issue:`46848`) - Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`) - Fixed regression in calling bitwise numpy ufuncs (for example, ``np.bitwise_and``) on Index objects (:issue:`46769`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8b1a427e1658a..abab32ae145bd 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6861,6 +6861,7 @@ def fillna( for k, v in value.items(): if k not in result: continue + # error: Item "None" of "Optional[Dict[Any, Any]]" has no # attribute "get" downcast_k = ( @@ -6868,9 +6869,14 @@ def fillna( if not is_dict else downcast.get(k) # type: ignore[union-attr] ) - result.loc[:, k] = result[k].fillna( - v, limit=limit, downcast=downcast_k + # GH47649 + result.loc[:, k] = ( + result[k].fillna(v, limit=limit, downcast=downcast_k).values ) + # TODO: result.loc[:, k] = result.loc[:, k].fillna( + # v, limit=limit, downcast=downcast_k + # ) + # Revert when GH45751 is fixed return result if not inplace else None elif not is_list_like(value): diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py index 20e59ed72666a..8355502c47c61 100644 --- a/pandas/tests/frame/methods/test_fillna.py +++ b/pandas/tests/frame/methods/test_fillna.py @@ -715,6 +715,34 @@ def test_single_block_df_with_horizontal_axis(self): ) 
tm.assert_frame_equal(result, expected) + def test_fillna_with_multi_index_frame(self): + # GH 47649 + pdf = DataFrame( + { + ("x", "a"): [np.nan, 2.0, 3.0], + ("x", "b"): [1.0, 2.0, np.nan], + ("y", "c"): [1.0, 2.0, np.nan], + } + ) + expected = DataFrame( + { + ("x", "a"): [-1.0, 2.0, 3.0], + ("x", "b"): [1.0, 2.0, -1.0], + ("y", "c"): [1.0, 2.0, np.nan], + } + ) + tm.assert_frame_equal(pdf.fillna({"x": -1}), expected) + tm.assert_frame_equal(pdf.fillna({"x": -1, ("x", "b"): -2}), expected) + + expected = DataFrame( + { + ("x", "a"): [-1.0, 2.0, 3.0], + ("x", "b"): [1.0, 2.0, -2.0], + ("y", "c"): [1.0, 2.0, np.nan], + } + ) + tm.assert_frame_equal(pdf.fillna({("x", "b"): -2, "x": -1}), expected) + def test_fillna_nonconsolidated_frame(): # https://github.com/pandas-dev/pandas/issues/36495
- [ ] closes #47649 (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47774
2022-07-17T23:34:50Z
2022-08-23T12:10:45Z
2022-08-23T12:10:45Z
2022-08-23T16:29:32Z
REF: avoid converting input to cut if passing IntervalIndex bins
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 00b2b30eb3122..05005006ad9e4 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -240,7 +240,9 @@ def cut( original = x x = _preprocess_for_cut(x) - x, dtype = _coerce_to_type(x) + dtype = None + if not isinstance(bins, IntervalIndex): + x, dtype = _coerce_to_type(x) if not np.iterable(bins): if is_scalar(bins) and bins < 1:
xref https://github.com/pandas-dev/pandas/issues/47772, https://github.com/pandas-dev/pandas/issues/46218#issuecomment-1186600310
https://api.github.com/repos/pandas-dev/pandas/pulls/47773
2022-07-17T20:23:37Z
2023-02-22T13:55:12Z
null
2023-02-22T13:55:13Z
REGR: fix pd.cut with datetime IntervalIndex as bins
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst index 96e4ad4321c60..0393879a766a5 100644 --- a/doc/source/whatsnew/v1.4.4.rst +++ b/doc/source/whatsnew/v1.4.4.rst @@ -16,6 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in taking NULL :class:`objects` from a :class:`DataFrame` causing a segmentation violation. These NULL values are created by :meth:`numpy.empty_like` (:issue:`46848`) - Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`) +- Fixed regression in :func:`cut` using a ``datetime64`` IntervalIndex as bins (:issue:`46218`) - Fixed regression in :meth:`DataFrame.loc` not updating the cache correctly after values were set (:issue:`47867`) - Fixed regression in :meth:`DataFrame.loc` not aligning index in some cases when setting a :class:`DataFrame` (:issue:`47578`) - Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 816260c8a6d2d..1e8ba81c877ac 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3987,8 +3987,14 @@ def _should_partial_index(self, target: Index) -> bool: Should we attempt partial-matching indexing? 
""" if is_interval_dtype(self.dtype): + if is_interval_dtype(target.dtype): + return False + # See https://github.com/pandas-dev/pandas/issues/47772 the commented + # out code can be restored (instead of hardcoding `return True`) + # once that issue if fixed # "Index" has no attribute "left" - return self.left._should_compare(target) # type: ignore[attr-defined] + # return self.left._should_compare(target) # type: ignore[attr-defined] + return True return False @final diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py index 9b4afcc9c00b8..4653981a1285d 100644 --- a/pandas/tests/indexes/interval/test_indexing.py +++ b/pandas/tests/indexes/interval/test_indexing.py @@ -8,6 +8,7 @@ from pandas import ( NA, CategoricalIndex, + DatetimeIndex, Index, Interval, IntervalIndex, @@ -302,6 +303,20 @@ def test_get_indexer_categorical_with_nans(self): expected = np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) + def test_get_indexer_datetime(self): + ii = IntervalIndex.from_breaks(date_range("2018-01-01", periods=4)) + result = ii.get_indexer(DatetimeIndex(["2018-01-02"])) + expected = np.array([0], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + result = ii.get_indexer(DatetimeIndex(["2018-01-02"]).astype(str)) + tm.assert_numpy_array_equal(result, expected) + + # TODO this should probably be deprecated? 
+ # https://github.com/pandas-dev/pandas/issues/47772 + result = ii.get_indexer(DatetimeIndex(["2018-01-02"]).asi8) + tm.assert_numpy_array_equal(result, expected) + @pytest.mark.parametrize( "tuples, closed", [ diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py index 1425686f027e4..3b9ab6a83a575 100644 --- a/pandas/tests/reshape/test_cut.py +++ b/pandas/tests/reshape/test_cut.py @@ -14,6 +14,7 @@ Timestamp, cut, date_range, + interval_range, isna, qcut, timedelta_range, @@ -734,3 +735,12 @@ def test_cut_with_timestamp_tuple_labels(): expected = Categorical.from_codes([0, 1, 2], labels, ordered=True) tm.assert_categorical_equal(result, expected) + + +def test_cut_bins_datetime_intervalindex(): + # https://github.com/pandas-dev/pandas/issues/46218 + bins = interval_range(Timestamp("2022-02-25"), Timestamp("2022-02-27"), freq="1D") + # passing Series instead of list is important to trigger bug + result = cut(Series([Timestamp("2022-02-26")]), bins=bins) + expected = Categorical.from_codes([0], bins, ordered=True) + tm.assert_categorical_equal(result.array, expected)
Closes #46218 xref https://github.com/pandas-dev/pandas/issues/46218#issuecomment-1073362376 for the actual reason that causes `cut` to fail: inside the implementation, we convert the actual timestamp data to floats (to pass to the underlying algorithm), but then when passing those values to `IntervalIndex.get_indexer`, those numeric values no longer "match" with the datetime64 interval dtype. And in theory, `get_indexer` should then fail (return -1 for not finding the target values), but until https://github.com/pandas-dev/pandas/pull/42227 this actually happily worked (and therefore also let `cut` with datetime64 interval bins work). This PR doesn't solve the root cause (we should change the logic inside `cut` so that we don't create this mismatch in values vs bins), but it is a short-term fix of the regression. It basically reverts the (unintended, I think) behaviour change introduced by https://github.com/pandas-dev/pandas/pull/42227, but without actually reverting that PR (I am keeping the refactor introducing `_should_partial_index` of that PR, but I am only changing `_should_partial_index` itself a little bit to match better with what happened before in practice). I will open a separate issue about the issue in `cut` and that we should make `IntervalIndex.from_indexer` more strict. (opened -> https://github.com/pandas-dev/pandas/issues/47772)
https://api.github.com/repos/pandas-dev/pandas/pulls/47771
2022-07-17T19:52:47Z
2022-08-19T19:31:05Z
2022-08-19T19:31:05Z
2022-08-20T06:51:44Z
REF: de-duplicate get_conversion_factor
diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd index 356bd9dc3d7a0..6e41a55f30929 100644 --- a/pandas/_libs/tslibs/dtypes.pxd +++ b/pandas/_libs/tslibs/dtypes.pxd @@ -8,7 +8,6 @@ cdef NPY_DATETIMEUNIT abbrev_to_npy_unit(str abbrev) cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) nogil cpdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=*) except? -1 cpdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1 -cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1 cdef dict attrname_to_abbrevs diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx index 01616666bba3f..a0a7ab90ebb30 100644 --- a/pandas/_libs/tslibs/dtypes.pyx +++ b/pandas/_libs/tslibs/dtypes.pyx @@ -4,7 +4,10 @@ cimport cython from enum import Enum -from pandas._libs.tslibs.np_datetime cimport NPY_DATETIMEUNIT +from pandas._libs.tslibs.np_datetime cimport ( + NPY_DATETIMEUNIT, + get_conversion_factor, +) cdef class PeriodDtypeBase: @@ -386,83 +389,11 @@ cpdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns) """ How many of the given time units fit into a single day? 
""" - cdef: - int64_t day_units - - if reso == NPY_DATETIMEUNIT.NPY_FR_ps: - # pico is the smallest unit for which we don't overflow, so - # we exclude femto and atto - day_units = 24 * 3600 * 1_000_000_000_000 - elif reso == NPY_DATETIMEUNIT.NPY_FR_ns: - day_units = 24 * 3600 * 1_000_000_000 - elif reso == NPY_DATETIMEUNIT.NPY_FR_us: - day_units = 24 * 3600 * 1_000_000 - elif reso == NPY_DATETIMEUNIT.NPY_FR_ms: - day_units = 24 * 3600 * 1_000 - elif reso == NPY_DATETIMEUNIT.NPY_FR_s: - day_units = 24 * 3600 - elif reso == NPY_DATETIMEUNIT.NPY_FR_m: - day_units = 24 * 60 - elif reso == NPY_DATETIMEUNIT.NPY_FR_h: - day_units = 24 - elif reso == NPY_DATETIMEUNIT.NPY_FR_D: - day_units = 1 - else: - raise NotImplementedError(reso) - return day_units + return get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_D, reso) cpdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1: - if reso == NPY_DATETIMEUNIT.NPY_FR_ns: - return 1_000_000_000 - elif reso == NPY_DATETIMEUNIT.NPY_FR_us: - return 1_000_000 - elif reso == NPY_DATETIMEUNIT.NPY_FR_ms: - return 1_000 - elif reso == NPY_DATETIMEUNIT.NPY_FR_s: - return 1 - else: - raise NotImplementedError(reso) - - -@cython.overflowcheck(True) -cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1: - """ - Find the factor by which we need to multiply to convert from from_unit to to_unit. 
- """ - if ( - from_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC - or to_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC - ): - raise ValueError("unit-less resolutions are not supported") - if from_unit > to_unit: - raise ValueError - - if from_unit == to_unit: - return 1 - - if from_unit == NPY_DATETIMEUNIT.NPY_FR_W: - return 7 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_D, to_unit) - elif from_unit == NPY_DATETIMEUNIT.NPY_FR_D: - return 24 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_h, to_unit) - elif from_unit == NPY_DATETIMEUNIT.NPY_FR_h: - return 60 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_m, to_unit) - elif from_unit == NPY_DATETIMEUNIT.NPY_FR_m: - return 60 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_s, to_unit) - elif from_unit == NPY_DATETIMEUNIT.NPY_FR_s: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ms, to_unit) - elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ms: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_us, to_unit) - elif from_unit == NPY_DATETIMEUNIT.NPY_FR_us: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ns, to_unit) - elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ns: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ps, to_unit) - elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ps: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_fs, to_unit) - elif from_unit == NPY_DATETIMEUNIT.NPY_FR_fs: - return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_as, to_unit) - else: - raise ValueError(from_unit, to_unit) + return get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_s, reso) cdef dict _reso_str_map = { diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd index 290483a741fe7..420d83909a78d 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -102,6 +102,7 @@ cpdef cnp.ndarray astype_overflowsafe( cnp.dtype dtype, # ndarray[datetime64[anyunit]] bint copy=*, ) +cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT 
from_unit, NPY_DATETIMEUNIT to_unit) except? -1 cdef bint cmp_dtstructs(npy_datetimestruct* left, npy_datetimestruct* right, int op) cdef get_implementation_bounds( diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 692b4430fa577..494eb5da7e107 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -1,3 +1,4 @@ +cimport cython from cpython.datetime cimport ( PyDateTime_DATE_GET_HOUR, PyDateTime_DATE_GET_MICROSECOND, @@ -450,3 +451,43 @@ cdef int op_to_op_code(op): return Py_GE if op is operator.gt: return Py_GT + + +@cython.overflowcheck(True) +cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1: + """ + Find the factor by which we need to multiply to convert from from_unit to to_unit. + """ + if ( + from_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC + or to_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC + ): + raise ValueError("unit-less resolutions are not supported") + if from_unit > to_unit: + raise ValueError + + if from_unit == to_unit: + return 1 + + if from_unit == NPY_DATETIMEUNIT.NPY_FR_W: + return 7 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_D, to_unit) + elif from_unit == NPY_DATETIMEUNIT.NPY_FR_D: + return 24 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_h, to_unit) + elif from_unit == NPY_DATETIMEUNIT.NPY_FR_h: + return 60 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_m, to_unit) + elif from_unit == NPY_DATETIMEUNIT.NPY_FR_m: + return 60 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_s, to_unit) + elif from_unit == NPY_DATETIMEUNIT.NPY_FR_s: + return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ms, to_unit) + elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ms: + return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_us, to_unit) + elif from_unit == NPY_DATETIMEUNIT.NPY_FR_us: + return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ns, to_unit) + elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ns: + return 1000 * 
get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ps, to_unit) + elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ps: + return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_fs, to_unit) + elif from_unit == NPY_DATETIMEUNIT.NPY_FR_fs: + return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_as, to_unit) + else: + raise ValueError(from_unit, to_unit) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index fef2a317a4f26..c64a9fb4d9c36 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -35,10 +35,7 @@ from pandas._libs.tslibs.conversion cimport ( cast_from_unit, precision_from_unit, ) -from pandas._libs.tslibs.dtypes cimport ( - get_conversion_factor, - npy_unit_to_abbrev, -) +from pandas._libs.tslibs.dtypes cimport npy_unit_to_abbrev from pandas._libs.tslibs.nattype cimport ( NPY_NAT, c_NaT as NaT, @@ -50,6 +47,7 @@ from pandas._libs.tslibs.np_datetime cimport ( NPY_FR_ns, cmp_dtstructs, cmp_scalar, + get_conversion_factor, get_datetime64_unit, get_timedelta64_value, get_unit_from_dtype, diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index ae3ce46cbc3c8..3cf9c9bcda538 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -54,7 +54,6 @@ from pandas._libs.tslibs.conversion cimport ( maybe_localize_tso, ) from pandas._libs.tslibs.dtypes cimport ( - get_conversion_factor, npy_unit_to_abbrev, periods_per_day, periods_per_second, @@ -83,6 +82,7 @@ from pandas._libs.tslibs.np_datetime cimport ( NPY_FR_ns, cmp_dtstructs, cmp_scalar, + get_conversion_factor, get_datetime64_unit, get_datetime64_value, get_unit_from_dtype,
Moving it to np_datetime bc we'll end up using it in astype_overflowsafe
https://api.github.com/repos/pandas-dev/pandas/pulls/47770
2022-07-17T19:35:54Z
2022-07-18T17:14:44Z
2022-07-18T17:14:44Z
2022-07-18T18:46:39Z
STYLE add future annotations where possible
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 06025c730700f..92f3b3ce83297 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -94,8 +94,6 @@ repos: stages: [manual] additional_dependencies: &pyright_dependencies - pyright@1.1.258 -- repo: local - hooks: - id: pyright_reportGeneralTypeIssues name: pyright reportGeneralTypeIssues entry: pyright --skipunannotated -p pyright_reportGeneralTypeIssues.json @@ -105,8 +103,6 @@ repos: types: [python] stages: [manual] additional_dependencies: *pyright_dependencies -- repo: local - hooks: - id: mypy name: mypy entry: mypy @@ -115,8 +111,6 @@ repos: pass_filenames: false types: [python] stages: [manual] -- repo: local - hooks: - id: flake8-rst name: flake8-rst description: Run flake8 on code snippets in docstrings or RST files @@ -237,3 +231,15 @@ repos: additional_dependencies: - flake8==4.0.1 - flake8-pyi==22.5.1 + - id: future-annotations + name: import annotations from __future__ + entry: 'from __future__ import annotations' + language: pygrep + args: [--negate] + files: ^pandas/ + types: [python] + exclude: | + (?x) + /(__init__\.py)|(api\.py)|(_version\.py)|(testing\.py)|(conftest\.py)$ + |/tests/ + |/_testing/ diff --git a/pandas/_config/dates.py b/pandas/_config/dates.py index 5bf2b49ce5904..b37831f96eb73 100644 --- a/pandas/_config/dates.py +++ b/pandas/_config/dates.py @@ -1,6 +1,8 @@ """ config for datetime formatting """ +from __future__ import annotations + from pandas._config import config as cf pc_date_dayfirst_doc = """ diff --git a/pandas/compat/chainmap.py b/pandas/compat/chainmap.py index 9af7962fe4ad0..5bec8e5fa1913 100644 --- a/pandas/compat/chainmap.py +++ b/pandas/compat/chainmap.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import ( ChainMap, TypeVar, diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py index eef2bb6639c36..833cda20368a2 100644 --- a/pandas/compat/pyarrow.py +++ b/pandas/compat/pyarrow.py @@ -1,5 +1,7 @@ """ 
support pyarrow compatibility across versions """ +from __future__ import annotations + from pandas.util.version import Version try: diff --git a/pandas/core/_numba/kernels/shared.py b/pandas/core/_numba/kernels/shared.py index ec25e78a8d897..6e6bcef590d06 100644 --- a/pandas/core/_numba/kernels/shared.py +++ b/pandas/core/_numba/kernels/shared.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import numba import numpy as np diff --git a/pandas/core/array_algos/transforms.py b/pandas/core/array_algos/transforms.py index 27aebb9911e83..93b029c21760e 100644 --- a/pandas/core/array_algos/transforms.py +++ b/pandas/core/array_algos/transforms.py @@ -2,6 +2,8 @@ transforms.py is for shape-preserving functions. """ +from __future__ import annotations + import numpy as np diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py index d2875be0f58cd..280a599de84ed 100644 --- a/pandas/core/arraylike.py +++ b/pandas/core/arraylike.py @@ -4,6 +4,8 @@ Index ExtensionArray """ +from __future__ import annotations + import operator from typing import Any import warnings diff --git a/pandas/core/computation/check.py b/pandas/core/computation/check.py index 7be617de63a40..3221b158241f5 100644 --- a/pandas/core/computation/check.py +++ b/pandas/core/computation/check.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from pandas.compat._optional import import_optional_dependency ne = import_optional_dependency("numexpr", errors="warn") diff --git a/pandas/core/computation/common.py b/pandas/core/computation/common.py index ebf4d4ea9154e..a1ac3dfa06ee0 100644 --- a/pandas/core/computation/common.py +++ b/pandas/core/computation/common.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from functools import reduce import numpy as np diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index a49e35539656f..8c1a3fece255e 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -9,6 +9,8 @@ module is imported, register them 
here rather than in the module. """ +from __future__ import annotations + import os from typing import Callable import warnings diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index f47aeb16e19f1..893e4a9be58ef 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -1,5 +1,7 @@ """ basic inference routines """ +from __future__ import annotations + from collections import abc from numbers import Number import re diff --git a/pandas/core/exchange/buffer.py b/pandas/core/exchange/buffer.py index 098c596bff4cd..a3b05a0c5d24a 100644 --- a/pandas/core/exchange/buffer.py +++ b/pandas/core/exchange/buffer.py @@ -1,7 +1,4 @@ -from typing import ( - Optional, - Tuple, -) +from __future__ import annotations import numpy as np from packaging import version @@ -60,7 +57,7 @@ def __dlpack__(self): return self._x.__dlpack__() raise NotImplementedError("__dlpack__") - def __dlpack_device__(self) -> Tuple[DlpackDeviceType, Optional[int]]: + def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: """ Device type and device ID for where the data in the buffer resides. 
""" diff --git a/pandas/core/exchange/dataframe_protocol.py b/pandas/core/exchange/dataframe_protocol.py index ee2ae609e73f9..367b906332741 100644 --- a/pandas/core/exchange/dataframe_protocol.py +++ b/pandas/core/exchange/dataframe_protocol.py @@ -2,6 +2,8 @@ A verbatim copy (vendored) of the spec from https://github.com/data-apis/dataframe-api """ +from __future__ import annotations + from abc import ( ABC, abstractmethod, @@ -9,11 +11,8 @@ import enum from typing import ( Any, - Dict, Iterable, - Optional, Sequence, - Tuple, TypedDict, ) @@ -90,18 +89,18 @@ class ColumnNullType(enum.IntEnum): class ColumnBuffers(TypedDict): # first element is a buffer containing the column data; # second element is the data buffer's associated dtype - data: Tuple["Buffer", Any] + data: tuple[Buffer, Any] # first element is a buffer containing mask values indicating missing data; # second element is the mask value buffer's associated dtype. # None if the null representation is not a bit or byte mask - validity: Optional[Tuple["Buffer", Any]] + validity: tuple[Buffer, Any] | None # first element is a buffer containing the offset values for # variable-size binary data (e.g., variable-length strings); # second element is the offsets buffer's associated dtype. # None if the data buffer does not have an associated offsets buffer - offsets: Optional[Tuple["Buffer", Any]] + offsets: tuple[Buffer, Any] | None class CategoricalDescription(TypedDict): @@ -111,7 +110,7 @@ class CategoricalDescription(TypedDict): is_dictionary: bool # Python-level only (e.g. ``{int: str}``). # None if not a dictionary-style categorical. 
- mapping: Optional[dict] + mapping: dict | None class Buffer(ABC): @@ -161,7 +160,7 @@ def __dlpack__(self): raise NotImplementedError("__dlpack__") @abstractmethod - def __dlpack_device__(self) -> Tuple[DlpackDeviceType, Optional[int]]: + def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: """ Device type and device ID for where the data in the buffer resides. Uses device type codes matching DLPack. @@ -239,7 +238,7 @@ def offset(self) -> int: @property @abstractmethod - def dtype(self) -> Tuple[DtypeKind, int, str, str]: + def dtype(self) -> tuple[DtypeKind, int, str, str]: """ Dtype description as a tuple ``(kind, bit-width, format string, endianness)``. @@ -293,7 +292,7 @@ def describe_categorical(self) -> CategoricalDescription: @property @abstractmethod - def describe_null(self) -> Tuple[ColumnNullType, Any]: + def describe_null(self) -> tuple[ColumnNullType, Any]: """ Return the missing value (or "null") representation the column dtype uses, as a tuple ``(kind, value)``. @@ -306,7 +305,7 @@ def describe_null(self) -> Tuple[ColumnNullType, Any]: @property @abstractmethod - def null_count(self) -> Optional[int]: + def null_count(self) -> int | None: """ Number of null elements, if known. @@ -316,7 +315,7 @@ def null_count(self) -> Optional[int]: @property @abstractmethod - def metadata(self) -> Dict[str, Any]: + def metadata(self) -> dict[str, Any]: """ The metadata for the column. See `DataFrame.metadata` for more details. """ @@ -330,7 +329,7 @@ def num_chunks(self) -> int: pass @abstractmethod - def get_chunks(self, n_chunks: Optional[int] = None) -> Iterable["Column"]: + def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]: """ Return an iterator yielding the chunks. 
@@ -395,7 +394,7 @@ def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True): @property @abstractmethod - def metadata(self) -> Dict[str, Any]: + def metadata(self) -> dict[str, Any]: """ The metadata for the data frame, as a dictionary with string keys. The contents of `metadata` may be anything, they are meant for a library @@ -415,7 +414,7 @@ def num_columns(self) -> int: pass @abstractmethod - def num_rows(self) -> Optional[int]: + def num_rows(self) -> int | None: # TODO: not happy with Optional, but need to flag it may be expensive # why include it if it may be None - what do we expect consumers # to do here? @@ -460,21 +459,21 @@ def get_columns(self) -> Iterable[Column]: pass @abstractmethod - def select_columns(self, indices: Sequence[int]) -> "DataFrame": + def select_columns(self, indices: Sequence[int]) -> DataFrame: """ Create a new DataFrame by selecting a subset of columns by index. """ pass @abstractmethod - def select_columns_by_name(self, names: Sequence[str]) -> "DataFrame": + def select_columns_by_name(self, names: Sequence[str]) -> DataFrame: """ Create a new DataFrame by selecting a subset of columns by name. """ pass @abstractmethod - def get_chunks(self, n_chunks: Optional[int] = None) -> Iterable["DataFrame"]: + def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]: """ Return an iterator yielding the chunks. 
diff --git a/pandas/core/exchange/from_dataframe.py b/pandas/core/exchange/from_dataframe.py index cb1967b5701a0..a33e47ba3b68e 100644 --- a/pandas/core/exchange/from_dataframe.py +++ b/pandas/core/exchange/from_dataframe.py @@ -1,13 +1,8 @@ +from __future__ import annotations + import ctypes import re -from typing import ( - Any, - Dict, - List, - Optional, - Tuple, - Union, -) +from typing import Any import numpy as np @@ -24,7 +19,7 @@ Endianness, ) -_NP_DTYPES: Dict[DtypeKind, Dict[int, Any]] = { +_NP_DTYPES: dict[DtypeKind, dict[int, Any]] = { DtypeKind.INT: {8: np.int8, 16: np.int16, 32: np.int32, 64: np.int64}, DtypeKind.UINT: {8: np.uint8, 16: np.uint16, 32: np.uint32, 64: np.uint64}, DtypeKind.FLOAT: {32: np.float32, 64: np.float64}, @@ -108,7 +103,7 @@ def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame: """ # We need a dict of columns here, with each column being a NumPy array (at # least for now, deal with non-NumPy dtypes later). - columns: Dict[str, Any] = {} + columns: dict[str, Any] = {} buffers = [] # hold on to buffers, keeps memory alive for name in df.column_names(): if not isinstance(name, str): @@ -140,7 +135,7 @@ def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame: return pandas_df -def primitive_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]: +def primitive_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: """ Convert a column holding one of the primitive dtypes to a NumPy array. @@ -165,7 +160,7 @@ def primitive_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]: return data, buffers -def categorical_column_to_series(col: Column) -> Tuple[pd.Series, Any]: +def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]: """ Convert a column holding categorical data to a pandas Series. 
@@ -205,7 +200,7 @@ def categorical_column_to_series(col: Column) -> Tuple[pd.Series, Any]: return data, buffers -def string_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]: +def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: """ Convert a column holding string data to a NumPy array. @@ -268,7 +263,7 @@ def string_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]: null_pos = ~null_pos # Assemble the strings from the code units - str_list: List[Union[None, float, str]] = [None] * col.size + str_list: list[None | float | str] = [None] * col.size for i in range(col.size): # Check for missing values if null_pos is not None and null_pos[i]: @@ -324,7 +319,7 @@ def parse_datetime_format_str(format_str, data): raise NotImplementedError(f"DateTime kind is not supported: {format_str}") -def datetime_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]: +def datetime_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: """ Convert a column holding DateTime data to a NumPy array. @@ -362,9 +357,9 @@ def datetime_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]: def buffer_to_ndarray( buffer: Buffer, - dtype: Tuple[DtypeKind, int, str, str], + dtype: tuple[DtypeKind, int, str, str], offset: int = 0, - length: Optional[int] = None, + length: int | None = None, ) -> np.ndarray: """ Build a NumPy array from the passed buffer. @@ -470,9 +465,9 @@ def bitmask_to_bool_ndarray( def set_nulls( - data: Union[np.ndarray, pd.Series], + data: np.ndarray | pd.Series, col: Column, - validity: Optional[Tuple[Buffer, Tuple[DtypeKind, int, str, str]]], + validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None, allow_modify_inplace: bool = True, ): """ diff --git a/pandas/core/exchange/utils.py b/pandas/core/exchange/utils.py index 0c746113babee..2cc5126591718 100644 --- a/pandas/core/exchange/utils.py +++ b/pandas/core/exchange/utils.py @@ -2,6 +2,8 @@ Utility functions and objects for implementing the exchange API. 
""" +from __future__ import annotations + import re import typing diff --git a/pandas/core/flags.py b/pandas/core/flags.py index b4e1039e216c0..f07c6917d91e5 100644 --- a/pandas/core/flags.py +++ b/pandas/core/flags.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import weakref diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 2caaadbc05cff..6a1c586d90b6e 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -2,6 +2,8 @@ Functions for arithmetic and comparison operations on NumPy arrays and ExtensionArrays. """ +from __future__ import annotations + import datetime from functools import partial import operator diff --git a/pandas/core/ops/common.py b/pandas/core/ops/common.py index b883fe7751daa..f0e6aa3750cee 100644 --- a/pandas/core/ops/common.py +++ b/pandas/core/ops/common.py @@ -1,6 +1,8 @@ """ Boilerplate functions used in defining binary operations. """ +from __future__ import annotations + from functools import wraps from typing import Callable diff --git a/pandas/core/ops/dispatch.py b/pandas/core/ops/dispatch.py index bfd4afe0de86f..2f500703ccfb3 100644 --- a/pandas/core/ops/dispatch.py +++ b/pandas/core/ops/dispatch.py @@ -1,6 +1,8 @@ """ Functions for defining unary operations. """ +from __future__ import annotations + from typing import Any from pandas._typing import ArrayLike diff --git a/pandas/core/ops/invalid.py b/pandas/core/ops/invalid.py index e069c765d5299..eb27cf7450119 100644 --- a/pandas/core/ops/invalid.py +++ b/pandas/core/ops/invalid.py @@ -1,6 +1,8 @@ """ Templates for invalid operations. """ +from __future__ import annotations + import operator import numpy as np diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py index d1f704635ba64..e8a930083a778 100644 --- a/pandas/core/ops/methods.py +++ b/pandas/core/ops/methods.py @@ -1,6 +1,8 @@ """ Functions to generate methods and pin them to the appropriate classes. 
""" +from __future__ import annotations + import operator from pandas.core.dtypes.generic import ( diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py index 8d5f7fb8de758..850ca44e996c4 100644 --- a/pandas/core/ops/missing.py +++ b/pandas/core/ops/missing.py @@ -21,6 +21,8 @@ 3) divmod behavior consistent with 1) and 2). """ +from __future__ import annotations + import operator import numpy as np diff --git a/pandas/core/roperator.py b/pandas/core/roperator.py index 15b16b6fa976a..2f320f4e9c6b9 100644 --- a/pandas/core/roperator.py +++ b/pandas/core/roperator.py @@ -2,6 +2,8 @@ Reversed Operations not available in the stdlib operator module. Defining these instead of using lambdas allows us to reference them by name. """ +from __future__ import annotations + import operator diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index 15144116fa924..ed2a4002f5ce7 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -1,4 +1,6 @@ """Common utility functions for rolling operations""" +from __future__ import annotations + from collections import defaultdict from typing import cast diff --git a/pandas/core/window/doc.py b/pandas/core/window/doc.py index 61cfa29ffc481..4fe08e2fa20b3 100644 --- a/pandas/core/window/doc.py +++ b/pandas/core/window/doc.py @@ -1,4 +1,6 @@ """Any shareable docstring components for rolling/expanding/ewm""" +from __future__ import annotations + from textwrap import dedent from pandas.core.shared_docs import _shared_docs diff --git a/pandas/core/window/online.py b/pandas/core/window/online.py index bb973f05687e2..2e25bdd12d3e0 100644 --- a/pandas/core/window/online.py +++ b/pandas/core/window/online.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import TYPE_CHECKING import numpy as np diff --git a/pandas/io/formats/_color_data.py b/pandas/io/formats/_color_data.py index e5b72b2befa4f..2e7cb7f29646e 100644 --- a/pandas/io/formats/_color_data.py +++ 
b/pandas/io/formats/_color_data.py @@ -3,6 +3,8 @@ # This data has been copied here, instead of being imported from matplotlib, # not to have ``to_excel`` methods require matplotlib. # source: matplotlib._color_data (3.3.3) +from __future__ import annotations + CSS4_COLORS = { "aliceblue": "F0F8FF", "antiquewhite": "FAEBD7", diff --git a/pandas/io/sas/sas_constants.py b/pandas/io/sas/sas_constants.py index 979b2cacbf706..366e6924a1e16 100644 --- a/pandas/io/sas/sas_constants.py +++ b/pandas/io/sas/sas_constants.py @@ -1,3 +1,5 @@ +from __future__ import annotations + magic = ( b"\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\xc2\xea\x81\x60" diff --git a/pandas/plotting/_matplotlib/compat.py b/pandas/plotting/_matplotlib/compat.py index c731c40f10a05..6015662999a7d 100644 --- a/pandas/plotting/_matplotlib/compat.py +++ b/pandas/plotting/_matplotlib/compat.py @@ -1,4 +1,6 @@ # being a bit too dynamic +from __future__ import annotations + import operator from pandas.util.version import Version diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index b995c6ac78b80..169c9cc18a7fd 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from pandas._libs.tslibs.offsets import ( FY5253, BaseOffset,
- [x] closes #41901 (Replace xxxx with the Github issue number) - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/47769
2022-07-17T19:24:02Z
2022-07-18T17:16:24Z
2022-07-18T17:16:24Z
2022-07-19T14:52:33Z
TYP: Appender also works with properties
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index cec4ee40a8c7a..f8359edaa8d44 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -12,7 +12,10 @@ import warnings from pandas._libs.properties import cache_readonly -from pandas._typing import F +from pandas._typing import ( + F, + T, +) from pandas.util._exceptions import find_stack_level @@ -485,7 +488,7 @@ def __init__(self, addendum: str | None, join: str = "", indents: int = 0) -> No self.addendum = addendum self.join = join - def __call__(self, func: F) -> F: + def __call__(self, func: T) -> T: func.__doc__ = func.__doc__ if func.__doc__ else "" self.addendum = self.addendum if self.addendum else "" docitems = [func.__doc__, self.addendum]
Technically, `Appender` works with literally any `object` as all of them have `__doc__`. This change helps pyright in this case https://github.com/pandas-dev/pandas/blob/bdd9314c7006611021bab2b7adf7210cd874a0c2/pandas/core/series.py#L738 Mypy still needs the ignore because it doesn't support decorating a property.
https://api.github.com/repos/pandas-dev/pandas/pulls/47768
2022-07-17T19:16:53Z
2022-07-18T17:17:17Z
2022-07-18T17:17:17Z
2022-09-10T01:39:01Z
PERF: operations with zoneinfo tzinfos
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 2b7f9b9659354..4487136aa7fb8 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -642,9 +642,7 @@ cdef int64_t _tz_localize_using_tzinfo_api( if not to_utc: # tz.utcoffset only makes sense if datetime # is _wall time_, so if val is a UTC timestamp convert to wall time - dt = datetime_new(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, utc_pytz) - dt = dt.astimezone(tz) + dt = _astimezone(dts, tz) if fold is not NULL: # NB: fold is only passed with to_utc=False @@ -658,6 +656,27 @@ cdef int64_t _tz_localize_using_tzinfo_api( return delta +cdef datetime _astimezone(npy_datetimestruct dts, tzinfo tz): + """ + Optimized equivalent to: + + dt = datetime(dts.year, dts.month, dts.day, dts.hour, + dts.min, dts.sec, dts.us, utc_pytz) + dt = dt.astimezone(tz) + + Derived from the datetime.astimezone implementation at + https://github.com/python/cpython/blob/main/Modules/_datetimemodule.c#L6187 + + NB: we are assuming tz is not None. + """ + cdef: + datetime result + + result = datetime_new(dts.year, dts.month, dts.day, dts.hour, + dts.min, dts.sec, dts.us, tz) + return tz.fromutc(result) + + # NB: relies on dateutil internals, subject to change. @cython.boundscheck(False) @cython.wraparound(False)
``` import zoneinfo import pandas as pd tz = zoneinfo.ZoneInfo("US/Pacific") dti = pd.date_range("2016-01-01", periods=10**5, freq="s", tz=tz) In [3]: %timeit dti.normalize() 142 ms ± 3.44 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) # <- main 90.4 ms ± 4.34 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) # <- PR ``` The corresponding timing with pytz is 49.9 ms. @pganssle any thoughts on getting to near-parity (so we can drop pytz xref #46463)? Is there any prospect of exposing zoneinfo_fromutc in the C-API?
https://api.github.com/repos/pandas-dev/pandas/pulls/47767
2022-07-17T17:59:21Z
2022-07-18T17:19:28Z
2022-07-18T17:19:28Z
2022-07-18T18:19:52Z
add ignore for new mypy error 'type-var'
diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 83626a42134d6..917382544199a 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -205,7 +205,7 @@ def __getattr__(self, attr: str): # error: Signature of "obj" incompatible with supertype "BaseGroupBy" @property - def obj(self) -> NDFrameT: # type: ignore[override] + def obj(self) -> NDFrame: # type: ignore[override] # error: Incompatible return value type (got "Optional[Any]", # expected "NDFrameT") return self.groupby.obj # type: ignore[return-value]
A new error introduced to mypy is detected in pandas' repository. This PR adds comment to ignore that error. > pandas (https://github.com/pandas-dev/pandas) > + pandas/core/resample.py:208: error: A function returning TypeVar should receive at least one argument containing the same Typevar [type-var] > + pandas/core/resample.py:208: note: Error code "type-var" not covered by "type: ignore" comment Reference: https://github.com/python/mypy/pull/13166 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
https://api.github.com/repos/pandas-dev/pandas/pulls/47766
2022-07-17T17:08:29Z
2022-07-17T23:29:07Z
2022-07-17T23:29:07Z
2022-07-18T11:51:05Z
TST: misplaced string array test
diff --git a/pandas/tests/arrays/numpy_/test_indexing.py b/pandas/tests/arrays/numpy_/test_indexing.py index f92411efe774c..225d64ad7d258 100644 --- a/pandas/tests/arrays/numpy_/test_indexing.py +++ b/pandas/tests/arrays/numpy_/test_indexing.py @@ -7,6 +7,17 @@ class TestSearchsorted: + def test_searchsorted_string(self, string_dtype): + arr = pd.array(["a", "b", "c"], dtype=string_dtype) + + result = arr.searchsorted("a", side="left") + assert is_scalar(result) + assert result == 0 + + result = arr.searchsorted("a", side="right") + assert is_scalar(result) + assert result == 1 + def test_searchsorted_numeric_dtypes_scalar(self, any_real_numpy_dtype): arr = pd.array([1, 3, 90], dtype=any_real_numpy_dtype) result = arr.searchsorted(30) diff --git a/pandas/tests/arrays/string_/test_indexing.py b/pandas/tests/arrays/string_/test_indexing.py deleted file mode 100644 index 41466c43288c3..0000000000000 --- a/pandas/tests/arrays/string_/test_indexing.py +++ /dev/null @@ -1,16 +0,0 @@ -from pandas.core.dtypes.common import is_scalar - -import pandas as pd - - -class TestSearchsorted: - def test_searchsorted(self, string_dtype): - arr = pd.array(["a", "b", "c"], dtype=string_dtype) - - result = arr.searchsorted("a", side="left") - assert is_scalar(result) - assert result == 0 - - result = arr.searchsorted("a", side="right") - assert is_scalar(result) - assert result == 1
Just encountered this because I wanted to add a test there for another PR. Those tests were moved and splitted in https://github.com/pandas-dev/pandas/pull/46136, but the "string" tests are not actually about StringArray, they are about PandasArray with string numpy dtype (`np.dtype("U")`), and thus belong with the others in `tests/arrays/numpy_`, since `tests/arrays/string_` is for StringArray
https://api.github.com/repos/pandas-dev/pandas/pulls/47765
2022-07-17T15:27:21Z
2022-07-18T15:36:09Z
2022-07-18T15:36:08Z
2022-07-18T19:48:43Z
TYP: a few mismatches found by stubtest
diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi index 29d1365cad6fc..f55ff0ae8b574 100644 --- a/pandas/_libs/algos.pyi +++ b/pandas/_libs/algos.pyi @@ -42,7 +42,7 @@ def groupsort_indexer( np.ndarray, # ndarray[int64_t, ndim=1] ]: ... def kth_smallest( - a: np.ndarray, # numeric[:] + arr: np.ndarray, # numeric[:] k: int, ) -> Any: ... # numeric diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi index 2f0c3980c0c02..c7cb9705d7cb9 100644 --- a/pandas/_libs/groupby.pyi +++ b/pandas/_libs/groupby.pyi @@ -105,8 +105,9 @@ def group_last( values: np.ndarray, # ndarray[rank_t, ndim=2] labels: np.ndarray, # const int64_t[:] mask: npt.NDArray[np.bool_] | None, - result_mask: npt.NDArray[np.bool_] | None, + result_mask: npt.NDArray[np.bool_] | None = ..., min_count: int = ..., # Py_ssize_t + is_datetimelike: bool = ..., ) -> None: ... def group_nth( out: np.ndarray, # rank_t[:, ::1] @@ -114,9 +115,10 @@ def group_nth( values: np.ndarray, # ndarray[rank_t, ndim=2] labels: np.ndarray, # const int64_t[:] mask: npt.NDArray[np.bool_] | None, - result_mask: npt.NDArray[np.bool_] | None, + result_mask: npt.NDArray[np.bool_] | None = ..., min_count: int = ..., # int64_t rank: int = ..., # int64_t + is_datetimelike: bool = ..., ) -> None: ... def group_rank( out: np.ndarray, # float64_t[:, ::1] @@ -124,7 +126,7 @@ def group_rank( labels: np.ndarray, # const int64_t[:] ngroups: int, is_datetimelike: bool, - ties_method: Literal["aveage", "min", "max", "first", "dense"] = ..., + ties_method: Literal["average", "min", "max", "first", "dense"] = ..., ascending: bool = ..., pct: bool = ..., na_option: Literal["keep", "top", "bottom"] = ..., @@ -136,6 +138,7 @@ def group_max( values: np.ndarray, # ndarray[groupby_t, ndim=2] labels: np.ndarray, # const int64_t[:] min_count: int = ..., + is_datetimelike: bool = ..., mask: np.ndarray | None = ..., result_mask: np.ndarray | None = ..., ) -> None: ... 
@@ -145,6 +148,7 @@ def group_min( values: np.ndarray, # ndarray[groupby_t, ndim=2] labels: np.ndarray, # const int64_t[:] min_count: int = ..., + is_datetimelike: bool = ..., mask: np.ndarray | None = ..., result_mask: np.ndarray | None = ..., ) -> None: ... @@ -154,6 +158,9 @@ def group_cummin( labels: np.ndarray, # const int64_t[:] ngroups: int, is_datetimelike: bool, + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + skipna: bool = ..., ) -> None: ... def group_cummax( out: np.ndarray, # groupby_t[:, ::1] @@ -161,4 +168,7 @@ def group_cummax( labels: np.ndarray, # const int64_t[:] ngroups: int, is_datetimelike: bool, + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + skipna: bool = ..., ) -> None: ... diff --git a/pandas/_libs/internals.pyi b/pandas/_libs/internals.pyi index 6a90fbc729580..201c7b7b565cc 100644 --- a/pandas/_libs/internals.pyi +++ b/pandas/_libs/internals.pyi @@ -32,7 +32,7 @@ def update_blklocs_and_blknos( loc: int, nblocks: int, ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... - +@final class BlockPlacement: def __init__(self, val: int | slice | np.ndarray): ... @property diff --git a/pandas/_libs/join.pyi b/pandas/_libs/join.pyi index a5e91e2ce83eb..8d02f8f57dee1 100644 --- a/pandas/_libs/join.pyi +++ b/pandas/_libs/join.pyi @@ -56,6 +56,7 @@ def asof_join_backward_on_X_by_Y( right_by_values: np.ndarray, # by_t[:] allow_exact_matches: bool = ..., tolerance: np.number | int | float | None = ..., + use_hashtable: bool = ..., ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def asof_join_forward_on_X_by_Y( left_values: np.ndarray, # asof_t[:] @@ -64,6 +65,7 @@ def asof_join_forward_on_X_by_Y( right_by_values: np.ndarray, # by_t[:] allow_exact_matches: bool = ..., tolerance: np.number | int | float | None = ..., + use_hashtable: bool = ..., ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... 
def asof_join_nearest_on_X_by_Y( left_values: np.ndarray, # asof_t[:] @@ -72,22 +74,5 @@ def asof_join_nearest_on_X_by_Y( right_by_values: np.ndarray, # by_t[:] allow_exact_matches: bool = ..., tolerance: np.number | int | float | None = ..., -) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... -def asof_join_backward( - left_values: np.ndarray, # asof_t[:] - right_values: np.ndarray, # asof_t[:] - allow_exact_matches: bool = ..., - tolerance: np.number | int | float | None = ..., -) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... -def asof_join_forward( - left_values: np.ndarray, # asof_t[:] - right_values: np.ndarray, # asof_t[:] - allow_exact_matches: bool = ..., - tolerance: np.number | int | float | None = ..., -) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... -def asof_join_nearest( - left_values: np.ndarray, # asof_t[:] - right_values: np.ndarray, # asof_t[:] - allow_exact_matches: bool = ..., - tolerance: np.number | int | float | None = ..., + use_hashtable: bool = ..., ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... diff --git a/pandas/_libs/missing.pyi b/pandas/_libs/missing.pyi index 3a4cc9def07bd..27f227558dee5 100644 --- a/pandas/_libs/missing.pyi +++ b/pandas/_libs/missing.pyi @@ -1,7 +1,8 @@ import numpy as np from numpy import typing as npt -class NAType: ... +class NAType: + def __new__(cls, *args, **kwargs): ... NA: NAType diff --git a/pandas/_libs/parsers.pyi b/pandas/_libs/parsers.pyi index 01f5d5802ccd5..6b0bbf183f07e 100644 --- a/pandas/_libs/parsers.pyi +++ b/pandas/_libs/parsers.pyi @@ -63,7 +63,6 @@ class TextReader: skip_blank_lines: bool = ..., encoding_errors: bytes | str = ..., ): ... - def set_error_bad_lines(self, status: int) -> None: ... def set_noconvert(self, i: int) -> None: ... def remove_noconvert(self, i: int) -> None: ... def close(self) -> None: ... 
diff --git a/pandas/_libs/tslibs/ccalendar.pyi b/pandas/_libs/tslibs/ccalendar.pyi index 5d5b935ffa54b..993f18a61d74a 100644 --- a/pandas/_libs/tslibs/ccalendar.pyi +++ b/pandas/_libs/tslibs/ccalendar.pyi @@ -8,7 +8,5 @@ def get_firstbday(year: int, month: int) -> int: ... def get_lastbday(year: int, month: int) -> int: ... def get_day_of_year(year: int, month: int, day: int) -> int: ... def get_iso_calendar(year: int, month: int, day: int) -> tuple[int, int, int]: ... -def is_leapyear(year: int) -> bool: ... def get_week_of_year(year: int, month: int, day: int) -> int: ... def get_days_in_month(year: int, month: int) -> int: ... -def dayofweek(y: int, m: int, d: int) -> int: ... diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi index dd439ebfc4798..041c51533d8da 100644 --- a/pandas/_libs/tslibs/dtypes.pyi +++ b/pandas/_libs/tslibs/dtypes.pyi @@ -14,10 +14,12 @@ class PeriodDtypeBase: # actually __cinit__ def __new__(cls, code: int): ... + @property def _freq_group_code(self) -> int: ... @property def _resolution_obj(self) -> Resolution: ... def _get_to_timestamp_base(self) -> int: ... + @property def _freqstr(self) -> str: ... class FreqGroup(Enum): diff --git a/pandas/_libs/tslibs/nattype.pyi b/pandas/_libs/tslibs/nattype.pyi index e5a7e0223e534..0aa80330b15bc 100644 --- a/pandas/_libs/tslibs/nattype.pyi +++ b/pandas/_libs/tslibs/nattype.pyi @@ -12,8 +12,6 @@ NaT: NaTType iNaT: int nat_strings: set[str] -def is_null_datetimelike(val: object, inat_is_null: bool = ...) -> bool: ... - _NaTComparisonTypes = datetime | timedelta | Period | np.datetime64 | np.timedelta64 class _NatComparison: @@ -21,6 +19,7 @@ class _NatComparison: class NaTType: value: np.int64 + @property def asm8(self) -> np.datetime64: ... def to_datetime64(self) -> np.datetime64: ... 
def to_numpy( diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi index 12b113f0b73b1..1fe92e2870400 100644 --- a/pandas/_libs/tslibs/offsets.pyi +++ b/pandas/_libs/tslibs/offsets.pyi @@ -80,6 +80,7 @@ class BaseOffset: def name(self) -> str: ... @property def rule_code(self) -> str: ... + @property def freqstr(self) -> str: ... def apply_index(self, dtindex: DatetimeIndex) -> DatetimeIndex: ... def _apply_array(self, dtarr) -> None: ... diff --git a/pandas/_libs/tslibs/timezones.pyi b/pandas/_libs/tslibs/timezones.pyi index 20c403e93b149..d241a35f21cca 100644 --- a/pandas/_libs/tslibs/timezones.pyi +++ b/pandas/_libs/tslibs/timezones.pyi @@ -6,8 +6,6 @@ from typing import Callable import numpy as np -from pandas._typing import npt - # imported from dateutil.tz dateutil_gettz: Callable[[str], tzinfo] @@ -17,9 +15,6 @@ def infer_tzinfo( start: datetime | None, end: datetime | None, ) -> tzinfo | None: ... -def get_dst_info( - tz: tzinfo, -) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64], str]: ... def maybe_get_tz(tz: str | int | np.int64 | tzinfo | None) -> tzinfo | None: ... def get_timezone(tz: tzinfo) -> tzinfo | str: ... def is_utc(tz: tzinfo | None) -> bool: ...
xref #47760
https://api.github.com/repos/pandas-dev/pandas/pulls/47764
2022-07-17T14:19:18Z
2022-07-18T17:20:26Z
2022-07-18T17:20:26Z
2022-07-18T17:20:34Z
BUG: fix regression in Series[string] setitem setting a scalar with a mask
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst index 6ee140f59e096..6bd7378e05404 100644 --- a/doc/source/whatsnew/v1.4.4.rst +++ b/doc/source/whatsnew/v1.4.4.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`) +- Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index c9abef226770c..c68ffec600c8a 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -14,6 +14,7 @@ from pandas._typing import ( Dtype, Scalar, + npt, type_t, ) from pandas.compat import pa_version_under1p01 @@ -410,6 +411,12 @@ def __setitem__(self, key, value): super().__setitem__(key, value) + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: + # the super() method NDArrayBackedExtensionArray._putmask uses + # np.putmask which doesn't properly handle None/pd.NA, so using the + # base class implementation that uses __setitem__ + ExtensionArray._putmask(self, mask, value) + def astype(self, dtype, copy: bool = True): dtype = pandas_dtype(dtype) diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index a5eb6189db6f1..4376a0de37a8c 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -588,3 +588,23 @@ def test_isin(dtype, fixed_now_ts): result = s.isin(["a", fixed_now_ts]) expected = pd.Series([True, False, False]) tm.assert_series_equal(result, expected) + + +def test_setitem_scalar_with_mask_validation(dtype): + # https://github.com/pandas-dev/pandas/issues/47628 + # setting None with a boolean mask (through _putmaks) should still 
result + # in pd.NA values in the underlying array + ser = pd.Series(["a", "b", "c"], dtype=dtype) + mask = np.array([False, True, False]) + + ser[mask] = None + assert ser.array[1] is pd.NA + + # for other non-string we should also raise an error + ser = pd.Series(["a", "b", "c"], dtype=dtype) + if type(ser.array) is pd.arrays.StringArray: + msg = "Cannot set non-string value" + else: + msg = "Scalar must be NA or str" + with pytest.raises(ValueError, match=msg): + ser[mask] = 1
- [x] closes #47628 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47763
2022-07-17T13:47:12Z
2022-07-18T22:29:09Z
2022-07-18T22:29:08Z
2022-07-19T06:13:31Z
REGR: preserve reindexed array object (instead of creating new array) for concat with all-NA array
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst index e03e6cd41ebd3..2ce4d4b37f922 100644 --- a/doc/source/whatsnew/v1.4.4.rst +++ b/doc/source/whatsnew/v1.4.4.rst @@ -17,6 +17,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.fillna` not working :class:`DataFrame` with :class:`MultiIndex` (:issue:`47649`) - Fixed regression in taking NULL :class:`objects` from a :class:`DataFrame` causing a segmentation violation. These NULL values are created by :meth:`numpy.empty_like` (:issue:`46848`) - Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`) +- Fixed regression in :func:`concat` or :func:`merge` handling of all-NaN ExtensionArrays with custom attributes (:issue:`47762`) - Fixed regression in calling bitwise numpy ufuncs (for example, ``np.bitwise_and``) on Index objects (:issue:`46769`) - Fixed regression in :func:`cut` using a ``datetime64`` IntervalIndex as bins (:issue:`46218`) - Fixed regression in :meth:`DataFrame.select_dtypes` where ``include="number"`` included :class:`BooleanDtype` (:issue:`46870`) diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 77197dac3363b..0df8aa5a055b0 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -476,16 +476,21 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: return DatetimeArray(i8values, dtype=empty_dtype) elif is_1d_only_ea_dtype(empty_dtype): - empty_dtype = cast(ExtensionDtype, empty_dtype) - cls = empty_dtype.construct_array_type() - - missing_arr = cls._from_sequence([], dtype=empty_dtype) - ncols, nrows = self.shape - assert ncols == 1, ncols - empty_arr = -1 * np.ones((nrows,), dtype=np.intp) - return missing_arr.take( - empty_arr, allow_fill=True, fill_value=fill_value - ) + if is_dtype_equal(blk_dtype, empty_dtype) and self.indexers: + # avoid creating new empty array if we already have 
an array + # with correct dtype that can be reindexed + pass + else: + empty_dtype = cast(ExtensionDtype, empty_dtype) + cls = empty_dtype.construct_array_type() + + missing_arr = cls._from_sequence([], dtype=empty_dtype) + ncols, nrows = self.shape + assert ncols == 1, ncols + empty_arr = -1 * np.ones((nrows,), dtype=np.intp) + return missing_arr.take( + empty_arr, allow_fill=True, fill_value=fill_value + ) elif isinstance(empty_dtype, ExtensionDtype): # TODO: no tests get here, a handful would if we disabled # the dt64tz special-case above (which is faster) diff --git a/pandas/tests/extension/array_with_attr/__init__.py b/pandas/tests/extension/array_with_attr/__init__.py new file mode 100644 index 0000000000000..49da6af024a31 --- /dev/null +++ b/pandas/tests/extension/array_with_attr/__init__.py @@ -0,0 +1,6 @@ +from pandas.tests.extension.array_with_attr.array import ( + FloatAttrArray, + FloatAttrDtype, +) + +__all__ = ["FloatAttrArray", "FloatAttrDtype"] diff --git a/pandas/tests/extension/array_with_attr/array.py b/pandas/tests/extension/array_with_attr/array.py new file mode 100644 index 0000000000000..d9327ca9f2f3f --- /dev/null +++ b/pandas/tests/extension/array_with_attr/array.py @@ -0,0 +1,84 @@ +""" +Test extension array that has custom attribute information (not stored on the dtype). + +""" +from __future__ import annotations + +import numbers + +import numpy as np + +from pandas._typing import type_t + +from pandas.core.dtypes.base import ExtensionDtype + +import pandas as pd +from pandas.core.arrays import ExtensionArray + + +class FloatAttrDtype(ExtensionDtype): + type = float + name = "float_attr" + na_value = np.nan + + @classmethod + def construct_array_type(cls) -> type_t[FloatAttrArray]: + """ + Return the array type associated with this dtype. 
+ + Returns + ------- + type + """ + return FloatAttrArray + + +class FloatAttrArray(ExtensionArray): + dtype = FloatAttrDtype() + __array_priority__ = 1000 + + def __init__(self, values, attr=None) -> None: + if not isinstance(values, np.ndarray): + raise TypeError("Need to pass a numpy array of float64 dtype as values") + if not values.dtype == "float64": + raise TypeError("Need to pass a numpy array of float64 dtype as values") + self.data = values + self.attr = attr + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + data = np.array(scalars, dtype="float64", copy=copy) + return cls(data) + + def __getitem__(self, item): + if isinstance(item, numbers.Integral): + return self.data[item] + else: + # slice, list-like, mask + item = pd.api.indexers.check_array_indexer(self, item) + return type(self)(self.data[item], self.attr) + + def __len__(self) -> int: + return len(self.data) + + def isna(self): + return np.isnan(self.data) + + def take(self, indexer, allow_fill=False, fill_value=None): + from pandas.api.extensions import take + + data = self.data + if allow_fill and fill_value is None: + fill_value = self.dtype.na_value + + result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill) + return type(self)(result, self.attr) + + def copy(self): + return type(self)(self.data.copy(), self.attr) + + @classmethod + def _concat_same_type(cls, to_concat): + data = np.concatenate([x.data for x in to_concat]) + attr = to_concat[0].attr if len(to_concat) else None + return cls(data, attr) diff --git a/pandas/tests/extension/array_with_attr/test_array_with_attr.py b/pandas/tests/extension/array_with_attr/test_array_with_attr.py new file mode 100644 index 0000000000000..3735fe40a0d67 --- /dev/null +++ b/pandas/tests/extension/array_with_attr/test_array_with_attr.py @@ -0,0 +1,33 @@ +import numpy as np + +import pandas as pd +import pandas._testing as tm +from pandas.tests.extension.array_with_attr import FloatAttrArray + + +def 
test_concat_with_all_na(): + # https://github.com/pandas-dev/pandas/pull/47762 + # ensure that attribute of the column array is preserved (when it gets + # preserved in reindexing the array) during merge/concat + arr = FloatAttrArray(np.array([np.nan, np.nan], dtype="float64"), attr="test") + + df1 = pd.DataFrame({"col": arr, "key": [0, 1]}) + df2 = pd.DataFrame({"key": [0, 1], "col2": [1, 2]}) + result = pd.merge(df1, df2, on="key") + expected = pd.DataFrame({"col": arr, "key": [0, 1], "col2": [1, 2]}) + tm.assert_frame_equal(result, expected) + assert result["col"].array.attr == "test" + + df1 = pd.DataFrame({"col": arr, "key": [0, 1]}) + df2 = pd.DataFrame({"key": [0, 2], "col2": [1, 2]}) + result = pd.merge(df1, df2, on="key") + expected = pd.DataFrame({"col": arr.take([0]), "key": [0], "col2": [1]}) + tm.assert_frame_equal(result, expected) + assert result["col"].array.attr == "test" + + result = pd.concat([df1.set_index("key"), df2.set_index("key")], axis=1) + expected = pd.DataFrame( + {"col": arr.take([0, 1, -1]), "col2": [1, np.nan, 2], "key": [0, 1, 2]} + ).set_index("key") + tm.assert_frame_equal(result, expected) + assert result["col"].array.attr == "test"
Originally reported in GeoPandas: https://github.com/geopandas/geopandas/issues/2493 This worked in pandas 1.3.5, was then originally "broken" by https://github.com/pandas-dev/pandas/pull/43043 (as an unintended side effect of some of the (proper) changes in that PR) somewhere between 1.3 and 1.4.0 but got fixed again before the final 1.4.0 because of subsequent refactoring in the internal concat code. Some of those refactors were then reverted in 1.4.3 for being able to revert the all-NA change (https://github.com/pandas-dev/pandas/pull/47372), surfacing the "bug" again. This is not necessarily strictly a "bug" in pandas (I also don't know if we make any guarantees about preserving array objects, let alone its attributes), but I think this is a decent change nevertheless. Currently, when concatting/merging a column with all-NA data, we create the "empty" (all-NA) array for the result from scratch. While if there is no dtype change, and so the original column already has the correct dtype, we can actually just reindex that array instead of creating a new all-NA array. For builtin dtypes that might not matter much, but for custom ExtensionArrays that avoids going through some complex code to create this new all-NA array: https://github.com/pandas-dev/pandas/blob/fc68a9a290fc314c090e037597138c74fa23ee6d/pandas/core/internals/concat.py#L479-L488 In GeoPandas, our ExtensionArray holds some optional attribute information (eg the coordinate reference system, a spatial index, ..) that is not part of the dtype. And so in that case, reindexing the original array lets geopandas preserve that information (if appropriate), while if creating an empty array from scratch with the code above, this information is definitely lost. Apart from the two-line actual code change, this PR adds a new, minimal test ExtensionArray that exhibits this behaviour of having an attribute, to be able to test this without dependency on GeoPandas.
https://api.github.com/repos/pandas-dev/pandas/pulls/47762
2022-07-17T12:04:03Z
2022-08-30T12:16:17Z
2022-08-30T12:16:17Z
2022-09-06T17:59:48Z
DEPR: returning tuple when grouping by a list containing single element
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index b69cf415ac21e..b5a6cf50fcb6a 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -776,6 +776,7 @@ Other Deprecations - Deprecated argument ``errors`` for :meth:`Series.mask`, :meth:`Series.where`, :meth:`DataFrame.mask`, and :meth:`DataFrame.where` as ``errors`` had no effect on this methods (:issue:`47728`) - Deprecated arguments ``*args`` and ``**kwargs`` in :class:`Rolling`, :class:`Expanding`, and :class:`ExponentialMovingWindow` ops. (:issue:`47836`) - Deprecated unused arguments ``encoding`` and ``verbose`` in :meth:`Series.to_excel` and :meth:`DataFrame.to_excel` (:issue:`47912`) +- Deprecated producing a single element when iterating over a :class:`DataFrameGroupBy` or a :class:`SeriesGroupBy` that has been grouped by a list of length 1; A tuple of length one will be returned instead (:issue:`42795`) .. --------------------------------------------------------------------------- .. 
_whatsnew_150.performance: diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 9e26598d85e74..631f70f390319 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -465,7 +465,9 @@ def _transform_general(self, func: Callable, *args, **kwargs) -> Series: klass = type(self.obj) results = [] - for name, group in self: + for name, group in self.grouper.get_iterator( + self._selected_obj, axis=self.axis + ): # this setattr is needed for test_transform_lambda_with_datetimetz object.__setattr__(group, "name", name) res = func(group, *args, **kwargs) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 28e1b2b388035..8e0ed959fabc3 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -645,6 +645,7 @@ class BaseGroupBy(PandasObject, SelectionMixin[NDFrameT], GroupByIndexingMixin): axis: int grouper: ops.BaseGrouper + keys: _KeysArgType | None = None group_keys: bool | lib.NoDefault @final @@ -821,6 +822,19 @@ def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]: Generator yielding sequence of (name, subsetted object) for each group """ + keys = self.keys + if isinstance(keys, list) and len(keys) == 1: + warnings.warn( + ( + "In a future version of pandas, a length 1 " + "tuple will be returned when iterating over a " + "a groupby with a grouper equal to a list of " + "length 1. Don't supply a list with a single grouper " + "to avoid this warning." 
+ ), + FutureWarning, + stacklevel=find_stack_level(), + ) return self.grouper.get_iterator(self._selected_obj, axis=self.axis) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 6ce5ffac9de52..e06a288c1eb38 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -150,7 +150,7 @@ def _groupby_and_merge(by, left: DataFrame, right: DataFrame, merge_pieces): if all(item in right.columns for item in by): rby = right.groupby(by, sort=False) - for key, lhs in lby: + for key, lhs in lby.grouper.get_iterator(lby._selected_obj, axis=lby.axis): if rby is None: rhs = right diff --git a/pandas/plotting/_matplotlib/groupby.py b/pandas/plotting/_matplotlib/groupby.py index 4f1cd3f38343a..17a214292608b 100644 --- a/pandas/plotting/_matplotlib/groupby.py +++ b/pandas/plotting/_matplotlib/groupby.py @@ -16,6 +16,8 @@ concat, ) +from pandas.plotting._matplotlib.misc import unpack_single_str_list + def create_iter_data_given_by( data: DataFrame, kind: str = "hist" @@ -108,7 +110,8 @@ def reconstruct_data_with_by( 1 3.0 4.0 NaN NaN 2 NaN NaN 5.0 6.0 """ - grouped = data.groupby(by) + by_modified = unpack_single_str_list(by) + grouped = data.groupby(by_modified) data_list = [] for key, group in grouped: diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index 3b151d67c70be..62242a4a2ddab 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -33,6 +33,7 @@ create_iter_data_given_by, reformat_hist_y_given_by, ) +from pandas.plotting._matplotlib.misc import unpack_single_str_list from pandas.plotting._matplotlib.tools import ( create_subplots, flatten_axes, @@ -67,7 +68,8 @@ def _args_adjust(self): # where subplots are created based on by argument if is_integer(self.bins): if self.by is not None: - grouped = self.data.groupby(self.by)[self.columns] + by_modified = unpack_single_str_list(self.by) + grouped = self.data.groupby(by_modified)[self.columns] self.bins = 
[self._calculate_bins(group) for key, group in grouped] else: self.bins = self._calculate_bins(self.data) diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py index e2a0d50544f22..4b74b067053a6 100644 --- a/pandas/plotting/_matplotlib/misc.py +++ b/pandas/plotting/_matplotlib/misc.py @@ -475,3 +475,11 @@ def r(h): ax.legend() ax.grid() return ax + + +def unpack_single_str_list(keys): + # GH 42795 + if isinstance(keys, list): + if len(keys) == 1 and isinstance(keys[0], str): + keys = keys[0] + return keys diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 920b869ef799b..73aeb17d8c274 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2795,3 +2795,19 @@ def test_groupby_none_column_name(): result = df.groupby(by=[None]).sum() expected = DataFrame({"b": [2, 5], "c": [9, 13]}, index=Index([1, 2], name=None)) tm.assert_frame_equal(result, expected) + + +def test_single_element_list_grouping(): + # GH 42795 + df = DataFrame( + {"a": [np.nan, 1], "b": [np.nan, 5], "c": [np.nan, 2]}, index=["x", "y"] + ) + msg = ( + "In a future version of pandas, a length 1 " + "tuple will be returned when iterating over a " + "a groupby with a grouper equal to a list of " + "length 1. Don't supply a list with a single grouper " + "to avoid this warning." + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + values, _ = next(iter(df.groupby(["a"])))
- [X] closes #42795 (Replace xxxx with the Github issue number) - [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature. - [x] Applied the removal in #47719
https://api.github.com/repos/pandas-dev/pandas/pulls/47761
2022-07-17T03:32:25Z
2022-08-01T20:15:10Z
2022-08-01T20:15:10Z
2022-12-05T03:22:33Z
TYP: reflect ensure_* function removals
diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi index 0cc9209fbdfc5..29d1365cad6fc 100644 --- a/pandas/_libs/algos.pyi +++ b/pandas/_libs/algos.pyi @@ -129,18 +129,11 @@ def diff_2d( ) -> None: ... def ensure_platform_int(arr: object) -> npt.NDArray[np.intp]: ... def ensure_object(arr: object) -> npt.NDArray[np.object_]: ... -def ensure_complex64(arr: object, copy=...) -> npt.NDArray[np.complex64]: ... -def ensure_complex128(arr: object, copy=...) -> npt.NDArray[np.complex128]: ... def ensure_float64(arr: object, copy=...) -> npt.NDArray[np.float64]: ... -def ensure_float32(arr: object, copy=...) -> npt.NDArray[np.float32]: ... def ensure_int8(arr: object, copy=...) -> npt.NDArray[np.int8]: ... def ensure_int16(arr: object, copy=...) -> npt.NDArray[np.int16]: ... def ensure_int32(arr: object, copy=...) -> npt.NDArray[np.int32]: ... def ensure_int64(arr: object, copy=...) -> npt.NDArray[np.int64]: ... -def ensure_uint8(arr: object, copy=...) -> npt.NDArray[np.uint8]: ... -def ensure_uint16(arr: object, copy=...) -> npt.NDArray[np.uint16]: ... -def ensure_uint32(arr: object, copy=...) -> npt.NDArray[np.uint32]: ... -def ensure_uint64(arr: object, copy=...) -> npt.NDArray[np.uint64]: ... def take_1d_int8_int8( values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ...
These were removed in #44207
https://api.github.com/repos/pandas-dev/pandas/pulls/47758
2022-07-16T21:00:12Z
2022-07-17T14:38:21Z
2022-07-17T14:38:21Z
2022-08-25T05:21:37Z
BUG: wide_to_long fails when stubname misses and i contains string type column
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 22a5f2a08362f..841b24bb06b6c 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -1019,6 +1019,7 @@ Reshaping - Bug in :meth:`DataFrame.join` with a list when using suffixes to join DataFrames with duplicate column names (:issue:`46396`) - Bug in :meth:`DataFrame.pivot_table` with ``sort=False`` results in sorted index (:issue:`17041`) - Bug in :meth:`concat` when ``axis=1`` and ``sort=False`` where the resulting Index was a :class:`Int64Index` instead of a :class:`RangeIndex` (:issue:`46675`) +- Bug in :meth:`wide_to_long` raises when ``stubnames`` is missing in columns and ``i`` contains string dtype column (:issue:`46044`) Sparse ^^^^^^ diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 06127c8ecb932..5de9c8e2f4108 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -131,7 +131,11 @@ def melt( for col in id_vars: id_data = frame.pop(col) if is_extension_array_dtype(id_data): - id_data = concat([id_data] * K, ignore_index=True) + if K > 0: + id_data = concat([id_data] * K, ignore_index=True) + else: + # We can't concat empty list. 
(GH 46044) + id_data = type(id_data)([], name=id_data.name, dtype=id_data.dtype) else: # error: Incompatible types in assignment (expression has type # "ndarray[Any, dtype[Any]]", variable has type "Series") diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 4fbfee6f829ba..2013b3484ebff 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -1086,3 +1086,27 @@ def test_warn_of_column_name_value(self): with tm.assert_produces_warning(FutureWarning): result = df.melt(id_vars="value") tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["O", "string"]) + def test_missing_stubname(self, dtype): + # GH46044 + df = DataFrame({"id": ["1", "2"], "a-1": [100, 200], "a-2": [300, 400]}) + df = df.astype({"id": dtype}) + result = wide_to_long( + df, + stubnames=["a", "b"], + i="id", + j="num", + sep="-", + ) + index = pd.Index( + [("1", 1), ("2", 1), ("1", 2), ("2", 2)], + name=("id", "num"), + ) + expected = DataFrame( + {"a": [100, 200, 300, 400], "b": [np.nan] * 4}, + index=index, + ) + new_level = expected.index.levels[0].astype(dtype) + expected.index = expected.index.set_levels(new_level, level=0) + tm.assert_frame_equal(result, expected)
- [x] closes #46044 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47757
2022-07-16T20:57:00Z
2022-07-19T19:22:39Z
2022-07-19T19:22:39Z
2022-07-19T19:27:37Z
TYP: Update timestamps.pyi
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi index f6a62688fc72d..082f26cf6f213 100644 --- a/pandas/_libs/tslibs/timestamps.pyi +++ b/pandas/_libs/tslibs/timestamps.pyi @@ -85,10 +85,10 @@ class Timestamp(datetime): def fold(self) -> int: ... @classmethod def fromtimestamp( - cls: type[_DatetimeT], t: float, tz: _tzinfo | None = ... + cls: type[_DatetimeT], ts: float, tz: _tzinfo | None = ... ) -> _DatetimeT: ... @classmethod - def utcfromtimestamp(cls: type[_DatetimeT], t: float) -> _DatetimeT: ... + def utcfromtimestamp(cls: type[_DatetimeT], ts: float) -> _DatetimeT: ... @classmethod def today(cls: type[_DatetimeT], tz: _tzinfo | str | None = ...) -> _DatetimeT: ... @classmethod @@ -118,19 +118,25 @@ class Timestamp(datetime): def date(self) -> _date: ... def time(self) -> _time: ... def timetz(self) -> _time: ... - def replace( + # LSP violation: nanosecond is not present in datetime.datetime.replace + # and has positional args following it + def replace( # type: ignore[override] self: _DatetimeT, - year: int = ..., - month: int = ..., - day: int = ..., - hour: int = ..., - minute: int = ..., - second: int = ..., - microsecond: int = ..., - tzinfo: _tzinfo | None = ..., - fold: int = ..., + year: int | None = ..., + month: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., + microsecond: int | None = ..., + nanosecond: int | None = ..., + tzinfo: _tzinfo | type[object] | None = ..., + fold: int | None = ..., + ) -> _DatetimeT: ... + # LSP violation: datetime.datetime.astimezone has a default value for tz + def astimezone( # type: ignore[override] + self: _DatetimeT, tz: _tzinfo | None ) -> _DatetimeT: ... - def astimezone(self: _DatetimeT, tz: _tzinfo | None = ...) -> _DatetimeT: ... def ctime(self) -> str: ... def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ... 
@classmethod @@ -206,8 +212,6 @@ class Timestamp(datetime): @property def dayofweek(self) -> int: ... @property - def day_of_month(self) -> int: ... - @property def day_of_year(self) -> int: ... @property def dayofyear(self) -> int: ...
null
https://api.github.com/repos/pandas-dev/pandas/pulls/47756
2022-07-16T20:42:59Z
2022-07-17T14:39:24Z
2022-07-17T14:39:24Z
2022-07-18T11:54:41Z
TST: Add additional test for future warning when call Series.str.cat(Series.str)
diff --git a/pandas/tests/strings/test_cat.py b/pandas/tests/strings/test_cat.py index 8abbc59343e78..4decdff8063a8 100644 --- a/pandas/tests/strings/test_cat.py +++ b/pandas/tests/strings/test_cat.py @@ -376,3 +376,22 @@ def test_cat_different_classes(klass): result = s.str.cat(klass(["x", "y", "z"])) expected = Series(["ax", "by", "cz"]) tm.assert_series_equal(result, expected) + + +def test_cat_on_series_dot_str(): + # GH 28277 + # Test future warning of `Series.str.__iter__` + ps = Series(["AbC", "de", "FGHI", "j", "kLLLm"]) + with tm.assert_produces_warning(FutureWarning): + ps.str.cat(others=ps.str) + # TODO(2.0): The following code can be uncommented + # when `Series.str.__iter__` is removed. + + # message = re.escape( + # "others must be Series, Index, DataFrame, np.ndarray " + # "or list-like (either containing only strings or " + # "containing only objects of type Series/Index/" + # "np.ndarray[1-dim])" + # ) + # with pytest.raises(TypeError, match=message): + # ps.str.cat(others=ps.str)
- [x] closes #28277 (Replace xxxx with the Github issue number) - [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47755
2022-07-16T20:41:33Z
2022-07-19T19:06:30Z
2022-07-19T19:06:30Z
2022-07-19T21:30:34Z
BUG: Set y-axis label, limits and ticks for a secondary y-axis (#47753)
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index b081f743f9b0b..6283321c7f710 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -977,6 +977,7 @@ Plotting - The function :meth:`DataFrame.plot.scatter` now accepts ``color`` as an alias for ``c`` and ``size`` as an alias for ``s`` for consistency to other plotting functions (:issue:`44670`) - Fix showing "None" as ylabel in :meth:`Series.plot` when not setting ylabel (:issue:`46129`) - Bug in :meth:`DataFrame.plot` that led to xticks and vertical grids being improperly placed when plotting a quarterly series (:issue:`47602`) +- Bug in :meth:`DataFrame.plot` that prevented setting y-axis label, limits and ticks for a secondary y-axis (:issue:`47753`) Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 3641cd7213fec..301474edc6a8e 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -679,6 +679,7 @@ def _adorn_subplots(self): ) for ax in self.axes: + ax = getattr(ax, "right_ax", ax) if self.yticks is not None: ax.set_yticks(self.yticks) diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index 3ec3744e43653..538c9c2fb5059 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -2204,6 +2204,17 @@ def test_xlabel_ylabel_dataframe_plane_plot(self, kind, xlabel, ylabel): assert ax.get_xlabel() == (xcol if xlabel is None else xlabel) assert ax.get_ylabel() == (ycol if ylabel is None else ylabel) + @pytest.mark.parametrize("secondary_y", (False, True)) + def test_secondary_y(self, secondary_y): + ax_df = DataFrame([0]).plot( + secondary_y=secondary_y, ylabel="Y", ylim=(0, 100), yticks=[99] + ) + for ax in ax_df.figure.axes: + if ax.yaxis.get_visible(): + assert ax.get_ylabel() == "Y" + assert ax.get_ylim() == (0, 100) + assert 
ax.get_yticks()[0] == 99 + def _generate_4_axes_via_gridspec(): import matplotlib as mpl
When passing `secondary_y=True` to a plotting function, a second axes with a y-axis on the right side is created. Passing `ylabel`, `ylim` or `yticks` changed these properties of the original invisible left y-axis, not the secondary y-axis. - [x] closes #47753 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47754
2022-07-16T19:03:10Z
2022-07-18T17:37:41Z
2022-07-18T17:37:41Z
2022-07-19T08:22:49Z
BUG: Fix pc.power_checked min version
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 07b09d78016fd..8957ea493e9ad 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -18,6 +18,7 @@ from pandas.compat import ( pa_version_under1p01, pa_version_under2p0, + pa_version_under4p0, pa_version_under5p0, pa_version_under6p0, ) @@ -121,9 +122,9 @@ def floordiv_compat( "rmod": NotImplemented, "divmod": NotImplemented, "rdivmod": NotImplemented, - "pow": NotImplemented if pa_version_under2p0 else pc.power_checked, + "pow": NotImplemented if pa_version_under4p0 else pc.power_checked, "rpow": NotImplemented - if pa_version_under2p0 + if pa_version_under4p0 else lambda x, y: pc.power_checked(y, x), }
xref https://github.com/pandas-dev/pandas/pull/47645#discussion_r922685768= First supported in pyarrow 4.0: https://arrow.apache.org/docs/4.0/python/api/compute.html
https://api.github.com/repos/pandas-dev/pandas/pulls/47752
2022-07-16T18:01:28Z
2022-07-17T09:53:59Z
2022-07-17T09:53:59Z
2022-07-17T17:27:16Z
FIX: REGR: setting numeric value in Categorical Series with enlargement raise internal error
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 502e37705abfb..aace7ae221304 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -962,6 +962,7 @@ Indexing - Bug in :meth:`DataFrame.sum` min_count changes dtype if input contains NaNs (:issue:`46947`) - Bug in :class:`IntervalTree` that lead to an infinite recursion. (:issue:`46658`) - Bug in :class:`PeriodIndex` raising ``AttributeError`` when indexing on ``NA``, rather than putting ``NaT`` in its place. (:issue:`46673`) +- Bug in :meth:`DataFrame.loc` when enlarging a :class:`Series` with dtype :class:`CategoricalDtype` with a scalar (:issue:`47677`) - Missing diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 769656d1c4755..00b18ef154f27 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -591,7 +591,9 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan): return dtype, fv elif isna(fill_value): - dtype = _dtype_obj + # preserve dtype in case of categoricaldtype + if not isinstance(dtype, CategoricalDtype): + dtype = _dtype_obj if fill_value is None: # but we retain e.g. 
pd.NA fill_value = np.nan @@ -646,6 +648,12 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan): return np.dtype("object"), fill_value + elif isinstance(dtype, CategoricalDtype): + if fill_value in dtype.categories: + return dtype, fill_value + else: + return object, ensure_object(fill_value) + elif is_float(fill_value): if issubclass(dtype.type, np.bool_): dtype = np.dtype(np.object_) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 4c38a2219372d..60c4ee5518047 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -18,6 +18,7 @@ import pandas as pd from pandas import ( Categorical, + CategoricalDtype, CategoricalIndex, DataFrame, DatetimeIndex, @@ -1820,6 +1821,46 @@ def test_loc_getitem_sorted_index_level_with_duplicates(self): result = df.loc[("foo", "bar")] tm.assert_frame_equal(result, expected) + def test_additional_element_to_categorical_series_loc(self): + # GH#47677 + result = Series(["a", "b", "c"], dtype="category") + result.loc[3] = 0 + expected = Series(["a", "b", "c", 0], dtype="object") + tm.assert_series_equal(result, expected) + + def test_additional_categorical_element_loc(self): + # GH#47677 + result = Series(["a", "b", "c"], dtype="category") + result.loc[3] = "a" + expected = Series(["a", "b", "c", "a"], dtype="category") + tm.assert_series_equal(result, expected) + + def test_loc_set_nan_in_categorical_series(self, any_numeric_ea_dtype): + # GH#47677 + srs = Series( + [1, 2, 3], + dtype=CategoricalDtype(Index([1, 2, 3], dtype=any_numeric_ea_dtype)), + ) + # enlarge + srs.loc[3] = np.nan + assert srs.values.dtype._categories.dtype == any_numeric_ea_dtype + # set into + srs.loc[1] = np.nan + assert srs.values.dtype._categories.dtype == any_numeric_ea_dtype + + @pytest.mark.parametrize("na", (np.nan, pd.NA, None)) + def test_loc_consistency_series_enlarge_set_into(self, na): + # GH#47677 + srs_enlarge = Series(["a", "b", "c"], dtype="category") + srs_enlarge.loc[3] 
= na + + srs_setinto = Series(["a", "b", "c", "a"], dtype="category") + srs_setinto.loc[3] = na + + tm.assert_series_equal(srs_enlarge, srs_setinto) + expected = Series(["a", "b", "c", na], dtype="category") + tm.assert_series_equal(srs_enlarge, expected) + def test_loc_getitem_preserves_index_level_category_dtype(self): # GH#15166 df = DataFrame(
- [x] closes #47677 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47751
2022-07-16T16:48:54Z
2022-08-16T21:24:23Z
null
2022-08-16T21:25:04Z
TYP: def validate_*
diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi index 12b113f0b73b1..4567dde4e056b 100644 --- a/pandas/_libs/tslibs/offsets.pyi +++ b/pandas/_libs/tslibs/offsets.pyi @@ -104,7 +104,9 @@ class SingleConstructorOffset(BaseOffset): @overload def to_offset(freq: None) -> None: ... @overload -def to_offset(freq: timedelta | BaseOffset | str) -> BaseOffset: ... +def to_offset(freq: _BaseOffsetT) -> _BaseOffsetT: ... +@overload +def to_offset(freq: timedelta | str) -> BaseOffset: ... class Tick(SingleConstructorOffset): _reso: int diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index e3aa5bb52f2ba..140d41782e6d3 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -17,7 +17,11 @@ """ from __future__ import annotations -from typing import Any +from typing import ( + Any, + TypeVar, + overload, +) from numpy import ndarray @@ -25,6 +29,7 @@ is_bool, is_integer, ) +from pandas._typing import Axis from pandas.errors import UnsupportedFunctionCall from pandas.util._validators import ( validate_args, @@ -32,6 +37,8 @@ validate_kwargs, ) +AxisNoneT = TypeVar("AxisNoneT", Axis, None) + class CompatValidator: def __init__( @@ -84,7 +91,7 @@ def __call__( ) -def process_skipna(skipna, args): +def process_skipna(skipna: bool | ndarray | None, args) -> tuple[bool, Any]: if isinstance(skipna, ndarray) or skipna is None: args = (skipna,) + args skipna = True @@ -92,7 +99,7 @@ def process_skipna(skipna, args): return skipna, args -def validate_argmin_with_skipna(skipna, args, kwargs): +def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool: """ If 'Series.argmin' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so @@ -104,7 +111,7 @@ def validate_argmin_with_skipna(skipna, args, kwargs): return skipna -def validate_argmax_with_skipna(skipna, args, kwargs): +def 
validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool: """ If 'Series.argmax' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so @@ -137,7 +144,7 @@ def validate_argmax_with_skipna(skipna, args, kwargs): ) -def validate_argsort_with_ascending(ascending, args, kwargs): +def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool: """ If 'Categorical.argsort' is called via the 'numpy' library, the first parameter in its signature is 'axis', which takes either an integer or @@ -149,7 +156,8 @@ def validate_argsort_with_ascending(ascending, args, kwargs): ascending = True validate_argsort_kind(args, kwargs, max_fname_arg_count=3) - return ascending + # error: Incompatible return value type (got "int", expected "bool") + return ascending # type: ignore[return-value] CLIP_DEFAULTS: dict[str, Any] = {"out": None} @@ -158,7 +166,19 @@ def validate_argsort_with_ascending(ascending, args, kwargs): ) -def validate_clip_with_axis(axis, args, kwargs): +@overload +def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None: + ... + + +@overload +def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT: + ... 
+ + +def validate_clip_with_axis( + axis: ndarray | AxisNoneT, args, kwargs +) -> AxisNoneT | None: """ If 'NDFrame.clip' is called via the numpy library, the third parameter in its signature is 'out', which can takes an ndarray, so check if the 'axis' @@ -167,10 +187,14 @@ def validate_clip_with_axis(axis, args, kwargs): """ if isinstance(axis, ndarray): args = (axis,) + args - axis = None + # error: Incompatible types in assignment (expression has type "None", + # variable has type "Union[ndarray[Any, Any], str, int]") + axis = None # type: ignore[assignment] validate_clip(args, kwargs) - return axis + # error: Incompatible return value type (got "Union[ndarray[Any, Any], + # str, int]", expected "Union[str, int, None]") + return axis # type: ignore[return-value] CUM_FUNC_DEFAULTS: dict[str, Any] = {} @@ -184,7 +208,7 @@ def validate_clip_with_axis(axis, args, kwargs): ) -def validate_cum_func_with_skipna(skipna, args, kwargs, name): +def validate_cum_func_with_skipna(skipna, args, kwargs, name) -> bool: """ If this function is called via the 'numpy' library, the third parameter in its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so @@ -288,7 +312,7 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name): validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs") -def validate_take_with_convert(convert, args, kwargs): +def validate_take_with_convert(convert: ndarray | bool | None, args, kwargs) -> bool: """ If this function is called via the 'numpy' library, the third parameter in its signature is 'axis', which takes either an ndarray or 'None', so check diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 0f88ad9811bf0..325c94d0ea267 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -2164,7 +2164,17 @@ def ensure_arraylike_for_datetimelike(data, copy: bool, cls_name: str): return data, copy -def validate_periods(periods): +@overload 
+def validate_periods(periods: None) -> None: + ... + + +@overload +def validate_periods(periods: int | float) -> int: + ... + + +def validate_periods(periods: int | float | None) -> int | None: """ If a `periods` argument is passed to the Datetime/Timedelta Array/Index constructor, cast it to an integer. @@ -2187,7 +2197,9 @@ def validate_periods(periods): periods = int(periods) elif not lib.is_integer(periods): raise TypeError(f"periods must be a number, got {periods}") - return periods + # error: Incompatible return value type (got "Optional[float]", + # expected "Optional[int]") + return periods # type: ignore[return-value] def validate_inferred_freq(freq, inferred_freq, freq_infer): diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 106afcc3c12ea..d9f6cecc8d61d 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -251,7 +251,7 @@ def _scalar_type(self) -> type[Timestamp]: # Constructors _dtype: np.dtype | DatetimeTZDtype - _freq = None + _freq: BaseOffset | None = None _default_dtype = DT64NS_DTYPE # used in TimeLikeOps.__init__ @classmethod diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index fa7c4e0d0aa70..6e6de8399cc38 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -8,6 +8,8 @@ Callable, Literal, Sequence, + TypeVar, + overload, ) import numpy as np @@ -92,6 +94,8 @@ TimedeltaArray, ) +BaseOffsetT = TypeVar("BaseOffsetT", bound=BaseOffset) + _shared_doc_kwargs = { "klass": "PeriodArray", @@ -976,7 +980,19 @@ def period_array( return PeriodArray._from_sequence(data, dtype=dtype) -def validate_dtype_freq(dtype, freq): +@overload +def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT: + ... + + +@overload +def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset: + ... 
+ + +def validate_dtype_freq( + dtype, freq: BaseOffsetT | timedelta | str | None +) -> BaseOffsetT: """ If both a dtype and a freq are available, ensure they match. If only dtype is available, extract the implied freq. @@ -996,7 +1012,10 @@ def validate_dtype_freq(dtype, freq): IncompatibleFrequency : mismatch between dtype and freq """ if freq is not None: - freq = to_offset(freq) + # error: Incompatible types in assignment (expression has type + # "BaseOffset", variable has type "Union[BaseOffsetT, timedelta, + # str, None]") + freq = to_offset(freq) # type: ignore[assignment] if dtype is not None: dtype = pandas_dtype(dtype) @@ -1006,7 +1025,9 @@ def validate_dtype_freq(dtype, freq): freq = dtype.freq elif freq != dtype.freq: raise IncompatibleFrequency("specified freq and dtype are different") - return freq + # error: Incompatible return value type (got "Union[BaseOffset, Any, None]", + # expected "BaseOffset") + return freq # type: ignore[return-value] def dt64arr_to_periodarr( diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index 3676e6eb0091e..fc3439a57a002 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -5,6 +5,7 @@ from __future__ import annotations from typing import ( + Any, Iterable, Sequence, TypeVar, @@ -265,7 +266,9 @@ def validate_bool_kwarg( return value -def validate_axis_style_args(data, args, kwargs, arg_name, method_name): +def validate_axis_style_args( + data, args, kwargs, arg_name, method_name +) -> dict[str, Any]: """ Argument handler for mixed index, columns / axis functions
null
https://api.github.com/repos/pandas-dev/pandas/pulls/47750
2022-07-16T15:49:09Z
2022-07-18T17:38:58Z
2022-07-18T17:38:58Z
2022-09-10T01:39:04Z
BUG: Correct numeric_only default for resample var and std
diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 2af9e09d1c713..83626a42134d6 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -937,7 +937,13 @@ def asfreq(self, fill_value=None): """ return self._upsample("asfreq", fill_value=fill_value) - def std(self, ddof=1, numeric_only: bool = False, *args, **kwargs): + def std( + self, + ddof=1, + numeric_only: bool | lib.NoDefault = lib.no_default, + *args, + **kwargs, + ): """ Compute standard deviation of groups, excluding missing values. @@ -958,7 +964,13 @@ def std(self, ddof=1, numeric_only: bool = False, *args, **kwargs): nv.validate_resampler_func("std", args, kwargs) return self._downsample("std", ddof=ddof, numeric_only=numeric_only) - def var(self, ddof=1, numeric_only: bool = False, *args, **kwargs): + def var( + self, + ddof=1, + numeric_only: bool | lib.NoDefault = lib.no_default, + *args, + **kwargs, + ): """ Compute variance of groups, excluding missing values. diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py index 2d74b703b9bb1..c5cd777962df3 100644 --- a/pandas/tests/resample/test_resample_api.py +++ b/pandas/tests/resample/test_resample_api.py @@ -859,6 +859,10 @@ def test_frame_downsample_method(method, numeric_only, expected_data): expected_index = date_range("2018-12-31", periods=1, freq="Y") df = DataFrame({"cat": ["cat_1", "cat_2"], "num": [5, 20]}, index=index) resampled = df.resample("Y") + if numeric_only is lib.no_default: + kwargs = {} + else: + kwargs = {"numeric_only": numeric_only} func = getattr(resampled, method) if numeric_only is lib.no_default and method not in ( @@ -882,9 +886,9 @@ def test_frame_downsample_method(method, numeric_only, expected_data): if isinstance(expected_data, str): klass = TypeError if method == "var" else ValueError with pytest.raises(klass, match=expected_data): - _ = func(numeric_only=numeric_only) + _ = func(**kwargs) else: - result = func(numeric_only=numeric_only) + 
result = func(**kwargs) expected = DataFrame(expected_data, index=expected_index) tm.assert_frame_equal(result, expected)
- [x] closes #46560 (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. No whatsnew since this was introduced in 1.5.0. The test would have caught it but it was explicitly passing `numeric_only=lib.no_default` instead of just not passing any arg. As far as I know, this was the last task for #46560.
https://api.github.com/repos/pandas-dev/pandas/pulls/47749
2022-07-16T15:42:47Z
2022-07-16T18:19:56Z
2022-07-16T18:19:56Z
2022-07-18T21:09:50Z
FIX: PeriodIndex json roundtrip
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 82090c93a965e..252bea3ba774a 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -956,6 +956,7 @@ I/O - Bug in :func:`read_sas` that scrambled column names (:issue:`31243`) - Bug in :func:`read_sas` with RLE-compressed SAS7BDAT files that contain 0x00 control bytes (:issue:`47099`) - Bug in :func:`read_parquet` with ``use_nullable_dtypes=True`` where ``float64`` dtype was returned instead of nullable ``Float64`` dtype (:issue:`45694`) +- Bug in :meth:`DataFrame.to_json` where ``PeriodDtype`` would not make the serialization roundtrip when read back with :meth:`read_json` (:issue:`44720`) Period ^^^^^^ diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index 44c5ce0e5ee83..b7a8b5cc82f7a 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -197,6 +197,9 @@ def convert_json_field_to_pandas_type(field) -> str | CategoricalDtype: elif typ == "datetime": if field.get("tz"): return f"datetime64[ns, {field['tz']}]" + elif field.get("freq"): + # GH#47747 using datetime over period to minimize the change surface + return f"period[{field['freq']}]" else: return "datetime64[ns]" elif typ == "any": diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index c90ac2fb3b813..f4c8b9e764d6d 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -708,6 +708,44 @@ def test_read_json_table_orient_raises(self, index_nm, vals, recwarn): with pytest.raises(NotImplementedError, match="can not yet read "): pd.read_json(out, orient="table") + @pytest.mark.parametrize( + "index_nm", + [None, "idx", pytest.param("index", marks=pytest.mark.xfail), "level_0"], + ) + @pytest.mark.parametrize( + "vals", + [ + {"ints": [1, 2, 3, 4]}, + {"objects": ["a", "b", "c", "d"]}, + {"objects": ["1", "2", "3", "4"]}, + 
{"date_ranges": pd.date_range("2016-01-01", freq="d", periods=4)}, + {"categoricals": pd.Series(pd.Categorical(["a", "b", "c", "c"]))}, + { + "ordered_cats": pd.Series( + pd.Categorical(["a", "b", "c", "c"], ordered=True) + ) + }, + {"floats": [1.0, 2.0, 3.0, 4.0]}, + {"floats": [1.1, 2.2, 3.3, 4.4]}, + {"bools": [True, False, False, True]}, + { + "timezones": pd.date_range( + "2016-01-01", freq="d", periods=4, tz="US/Central" + ) # added in # GH 35973 + }, + ], + ) + def test_read_json_table_period_orient(self, index_nm, vals, recwarn): + df = DataFrame( + vals, + index=pd.Index( + (pd.Period(f"2022Q{q}") for q in range(1, 5)), name=index_nm + ), + ) + out = df.to_json(orient="table") + result = pd.read_json(out, orient="table") + tm.assert_frame_equal(df, result) + @pytest.mark.parametrize( "idx", [
- [X] closes #44720 - [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47747
2022-07-16T11:09:54Z
2022-07-18T22:41:22Z
2022-07-18T22:41:21Z
2022-07-18T22:41:28Z
Update environment.yml
diff --git a/environment.yml b/environment.yml index 0a6055d80c071..39458a8de2d8c 100644 --- a/environment.yml +++ b/environment.yml @@ -3,7 +3,7 @@ name: pandas-dev channels: - conda-forge dependencies: - - python=3.8 + - python=3.9 # test dependencies - cython=0.29.30
- update python version - [ ] closes #xxxx (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47746
2022-07-16T04:56:32Z
2022-07-16T05:00:17Z
null
2022-07-16T05:00:17Z
BUG: Behavior with fallback between raise and coerce #46071
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index a1a2149da7cf6..74e3e1093eb75 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -902,6 +902,7 @@ Datetimelike - Bug in :meth:`DatetimeIndex.resolution` incorrectly returning "day" instead of "nanosecond" for nanosecond-resolution indexes (:issue:`46903`) - Bug in :class:`Timestamp` with an integer or float value and ``unit="Y"`` or ``unit="M"`` giving slightly-wrong results (:issue:`47266`) - Bug in :class:`.DatetimeArray` construction when passed another :class:`.DatetimeArray` and ``freq=None`` incorrectly inferring the freq from the given array (:issue:`47296`) +- Bug in :func:`to_datetime` where ``infer_datetime_format`` fallback would not run if ``errors=coerce`` (:issue:`46071`) - Bug in :func:`to_datetime` where ``OutOfBoundsDatetime`` would be thrown even if ``errors=coerce`` if there were more than 50 rows (:issue:`45319`) - Bug when adding a :class:`DateOffset` to a :class:`Series` would not add the ``nanoseconds`` field (:issue:`47856`) - diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 203a5711b7a59..0063df68c595b 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -501,6 +501,10 @@ def _array_strptime_with_fallback( if "%Z" in fmt or "%z" in fmt: return _return_parsed_timezone_results(result, timezones, tz, name) + if infer_datetime_format and np.isnan(result).any(): + # Indicates to the caller to fallback to objects_to_datetime64ns + return None + return _box_as_indexlike(result, utc=utc, name=name) @@ -798,7 +802,10 @@ def to_datetime( If :const:`True` and no `format` is given, attempt to infer the format of the datetime strings based on the first non-NaN element, and if it can be inferred, switch to a faster method of parsing them. - In some cases this can increase the parsing speed by ~5-10x. + In some cases this can increase the parsing speed by ~5-10x. 
If subsequent + datetime strings do not follow the inferred format, parsing will fall + back to the slower method of determining the format for each + string individually. origin : scalar, default 'unix' Define the reference date. The numeric values would be parsed as number of units (defined by `unit`) since this reference date. diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index af1a292a2975a..68ffff3fc93f5 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -3,11 +3,13 @@ """ import operator +from dateutil.parser._parser import ParserError import numpy as np import pytest from pandas._libs.tslibs import tz_compare from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas.errors import OutOfBoundsDatetime from pandas.core.dtypes.dtypes import DatetimeTZDtype @@ -639,3 +641,47 @@ def test_tz_localize_t2d(self): roundtrip = expected.tz_localize("US/Pacific") tm.assert_datetime_array_equal(roundtrip, dta) + + @pytest.mark.parametrize( + "error", + ["coerce", "raise"], + ) + def test_fallback_different_formats(self, error): + # GH#46071 + # 2 valid dates with different formats + # Should parse with no errors + s = pd.Series(["6/30/2025", "1 27 2024"]) + expected = pd.Series( + [pd.Timestamp("2025-06-30 00:00:00"), pd.Timestamp("2024-01-27 00:00:00")] + ) + result = pd.to_datetime(s, errors=error, infer_datetime_format=True) + tm.assert_series_equal(expected, result) + + @pytest.mark.parametrize( + "dateseries", + [ + pd.Series(["1/1/2000", "7/12/1200"]), + pd.Series(["1/1/2000", "Invalid input"]), + ], + ) + def test_fallback_with_errors_coerce(self, dateseries): + # GH#46071 + # Invalid inputs + # Parsing should fail for the second element + expected = pd.Series([pd.Timestamp("2000-01-01 00:00:00"), pd.NaT]) + result = pd.to_datetime(dateseries, errors="coerce", infer_datetime_format=True) + tm.assert_series_equal(expected, result) + + def 
test_fallback_with_errors_raise(self): + # GH#46071 + # Invalid inputs + # Parsing should fail for the second element + dates1 = pd.Series(["1/1/2000", "7/12/1200"]) + with pytest.raises( + OutOfBoundsDatetime, match="Out of bounds nanosecond timestamp" + ): + pd.to_datetime(dates1, errors="raise", infer_datetime_format=True) + + dates2 = pd.Series(["1/1/2000", "Invalid input"]) + with pytest.raises(ParserError, match="Unknown string format: Invalid input"): + pd.to_datetime(dates2, errors="raise", infer_datetime_format=True)
- [ ] closes #46071 (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. The problem reported by this issue is that success in parsing a date string depends on how errors are reported (coerce vs raise). How the error is reported should not affect whether or not a parsing error occurs. Initially it appears that experiment 1 should return an error (as experiment 3 does) because the second date string does not match the format established by the first string. However, there is a fallback mechanism that is supposed to try parsing again without the effect of the `infer_datetime_format` flag. So in fact, experiment 3 should succeed in parsing the date as experiment 1 does. The problem is that the fallback mechanism only works when `errors=raise`. This bug fix applies the same logic when `errors=coerce` so that the fallback parsing will take place regardless of how errors are reported.
https://api.github.com/repos/pandas-dev/pandas/pulls/47745
2022-07-15T23:50:02Z
2022-11-28T12:26:13Z
null
2022-11-28T12:26:13Z
ENH/TST: Add quantile & mode tests for ArrowExtensionArray
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 5db859897b663..147134afd70c3 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -28,6 +28,7 @@ pa_version_under6p0, pa_version_under7p0, pa_version_under8p0, + pa_version_under9p0, ) if TYPE_CHECKING: @@ -160,4 +161,5 @@ def get_lzma_file() -> type[lzma.LZMAFile]: "pa_version_under6p0", "pa_version_under7p0", "pa_version_under8p0", + "pa_version_under9p0", ] diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py index 833cda20368a2..6965865acb5da 100644 --- a/pandas/compat/pyarrow.py +++ b/pandas/compat/pyarrow.py @@ -17,6 +17,7 @@ pa_version_under6p0 = _palv < Version("6.0.0") pa_version_under7p0 = _palv < Version("7.0.0") pa_version_under8p0 = _palv < Version("8.0.0") + pa_version_under9p0 = _palv < Version("9.0.0") except ImportError: pa_version_under1p01 = True pa_version_under2p0 = True @@ -26,3 +27,4 @@ pa_version_under6p0 = True pa_version_under7p0 = True pa_version_under8p0 = True + pa_version_under9p0 = True diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index b0e4d46564ba4..2c4859061998b 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -825,6 +825,57 @@ def _indexing_key_to_indices( indices = np.arange(n)[key] return indices + # TODO: redefine _rank using pc.rank with pyarrow 9.0 + + def _quantile( + self: ArrowExtensionArrayT, qs: npt.NDArray[np.float64], interpolation: str + ) -> ArrowExtensionArrayT: + """ + Compute the quantiles of self for each quantile in `qs`. 
+ + Parameters + ---------- + qs : np.ndarray[float64] + interpolation: str + + Returns + ------- + same type as self + """ + if pa_version_under4p0: + raise NotImplementedError( + "quantile only supported for pyarrow version >= 4.0" + ) + result = pc.quantile(self._data, q=qs, interpolation=interpolation) + return type(self)(result) + + def _mode(self: ArrowExtensionArrayT, dropna: bool = True) -> ArrowExtensionArrayT: + """ + Returns the mode(s) of the ExtensionArray. + + Always returns `ExtensionArray` even if only one value. + + Parameters + ---------- + dropna : bool, default True + Don't consider counts of NA values. + Not implemented by pyarrow. + + Returns + ------- + same type as self + Sorted, if possible. + """ + if pa_version_under6p0: + raise NotImplementedError("mode only supported for pyarrow version >= 6.0") + modes = pc.mode(self._data, pc.count_distinct(self._data).as_py()) + values = modes.field(0) + counts = modes.field(1) + # counts sorted descending i.e counts[0] = max + mask = pc.equal(counts, counts[0]) + most_common = values.filter(mask) + return type(self)(most_common) + def _maybe_convert_setitem_value(self, value): """Maybe convert value to be pyarrow compatible.""" # TODO: Make more robust like ArrowStringArray._maybe_convert_setitem_value diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index a2a96da02b2a6..136c147c07f2e 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -10,7 +10,6 @@ classes (if they are relevant for the extension interface for all dtypes), or be added to the array-specific tests in `pandas/tests/arrays/`. 
""" - from datetime import ( date, datetime, @@ -24,8 +23,10 @@ from pandas.compat import ( pa_version_under2p0, pa_version_under3p0, + pa_version_under4p0, pa_version_under6p0, pa_version_under8p0, + pa_version_under9p0, ) import pandas as pd @@ -1946,3 +1947,72 @@ def test_compare_array(self, data, comparison_op, na_value, request): def test_arrowdtype_construct_from_string_type_with_unsupported_parameters(): with pytest.raises(NotImplementedError, match="Passing pyarrow type"): ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]") + + +@pytest.mark.xfail( + pa_version_under4p0, + raises=NotImplementedError, + reason="quantile only supported for pyarrow version >= 4.0", +) +@pytest.mark.parametrize( + "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"] +) +@pytest.mark.parametrize("quantile", [0.5, [0.5, 0.5]]) +def test_quantile(data, interpolation, quantile, request): + pa_dtype = data.dtype.pyarrow_dtype + if not (pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype)): + request.node.add_marker( + pytest.mark.xfail( + raises=pa.ArrowNotImplementedError, + reason=f"quantile not supported by pyarrow for {pa_dtype}", + ) + ) + data = data.take([0, 0, 0]) + ser = pd.Series(data) + result = ser.quantile(q=quantile, interpolation=interpolation) + if quantile == 0.5: + assert result == data[0] + else: + # Just check the values + result = result.astype("float64[pyarrow]") + expected = pd.Series( + data.take([0, 0]).astype("float64[pyarrow]"), index=[0.5, 0.5] + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.xfail( + pa_version_under6p0, + raises=NotImplementedError, + reason="mode only supported for pyarrow version >= 6.0", +) +@pytest.mark.parametrize("dropna", [True, False]) +@pytest.mark.parametrize( + "take_idx, exp_idx", + [[[0, 0, 2, 2, 4, 4], [4, 0]], [[0, 0, 0, 2, 4, 4], [0]]], + ids=["multi_mode", "single_mode"], +) +def test_mode(data_for_grouping, dropna, take_idx, exp_idx, request): + pa_dtype = 
data_for_grouping.dtype.pyarrow_dtype + if pa.types.is_temporal(pa_dtype): + request.node.add_marker( + pytest.mark.xfail( + raises=pa.ArrowNotImplementedError, + reason=f"mode not supported by pyarrow for {pa_dtype}", + ) + ) + elif ( + pa.types.is_boolean(pa_dtype) + and "multi_mode" in request.node.nodeid + and pa_version_under9p0 + ): + request.node.add_marker( + pytest.mark.xfail( + reason="https://issues.apache.org/jira/browse/ARROW-17096", + ) + ) + data = data_for_grouping.take(take_idx) + ser = pd.Series(data) + result = ser.mode(dropna=dropna) + expected = pd.Series(data_for_grouping.take(exp_idx)) + tm.assert_series_equal(result, expected)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/47744
2022-07-15T21:27:05Z
2022-07-28T18:12:36Z
2022-07-28T18:12:36Z
2022-07-28T18:23:36Z
DOC: Clarify return type cases in pandas.unique
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 159c0bb2e72c0..f72371d046578 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -325,15 +325,14 @@ def unique(values): Returns ------- - numpy.ndarray or ExtensionArray - - The return can be: - - * Index : when the input is an Index - * Categorical : when the input is a Categorical dtype - * ndarray : when the input is a Series/ndarray - - Return numpy.ndarray or ExtensionArray. + The return can be: + + * Index : when the input is an Index + * ndarray : when the input is a Series/ndarray + * If the argument is an :ref:` ExtensionArray <extending.extension-types>`, + the return type matches argument type. In pandas, classes implementing the + ExtensionArray interface include Categorical, PeriodArray, IntervalArray, + DateTimeArray, SparseArray. See Also --------
- [ ] closes #47426 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47742
2022-07-15T17:45:47Z
2022-09-09T18:12:06Z
null
2022-09-09T18:12:06Z
DOC: Clarify return type cases in pandas.unique
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 159c0bb2e72c0..f6c6c81e73ea3 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -333,7 +333,8 @@ def unique(values): * Categorical : when the input is a Categorical dtype * ndarray : when the input is a Series/ndarray - Return numpy.ndarray or ExtensionArray. + Return numpy.ndarray or, if the argument is a pandas + :ref:`extension array <extending.extension-types>` type, ExtensionArray. See Also --------
- [ ] closes #47426 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47741
2022-07-15T17:34:19Z
2022-07-15T17:38:37Z
null
2022-07-15T17:38:37Z
DEPR: deprecate pandas.tests
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 9651269963803..955e496be6592 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -771,6 +771,7 @@ Other Deprecations - Clarified warning from :func:`to_datetime` when delimited dates can't be parsed in accordance to specified ``dayfirst`` argument (:issue:`46210`) - Deprecated :class:`Series` and :class:`Resampler` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) raising a ``NotImplementedError`` when the dtype is non-numric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`) - Deprecated :meth:`Series.rank` returning an empty result when the dtype is non-numeric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`) +- Deprecated the module ``pandas.tests``. It will be private in the future and renamed to to ``pandas._tests`` (:issue:`46651`) .. --------------------------------------------------------------------------- .. 
_whatsnew_150.performance: diff --git a/pandas/tests/__init__.py b/pandas/tests/__init__.py index e69de29bb2d1d..b69eb2abea07c 100644 --- a/pandas/tests/__init__.py +++ b/pandas/tests/__init__.py @@ -0,0 +1,7 @@ +from warnings import warn + +warn( + "pandas.tests is considered to be private and will be renamed " + "to pandas._tests in the future.", + DeprecationWarning, +) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index d31f617b9be15..0669cc4d38da9 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,6 +1,8 @@ import collections from functools import partial import string +import subprocess +import sys import numpy as np import pytest @@ -229,3 +231,16 @@ def test_temp_setattr(with_exception): raise ValueError("Inside exception raised") raise ValueError("Outside exception raised") assert ser.name == "first" + + +def test_private_tests(): + # GH 47738 + output = subprocess.check_output( + [sys.executable, "-W", "default", "-c", '"from pandas import tests"'], + stderr=subprocess.STDOUT, + ) + msg = ( + "DeprecationWarning: pandas.tests is considered to be private and " + "will be renamed to pandas._tests in the future." + ) + assert msg in output.decode() diff --git a/pandas/util/_tester.py b/pandas/util/_tester.py index 1b018a6a1ba34..af2c1830c32fb 100644 --- a/pandas/util/_tester.py +++ b/pandas/util/_tester.py @@ -5,6 +5,7 @@ import os import sys +import warnings from pandas.compat._optional import import_optional_dependency @@ -22,6 +23,11 @@ def test(extra_args: list[str] | None = None): extra_args : list[str], default None Extra marks to run the tests. """ + # prevent pytest from containing the pandas/tests deprecation + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=DeprecationWarning) + from pandas import tests + pytest = import_optional_dependency("pytest") import_optional_dependency("hypothesis") cmd = ["--skip-slow", "--skip-network", "--skip-db"]
- [x] closes #46651 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Not sure how to test this. All of the following imports trigger the warning (DeprecationWarning is by default not enabled, which hopefully doesn't spam the CI): ```sh python -W default -c "from pandas import tests" python -W default -c "from pandas.tests.frame import common" python -W default -c "import pandas.tests.frame.common" ```
https://api.github.com/repos/pandas-dev/pandas/pulls/47738
2022-07-15T15:52:52Z
2022-07-18T01:28:53Z
null
2022-09-10T01:39:02Z
TST: add test for last() on dataframe grouped by on boolean column (#46409)
diff --git a/pandas/tests/frame/methods/test_dtypes.py b/pandas/tests/frame/methods/test_dtypes.py index 31592f987f04d..87e6ed5b1b135 100644 --- a/pandas/tests/frame/methods/test_dtypes.py +++ b/pandas/tests/frame/methods/test_dtypes.py @@ -1,6 +1,7 @@ from datetime import timedelta import numpy as np +import pytest from pandas.core.dtypes.dtypes import DatetimeTZDtype @@ -79,6 +80,20 @@ def test_dtypes_are_correct_after_column_slice(self): Series({"a": np.float_, "b": np.float_, "c": np.float_}), ) + @pytest.mark.parametrize( + "data", + [pd.NA, True], + ) + def test_dtypes_are_correct_after_groupby_last(self, data): + # GH46409 + df = DataFrame( + {"id": [1, 2, 3, 4], "test": [True, pd.NA, data, False]} + ).convert_dtypes() + result = df.groupby("id").last().test + expected = df.set_index("id").test + assert result.dtype == pd.BooleanDtype() + tm.assert_series_equal(expected, result) + def test_dtypes_gh8722(self, float_string_frame): float_string_frame["bool"] = float_string_frame["A"] > 0 result = float_string_frame.dtypes
- [x] closes #46409 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47736
2022-07-15T14:17:44Z
2022-07-18T19:06:22Z
2022-07-18T19:06:21Z
2022-07-18T19:06:31Z
DOC: Updating some capitalization in doc/source/user_guide #32550
diff --git a/doc/source/user_guide/sparse.rst b/doc/source/user_guide/sparse.rst index ef2cb8909b59d..bc4eec1c23a35 100644 --- a/doc/source/user_guide/sparse.rst +++ b/doc/source/user_guide/sparse.rst @@ -266,8 +266,8 @@ have no replacement. .. _sparse.scipysparse: -Interaction with scipy.sparse ------------------------------ +Interaction with *scipy.sparse* +------------------------------- Use :meth:`DataFrame.sparse.from_spmatrix` to create a :class:`DataFrame` with sparse values from a sparse matrix. diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index c67d028b65b3e..ed7688f229ca8 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -388,7 +388,7 @@ We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by .. _timeseries.origin: -Using the ``origin`` Parameter +Using the ``origin`` parameter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Using the ``origin`` parameter, one can specify an alternative starting point for creation @@ -1523,7 +1523,7 @@ or calendars with additional rules. .. _timeseries.advanced_datetime: -Time series-related instance methods +Time Series-related instance methods ------------------------------------ Shifting / lagging @@ -2601,7 +2601,7 @@ Transform nonexistent times to ``NaT`` or shift the times. .. _timeseries.timezone_series: -Time zone series operations +Time zone Series operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~ A :class:`Series` with time zone **naive** values is diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index 72600289dcf75..d6426fe8bed2d 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -3,7 +3,7 @@ {{ header }} ******************* -Chart Visualization +Chart visualization ******************* This section demonstrates visualization through charting. 
For information on @@ -1746,7 +1746,7 @@ Andrews curves charts: plt.close("all") -Plotting directly with matplotlib +Plotting directly with Matplotlib --------------------------------- In some situations it may still be preferable or necessary to prepare plots diff --git a/doc/source/user_guide/window.rst b/doc/source/user_guide/window.rst index 2407fd3113830..e08fa81c5fa09 100644 --- a/doc/source/user_guide/window.rst +++ b/doc/source/user_guide/window.rst @@ -3,7 +3,7 @@ {{ header }} ******************** -Windowing Operations +Windowing operations ******************** pandas contains a compact set of APIs for performing windowing operations - an operation that performs @@ -490,7 +490,7 @@ For all supported aggregation functions, see :ref:`api.functions_expanding`. .. _window.exponentially_weighted: -Exponentially Weighted window +Exponentially weighted window ----------------------------- An exponentially weighted window is similar to an expanding window but with each prior point
- [x] contributes to #32550 - [x] I left RadViz, SparseArray, SparseDtype, and PandasObject as is
https://api.github.com/repos/pandas-dev/pandas/pulls/47732
2022-07-15T04:00:32Z
2022-07-22T18:05:06Z
2022-07-22T18:05:06Z
2022-07-22T18:05:06Z
BUG: groupby.corrwith fails with axis=1 and other=df
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 89e47af4cb614..b44db88723742 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1024,7 +1024,7 @@ def curried(x): curried, self._obj_with_exclusions, is_transform=is_transform ) - if self._selected_obj.ndim != 1 and self.axis != 1: + if self._selected_obj.ndim != 1 and self.axis != 1 and result.ndim != 1: missing = self._obj_with_exclusions.columns.difference(result.columns) if len(missing) > 0: warn_dropping_nuisance_columns_deprecated( diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 7d6c5310942e2..de2ff20ff3a96 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1505,3 +1505,15 @@ def test_groupby_empty_dataset(dtype, kwargs): expected = df.groupby("A").B.describe(**kwargs).reset_index(drop=True).iloc[:0] expected.index = Index([]) tm.assert_frame_equal(result, expected) + + +def test_corrwith_with_1_axis(): + # GH 47723 + df = DataFrame({"a": [1, 1, 2], "b": [3, 7, 4]}) + result = df.groupby("a").corrwith(df, axis=1) + index = Index( + data=[(1, 0), (1, 1), (1, 2), (2, 2), (2, 0), (2, 1)], + name=("a", None), + ) + expected = Series([np.nan] * 6, index=index) + tm.assert_series_equal(result, expected)
- [x] closes #47723 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). Since this one is only confirmed on master, whatsnew ommited.
https://api.github.com/repos/pandas-dev/pandas/pulls/47731
2022-07-15T03:43:15Z
2022-07-15T20:50:50Z
2022-07-15T20:50:50Z
2022-07-15T21:01:38Z
ENH/TST: Add Reduction tests for ArrowExtensionArray
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 8957ea493e9ad..ab0e262caa6a9 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -628,6 +628,69 @@ def _concat_same_type( arr = pa.chunked_array(chunks) return cls(arr) + def _reduce(self, name: str, *, skipna: bool = True, **kwargs): + """ + Return a scalar result of performing the reduction operation. + + Parameters + ---------- + name : str + Name of the function, supported values are: + { any, all, min, max, sum, mean, median, prod, + std, var, sem, kurt, skew }. + skipna : bool, default True + If True, skip NaN values. + **kwargs + Additional keyword arguments passed to the reduction function. + Currently, `ddof` is the only supported kwarg. + + Returns + ------- + scalar + + Raises + ------ + TypeError : subclass does not define reductions + """ + if name == "sem": + + def pyarrow_meth(data, skipna, **kwargs): + numerator = pc.stddev(data, skip_nulls=skipna, **kwargs) + denominator = pc.sqrt_checked( + pc.subtract_checked( + pc.count(self._data, skip_nulls=skipna), kwargs["ddof"] + ) + ) + return pc.divide_checked(numerator, denominator) + + else: + pyarrow_name = { + "median": "approximate_median", + "prod": "product", + "std": "stddev", + "var": "variance", + }.get(name, name) + # error: Incompatible types in assignment + # (expression has type "Optional[Any]", variable has type + # "Callable[[Any, Any, KwArg(Any)], Any]") + pyarrow_meth = getattr(pc, pyarrow_name, None) # type: ignore[assignment] + if pyarrow_meth is None: + # Let ExtensionArray._reduce raise the TypeError + return super()._reduce(name, skipna=skipna, **kwargs) + try: + result = pyarrow_meth(self._data, skip_nulls=skipna, **kwargs) + except (AttributeError, NotImplementedError, TypeError) as err: + msg = ( + f"'{type(self).__name__}' with dtype {self.dtype} " + f"does not support reduction '{name}' with pyarrow " + f"version {pa.__version__}. 
'{name}' may be supported by " + f"upgrading pyarrow." + ) + raise TypeError(msg) from err + if pc.is_null(result).as_py(): + return self.dtype.na_value + return result.as_py() + def __setitem__(self, key: int | slice | np.ndarray, value: Any) -> None: """Set one or more values inplace. diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 4376a0de37a8c..6a17a56a47cbc 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -5,7 +5,10 @@ import numpy as np import pytest -from pandas.compat import pa_version_under2p0 +from pandas.compat import ( + pa_version_under2p0, + pa_version_under6p0, +) from pandas.errors import PerformanceWarning import pandas.util._test_decorators as td @@ -375,7 +378,7 @@ def test_reduce_missing(skipna, dtype): @pytest.mark.parametrize("method", ["min", "max"]) @pytest.mark.parametrize("skipna", [True, False]) def test_min_max(method, skipna, dtype, request): - if dtype.storage == "pyarrow": + if dtype.storage == "pyarrow" and pa_version_under6p0: reason = "'ArrowStringArray' object has no attribute 'max'" mark = pytest.mark.xfail(raises=TypeError, reason=reason) request.node.add_marker(mark) @@ -392,7 +395,7 @@ def test_min_max(method, skipna, dtype, request): @pytest.mark.parametrize("method", ["min", "max"]) @pytest.mark.parametrize("box", [pd.Series, pd.array]) def test_min_max_numpy(method, box, dtype, request): - if dtype.storage == "pyarrow": + if dtype.storage == "pyarrow" and (pa_version_under6p0 or box is pd.array): if box is pd.array: reason = "'<=' not supported between instances of 'str' and 'NoneType'" else: diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index ef576692c83b6..62f8a855ce263 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -24,6 +24,7 @@ from pandas.compat import ( pa_version_under2p0, pa_version_under3p0, + 
pa_version_under6p0, pa_version_under8p0, ) @@ -303,6 +304,95 @@ def test_loc_iloc_frame_single_dtype(self, request, using_array_manager, data): super().test_loc_iloc_frame_single_dtype(data) +class TestBaseNumericReduce(base.BaseNumericReduceTests): + def check_reduce(self, ser, op_name, skipna): + pa_dtype = ser.dtype.pyarrow_dtype + result = getattr(ser, op_name)(skipna=skipna) + if pa.types.is_boolean(pa_dtype): + # Can't convert if ser contains NA + pytest.skip( + "pandas boolean data with NA does not fully support all reductions" + ) + elif pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype): + ser = ser.astype("Float64") + expected = getattr(ser, op_name)(skipna=skipna) + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_series(self, data, all_numeric_reductions, skipna, request): + pa_dtype = data.dtype.pyarrow_dtype + xfail_mark = pytest.mark.xfail( + raises=TypeError, + reason=( + f"{all_numeric_reductions} is not implemented in " + f"pyarrow={pa.__version__} for {pa_dtype}" + ), + ) + if all_numeric_reductions in {"skew", "kurt"}: + request.node.add_marker(xfail_mark) + elif ( + all_numeric_reductions in {"median", "var", "std", "prod", "max", "min"} + and pa_version_under6p0 + ): + request.node.add_marker(xfail_mark) + elif all_numeric_reductions in {"sum", "mean"} and pa_version_under2p0: + request.node.add_marker(xfail_mark) + elif ( + all_numeric_reductions in {"sum", "mean"} + and skipna is False + and pa_version_under6p0 + and (pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype)) + ): + request.node.add_marker( + pytest.mark.xfail( + raises=AssertionError, + reason=( + f"{all_numeric_reductions} with skip_nulls={skipna} did not " + f"return NA for {pa_dtype} with pyarrow={pa.__version__}" + ), + ) + ) + elif not ( + pa.types.is_integer(pa_dtype) + or pa.types.is_floating(pa_dtype) + or pa.types.is_boolean(pa_dtype) + ) and not ( + all_numeric_reductions in 
{"min", "max"} + and (pa.types.is_temporal(pa_dtype) and not pa.types.is_duration(pa_dtype)) + ): + request.node.add_marker(xfail_mark) + elif pa.types.is_boolean(pa_dtype) and all_numeric_reductions in { + "std", + "var", + "median", + }: + request.node.add_marker(xfail_mark) + super().test_reduce_series(data, all_numeric_reductions, skipna) + + +class TestBaseBooleanReduce(base.BaseBooleanReduceTests): + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_series( + self, data, all_boolean_reductions, skipna, na_value, request + ): + pa_dtype = data.dtype.pyarrow_dtype + xfail_mark = pytest.mark.xfail( + raises=TypeError, + reason=( + f"{all_boolean_reductions} is not implemented in " + f"pyarrow={pa.__version__} for {pa_dtype}" + ), + ) + if not pa.types.is_boolean(pa_dtype): + request.node.add_marker(xfail_mark) + elif pa_version_under3p0: + request.node.add_marker(xfail_mark) + op_name = all_boolean_reductions + s = pd.Series(data) + result = getattr(s, op_name)(skipna=skipna) + assert result is (op_name == "any") + + class TestBaseGroupby(base.BaseGroupbyTests): def test_groupby_agg_extension(self, data_for_grouping, request): tz = getattr(data_for_grouping.dtype.pyarrow_dtype, "tz", None)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/47730
2022-07-15T03:10:09Z
2022-07-22T02:30:18Z
2022-07-22T02:30:18Z
2022-07-22T02:57:02Z
TYP: freq and na_value
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 325c94d0ea267..c3fbd716ad09d 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -2202,7 +2202,9 @@ def validate_periods(periods: int | float | None) -> int | None: return periods # type: ignore[return-value] -def validate_inferred_freq(freq, inferred_freq, freq_infer): +def validate_inferred_freq( + freq, inferred_freq, freq_infer +) -> tuple[BaseOffset | None, bool]: """ If the user passes a freq and another freq is inferred from passed data, require that they match. diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index d9f6cecc8d61d..7a56bba0e58b3 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -264,7 +264,10 @@ def _validate_dtype(cls, values, dtype): # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked" @classmethod def _simple_new( # type: ignore[override] - cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE + cls, + values: np.ndarray, + freq: BaseOffset | None = None, + dtype=DT64NS_DTYPE, ) -> DatetimeArray: assert isinstance(values, np.ndarray) assert dtype.kind == "M" @@ -291,7 +294,7 @@ def _from_sequence_not_strict( dtype=None, copy: bool = False, tz=None, - freq=lib.no_default, + freq: str | BaseOffset | lib.NoDefault | None = lib.no_default, dayfirst: bool = False, yearfirst: bool = False, ambiguous="raise", diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fe101926a6782..427c744b92a0a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1746,7 +1746,7 @@ def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, - na_value=lib.no_default, + na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. 
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index f776585926024..3a7adb19f1c01 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -25,6 +25,7 @@ lib, ) from pandas._libs.tslibs import ( + BaseOffset, Resolution, periods_per_day, timezones, @@ -312,7 +313,7 @@ def isocalendar(self) -> DataFrame: def __new__( cls, data=None, - freq=lib.no_default, + freq: str | BaseOffset | lib.NoDefault = lib.no_default, tz=None, normalize: bool = False, closed=None, diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 3a8ed54d6c634..88f81064b826f 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -1132,7 +1132,7 @@ def as_array( self, dtype=None, copy: bool = False, - na_value=lib.no_default, + na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the blockmanager data into an numpy array.
Type annotations are only related by having `lib.no_default` as a default value (pyright then assumes that their type is `lib.NoDefault`).
https://api.github.com/repos/pandas-dev/pandas/pulls/47729
2022-07-15T02:19:11Z
2022-07-18T19:19:56Z
2022-07-18T19:19:56Z
2022-09-10T01:39:00Z
DEPR: deprecate unused errors in NDFrame.where/mask
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 9651269963803..112f5f08a3393 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -771,6 +771,7 @@ Other Deprecations - Clarified warning from :func:`to_datetime` when delimited dates can't be parsed in accordance to specified ``dayfirst`` argument (:issue:`46210`) - Deprecated :class:`Series` and :class:`Resampler` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) raising a ``NotImplementedError`` when the dtype is non-numric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`) - Deprecated :meth:`Series.rank` returning an empty result when the dtype is non-numeric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`) +- Deprecated argument ``errors`` for :meth:`Series.mask`, :meth:`Series.where`, :meth:`DataFrame.mask`, and :meth:`DataFrame.where` as ``errors`` had no effect on this methods (:issue:`47728`) .. --------------------------------------------------------------------------- .. _whatsnew_150.performance: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ead4ea744c647..e70312c562907 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -11788,6 +11788,7 @@ def where( ... # error: Signature of "where" incompatible with supertype "NDFrame" + @deprecate_kwarg(old_arg_name="errors", new_arg_name=None) @deprecate_nonkeyword_arguments( version=None, allowed_args=["self", "cond", "other"] ) @@ -11807,7 +11808,6 @@ def where( # type: ignore[override] inplace=inplace, axis=axis, level=level, - errors=errors, try_cast=try_cast, ) @@ -11854,6 +11854,7 @@ def mask( ... 
# error: Signature of "mask" incompatible with supertype "NDFrame" + @deprecate_kwarg(old_arg_name="errors", new_arg_name=None) @deprecate_nonkeyword_arguments( version=None, allowed_args=["self", "cond", "other"] ) @@ -11873,7 +11874,6 @@ def mask( # type: ignore[override] inplace=inplace, axis=axis, level=level, - errors=errors, try_cast=try_cast, ) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4325f0eb04a9c..6e00f33f486d9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9381,7 +9381,6 @@ def _where( inplace=False, axis=None, level=None, - errors: IgnoreRaise | lib.NoDefault = "raise", ): """ Equivalent to public method `where`, except that `other` is not @@ -9548,6 +9547,7 @@ def where( ) -> NDFrameT | None: ... + @deprecate_kwarg(old_arg_name="errors", new_arg_name=None) @deprecate_nonkeyword_arguments( version=None, allowed_args=["self", "cond", "other"] ) @@ -9599,6 +9599,9 @@ def where( - 'raise' : allow exceptions to be raised. - 'ignore' : suppress exceptions. On error return original object. + .. deprecated:: 1.5.0 + This argument had no effect. + try_cast : bool, default None Try to cast the result back to the input type (if possible). @@ -9721,7 +9724,7 @@ def where( stacklevel=find_stack_level(), ) - return self._where(cond, other, inplace, axis, level, errors=errors) + return self._where(cond, other, inplace, axis, level) @overload def mask( @@ -9765,6 +9768,7 @@ def mask( ) -> NDFrameT | None: ... 
+ @deprecate_kwarg(old_arg_name="errors", new_arg_name=None) @deprecate_nonkeyword_arguments( version=None, allowed_args=["self", "cond", "other"] ) @@ -9808,7 +9812,6 @@ def mask( inplace=inplace, axis=axis, level=level, - errors=errors, ) @doc(klass=_shared_doc_kwargs["klass"]) diff --git a/pandas/core/series.py b/pandas/core/series.py index ef4ea0172c505..60898ee75f7c2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -61,6 +61,7 @@ from pandas.util._decorators import ( Appender, Substitution, + deprecate_kwarg, deprecate_nonkeyword_arguments, doc, ) @@ -6069,6 +6070,7 @@ def where( ... # error: Signature of "where" incompatible with supertype "NDFrame" + @deprecate_kwarg(old_arg_name="errors", new_arg_name=None) @deprecate_nonkeyword_arguments( version=None, allowed_args=["self", "cond", "other"] ) @@ -6088,7 +6090,6 @@ def where( # type: ignore[override] inplace=inplace, axis=axis, level=level, - errors=errors, try_cast=try_cast, ) @@ -6135,6 +6136,7 @@ def mask( ... 
# error: Signature of "mask" incompatible with supertype "NDFrame" + @deprecate_kwarg(old_arg_name="errors", new_arg_name=None) @deprecate_nonkeyword_arguments( version=None, allowed_args=["self", "cond", "other"] ) @@ -6154,7 +6156,6 @@ def mask( # type: ignore[override] inplace=inplace, axis=axis, level=level, - errors=errors, try_cast=try_cast, ) diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index 9d004613116b8..5b9883f3866e7 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -1035,3 +1035,17 @@ def test_where_dt64_2d(): mask[:] = True expected = df _check_where_equivalences(df, mask, other, expected) + + +def test_where_mask_deprecated(frame_or_series): + # GH 47728 + obj = DataFrame(np.random.randn(4, 3)) + obj = tm.get_obj(obj, frame_or_series) + + mask = obj > 0 + + with tm.assert_produces_warning(FutureWarning): + obj.where(mask, -1, errors="raise") + + with tm.assert_produces_warning(FutureWarning): + obj.mask(mask, -1, errors="raise")
null
https://api.github.com/repos/pandas-dev/pandas/pulls/47728
2022-07-15T02:15:41Z
2022-07-15T17:37:34Z
2022-07-15T17:37:34Z
2022-09-10T01:39:06Z
DOC: update min package versions in install.rst to align with v.1.5.0 requirements
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 39c9db2c883b8..5d9bfd97030b5 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -199,7 +199,7 @@ the code base as of this writing. To run it on your machine to verify that everything is working (and that you have all of the dependencies, soft and hard, installed), make sure you have `pytest <https://docs.pytest.org/en/latest/>`__ >= 6.0 and `Hypothesis -<https://hypothesis.readthedocs.io/en/latest/>`__ >= 3.58, then run: +<https://hypothesis.readthedocs.io/en/latest/>`__ >= 6.13.0, then run: :: @@ -247,11 +247,11 @@ Recommended dependencies * `numexpr <https://github.com/pydata/numexpr>`__: for accelerating certain numerical operations. ``numexpr`` uses multiple cores as well as smart chunking and caching to achieve large speedups. - If installed, must be Version 2.7.1 or higher. + If installed, must be Version 2.7.3 or higher. * `bottleneck <https://github.com/pydata/bottleneck>`__: for accelerating certain types of ``nan`` evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups. If installed, - must be Version 1.3.1 or higher. + must be Version 1.3.2 or higher. .. 
note:: @@ -277,8 +277,8 @@ Visualization Dependency Minimum Version Notes ========================= ================== ============================================================= matplotlib 3.3.2 Plotting library -Jinja2 2.11 Conditional formatting with DataFrame.style -tabulate 0.8.7 Printing in Markdown-friendly format (see `tabulate`_) +Jinja2 3.0.0 Conditional formatting with DataFrame.style +tabulate 0.8.9 Printing in Markdown-friendly format (see `tabulate`_) ========================= ================== ============================================================= Computation @@ -287,10 +287,10 @@ Computation ========================= ================== ============================================================= Dependency Minimum Version Notes ========================= ================== ============================================================= -SciPy 1.4.1 Miscellaneous statistical functions -numba 0.50.1 Alternative execution engine for rolling operations +SciPy 1.7.1 Miscellaneous statistical functions +numba 0.53.1 Alternative execution engine for rolling operations (see :ref:`Enhancing Performance <enhancingperf.numba>`) -xarray 0.15.1 pandas-like API for N-dimensional data +xarray 0.19.0 pandas-like API for N-dimensional data ========================= ================== ============================================================= Excel files @@ -301,9 +301,9 @@ Dependency Minimum Version Notes ========================= ================== ============================================================= xlrd 2.0.1 Reading Excel xlwt 1.3.0 Writing Excel -xlsxwriter 1.2.2 Writing Excel -openpyxl 3.0.3 Reading / writing for xlsx files -pyxlsb 1.0.6 Reading for xlsb files +xlsxwriter 1.4.3 Writing Excel +openpyxl 3.0.7 Reading / writing for xlsx files +pyxlsb 1.0.8 Reading for xlsb files ========================= ================== ============================================================= HTML @@ -312,9 +312,9 @@ HTML ========================= 
================== ============================================================= Dependency Minimum Version Notes ========================= ================== ============================================================= -BeautifulSoup4 4.8.2 HTML parser for read_html +BeautifulSoup4 4.9.3 HTML parser for read_html html5lib 1.1 HTML parser for read_html -lxml 4.5.0 HTML parser for read_html +lxml 4.6.3 HTML parser for read_html ========================= ================== ============================================================= One of the following combinations of libraries is needed to use the @@ -356,9 +356,9 @@ SQL databases ========================= ================== ============================================================= Dependency Minimum Version Notes ========================= ================== ============================================================= -SQLAlchemy 1.4.0 SQL support for databases other than sqlite -psycopg2 2.8.4 PostgreSQL engine for sqlalchemy -pymysql 0.10.1 MySQL engine for sqlalchemy +SQLAlchemy 1.4.16 SQL support for databases other than sqlite +psycopg2 2.8.6 PostgreSQL engine for sqlalchemy +pymysql 1.0.2 MySQL engine for sqlalchemy ========================= ================== ============================================================= Other data sources @@ -368,11 +368,11 @@ Other data sources Dependency Minimum Version Notes ========================= ================== ============================================================= PyTables 3.6.1 HDF5-based reading / writing -blosc 1.20.1 Compression for HDF5 +blosc 1.21.0 Compression for HDF5 zlib Compression for HDF5 fastparquet 0.4.0 Parquet reading / writing pyarrow 1.0.1 Parquet, ORC, and feather reading / writing -pyreadstat 1.1.0 SPSS files (.sav) reading +pyreadstat 1.1.2 SPSS files (.sav) reading ========================= ================== ============================================================= .. 
_install.warn_orc: @@ -396,10 +396,10 @@ Access data in the cloud ========================= ================== ============================================================= Dependency Minimum Version Notes ========================= ================== ============================================================= -fsspec 0.7.4 Handling files aside from simple local and HTTP -gcsfs 0.6.0 Google Cloud Storage access -pandas-gbq 0.14.0 Google Big Query access -s3fs 0.4.0 Amazon S3 access +fsspec 2021.5.0 Handling files aside from simple local and HTTP +gcsfs 2021.5.0 Google Cloud Storage access +pandas-gbq 0.15.0 Google Big Query access +s3fs 2021.05.0 Amazon S3 access ========================= ================== ============================================================= Clipboard
Documentation update: should be part of the 1.5 milestone Closes #47740 Makes the min package versions in `doc/source/install.rst` consistent with those recommended in [`doc/source/whatsnew/v1.5.0.rst`.](https://github.com/pandas-dev/pandas/blob/main/doc/source/whatsnew/v1.5.0.rst#increased-minimum-versions-for-dependencies) If this is not done, there will be a mistmatch in optional library recommendations between the release notes and the documentation. <img width="524" alt="image" src="https://user-images.githubusercontent.com/23153616/179132443-da09ff46-b6a4-4baf-83f0-ef5716cb7d54.png">
https://api.github.com/repos/pandas-dev/pandas/pulls/47727
2022-07-15T02:11:45Z
2022-07-18T19:07:48Z
2022-07-18T19:07:48Z
2022-09-28T20:45:45Z
BUG: numeric_only with axis=1 in DataFrame.corrwith and DataFrameGroupBy.cummin/max
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e70312c562907..629f711d8ec73 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -10550,7 +10550,8 @@ def corrwith( else: return this.apply(lambda x: other.corr(x, method=method), axis=axis) - other = other._get_numeric_data() + if numeric_only_bool: + other = other._get_numeric_data() left, right = this.align(other, join="inner", copy=False) if axis == 1: @@ -10563,11 +10564,15 @@ def corrwith( right = right + left * 0 # demeaned data - ldem = left - left.mean() - rdem = right - right.mean() + ldem = left - left.mean(numeric_only=numeric_only_bool) + rdem = right - right.mean(numeric_only=numeric_only_bool) num = (ldem * rdem).sum() - dom = (left.count() - 1) * left.std() * right.std() + dom = ( + (left.count() - 1) + * left.std(numeric_only=numeric_only_bool) + * right.std(numeric_only=numeric_only_bool) + ) correl = num / dom diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 89e47af4cb614..09545aa5c3184 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -3630,7 +3630,11 @@ def cummin(self, axis=0, numeric_only=False, **kwargs) -> NDFrameT: skipna = kwargs.get("skipna", True) if axis != 0: f = lambda x: np.minimum.accumulate(x, axis) - return self._python_apply_general(f, self._selected_obj, is_transform=True) + numeric_only_bool = self._resolve_numeric_only("cummax", numeric_only, axis) + obj = self._selected_obj + if numeric_only_bool: + obj = obj._get_numeric_data() + return self._python_apply_general(f, obj, is_transform=True) return self._cython_transform( "cummin", numeric_only=numeric_only, skipna=skipna @@ -3650,7 +3654,11 @@ def cummax(self, axis=0, numeric_only=False, **kwargs) -> NDFrameT: skipna = kwargs.get("skipna", True) if axis != 0: f = lambda x: np.maximum.accumulate(x, axis) - return self._python_apply_general(f, self._selected_obj, is_transform=True) + numeric_only_bool = 
self._resolve_numeric_only("cummax", numeric_only, axis) + obj = self._selected_obj + if numeric_only_bool: + obj = obj._get_numeric_data() + return self._python_apply_general(f, obj, is_transform=True) return self._cython_transform( "cummax", numeric_only=numeric_only, skipna=skipna diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 7d6c5310942e2..9c622e0bfb69e 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -555,6 +555,81 @@ def test_idxmin_idxmax_axis1(): gb2.idxmax(axis=1) +@pytest.mark.parametrize("numeric_only", [True, False, None]) +def test_axis1_numeric_only(request, groupby_func, numeric_only): + if groupby_func in ("idxmax", "idxmin"): + pytest.skip("idxmax and idx_min tested in test_idxmin_idxmax_axis1") + if groupby_func in ("mad", "tshift"): + pytest.skip("mad and tshift are deprecated") + if groupby_func in ("corrwith", "skew"): + msg = "GH#47723 groupby.corrwith and skew do not correctly implement axis=1" + request.node.add_marker(pytest.mark.xfail(reason=msg)) + + df = DataFrame(np.random.randn(10, 4), columns=["A", "B", "C", "D"]) + df["E"] = "x" + groups = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4] + gb = df.groupby(groups) + method = getattr(gb, groupby_func) + args = (0,) if groupby_func == "fillna" else () + kwargs = {"axis": 1} + if numeric_only is not None: + # when numeric_only is None we don't pass any argument + kwargs["numeric_only"] = numeric_only + + # Functions without numeric_only and axis args + no_args = ("cumprod", "cumsum", "diff", "fillna", "pct_change", "rank", "shift") + # Functions with axis args + has_axis = ( + "cumprod", + "cumsum", + "diff", + "pct_change", + "rank", + "shift", + "cummax", + "cummin", + "idxmin", + "idxmax", + "fillna", + ) + if numeric_only is not None and groupby_func in no_args: + msg = "got an unexpected keyword argument 'numeric_only'" + with pytest.raises(TypeError, match=msg): + method(*args, **kwargs) + elif 
groupby_func not in has_axis: + msg = "got an unexpected keyword argument 'axis'" + warn = FutureWarning if groupby_func == "skew" and not numeric_only else None + with tm.assert_produces_warning(warn, match="Dropping of nuisance columns"): + with pytest.raises(TypeError, match=msg): + method(*args, **kwargs) + # fillna and shift are successful even on object dtypes + elif (numeric_only is None or not numeric_only) and groupby_func not in ( + "fillna", + "shift", + ): + msgs = ( + # cummax, cummin, rank + "not supported between instances of", + # cumprod + "can't multiply sequence by non-int of type 'float'", + # cumsum, diff, pct_change + "unsupported operand type", + ) + with pytest.raises(TypeError, match=f"({'|'.join(msgs)})"): + method(*args, **kwargs) + else: + result = method(*args, **kwargs) + + df_expected = df.drop(columns="E").T if numeric_only else df.T + expected = getattr(df_expected, groupby_func)(*args).T + if groupby_func == "shift" and not numeric_only: + # shift with axis=1 leaves the leftmost column as numeric + # but transposing for expected gives us object dtype + expected = expected.astype(float) + + tm.assert_equal(result, expected) + + def test_groupby_cumprod(): # GH 4095 df = DataFrame({"key": ["b"] * 10, "value": 2}) @@ -1321,7 +1396,7 @@ def test_deprecate_numeric_only( assert "b" not in result.columns elif ( # kernels that work on any dtype and have numeric_only arg - kernel in ("first", "last", "corrwith") + kernel in ("first", "last") or ( # kernels that work on any dtype and don't have numeric_only arg kernel in ("any", "all", "bfill", "ffill", "fillna", "nth", "nunique") @@ -1339,7 +1414,8 @@ def test_deprecate_numeric_only( "(not allowed for this dtype" "|must be a string or a number" "|cannot be performed against 'object' dtypes" - "|must be a string or a real number)" + "|must be a string or a real number" + "|unsupported operand type)" ) with pytest.raises(TypeError, match=msg): method(*args, **kwargs)
Part of #46560 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. No whatsnew note as these methods gained numeric_ony in 1.5.0
https://api.github.com/repos/pandas-dev/pandas/pulls/47724
2022-07-14T22:35:42Z
2022-07-16T18:14:59Z
2022-07-16T18:14:59Z
2022-07-17T14:16:12Z
ENH: Timestamp.min/max/resolution support non-nano
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 692b4430fa577..5c47c176b52ef 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -1,4 +1,5 @@ from cpython.datetime cimport ( + PyDateTime_CheckExact, PyDateTime_DATE_GET_HOUR, PyDateTime_DATE_GET_MICROSECOND, PyDateTime_DATE_GET_MINUTE, @@ -228,7 +229,13 @@ def py_td64_to_tdstruct(int64_t td64, NPY_DATETIMEUNIT unit): cdef inline void pydatetime_to_dtstruct(datetime dt, npy_datetimestruct *dts): - dts.year = PyDateTime_GET_YEAR(dt) + if PyDateTime_CheckExact(dt): + dts.year = PyDateTime_GET_YEAR(dt) + else: + # We use dt.year instead of PyDateTime_GET_YEAR because with Timestamp + # we override year such that PyDateTime_GET_YEAR is incorrect. + dts.year = dt.year + dts.month = PyDateTime_GET_MONTH(dt) dts.day = PyDateTime_GET_DAY(dt) dts.hour = PyDateTime_DATE_GET_HOUR(dt) diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd index 69a1dd436dec0..0ecb26822cf50 100644 --- a/pandas/_libs/tslibs/timestamps.pxd +++ b/pandas/_libs/tslibs/timestamps.pxd @@ -22,7 +22,7 @@ cdef _Timestamp create_timestamp_from_ts(int64_t value, cdef class _Timestamp(ABCTimestamp): cdef readonly: - int64_t value, nanosecond + int64_t value, nanosecond, year BaseOffset _freq NPY_DATETIMEUNIT _reso diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index ae3ce46cbc3c8..5163bfd8b7760 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -143,12 +143,27 @@ cdef inline _Timestamp create_timestamp_from_ts( """ convenience routine to construct a Timestamp from its parts """ cdef: _Timestamp ts_base - - ts_base = _Timestamp.__new__(Timestamp, dts.year, dts.month, + int64_t pass_year = dts.year + + # We pass year=1970/1972 here and set year below because with non-nanosecond + # resolution we may have datetimes outside of the stdlib pydatetime + # implementation 
bounds, which would raise. + # NB: this means the C-API macro PyDateTime_GET_YEAR is unreliable. + if 1 <= pass_year <= 9999: + # we are in-bounds for pydatetime + pass + elif ccalendar.is_leapyear(dts.year): + pass_year = 1972 + else: + pass_year = 1970 + + ts_base = _Timestamp.__new__(Timestamp, pass_year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, tz, fold=fold) + ts_base.value = value ts_base._freq = freq + ts_base.year = dts.year ts_base.nanosecond = dts.ps // 1000 ts_base._reso = reso @@ -179,6 +194,40 @@ def integer_op_not_supported(obj): return TypeError(int_addsub_msg) +class MinMaxReso: + """ + We need to define min/max/resolution on both the Timestamp _instance_ + and Timestamp class. On an instance, these depend on the object's _reso. + On the class, we default to the values we would get with nanosecond _reso. + + See also: timedeltas.MinMaxReso + """ + def __init__(self, name): + self._name = name + + def __get__(self, obj, type=None): + cls = Timestamp + if self._name == "min": + val = np.iinfo(np.int64).min + 1 + elif self._name == "max": + val = np.iinfo(np.int64).max + else: + assert self._name == "resolution" + val = 1 + cls = Timedelta + + if obj is None: + # i.e. 
this is on the class, default to nanos + return cls(val) + elif self._name == "resolution": + return Timedelta._from_value_and_reso(val, obj._reso) + else: + return Timestamp._from_value_and_reso(val, obj._reso, tz=None) + + def __set__(self, obj, value): + raise AttributeError(f"{self._name} is not settable.") + + # ---------------------------------------------------------------------- cdef class _Timestamp(ABCTimestamp): @@ -188,6 +237,10 @@ cdef class _Timestamp(ABCTimestamp): dayofweek = _Timestamp.day_of_week dayofyear = _Timestamp.day_of_year + min = MinMaxReso("min") + max = MinMaxReso("max") + resolution = MinMaxReso("resolution") # GH#21336, GH#21365 + cpdef void _set_freq(self, freq): # set the ._freq attribute without going through the constructor, # which would issue a warning @@ -248,10 +301,12 @@ cdef class _Timestamp(ABCTimestamp): def __hash__(_Timestamp self): if self.nanosecond: return hash(self.value) + if not (1 <= self.year <= 9999): + # out of bounds for pydatetime + return hash(self.value) if self.fold: return datetime.__hash__(self.replace(fold=0)) return datetime.__hash__(self) - # TODO(non-nano): what if we are out of bounds for pydatetime? def __richcmp__(_Timestamp self, object other, int op): cdef: @@ -968,6 +1023,9 @@ cdef class _Timestamp(ABCTimestamp): """ base_ts = "microseconds" if timespec == "nanoseconds" else timespec base = super(_Timestamp, self).isoformat(sep=sep, timespec=base_ts) + # We need to replace the fake year 1970 with our real year + base = f"{self.year}-" + base.split("-", 1)[1] + if self.nanosecond == 0 and timespec != "nanoseconds": return base @@ -2332,29 +2390,24 @@ default 'raise' Return the day of the week represented by the date. Monday == 1 ... Sunday == 7. 
""" - return super().isoweekday() + # same as super().isoweekday(), but that breaks because of how + # we have overriden year, see note in create_timestamp_from_ts + return self.weekday() + 1 def weekday(self): """ Return the day of the week represented by the date. Monday == 0 ... Sunday == 6. """ - return super().weekday() + # same as super().weekday(), but that breaks because of how + # we have overriden year, see note in create_timestamp_from_ts + return ccalendar.dayofweek(self.year, self.month, self.day) # Aliases Timestamp.weekofyear = Timestamp.week Timestamp.daysinmonth = Timestamp.days_in_month -# Add the min and max fields at the class level -cdef int64_t _NS_UPPER_BOUND = np.iinfo(np.int64).max -cdef int64_t _NS_LOWER_BOUND = NPY_NAT + 1 - -# Resolution is in nanoseconds -Timestamp.min = Timestamp(_NS_LOWER_BOUND) -Timestamp.max = Timestamp(_NS_UPPER_BOUND) -Timestamp.resolution = Timedelta(nanoseconds=1) # GH#21336, GH#21365 - # ---------------------------------------------------------------------- # Scalar analogues to functions in vectorized.pyx diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 353c99688c139..67ad152dcab30 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -1011,6 +1011,35 @@ def test_sub_timedeltalike_mismatched_reso(self, ts_tz): # With a mismatched td64 as opposed to Timedelta ts + np.timedelta64(1, "ns") + def test_min(self, ts): + assert ts.min <= ts + assert ts.min._reso == ts._reso + assert ts.min.value == NaT.value + 1 + + def test_max(self, ts): + assert ts.max >= ts + assert ts.max._reso == ts._reso + assert ts.max.value == np.iinfo(np.int64).max + + def test_resolution(self, ts): + expected = Timedelta._from_value_and_reso(1, ts._reso) + result = ts.resolution + assert result == expected + assert result._reso == expected._reso + + +def test_timestamp_class_min_max_resolution(): + # when 
accessed on the class (as opposed to an instance), we default + # to nanoseconds + assert Timestamp.min == Timestamp(NaT.value + 1) + assert Timestamp.min._reso == NpyDatetimeUnit.NPY_FR_ns.value + + assert Timestamp.max == Timestamp(np.iinfo(np.int64).max) + assert Timestamp.max._reso == NpyDatetimeUnit.NPY_FR_ns.value + + assert Timestamp.resolution == Timedelta(1) + assert Timestamp.resolution._reso == NpyDatetimeUnit.NPY_FR_ns.value + class TestAsUnit: def test_as_unit(self):
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47720
2022-07-14T15:08:31Z
2022-07-21T17:41:44Z
2022-07-21T17:41:44Z
2022-07-22T17:43:21Z
GroupBy enhancement unifies the return of iterating over GroupBy #42795
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index a882d3a955469..61fe7ee7a1b39 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -294,6 +294,8 @@ def __getitem__(self, item: PositionalIndexer): ) # We are not an array indexer, so maybe e.g. a slice or integer # indexer. We dispatch to pyarrow. + if type(item) == np.int64: + item = item.item() value = self._data[item] if isinstance(value, pa.ChunkedArray): return type(self)(value) diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index b9f4166b475ca..eec24acd3305c 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -733,6 +733,11 @@ def get_grouper( """ group_axis = obj._get_axis(axis) + tuple_unified = False + if isinstance(key, list): + if len(key) == 1 and isinstance(key[0], str): + tuple_unified = True + # validate that the passed single level is compatible with the passed # axis of the object if level is not None: @@ -918,7 +923,12 @@ def is_in_obj(gpr) -> bool: # create the internals grouper grouper = ops.BaseGrouper( - group_axis, groupings, sort=sort, mutated=mutated, dropna=dropna + group_axis, + groupings, + tuple_unified=tuple_unified, + sort=sort, + mutated=mutated, + dropna=dropna, ) return grouper, frozenset(exclusions), obj diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 6dc4ccfa8e1ee..fd72a61065404 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -711,6 +711,7 @@ def __init__( self, axis: Index, groupings: Sequence[grouper.Grouping], + tuple_unified: bool = False, sort: bool = True, group_keys: bool = True, mutated: bool = False, @@ -721,6 +722,7 @@ def __init__( self.axis = axis self._groupings: list[grouper.Grouping] = list(groupings) + self.tuple_unified = tuple_unified self._sort = sort self.group_keys = group_keys self.mutated = mutated @@ -779,13 +781,13 @@ def _get_grouper(self): @final @cache_readonly 
def group_keys_seq(self): - if len(self.groupings) == 1: + if len(self.groupings) == 1 and self.tuple_unified is False: return self.levels[0] - else: - ids, _, ngroups = self.group_info - # provide "flattened" iterator for multi-group setting - return get_flattened_list(ids, ngroups, self.levels, self.codes) + ids, _, ngroups = self.group_info + + # provide "flattened" iterator for multi-group setting + return get_flattened_list(ids, ngroups, self.levels, self.codes) @final def apply( @@ -1123,12 +1125,13 @@ def __init__( binlabels, mutated: bool = False, indexer=None, + tuple_unified: bool = False, ) -> None: self.bins = ensure_int64(bins) self.binlabels = ensure_index(binlabels) self.mutated = mutated self.indexer = indexer - + self.tuple_unified = False # These lengths must match, otherwise we could call agg_series # with empty self.bins, which would raise in libreduction. assert len(self.binlabels) == len(self.bins) diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 03aad0ef64dec..e8e4598d1472a 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -161,6 +161,9 @@ def __internal_pivot_table( pass values = list(values) + if isinstance(keys, list): + if len(keys) == 1: + keys = keys[0] grouped = data.groupby(keys, observed=observed, sort=sort) agged = grouped.agg(aggfunc) if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns): @@ -367,7 +370,11 @@ def _all_key(key): margin = data[rows + values].groupby(rows, observed=observed).agg(aggfunc) cat_axis = 1 - for key, piece in table.groupby(level=0, axis=cat_axis, observed=observed): + for keys, piece in table.groupby(level=0, axis=cat_axis, observed=observed): + if isinstance(keys, tuple): + (key,) = keys + else: + key = keys all_key = _all_key(key) # we are going to mutate this, so need to copy! 
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index ee7493813f13a..fe75f552c6633 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -175,7 +175,8 @@ def __init__( # For `hist` plot, need to get grouped original data before `self.data` is # updated later if self.by is not None and self._kind == "hist": - self._grouped = data.groupby(self.by) + bymodi = fix_groupby_singlelist_input(by) + self._grouped = data.groupby(bymodi) self.kind = kind @@ -1832,3 +1833,10 @@ def blank_labeler(label, value): leglabels = labels if labels is not None else idx for p, l in zip(patches, leglabels): self._append_legend_handles_labels(p, l) + + +def fix_groupby_singlelist_input(keys): + if isinstance(keys, list): + if len(keys) == 1 and isinstance(keys[0], str): + keys = keys[0] + return keys diff --git a/pandas/plotting/_matplotlib/groupby.py b/pandas/plotting/_matplotlib/groupby.py index 4f1cd3f38343a..0c87db697b342 100644 --- a/pandas/plotting/_matplotlib/groupby.py +++ b/pandas/plotting/_matplotlib/groupby.py @@ -108,7 +108,8 @@ def reconstruct_data_with_by( 1 3.0 4.0 NaN NaN 2 NaN NaN 5.0 6.0 """ - grouped = data.groupby(by) + bymodi = fix_groupby_singlelist_input(by) + grouped = data.groupby(bymodi) data_list = [] for key, group in grouped: @@ -134,3 +135,10 @@ def reformat_hist_y_given_by( if by is not None and len(y.shape) > 1: return np.array([remove_na_arraylike(col) for col in y.T]).T return remove_na_arraylike(y) + + +def fix_groupby_singlelist_input(keys): + if isinstance(keys, list): + if len(keys) == 1 and isinstance(keys[0], str): + keys = keys[0] + return keys diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index 3b151d67c70be..61408d7c946c9 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -67,7 +67,8 @@ def _args_adjust(self): # where subplots are created based on by argument if is_integer(self.bins): if 
self.by is not None: - grouped = self.data.groupby(self.by)[self.columns] + bymodi = fix_groupby_singlelist_input(self.by) + grouped = self.data.groupby(bymodi)[self.columns] self.bins = [self._calculate_bins(group) for key, group in grouped] else: self.bins = self._calculate_bins(self.data) @@ -271,6 +272,8 @@ def _grouped_plot( grouped = data.groupby(by) if column is not None: grouped = grouped[column] + if isinstance(by, list) and len(by) == 1: + by = [by] naxes = len(grouped) fig, axes = create_subplots( @@ -528,3 +531,10 @@ def hist_frame( maybe_adjust_figure(fig, wspace=0.3, hspace=0.3) return axes + + +def fix_groupby_singlelist_input(keys): + if isinstance(keys, list): + if len(keys) == 1 and isinstance(keys[0], str): + keys = keys[0] + return keys diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 920b869ef799b..0ce73c73dd2a4 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2795,3 +2795,21 @@ def test_groupby_none_column_name(): result = df.groupby(by=[None]).sum() expected = DataFrame({"b": [2, 5], "c": [9, 13]}, index=Index([1, 2], name=None)) tm.assert_frame_equal(result, expected) + + +def test_groupby_iterator_one_grouper(): + df = DataFrame(columns=["a", "b", "c"], index=["x", "y"]) + df.loc["y"] = Series({"a": 1, "b": 5, "c": 2}) + expected = True + + values, _ = next(iter(df.groupby(["a", "b"]))) + result = isinstance(values, tuple) + assert result == expected + + values, _ = next(iter(df.groupby(["a"]))) + result = isinstance(values, tuple) + assert result == expected + + values, _ = next(iter(df.groupby("a"))) + result = isinstance(values, int) + assert result == expected diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 905c2af2d22a5..ee460eb365d25 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -718,7 +718,9 @@ def _check_join(left, right, result, 
join_col, how="left", lsuffix="_x", rsuffix # some smoke tests for c in join_col: assert result[c].notna().all() - + if isinstance(join_col, list): + if len(join_col) == 1: + join_col = join_col[0] left_grouped = left.groupby(join_col) right_grouped = right.groupby(join_col)
- [x] closes #42795 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Applied the deprecation in #47761
https://api.github.com/repos/pandas-dev/pandas/pulls/47719
2022-07-14T14:48:04Z
2022-12-05T18:01:19Z
null
2022-12-05T18:01:20Z
TST: add test for groupby with dropna=False on multi-index
diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py index ca55263146db3..515c96780e731 100644 --- a/pandas/tests/groupby/test_groupby_dropna.py +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -378,3 +378,12 @@ def test_groupby_nan_included(): tm.assert_numpy_array_equal(result_values, expected_values) assert np.isnan(list(result.keys())[2]) assert list(result.keys())[0:2] == ["g1", "g2"] + + +def test_groupby_drop_nan_with_multi_index(): + # GH 39895 + df = pd.DataFrame([[np.nan, 0, 1]], columns=["a", "b", "c"]) + df = df.set_index(["a", "b"]) + result = df.groupby(["a", "b"], dropna=False).first() + expected = df + tm.assert_frame_equal(result, expected)
- [x] closes #39895 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47717
2022-07-14T07:28:01Z
2022-07-14T16:53:47Z
2022-07-14T16:53:46Z
2022-07-14T16:57:51Z
opt out of bottleneck for nanmean
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index a6408b940119d..a8af7f023d34d 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -844,7 +844,7 @@ Numeric - Bug in operations with array-likes with ``dtype="boolean"`` and :attr:`NA` incorrectly altering the array in-place (:issue:`45421`) - Bug in division, ``pow`` and ``mod`` operations on array-likes with ``dtype="boolean"`` not being like their ``np.bool_`` counterparts (:issue:`46063`) - Bug in multiplying a :class:`Series` with ``IntegerDtype`` or ``FloatingDtype`` by an array-like with ``timedelta64[ns]`` dtype incorrectly raising (:issue:`45622`) -- +- Bug in :meth:`mean` where the optional dependency ``bottleneck`` causes precision loss linear in the length of the array. ``bottleneck`` has been disabled for :meth:`mean` improving the loss to log-linear but may result in a performance decrease. (:issue:`42878`) Conversion ^^^^^^^^^^ diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 05a9bde700e32..81766dc91f271 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -162,6 +162,10 @@ def f( def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool: # Bottleneck chokes on datetime64, PeriodDtype (or and EA) if not is_object_dtype(dtype) and not needs_i8_conversion(dtype): + # GH 42878 + # Bottleneck uses naive summation leading to O(n) loss of precision + # unlike numpy which implements pairwise summation, which has O(log(n)) loss + # crossref: https://github.com/pydata/bottleneck/issues/379 # GH 15507 # bottleneck does not properly upcast during the sum @@ -171,7 +175,7 @@ def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool: # further we also want to preserve NaN when all elements # are NaN, unlike bottleneck/numpy which consider this # to be 0 - return name not in ["nansum", "nanprod"] + return name not in ["nansum", "nanprod", "nanmean"] return False diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py 
index 005f7b088271f..f46d5c8e2590e 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -1120,3 +1120,25 @@ def test_check_below_min_count__large_shape(min_count, expected_result): shape = (2244367, 1253) result = nanops.check_below_min_count(shape, mask=None, min_count=min_count) assert result == expected_result + + +@pytest.mark.parametrize("func", ["nanmean", "nansum"]) +@pytest.mark.parametrize( + "dtype", + [ + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.int8, + np.int16, + np.int32, + np.int64, + np.float16, + np.float32, + np.float64, + ], +) +def test_check_bottleneck_disallow(dtype, func): + # GH 42878 bottleneck sometimes produces unreliable results for mean and sum + assert not nanops._bn_ok_dtype(dtype, func)
- [x] closes #42878 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47716
2022-07-14T07:23:58Z
2022-07-18T19:14:18Z
2022-07-18T19:14:17Z
2022-07-18T19:51:36Z
TST: Test for the Enum triggering TypeError (#22551 issue)
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 0864032b741c9..25257a2c102fd 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1,5 +1,6 @@ from collections import deque from datetime import datetime +from enum import Enum import functools import operator import re @@ -2050,3 +2051,15 @@ def _constructor_sliced(self): result = sdf + sdf tm.assert_frame_equal(result, expected) + + +def test_enum_column_equality(): + Cols = Enum("Cols", "col1 col2") + + q1 = DataFrame({Cols.col1: [1, 2, 3]}) + q2 = DataFrame({Cols.col1: [1, 2, 3]}) + + result = q1[Cols.col1] == q2[Cols.col1] + expected = Series([True, True, True], name=Cols.col1) + + tm.assert_series_equal(result, expected)
- [x] closes #22551
https://api.github.com/repos/pandas-dev/pandas/pulls/47715
2022-07-14T04:20:41Z
2022-07-16T18:08:28Z
2022-07-16T18:08:27Z
2022-07-16T18:08:41Z
BUG: df.fillna ignores axis when df is single block
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index a6408b940119d..9651269963803 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -914,6 +914,7 @@ Missing - Bug in :meth:`Series.fillna` and :meth:`DataFrame.fillna` with :class:`IntervalDtype` and incompatible value raising instead of casting to a common (usually object) dtype (:issue:`45796`) - Bug in :meth:`DataFrame.interpolate` with object-dtype column not returning a copy with ``inplace=False`` (:issue:`45791`) - Bug in :meth:`DataFrame.dropna` allows to set both ``how`` and ``thresh`` incompatible arguments (:issue:`46575`) +- Bug in :meth:`DataFrame.fillna` ignored ``axis`` when :class:`DataFrame` is single block (:issue:`47713`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e392802bdb5ea..4325f0eb04a9c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6679,7 +6679,7 @@ def fillna( return result if not inplace else None elif not is_list_like(value): - if not self._mgr.is_single_block and axis == 1: + if axis == 1: result = self.T.fillna(value=value, limit=limit).T diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py index f5c9dd65e4760..d86c1b2aedcac 100644 --- a/pandas/tests/frame/methods/test_fillna.py +++ b/pandas/tests/frame/methods/test_fillna.py @@ -685,6 +685,29 @@ def test_inplace_dict_update_view(self, val): tm.assert_frame_equal(df, expected) tm.assert_frame_equal(result_view, expected) + def test_single_block_df_with_horizontal_axis(self): + # GH 47713 + df = DataFrame( + { + "col1": [5, 0, np.nan, 10, np.nan], + "col2": [7, np.nan, np.nan, 5, 3], + "col3": [12, np.nan, 1, 2, 0], + "col4": [np.nan, 1, 1, np.nan, 18], + } + ) + result = df.fillna(50, limit=1, axis=1) + expected = DataFrame( + [ + [5.0, 7.0, 12.0, 50.0], + [0.0, 50.0, np.nan, 1.0], + [50.0, np.nan, 1.0, 1.0], + [10.0, 5.0, 2.0, 50.0], + [50.0, 3.0, 0.0, 18.0], + 
], + columns=["col1", "col2", "col3", "col4"], + ) + tm.assert_frame_equal(result, expected) + def test_fillna_nonconsolidated_frame(): # https://github.com/pandas-dev/pandas/issues/36495
- [x] closes #47713 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47714
2022-07-14T04:11:29Z
2022-07-14T20:41:38Z
2022-07-14T20:41:37Z
2022-07-14T20:56:04Z
DOC: fix typos in "See also" documentation section
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e70312c562907..5c3fdf04c4342 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -10355,9 +10355,10 @@ def cov( See Also -------- Series.cov : Compute covariance with another Series. - core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance. - core.window.Expanding.cov : Expanding sample covariance. - core.window.Rolling.cov : Rolling sample covariance. + core.window.ewm.ExponentialMovingWindow.cov : Exponential weighted sample + covariance. + core.window.expanding.Expanding.cov : Expanding sample covariance. + core.window.rolling.Rolling.cov : Rolling sample covariance. Notes ----- @@ -11167,7 +11168,7 @@ def quantile( See Also -------- - core.window.Rolling.quantile: Rolling quantile. + core.window.rolling.Rolling.quantile: Rolling quantile. numpy.percentile: Numpy function to compute the percentile. Examples diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6e00f33f486d9..81af89a7e0bdb 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -12193,7 +12193,7 @@ def _doc_params(cls): See Also -------- -core.window.Expanding.{accum_func_name} : Similar functionality +core.window.expanding.Expanding.{accum_func_name} : Similar functionality but ignores ``NaN`` values. {name2}.{accum_func_name} : Return the {desc} over {name2} axis.
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47712
2022-07-14T02:02:42Z
2022-07-16T19:53:02Z
2022-07-16T19:53:02Z
2022-07-17T01:15:09Z
ENH/TST: Add BaseUnaryOpsTests tests for ArrowExtensionArray
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 92aedbb836b38..69cd2a44dfed4 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -235,6 +235,20 @@ def __arrow_array__(self, type=None): """Convert myself to a pyarrow ChunkedArray.""" return self._data + def __invert__(self: ArrowExtensionArrayT) -> ArrowExtensionArrayT: + if pa_version_under2p0: + raise NotImplementedError("__invert__ not implement for pyarrow < 2.0") + return type(self)(pc.invert(self._data)) + + def __neg__(self: ArrowExtensionArrayT) -> ArrowExtensionArrayT: + return type(self)(pc.negate_checked(self._data)) + + def __pos__(self: ArrowExtensionArrayT) -> ArrowExtensionArrayT: + return type(self)(self._data) + + def __abs__(self: ArrowExtensionArrayT) -> ArrowExtensionArrayT: + return type(self)(pc.abs_checked(self._data)) + def _cmp_method(self, other, op): from pandas.arrays import BooleanArray diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 7e0792a6010a7..c6e9bed030567 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -1210,6 +1210,24 @@ def test_EA_types(self, engine, data, request): super().test_EA_types(engine, data) +class TestBaseUnaryOps(base.BaseUnaryOpsTests): + @pytest.mark.xfail( + pa_version_under2p0, + raises=NotImplementedError, + reason="pyarrow.compute.invert not supported in pyarrow<2.0", + ) + def test_invert(self, data, request): + pa_dtype = data.dtype.pyarrow_dtype + if not pa.types.is_boolean(pa_dtype): + request.node.add_marker( + pytest.mark.xfail( + raises=pa.ArrowNotImplementedError, + reason=f"pyarrow.compute.invert does support {pa_dtype}", + ) + ) + super().test_invert(data) + + class TestBaseMethods(base.BaseMethodsTests): @pytest.mark.parametrize("dropna", [True, False]) def test_value_counts(self, all_data, dropna, request):
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/47711
2022-07-14T00:13:00Z
2022-07-21T20:35:51Z
2022-07-21T20:35:51Z
2022-07-21T21:58:54Z
GH: Add CITATION.cff
diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000000000..0161dfa92fdef --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,10 @@ +cff-version: 1.2.0 +title: 'pandas-dev/pandas: Pandas' +message: 'If you use this software, please cite it as below.' +authors: + - name: "The pandas development team" +license: BSD-3-Clause +license-url: "https://github.com/pandas-dev/pandas/blob/main/LICENSE" +repository-code: "https://github.com/pandas-dev/pandas" +type: software +url: "https://github.com/pandas-dev/pandas" diff --git a/web/pandas/about/citing.md b/web/pandas/about/citing.md index e2821dbc19a4e..b4c7848e5db00 100644 --- a/web/pandas/about/citing.md +++ b/web/pandas/about/citing.md @@ -5,7 +5,7 @@ If you use _pandas_ for a scientific publication, we would appreciate citations to the published software and the following paper: -- [pandas on Zenodo](https://zenodo.org/record/3715232#.XoqFyC2ZOL8), +- [pandas on Zenodo](https://zenodo.org/search?page=1&size=20&q=conceptrecid%3A%223509134%22&sort=-version&all_versions=True), Please find us on Zenodo and replace with the citation for the version you are using. You can replace the full author list from there with "The pandas development team" like in the example below.
Enables a citation widget on the side of the repository that provides a copy-pastable APA & BibTeX citation: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-citation-files Followed this schema: https://github.com/citation-file-format/citation-file-format/blob/1.2.0/schema-guide.md
https://api.github.com/repos/pandas-dev/pandas/pulls/47710
2022-07-13T22:18:24Z
2022-07-16T02:12:22Z
2022-07-16T02:12:22Z
2022-07-16T17:35:38Z
Backport PR #47670 on branch 1.4.x (CI: Fix npdev build post Cython annotation change)
diff --git a/pandas/_libs/arrays.pyx b/pandas/_libs/arrays.pyx index 8895a2bcfca89..f63d16e819c92 100644 --- a/pandas/_libs/arrays.pyx +++ b/pandas/_libs/arrays.pyx @@ -157,7 +157,7 @@ cdef class NDArrayBacked: return self._from_backing_data(res_values) # TODO: pass NPY_MAXDIMS equiv to axis=None? - def repeat(self, repeats, axis: int = 0): + def repeat(self, repeats, axis: int | np.integer = 0): if axis is None: axis = 0 res_values = cnp.PyArray_Repeat(self._ndarray, repeats, <int>axis) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index b2ea2e746b44c..ef565d3e0e746 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1654,7 +1654,7 @@ cdef class _Period(PeriodMixin): return freq @classmethod - def _from_ordinal(cls, ordinal: int, freq) -> "Period": + def _from_ordinal(cls, ordinal: int64_t, freq) -> "Period": """ Fast creation from an ordinal and freq that are already validated! """ diff --git a/pandas/tests/io/parser/test_quoting.py b/pandas/tests/io/parser/test_quoting.py index 456dd049d2f4a..a1aba949e74fe 100644 --- a/pandas/tests/io/parser/test_quoting.py +++ b/pandas/tests/io/parser/test_quoting.py @@ -38,7 +38,7 @@ def test_bad_quote_char(all_parsers, kwargs, msg): @pytest.mark.parametrize( "quoting,msg", [ - ("foo", '"quoting" must be an integer'), + ("foo", '"quoting" must be an integer|Argument'), (5, 'bad "quoting" value'), # quoting must be in the range [0, 3] ], )
Backport PR #47670: CI: Fix npdev build post Cython annotation change
https://api.github.com/repos/pandas-dev/pandas/pulls/47709
2022-07-13T20:58:59Z
2022-07-14T11:42:15Z
2022-07-14T11:42:15Z
2022-07-14T11:42:15Z
BUG: json_normalize raises broadcasting error with list-like metadata
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 48c808819d788..32c7a0e73fa06 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -1047,6 +1047,7 @@ I/O - Bug in :func:`read_parquet` with ``use_nullable_dtypes=True`` where ``float64`` dtype was returned instead of nullable ``Float64`` dtype (:issue:`45694`) - Bug in :meth:`DataFrame.to_json` where ``PeriodDtype`` would not make the serialization roundtrip when read back with :meth:`read_json` (:issue:`44720`) - Bug in :func:`read_xml` when reading XML files with Chinese character tags and would raise ``XMLSyntaxError`` (:issue:`47902`) +- Bug in :func:`json_normalize` raised boardcasting error with list-like metadata (:issue:`37782`, :issue:`47182`) Period ^^^^^^ diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index e77d60d2d4950..febe5d111c774 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -22,6 +22,8 @@ ) from pandas.util._decorators import deprecate +from pandas.core.dtypes.common import is_list_like + import pandas as pd from pandas import DataFrame @@ -531,7 +533,14 @@ def _recursive_extract(data, path, seen_meta, level=0): raise ValueError( f"Conflicting metadata name {k}, need distinguishing prefix " ) - result[k] = np.array(v, dtype=object).repeat(lengths) + if v and is_list_like(v[0]): + out = [] + for item, repeat in zip(v, lengths): + for _ in range(repeat): + out.append(item) + else: + out = np.array(v, dtype=object).repeat(lengths).tolist() + result[k] = out return result diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 231228ef6c0af..960d9240e331f 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -546,7 +546,7 @@ def test_meta_non_iterable(self): result = json_normalize(json.loads(data), record_path=["data"], meta=["id"]) expected = DataFrame( - {"one": [1], "two": [2], "id": 
np.array([99], dtype=object)} + {"one": [1], "two": [2], "id": np.array([99], dtype="int64")} ) tm.assert_frame_equal(result, expected) @@ -640,9 +640,7 @@ def test_missing_nested_meta(self): ) ex_data = [[1, "foo", np.nan], [2, "foo", np.nan]] columns = ["rec", "meta", "nested_meta.leaf"] - expected = DataFrame(ex_data, columns=columns).astype( - {"nested_meta.leaf": object} - ) + expected = DataFrame(ex_data, columns=columns) tm.assert_frame_equal(result, expected) # If errors="raise" and nested metadata is null, we should raise with the @@ -891,3 +889,32 @@ def test_series_non_zero_index(self): } ) tm.assert_frame_equal(result, expected) + + def test_list_type_meta_data(self): + # GH 37782 + data = {"values": [1, 2, 3], "metadata": {"listdata": [1, 2]}} + result = json_normalize( + data=data, + record_path=["values"], + meta=[["metadata", "listdata"]], + ) + expected = DataFrame( + { + 0: [1, 2, 3], + "metadata.listdata": [[1, 2], [1, 2], [1, 2]], + } + ) + tm.assert_frame_equal(result, expected) + + def test_empty_list_data(self): + # GH 47182 + data = [ + {"id": 1, "path": [{"a": 3, "b": 4}], "emptyList": []}, + {"id": 2, "path": [{"a": 5, "b": 6}], "emptyList": []}, + ] + result = json_normalize(data, "path", ["id", "emptyList"]) + expected = DataFrame( + [[3, 4, 1, []], [5, 6, 2, []]], + columns=["a", "b", "id", "emptyList"], + ) + tm.assert_frame_equal(result, expected)
- [x] closes #37782 - [x] closes #47182 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47708
2022-07-13T20:09:29Z
2022-10-24T21:22:44Z
null
2022-10-24T21:22:45Z
WEB: Governance community members
diff --git a/web/pandas/about/governance.md b/web/pandas/about/governance.md index 92923db6e6763..f017b153c1e8d 100644 --- a/web/pandas/about/governance.md +++ b/web/pandas/about/governance.md @@ -49,145 +49,142 @@ manage project donations and acts as a parent legal entity. NumFOCUS is the only legal entity that has a formal relationship with the project (see Institutional Partners section below). -## Governance - -This section describes the governance and leadership model of The Project. - -The foundations of Project governance are: - -- Openness & Transparency -- Active Contribution -- Institutional Neutrality - -Traditionally, Project leadership was provided by a BDFL (Wes McKinney) and -subset of Contributors, called the Core Team, whose active and consistent -contributions have been recognized by their receiving “commit rights” to the -Project GitHub repositories. In general all Project decisions are made through -consensus among the Core Team with input from the Community. The BDFL can, but -rarely chooses to, override the Core Team and make a final decision on a -matter. - -While this approach has served us well, as the Project grows and faces more -legal and financial decisions and interacts with other institutions, we see a -need for a more formal governance model. Moving forward The Project leadership -will consist of a BDFL and Core Team. We view this governance model as the -formalization of what we are already doing, rather than a change in direction. - -### BDFL - -The Project will have a BDFL (Benevolent Dictator for Life), who is currently -Wes McKinney. As Dictator, the BDFL has the authority to make all final -decisions for The Project. As Benevolent, the BDFL, in practice chooses to -defer that authority to the consensus of the community discussion channels and -the Core Team. It is expected, and in the past has been the case, that the BDFL -will only rarely assert his/her final authority. 
Because it is rarely used, we -refer to BDFL’s final authority as a “special” or “overriding” vote. When it -does occur, the BDFL override typically happens in situations where there is a -deadlock in the Core Team or if the Core Team ask the BDFL to make a decision -on a specific matter. To ensure the benevolence of the BDFL, The Project -encourages others to fork the project if they disagree with the overall -direction the BDFL is taking. The BDFL is chair of the Core Team (see below) -and may delegate his/her authority on a particular decision or set of decisions -to any other Core Team Member at his/her discretion. - -The BDFL can appoint his/her successor, but it is expected that the Core Team -would be consulted on this decision. If the BDFL is unable to appoint a -successor (e.g. due to death or illness), the Core Team will choose a successor -by voting with at least 2/3 of the Core Team members voting in favor of the -chosen successor. At least 80% of the Core Team must participate in the -vote. If no BDFL candidate receives 2/3 of the votes of the Core Team, the Core -Team members shall propose the BDFL candidates to the Main NumFOCUS board, who -will then make the final decision. - -### Core Team - -The Project's Core Team will consist of Project Contributors who have produced -contributions that are substantial in quality and quantity, and sustained over -at least one year. The overall role of the Core Team is to ensure, through -working with the BDFL and taking input from the Community, the long-term -well-being of the project, both technically and as a community. - -During the everyday project activities, Core Team participate in all -discussions, code review and other project activities as peers with all other -Contributors and the Community. In these everyday activities, Core Team do not -have any special power or privilege through their membership on the Core -Team. 
However, it is expected that because of the quality and quantity of their -contributions and their expert knowledge of the Project Software that the Core -Team will provide useful guidance, both technical and in terms of project -direction, to potentially less experienced contributors. - -The Core Team and its Members play a special role in certain situations. -In particular, the Core Team may: - -- Make decisions about the overall scope, vision and direction of the - project. -- Make decisions about strategic collaborations with other organizations or - individuals. -- Make decisions about specific technical issues, features, bugs and pull - requests. They are the primary mechanism of guiding the code review process - and merging pull requests. -- Make decisions about the Services that are run by The Project and manage - those Services for the benefit of the Project and Community. -- Make decisions when regular community discussion doesn’t produce consensus - on an issue in a reasonable time frame. - -### Core Team membership - -To become eligible for being a Core Team Member an individual must be a Project -Contributor who has produced contributions that are substantial in quality and -quantity, and sustained over at least one year. Potential Core Team Members are -nominated by existing Core members and voted upon by the existing Core Team -after asking if the potential Member is interested and willing to serve in that -capacity. The Core Team will be initially formed from the set of existing -Contributors who have been granted commit rights as of late 2015. - -When considering potential Members, the Core Team will look at candidates with -a comprehensive view of their contributions. This will include but is not -limited to code, code review, infrastructure work, mailing list and chat -participation, community help/building, education and outreach, design work, -etc. 
We are deliberately not setting arbitrary quantitative metrics (like “100 -commits in this repo”) to avoid encouraging behavior that plays to the metrics -rather than the project’s overall well-being. We want to encourage a diverse -array of backgrounds, viewpoints and talents in our team, which is why we -explicitly do not define code as the sole metric on which Core Team membership -will be evaluated. - -If a Core Team member becomes inactive in the project for a period of one year, -they will be considered for removal from the Core Team. Before removal, -inactive Member will be approached by the BDFL to see if they plan on returning -to active participation. If not they will be removed immediately upon a Core -Team vote. If they plan on returning to active participation soon, they will be -given a grace period of one year. If they don’t return to active participation -within that time period they will be removed by vote of the Core Team without -further grace period. All former Core Team members can be considered for -membership again at any time in the future, like any other Project Contributor. -Retired Core Team members will be listed on the project website, acknowledging -the period during which they were active in the Core Team. - -The Core Team reserves the right to eject current Members, other than the BDFL, -if they are deemed to be actively harmful to the project’s well-being, and -attempts at communication and conflict resolution have failed. - -### Conflict of interest - -It is expected that the BDFL and Core Team Members will be employed at a wide -range of companies, universities and non-profit organizations. Because of this, -it is possible that Members will have conflict of interests. Such conflict of -interests include, but are not limited to: - -- Financial interests, such as investments, employment or contracting work, - outside of The Project that may influence their work on The Project. 
-- Access to proprietary information of their employer that could potentially - leak into their work with the Project. - -All members of the Core Team, BDFL included, shall disclose to the rest of the -Core Team any conflict of interest they may have. Members with a conflict of -interest in a particular issue may participate in Core Team discussions on that -issue, but must recuse themselves from voting on the issue. If the BDFL has -recused his/herself for a particular decision, they will appoint a substitute -BDFL for that decision. - -### Private communications of the Core Team +## Community members + +The pandas community is composed of a diverse group of stakeholders, such as +developers, contributors, individual and corporate users, etc. There are +some groups which have specific responsibilities. We list them next. + +### Active maintainers + +Active maintainers (aka the core developer team) are contributors of the +project who made significant contributions in the form of code, reviews, +software design, documentation etc. Their role in the project is to +advance the pandas software and goals. + +Some of the responsibilities and abilities of active maintainers are: + +- Have commit rights on the pandas repositories +- Merge pull requests from other maintainers and other contributors +- Have access to the core developer distribution list +- Have access to the social media accounts +- Participate in the decisions regarding the project funding +- Have access to the project funding for tasks like maintenance and + leadership +- Have voting rights for things that require a voting, such as technical + decisions, project partnerships, changes to project governance, etc. + +**Membership**: Contributors to the pandas project become maintainers after +showing significant contributions over a period of one year. In general an +active maintainer will nominate a contributor, and the nomination will move +forward if consensus from the rest of activate maintainers exists. 
+ +See the list of active maintainers [here](team.html#maintainers). + +### BDFL (Benevolent Dictator For Life) + +The figure of the BDFL exist to be able to unblock situations where a decision +needs to be made, and consensus or voting has failed. In such situations, the +BDFL will make the final decision. + +**Membership**: Wes McKinney, as original creator of pandas has been the BDFL +of the project. In the event of Wes stepping down as BDFL, maintainers will +make a decision about whether to appoint a new BDFL or change the governance +type. + +### Finances committee + +The role of the members of the finances committee is to approve the spending +of pandas funds. Decisions in general will be made together with the rest of +active maintainers. Committee members will be responsible to make the final +decisions, and formally approve payments. + +**Membership**: The committee will have 5 members, who will be selected by active +maintainers. Some constraints exists regarding committee membership: + +- Members must be active maintainers +- No more than two committee members can be employed directly or indirectly + by the same employer +- Committee members should not have conflicts of interest that could prevent + them to make the best decisions in the interest of the project. This includes + maintainers who receive significant payments from pandas funds + +### Code of conduct committee + +The role of the committee is to make sure pandas is as open, transparent and +inclusive as it aims to be by its values. In particular, the committee will +monitor and respond to any possible violation of our +[code of conduct](../community/coc.html). And will publish regular summaries +about violation reports. + +**Membership**: Any members of the community can be part of the committee. +The committee will have 5 members, who will be selected by active maintainers. 
+The next constraints must be satisfied: + +- The committee should aim to be as diverse as reasonably possible, to be able + to make decisions based on a variety of points of views. In particular, the + committee should not have more than 3 members of the same gender, or more + than two members from the same geography (continent). Ideally the committee + will also be diverse in other ways such as religion, political views, + age, etc. +- No more than two members of the committee should be pandas maintainers. + +### Inactive maintainers + +Inactive maintainers are former active maintainers who are not interested or +not available to continue contributing to pandas in a regular way. If they +decide to participate in a discussion, they will still be considered active +maintainers for that discussion, but otherwise they are not expected to be part +of the decision making of the project, not have commit rights to the pandas +repositories, or be in the maintainers distribution list. + +**Membership**: An active maintainer becomes inactive by their own decision when +they notify the rest of active maintainers to be inactive. Alternatively, an +active maintainer will be considered inactive if both of the following conditions +are satisfied: + +- The maintainer has not been engaged on the pandas repository, mailing lists, in + person sprints, or any other project communication channel for over 1 year +- The maintainer has not responded to an inquiry from the active maintainers, within + a 1 month time frame, that they want to remain an active maintainer + +Inactive maintainers can become active again at any time if they are interested. + +### NumFOCUS + +[NumFOCUS](https://numfocus.org) is the fiscal sponsor of the pandas project. +As such, NumFOCUS is the legal and financial entity of the project, being the +owner of pandas trademarks and copyrights, and the legal entity of the +project for financial and tax reasons. 
NumFOCUS also helps promote pandas, and +find synergies with other projects of the ecosystem. + +### Sponsors + +Sponsors are institutions (companies, non-profits, universities, government +agencies, etc) that contribute to the pandas project. The main types of +sponsors are institutions employing people who work in pandas as part of their +job and institutions funding the project. Sponsors will have advantages like +being listed in the pandas website, being mentioned in pandas channels +such as the blog or social media, or having direct communication with the +pandas maintainers, other than the usual channels. And others agreed by +active maintainers. + +**Membership**: Institutions become sponsors if they employ a person to work +on pandas at least one day per week. Or if they provide funds to the project +(in money or in kind) of value of at least $10,000. Institutions stop +being considered sponsors after one year since the last action that made them +sponsors. + +## Creation of committees + +Active maintainers can create new committees which are helpful to a specific +goal or that can provide leadership and guidance for specific aspects of the +project. New committees will be added to the [Community members](#community-members) +section of this document, with their roles and responsibilities, as well as +a membership section on how members are selected, and any constraints about +the members if they exist. + +## Private communications of the Core Team Unless specifically required, all Core Team discussions and activities will be public and done in collaboration and discussion with the Project Contributors @@ -197,103 +194,6 @@ communications and decisions are needed, the Core Team will do its best to summarize those to the Community after eliding personal/private/sensitive information that should not be posted to the public internet. 
-### Subcommittees - -The Core Team can create subcommittees that provide leadership and guidance for -specific aspects of the project. Like the Core Team as a whole, subcommittees -should conduct their business in an open and public manner unless privacy is -specifically called for. Private subcommittee communications should happen on -the main private mailing list of the Core Team unless specifically called for. - -Question: if the BDFL is not on a subcommittee, do they still have override -authority? - -Suggestion: they do, but they should appoint a delegate who plays that role -most of the time, and explicit BDFL intervention is sought only if the -committee disagrees with that delegate’s decision and no resolution is possible -within the team. This is different from a BDFL delegate for a specific decision -(or a recusal situation), where the BDFL is literally giving up his/her -authority to someone else in full. It’s more like what Linus Torvalds uses with his -“lieutenants” model. - -### NumFOCUS Subcommittee - -The Core Team will maintain one narrowly focused subcommittee to manage its -interactions with NumFOCUS. - -- The NumFOCUS Subcommittee is comprised of at least 5 persons who manage - project funding that comes through NumFOCUS. It is expected that these funds - will be spent in a manner that is consistent with the non-profit mission of - NumFOCUS and the direction of the Project as determined by the full Core - Team. -- This Subcommittee shall NOT make decisions about the direction, scope or - technical direction of the Project. -- This Subcommittee will have at least 5 members. No more than 2 Subcommitee - Members can report to one person (either directly or indirectly) through - employment or contracting work (including the reportee, i.e. the reportee + 1 - is the max). This avoids effective majorities resting on one person. - -## Institutional Partners and Funding - -The BDFL and Core Team are the primary leadership for the project. 
No outside -institution, individual or legal entity has the ability to own, control, usurp -or influence the project other than by participating in the Project as -Contributors and Core Team. However, because institutions are the primary -funding mechanism for the project, it is important to formally acknowledge -institutional participation in the project. These are Institutional Partners. - -An Institutional Contributor is any individual Project Contributor who -contributes to the project as part of their official duties at an Institutional -Partner. Likewise, an Institutional Core Team Member is any Core Team Member -who contributes to the project as part of their official duties at an -Institutional Partner. - -With these definitions, an Institutional Partner is any recognized legal entity -in the United States or elsewhere that employs at least one Institutional -Contributor or Institutional Core Team Member. Institutional Partners can be -for-profit or non-profit entities. - -Institutions become eligible to become an Institutional Partner by employing -individuals who actively contribute to The Project as part of their official -duties. To state this another way, the only way for an Institutional Partner to -influence the project is by actively contributing to the open development of -the project, on equal terms with any other member of the community of -Contributors and Core Team Members. Merely using pandas Software or Services in -an institutional context does not allow an entity to become an Institutional -Partner. Financial gifts do not enable an entity to become an Institutional -Partner. Once an institution becomes eligible for Institutional Partnership, -the Core Team must nominate and approve the Partnership. - -If an existing Institutional Partner no longer has a contributing employee, -they will be given a one-year grace period for other employees to begin -contributing. 
- -An Institutional Partner is free to pursue funding for their work on The -Project through any legal means. This could involve a non-profit organization -raising money from private foundations and donors or a for-profit company -building proprietary products and services that leverage Project Software and -Services. Funding acquired by Institutional Partners to work on The Project is -called Institutional Funding. However, no funding obtained by an Institutional -Partner can override The Project BDFL and Core Team. If a Partner has funding -to do pandas work and the Core Team decides to not pursue that work as a -project, the Partner is free to pursue it on their own. However in this -situation, that part of the Partner’s work will not be under the pandas -umbrella and cannot use the Project trademarks in a way that suggests a formal -relationship. - -To acknowledge institutional contributions, there are two levels of -Institutional Partners, with associated benefits: - -**Tier 1** = an institution with at least one Institutional Core Team Member - -- Acknowledged on the pandas website, in talks and T-shirts. -- Ability to acknowledge their own funding sources on the pandas website, in - talks and T-shirts. -- Ability to influence the project through the participation of their Core Team - Member. - -**Tier 2** = an institution with at least one Institutional Contributor - ## Breach Non-compliance with the terms of the governance documents shall be reported to
xref #47694 Draft of the different groups of the pandas community worth mentioning in the governance. I personally find this format (a concise description with responsibilities, and a section for how membership works) the best. I think it keeps simple, while having all the information so decision making is clear. The exact content is an initial version what makes sense to me, but feedback more than welcome. I think having an initial proposal make things easier to discuss, but surely things can be improved. Topics like how a technical decision in the project is made are not in the scope of this PR, and will be addressed separately. What I try to do here is to formalize which are the different groups that are relevant in the pandas community, and when and how people and institutions become part of them, or when stop being part.
https://api.github.com/repos/pandas-dev/pandas/pulls/47706
2022-07-13T18:34:42Z
2022-07-28T17:17:15Z
null
2022-07-28T17:26:56Z
GH: Convert feature request template to GH form
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 0c30b941bc520..0000000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,33 +0,0 @@ ---- - -name: Feature Request -about: Suggest an idea for pandas -title: "ENH:" -labels: "Enhancement, Needs Triage" - ---- - -#### Is your feature request related to a problem? - -[this should provide a description of what the problem is, e.g. "I wish I could use pandas to do [...]"] - -#### Describe the solution you'd like - -[this should provide a description of the feature request, e.g. "`DataFrame.foo` should get a new parameter `bar` that [...]", try to write a docstring for the desired feature] - -#### API breaking implications - -[this should provide a description of how this feature will affect the API] - -#### Describe alternatives you've considered - -[this should provide a description of any alternative solutions or features you've considered] - -#### Additional context - -[add any other context, code examples, or references to existing implementations about the feature request here] - -```python -# Your code here, if applicable - -``` diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml new file mode 100644 index 0000000000000..f837eb1ca5bb7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -0,0 +1,72 @@ +name: Feature Request +description: Suggest an idea for pandas +title: "ENH: " +labels: [Enhancement, Needs Triage] + +body: + - type: checkboxes + id: checks + attributes: + label: Feature Type + description: Please check what type of feature request you would like to propose. 
+ options: + - label: > + Adding new functionality to pandas + - label: > + Changing existing functionality in pandas + - label: > + Removing existing functionality in pandas + - type: textarea + id: description + attributes: + label: Problem Description + description: > + Please describe what problem the feature would solve, e.g. "I wish I could use pandas to ..." + placeholder: > + I wish I could use pandas to return a Series from a DataFrame when possible. + validations: + required: true + - type: textarea + id: feature + attributes: + label: Feature Description + description: > + Please describe how the new feature would be implemented, using psudocode if relevant. + placeholder: > + Add a new parameter to DataFrame, to_series, to return a Series if possible. + + def __init__(self, ..., to_series: bool=False): + """ + Parameters + ---------- + ... + + to_series : bool, default False + Return a Series if possible + """ + if to_series: + return Series(data) + validations: + required: true + - type: textarea + id: alternative + attributes: + label: Alternative Solutions + description: > + Please describe any alternative solution (existing functionality, 3rd party package, etc.) + that would satisfy the feature request. + placeholder: > + Write a custom function to return a Series when possible. + + def to_series(...) + result = pd.DataFrame(...) + ... + validations: + required: true + - type: textarea + id: context + attributes: + label: Additional Context + description: > + Please provide any relevant Github issues, code examples or references that help describe and support + the feature request.
Mostly a 1:1 transfer. I removed the `API breaking implications` section though as the reviewers should have more/just-as-good insight into potential API impacts.
https://api.github.com/repos/pandas-dev/pandas/pulls/47696
2022-07-13T06:19:16Z
2022-07-16T02:13:34Z
2022-07-16T02:13:34Z
2022-07-16T17:37:35Z
DOC: Centeralized testing guidance for contributions
diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst index 81cd69aa384a4..c74c44fb1d5f0 100644 --- a/doc/source/development/contributing_codebase.rst +++ b/doc/source/development/contributing_codebase.rst @@ -324,8 +324,169 @@ Writing tests All tests should go into the ``tests`` subdirectory of the specific package. This folder contains many current examples of tests, and we suggest looking to these for -inspiration. Please reference our :ref:`testing location guide <test_organization>` if you are unsure -where to place a new unit test. +inspiration. Ideally, there should be one, and only one, obvious place for a test to reside. +Until we reach that ideal, these are some rules of thumb for where a test should +be located. + +1. Does your test depend only on code in ``pd._libs.tslibs``? + This test likely belongs in one of: + + - tests.tslibs + + .. note:: + + No file in ``tests.tslibs`` should import from any pandas modules + outside of ``pd._libs.tslibs`` + + - tests.scalar + - tests.tseries.offsets + +2. Does your test depend only on code in pd._libs? + This test likely belongs in one of: + + - tests.libs + - tests.groupby.test_libgroupby + +3. Is your test for an arithmetic or comparison method? + This test likely belongs in one of: + + - tests.arithmetic + + .. note:: + + These are intended for tests that can be shared to test the behavior + of DataFrame/Series/Index/ExtensionArray using the ``box_with_array`` + fixture. + + - tests.frame.test_arithmetic + - tests.series.test_arithmetic + +4. Is your test for a reduction method (min, max, sum, prod, ...)? + This test likely belongs in one of: + + - tests.reductions + + .. note:: + + These are intended for tests that can be shared to test the behavior + of DataFrame/Series/Index/ExtensionArray. + + - tests.frame.test_reductions + - tests.series.test_reductions + - tests.test_nanops + +5. Is your test for an indexing method? 
+ This is the most difficult case for deciding where a test belongs, because + there are many of these tests, and many of them test more than one method + (e.g. both ``Series.__getitem__`` and ``Series.loc.__getitem__``) + + A) Is the test specifically testing an Index method (e.g. ``Index.get_loc``, + ``Index.get_indexer``)? + This test likely belongs in one of: + + - tests.indexes.test_indexing + - tests.indexes.fooindex.test_indexing + + Within that files there should be a method-specific test class e.g. + ``TestGetLoc``. + + In most cases, neither ``Series`` nor ``DataFrame`` objects should be + needed in these tests. + + B) Is the test for a Series or DataFrame indexing method *other* than + ``__getitem__`` or ``__setitem__``, e.g. ``xs``, ``where``, ``take``, + ``mask``, ``lookup``, or ``insert``? + This test likely belongs in one of: + + - tests.frame.indexing.test_methodname + - tests.series.indexing.test_methodname + + C) Is the test for any of ``loc``, ``iloc``, ``at``, or ``iat``? + This test likely belongs in one of: + + - tests.indexing.test_loc + - tests.indexing.test_iloc + - tests.indexing.test_at + - tests.indexing.test_iat + + Within the appropriate file, test classes correspond to either types of + indexers (e.g. ``TestLocBooleanMask``) or major use cases + (e.g. ``TestLocSetitemWithExpansion``). + + See the note in section D) about tests that test multiple indexing methods. + + D) Is the test for ``Series.__getitem__``, ``Series.__setitem__``, + ``DataFrame.__getitem__``, or ``DataFrame.__setitem__``? + This test likely belongs in one of: + + - tests.series.test_getitem + - tests.series.test_setitem + - tests.frame.test_getitem + - tests.frame.test_setitem + + If many cases such a test may test multiple similar methods, e.g. + + .. 
code-block:: python + + import pandas as pd + import pandas._testing as tm + + def test_getitem_listlike_of_ints(): + ser = pd.Series(range(5)) + + result = ser[[3, 4]] + expected = pd.Series([2, 3]) + tm.assert_series_equal(result, expected) + + result = ser.loc[[3, 4]] + tm.assert_series_equal(result, expected) + + In cases like this, the test location should be based on the *underlying* + method being tested. Or in the case of a test for a bugfix, the location + of the actual bug. So in this example, we know that ``Series.__getitem__`` + calls ``Series.loc.__getitem__``, so this is *really* a test for + ``loc.__getitem__``. So this test belongs in ``tests.indexing.test_loc``. + +6. Is your test for a DataFrame or Series method? + + A) Is the method a plotting method? + This test likely belongs in one of: + + - tests.plotting + + B) Is the method an IO method? + This test likely belongs in one of: + + - tests.io + + C) Otherwise + This test likely belongs in one of: + + - tests.series.methods.test_mymethod + - tests.frame.methods.test_mymethod + + .. note:: + + If a test can be shared between DataFrame/Series using the + ``frame_or_series`` fixture, by convention it goes in the + ``tests.frame`` file. + +7. Is your test for an Index method, not depending on Series/DataFrame? + This test likely belongs in one of: + + - tests.indexes + +8) Is your test for one of the pandas-provided ExtensionArrays (``Categorical``, + ``DatetimeArray``, ``TimedeltaArray``, ``PeriodArray``, ``IntervalArray``, + ``PandasArray``, ``FloatArray``, ``BoolArray``, ``StringArray``)? + This test likely belongs in one of: + + - tests.arrays + +9) Is your test for *all* ExtensionArray subclasses (the "EA Interface")? 
+ This test likely belongs in one of: + + - tests.extension Using ``pytest`` ~~~~~~~~~~~~~~~~ @@ -388,6 +549,8 @@ xfail is not to be used for tests involving failure due to invalid user argument For these tests, we need to verify the correct exception type and error message is being raised, using ``pytest.raises`` instead. +.. _contributing.warnings: + Testing a warning ^^^^^^^^^^^^^^^^^ @@ -405,6 +568,27 @@ If a warning should specifically not happen in a block of code, pass ``False`` i with tm.assert_produces_warning(False): pd.no_warning_function() +If you have a test that would emit a warning, but you aren't actually testing the +warning itself (say because it's going to be removed in the future, or because we're +matching a 3rd-party library's behavior), then use ``pytest.mark.filterwarnings`` to +ignore the error. + +.. code-block:: python + + @pytest.mark.filterwarnings("ignore:msg:category") + def test_thing(self): + pass + +If you need finer-grained control, you can use Python's +`warnings module <https://docs.python.org/3/library/warnings.html>`__ +to control whether a warning is ignored or raised at different places within +a single test. + +.. code-block:: python + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", FutureWarning) + Testing an exception ^^^^^^^^^^^^^^^^^^^^ @@ -570,59 +754,6 @@ preferred if the inputs or logic are simple, with Hypothesis tests reserved for cases with complex logic or where there are too many combinations of options or subtle interactions to test (or think of!) all of them. -.. _contributing.warnings: - -Testing warnings -~~~~~~~~~~~~~~~~ - -By default, the :ref:`Continuous Integration <contributing.ci>` will fail if any unhandled warnings are emitted. - -If your change involves checking that a warning is actually emitted, use -``tm.assert_produces_warning(ExpectedWarning)``. - - -.. 
code-block:: python - - import pandas._testing as tm - - - df = pd.DataFrame() - with tm.assert_produces_warning(FutureWarning): - df.some_operation() - -We prefer this to the ``pytest.warns`` context manager because ours checks that the warning's -stacklevel is set correctly. The stacklevel is what ensure the *user's* file name and line number -is printed in the warning, rather than something internal to pandas. It represents the number of -function calls from user code (e.g. ``df.some_operation()``) to the function that actually emits -the warning. Our linter will fail the build if you use ``pytest.warns`` in a test. - -If you have a test that would emit a warning, but you aren't actually testing the -warning itself (say because it's going to be removed in the future, or because we're -matching a 3rd-party library's behavior), then use ``pytest.mark.filterwarnings`` to -ignore the error. - -.. code-block:: python - - @pytest.mark.filterwarnings("ignore:msg:category") - def test_thing(self): - ... - -If the test generates a warning of class ``category`` whose message starts -with ``msg``, the warning will be ignored and the test will pass. - -If you need finer-grained control, you can use Python's usual -`warnings module <https://docs.python.org/3/library/warnings.html>`__ -to control whether a warning is ignored / raised at different places within -a single test. - -.. code-block:: python - - with warnings.catch_warnings(): - warnings.simplefilter("ignore", FutureWarning) - # Or use warnings.filterwarnings(...) - -Alternatively, consider breaking up the unit test. 
- Running the test suite ---------------------- diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst index 01509705bb92c..1dbe162cd1a6b 100644 --- a/doc/source/development/index.rst +++ b/doc/source/development/index.rst @@ -18,7 +18,6 @@ Development contributing_codebase maintaining internals - test_writing debugging_extensions extending developer diff --git a/doc/source/development/test_writing.rst b/doc/source/development/test_writing.rst deleted file mode 100644 index 76eae505471b7..0000000000000 --- a/doc/source/development/test_writing.rst +++ /dev/null @@ -1,167 +0,0 @@ -.. _test_organization: - -Test organization -================= -Ideally, there should be one, and only one, obvious place for a test to reside. -Until we reach that ideal, these are some rules of thumb for where a test should -be located. - -1. Does your test depend only on code in ``pd._libs.tslibs``? - This test likely belongs in one of: - - - tests.tslibs - - .. note:: - - No file in ``tests.tslibs`` should import from any pandas modules - outside of ``pd._libs.tslibs`` - - - tests.scalar - - tests.tseries.offsets - -2. Does your test depend only on code in pd._libs? - This test likely belongs in one of: - - - tests.libs - - tests.groupby.test_libgroupby - -3. Is your test for an arithmetic or comparison method? - This test likely belongs in one of: - - - tests.arithmetic - - .. note:: - - These are intended for tests that can be shared to test the behavior - of DataFrame/Series/Index/ExtensionArray using the ``box_with_array`` - fixture. - - - tests.frame.test_arithmetic - - tests.series.test_arithmetic - -4. Is your test for a reduction method (min, max, sum, prod, ...)? - This test likely belongs in one of: - - - tests.reductions - - .. note:: - - These are intended for tests that can be shared to test the behavior - of DataFrame/Series/Index/ExtensionArray. - - - tests.frame.test_reductions - - tests.series.test_reductions - - tests.test_nanops - -5. 
Is your test for an indexing method? - This is the most difficult case for deciding where a test belongs, because - there are many of these tests, and many of them test more than one method - (e.g. both ``Series.__getitem__`` and ``Series.loc.__getitem__``) - - A) Is the test specifically testing an Index method (e.g. ``Index.get_loc``, - ``Index.get_indexer``)? - This test likely belongs in one of: - - - tests.indexes.test_indexing - - tests.indexes.fooindex.test_indexing - - Within that files there should be a method-specific test class e.g. - ``TestGetLoc``. - - In most cases, neither ``Series`` nor ``DataFrame`` objects should be - needed in these tests. - - B) Is the test for a Series or DataFrame indexing method *other* than - ``__getitem__`` or ``__setitem__``, e.g. ``xs``, ``where``, ``take``, - ``mask``, ``lookup``, or ``insert``? - This test likely belongs in one of: - - - tests.frame.indexing.test_methodname - - tests.series.indexing.test_methodname - - C) Is the test for any of ``loc``, ``iloc``, ``at``, or ``iat``? - This test likely belongs in one of: - - - tests.indexing.test_loc - - tests.indexing.test_iloc - - tests.indexing.test_at - - tests.indexing.test_iat - - Within the appropriate file, test classes correspond to either types of - indexers (e.g. ``TestLocBooleanMask``) or major use cases - (e.g. ``TestLocSetitemWithExpansion``). - - See the note in section D) about tests that test multiple indexing methods. - - D) Is the test for ``Series.__getitem__``, ``Series.__setitem__``, - ``DataFrame.__getitem__``, or ``DataFrame.__setitem__``? - This test likely belongs in one of: - - - tests.series.test_getitem - - tests.series.test_setitem - - tests.frame.test_getitem - - tests.frame.test_setitem - - If many cases such a test may test multiple similar methods, e.g. - - .. 
code-block:: python - - import pandas as pd - import pandas._testing as tm - - def test_getitem_listlike_of_ints(): - ser = pd.Series(range(5)) - - result = ser[[3, 4]] - expected = pd.Series([2, 3]) - tm.assert_series_equal(result, expected) - - result = ser.loc[[3, 4]] - tm.assert_series_equal(result, expected) - - In cases like this, the test location should be based on the *underlying* - method being tested. Or in the case of a test for a bugfix, the location - of the actual bug. So in this example, we know that ``Series.__getitem__`` - calls ``Series.loc.__getitem__``, so this is *really* a test for - ``loc.__getitem__``. So this test belongs in ``tests.indexing.test_loc``. - -6. Is your test for a DataFrame or Series method? - - A) Is the method a plotting method? - This test likely belongs in one of: - - - tests.plotting - - B) Is the method an IO method? - This test likely belongs in one of: - - - tests.io - - C) Otherwise - This test likely belongs in one of: - - - tests.series.methods.test_mymethod - - tests.frame.methods.test_mymethod - - .. note:: - - If a test can be shared between DataFrame/Series using the - ``frame_or_series`` fixture, by convention it goes in the - ``tests.frame`` file. - -7. Is your test for an Index method, not depending on Series/DataFrame? - This test likely belongs in one of: - - - tests.indexes - -8) Is your test for one of the pandas-provided ExtensionArrays (``Categorical``, - ``DatetimeArray``, ``TimedeltaArray``, ``PeriodArray``, ``IntervalArray``, - ``PandasArray``, ``FloatArray``, ``BoolArray``, ``StringArray``)? - This test likely belongs in one of: - - - tests.arrays - -9) Is your test for *all* ExtensionArray subclasses (the "EA Interface")? - This test likely belongs in one of: - - - tests.extension
* Move contents of `doc/source/development/test_writing.rst` to testing section in `doc/source/development/contributing_codebase.rst` * Consolidate 2 warning testing sections to 1
https://api.github.com/repos/pandas-dev/pandas/pulls/47692
2022-07-13T00:30:01Z
2022-07-16T02:15:05Z
2022-07-16T02:15:05Z
2022-07-22T03:58:34Z
ENH: dt64/td64 comparison support non-nano
diff --git a/pandas/_libs/tslibs/np_datetime.pyi b/pandas/_libs/tslibs/np_datetime.pyi index 27871a78f8aaf..757165fbad268 100644 --- a/pandas/_libs/tslibs/np_datetime.pyi +++ b/pandas/_libs/tslibs/np_datetime.pyi @@ -1,5 +1,7 @@ import numpy as np +from pandas._typing import npt + class OutOfBoundsDatetime(ValueError): ... class OutOfBoundsTimedelta(ValueError): ... @@ -10,3 +12,6 @@ def astype_overflowsafe( arr: np.ndarray, dtype: np.dtype, copy: bool = ... ) -> np.ndarray: ... def is_unitless(dtype: np.dtype) -> bool: ... +def compare_mismatched_resolutions( + left: np.ndarray, right: np.ndarray, op +) -> npt.NDArray[np.bool_]: ... diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 1aab5dcd6f70b..692b4430fa577 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -20,12 +20,14 @@ from cpython.object cimport ( import_datetime() import numpy as np + cimport numpy as cnp cnp.import_array() from numpy cimport ( int64_t, ndarray, + uint8_t, ) from pandas._libs.tslibs.util cimport get_c_string_buf_and_size @@ -370,3 +372,81 @@ cpdef ndarray astype_overflowsafe( cnp.PyArray_MultiIter_NEXT(mi) return iresult.view(dtype) + + +# TODO: try to upstream this fix to numpy +def compare_mismatched_resolutions(ndarray left, ndarray right, op): + """ + Overflow-safe comparison of timedelta64/datetime64 with mismatched resolutions. + + >>> left = np.array([500], dtype="M8[Y]") + >>> right = np.array([0], dtype="M8[ns]") + >>> left < right # <- wrong! 
+ array([ True]) + """ + + if left.dtype.kind != right.dtype.kind or left.dtype.kind not in ["m", "M"]: + raise ValueError("left and right must both be timedelta64 or both datetime64") + + cdef: + int op_code = op_to_op_code(op) + NPY_DATETIMEUNIT left_unit = get_unit_from_dtype(left.dtype) + NPY_DATETIMEUNIT right_unit = get_unit_from_dtype(right.dtype) + + # equiv: result = np.empty((<object>left).shape, dtype="bool") + ndarray result = cnp.PyArray_EMPTY( + left.ndim, left.shape, cnp.NPY_BOOL, 0 + ) + + ndarray lvalues = left.view("i8") + ndarray rvalues = right.view("i8") + + cnp.broadcast mi = cnp.PyArray_MultiIterNew3(result, lvalues, rvalues) + int64_t lval, rval + bint res_value + + Py_ssize_t i, N = left.size + npy_datetimestruct ldts, rdts + + + for i in range(N): + # Analogous to: lval = lvalues[i] + lval = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 1))[0] + + # Analogous to: rval = rvalues[i] + rval = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 2))[0] + + if lval == NPY_DATETIME_NAT or rval == NPY_DATETIME_NAT: + res_value = op_code == Py_NE + + else: + pandas_datetime_to_datetimestruct(lval, left_unit, &ldts) + pandas_datetime_to_datetimestruct(rval, right_unit, &rdts) + + res_value = cmp_dtstructs(&ldts, &rdts, op_code) + + # Analogous to: result[i] = res_value + (<uint8_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = res_value + + cnp.PyArray_MultiIter_NEXT(mi) + + return result + + +import operator + + +cdef int op_to_op_code(op): + # TODO: should exist somewhere? 
+ if op is operator.eq: + return Py_EQ + if op is operator.ne: + return Py_NE + if op is operator.le: + return Py_LE + if op is operator.lt: + return Py_LT + if op is operator.ge: + return Py_GE + if op is operator.gt: + return Py_GT diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index eadf47b36d7fc..0f88ad9811bf0 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -46,6 +46,7 @@ RoundTo, round_nsint64, ) +from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions from pandas._libs.tslibs.timestamps import integer_op_not_supported from pandas._typing import ( ArrayLike, @@ -1065,6 +1066,24 @@ def _cmp_method(self, other, op): ) return result + if other is NaT: + if op is operator.ne: + result = np.ones(self.shape, dtype=bool) + else: + result = np.zeros(self.shape, dtype=bool) + return result + + if not is_period_dtype(self.dtype): + self = cast(TimelikeOps, self) + if self._reso != other._reso: + if not isinstance(other, type(self)): + # i.e. 
Timedelta/Timestamp, cast to ndarray and let + # compare_mismatched_resolutions handle broadcasting + other_arr = np.array(other.asm8) + else: + other_arr = other._ndarray + return compare_mismatched_resolutions(self._ndarray, other_arr, op) + other_vals = self._unbox(other) # GH#37462 comparison on i8 values is almost 2x faster than M8/m8 result = op(self._ndarray.view("i8"), other_vals.view("i8")) diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 63601ff963609..af1a292a2975a 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -1,6 +1,8 @@ """ Tests for DatetimeArray """ +import operator + import numpy as np import pytest @@ -169,6 +171,42 @@ def test_repr(self, dta_dti, unit): assert repr(dta) == repr(dti._data).replace("[ns", f"[{unit}") + # TODO: tests with td64 + def test_compare_mismatched_resolutions(self, comparison_op): + # comparison that numpy gets wrong bc of silent overflows + op = comparison_op + + iinfo = np.iinfo(np.int64) + vals = np.array([iinfo.min, iinfo.min + 1, iinfo.max], dtype=np.int64) + + # Construct so that arr2[1] < arr[1] < arr[2] < arr2[2] + arr = np.array(vals).view("M8[ns]") + arr2 = arr.view("M8[s]") + + left = DatetimeArray._simple_new(arr, dtype=arr.dtype) + right = DatetimeArray._simple_new(arr2, dtype=arr2.dtype) + + if comparison_op is operator.eq: + expected = np.array([False, False, False]) + elif comparison_op is operator.ne: + expected = np.array([True, True, True]) + elif comparison_op in [operator.lt, operator.le]: + expected = np.array([False, False, True]) + else: + expected = np.array([False, True, False]) + + result = op(left, right) + tm.assert_numpy_array_equal(result, expected) + + result = op(left[1], right) + tm.assert_numpy_array_equal(result, expected) + + if op not in [operator.eq, operator.ne]: + # check that numpy still gets this wrong; if it is fixed we may be + # able to remove compare_mismatched_resolutions + 
np_res = op(left._ndarray, right._ndarray) + tm.assert_numpy_array_equal(np_res[1:], ~expected[1:]) + class TestDatetimeArrayComparisons: # TODO: merge this into tests/arithmetic/test_datetime64 once it is
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47691
2022-07-12T23:31:55Z
2022-07-13T20:00:40Z
2022-07-13T20:00:40Z
2022-07-13T20:40:01Z
Specify that both ``by`` and ``level`` should not be specified in ``groupby`` - GH40378
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index 5d8ef7ce02097..3a6ec0bfd6c5c 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -274,15 +274,16 @@ the length of the ``groups`` dict, so it is largely just a convenience: df gb = df.groupby("gender") - -.. ipython:: +.. ipython:: python @verbatim - In [1]: gb.<TAB> # noqa: E225, E999 + # flake8: noqa + In [1]: gb.<TAB> gb.agg gb.boxplot gb.cummin gb.describe gb.filter gb.get_group gb.height gb.last gb.median gb.ngroups gb.plot gb.rank gb.std gb.transform gb.aggregate gb.count gb.cumprod gb.dtype gb.first gb.groups gb.hist gb.max gb.min gb.nth gb.prod gb.resample gb.sum gb.var gb.apply gb.cummax gb.cumsum gb.fillna gb.gender gb.head gb.indices gb.mean gb.name gb.ohlc gb.quantile gb.size gb.tail gb.weight + .. _groupby.multiindex: GroupBy with MultiIndex @@ -345,6 +346,17 @@ Index level names may be supplied as keys. More on the ``sum`` function and aggregation later. +When using ``.groupby()`` on a DatFrame with a MultiIndex, do not specify both ``by`` and ``level``. +The argument validation should be done in ``.groupby()``, using the name of the specific index. + +.. ipython:: python + + df = pd.DataFrame({"col1": ["a", "b", "c"]}) + df.index = pd.MultiIndex.from_arrays([["a", "a", "b"], + [1, 2, 1]], + names=["x", "y"]) + df.groupby(["col1", "x"]) + Grouping DataFrame with Index levels and columns ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A DataFrame may be grouped by a combination of columns and index levels by
- [ ] closes #40378 (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47690
2022-07-12T22:39:57Z
2022-07-18T13:30:37Z
null
2022-07-18T13:30:37Z
First proj
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ead4ea744c647..1de871942aeb1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8332,6 +8332,18 @@ def update( 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 + +When using ``.groupby()`` on a multiple index dataframe, +do not specify both ``by`` and ``level``. +The argument validation should be done in instead be done in +``.groupby()``, using the name of the specific index. + +>>> df = pandas.DataFrame({"col1": ["a", "b", "c"]}) +>>> df.index = pandas.MultiIndex.from_arrays([["a", "a", "b"], +... [1, 2, 1]], +... names=["x", "y"]) +>>> df.groupby(["col1", "x"]) # Fine +>>> df.groupby("col1", level=0) # TypeError """ ) @Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47689
2022-07-12T21:10:25Z
2022-07-13T21:11:47Z
null
2022-07-13T21:11:47Z
BUG/TST fix replace with panda NAType #47480
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 0b450fab53137..0e5bde5fc29ae 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -910,6 +910,7 @@ Missing - Bug in :meth:`Series.fillna` and :meth:`DataFrame.fillna` with :class:`IntervalDtype` and incompatible value raising instead of casting to a common (usually object) dtype (:issue:`45796`) - Bug in :meth:`DataFrame.interpolate` with object-dtype column not returning a copy with ``inplace=False`` (:issue:`45791`) - Bug in :meth:`DataFrame.dropna` allows to set both ``how`` and ``thresh`` incompatible arguments (:issue:`46575`) +- Bug in :meth:`DataFrame.replace` now works when ``pandas.NA`` is a value in the dara frame (:issue:`47480`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx index 9b470e95dc22b..906935bb12aae 100644 --- a/pandas/_libs/missing.pyx +++ b/pandas/_libs/missing.pyx @@ -409,7 +409,6 @@ class NAType(C_NAType): __rdivmod__ = _create_binary_propagating_op("__rdivmod__", is_divmod=True) # __lshift__ and __rshift__ are not implemented - __eq__ = _create_binary_propagating_op("__eq__") __ne__ = _create_binary_propagating_op("__ne__") __le__ = _create_binary_propagating_op("__le__") __lt__ = _create_binary_propagating_op("__lt__") @@ -423,6 +422,12 @@ class NAType(C_NAType): __abs__ = _create_unary_propagating_op("__abs__") __invert__ = _create_unary_propagating_op("__invert__") + def __eq__(self, other): + if other is C_NA: + return True + else: + return False + # pow has special def __pow__(self, other): if other is C_NA: diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 57b0a95f803b1..391921deded1f 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -93,6 +93,7 @@ def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]: pass else: new_mask = arr == x + if not isinstance(new_mask, np.ndarray): # usually BooleanArray new_mask = 
new_mask.to_numpy(dtype=bool, na_value=False) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index f7504e9173bf5..5e2d2d536e6ff 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1567,3 +1567,10 @@ def test_replace_with_value_also_being_replaced(self): result = df.replace({0: 1, 1: np.nan}) expected = DataFrame({"A": [1, np.nan, 2], "B": [np.nan, 1, 2]}) tm.assert_frame_equal(result, expected) + + def test_replace_with_pandas_NA(self): + # GH47480 + df = DataFrame({"A": [pd.NA, 1, 2], "B": [1, 0, 2]}) + result = df.replace(2, 3) + expected = DataFrame({"A": [pd.NA, 1, 3], "B": [1, 0, 3]}) + tm.assert_frame_equal(result, expected)
- [ ] closes #47480 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/v1.0.5.rst`. Currently numpy does not support elementwise operations with pd.NA, therefore replace including this type didn't work or different errors arose. Fixed issue by manually iterating.
https://api.github.com/repos/pandas-dev/pandas/pulls/47688
2022-07-12T21:10:10Z
2022-07-13T17:50:11Z
null
2022-07-13T17:50:21Z
updated the docstring in accordance with GH40378
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ead4ea744c647..1de871942aeb1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8332,6 +8332,18 @@ def update( 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 + +When using ``.groupby()`` on a multiple index dataframe, +do not specify both ``by`` and ``level``. +The argument validation should be done in instead be done in +``.groupby()``, using the name of the specific index. + +>>> df = pandas.DataFrame({"col1": ["a", "b", "c"]}) +>>> df.index = pandas.MultiIndex.from_arrays([["a", "a", "b"], +... [1, 2, 1]], +... names=["x", "y"]) +>>> df.groupby(["col1", "x"]) # Fine +>>> df.groupby("col1", level=0) # TypeError """ ) @Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47686
2022-07-12T20:36:32Z
2022-07-12T21:05:41Z
null
2022-07-12T21:06:18Z
TST: avoid sort when concat int-index Dataframes with sort=False
diff --git a/pandas/tests/reshape/concat/test_sort.py b/pandas/tests/reshape/concat/test_sort.py index a789dc0f8dc83..e83880625f3d6 100644 --- a/pandas/tests/reshape/concat/test_sort.py +++ b/pandas/tests/reshape/concat/test_sort.py @@ -93,6 +93,22 @@ def test_concat_frame_with_sort_false(self): tm.assert_frame_equal(result, expected) + # GH 37937 + df1 = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[1, 2, 3]) + df2 = DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]}, index=[3, 1, 6]) + result = pd.concat([df2, df1], axis=1, sort=False) + expected = DataFrame( + [ + [7.0, 10.0, 3.0, 6.0], + [8.0, 11.0, 1.0, 4.0], + [9.0, 12.0, np.nan, np.nan], + [np.nan, np.nan, 2.0, 5.0], + ], + index=[3, 1, 6, 2], + columns=["c", "d", "a", "b"], + ) + tm.assert_frame_equal(result, expected) + def test_concat_sort_none_warning(self): # GH#41518 df = DataFrame({1: [1, 2], "a": [3, 4]})
- [x] closes #37937 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). This issue may have been fixed by #36299. Add the test and close.
https://api.github.com/repos/pandas-dev/pandas/pulls/47685
2022-07-12T19:14:38Z
2022-07-12T21:40:09Z
2022-07-12T21:40:09Z
2022-07-13T01:23:39Z
DOC: Clarify that FrozenList is hashable
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py index deb6ac2c80a81..90713e846fbd1 100644 --- a/pandas/core/indexes/frozen.py +++ b/pandas/core/indexes/frozen.py @@ -18,7 +18,7 @@ class FrozenList(PandasObject, list): """ Container that doesn't allow setting item *but* - because it's technically non-hashable, will be used + because it's technically hashable, will be used for lookups, appropriately, etc. """
- [x] closes #47683 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47684
2022-07-12T18:48:11Z
2022-07-12T19:10:32Z
2022-07-12T19:10:32Z
2022-07-12T19:10:40Z
CLN: non-nano follow-ups
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index ee3964b892e2e..9c7f35d240f96 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -152,7 +152,7 @@ def format_array_from_datetime( # a format based on precision basic_format = format is None if basic_format: - reso_obj = get_resolution(values, reso=reso) + reso_obj = get_resolution(values, tz=tz, reso=reso) show_ns = reso_obj == Resolution.RESO_NS show_us = reso_obj == Resolution.RESO_US show_ms = reso_obj == Resolution.RESO_MS diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 00e2c8b8b6be6..0dfb859a3444f 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -144,9 +144,13 @@ cpdef inline (int64_t, int) precision_from_unit(str unit): NPY_DATETIMEUNIT reso = abbrev_to_npy_unit(unit) if reso == NPY_DATETIMEUNIT.NPY_FR_Y: + # each 400 years we have 97 leap years, for an average of 97/400=.2425 + # extra days each year. We get 31556952 by writing + # 3600*24*365.2425=31556952 m = 1_000_000_000 * 31556952 p = 9 elif reso == NPY_DATETIMEUNIT.NPY_FR_M: + # 2629746 comes from dividing the "Y" case by 12. m = 1_000_000_000 * 2629746 p = 9 elif reso == NPY_DATETIMEUNIT.NPY_FR_W: diff --git a/pandas/_libs/tslibs/vectorized.pyi b/pandas/_libs/tslibs/vectorized.pyi index 7eb4695b9ca2c..d24541aede8d8 100644 --- a/pandas/_libs/tslibs/vectorized.pyi +++ b/pandas/_libs/tslibs/vectorized.pyi @@ -42,5 +42,5 @@ def ints_to_pydatetime( def tz_convert_from_utc( stamps: npt.NDArray[np.int64], tz: tzinfo | None, - reso: int = ..., + reso: int = ..., # NPY_DATETIMEUNIT ) -> npt.NDArray[np.int64]: ... 
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 58b4d82bcbe5f..a212da050e1f1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6897,6 +6897,7 @@ def insert(self, loc: int, item) -> Index: # Use self._constructor instead of Index to retain NumericIndex GH#43921 # TODO(2.0) can use Index instead of self._constructor + # Check if doing so fixes GH#47071 return self._constructor._with_infer(new_values, name=self.name) def drop(
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47682
2022-07-12T16:10:23Z
2022-07-12T19:01:24Z
2022-07-12T19:01:24Z
2022-07-12T19:19:11Z
WEB: Updating links of the governance page
diff --git a/web/pandas/about/governance.md b/web/pandas/about/governance.md index 56ca0a2aac3db..92923db6e6763 100644 --- a/web/pandas/about/governance.md +++ b/web/pandas/about/governance.md @@ -1,23 +1,21 @@ -# Main Governance Document +# Project governance The official version of this document, along with a list of individuals and institutions in the roles defined in the governance -section below, is contained in The Project Governance Repository at: +section below, is contained in the +[Project governance](https://pandas.pydata.org/about/governance.html) +page of the pandas website. -[https://github.com/pydata/pandas-governance](https://github.com/pydata/pandas-governance) - -The Project -=========== +## The Project The pandas Project (The Project) is an open source software project affiliated with the 501(c)3 NumFOCUS Foundation. The goal of The Project is to develop open source software for data ingest, data preparation, data analysis, and data visualization for the Python programming language. The Software developed by The Project is released under the BSD (or similar) open source license, -developed openly and hosted in public GitHub repositories under the [PyData -GitHub organization](https://github.com/pydata). Examples of Project Software -include the main pandas code repository, pandas-website, and the -pandas-datareader add-on library. +developed openly and hosted in public GitHub repositories under the [pandas +GitHub organization](https://github.com/pandas-dev). Examples of Project Software +include the main pandas code repository and the pandas-stubs library. Through its affiliation with NumFOCUS, The Project has the right to receive tax-deductible donations in the United States of America. @@ -34,7 +32,7 @@ transparency. 
Here is a list of the current Contributors to the main pandas repository: -[https://github.com/pydata/pandas/graphs/contributors](https://github.com/pydata/pandas/graphs/contributors) +[https://github.com/pandas-dev/pandas/graphs/contributors](https://github.com/pandas-dev/pandas/graphs/contributors) There are also many other Contributors listed in the logs of other repositories of the pandas project. @@ -45,14 +43,13 @@ Community and we strive to keep the barrier between Contributors and Users as low as possible. The Project is formally affiliated with the 501(c)3 NumFOCUS Foundation -([http://numfocus.org](http://numfocus.org)), which serves as its fiscal +([https://numfocus.org](https://numfocus.org)), which serves as its fiscal sponsor, may hold project trademarks and other intellectual property, helps manage project donations and acts as a parent legal entity. NumFOCUS is the only legal entity that has a formal relationship with the project (see Institutional Partners section below). -Governance -========== +## Governance This section describes the governance and leadership model of The Project. @@ -76,8 +73,7 @@ need for a more formal governance model. Moving forward The Project leadership will consist of a BDFL and Core Team. We view this governance model as the formalization of what we are already doing, rather than a change in direction. -BDFL ----- +### BDFL The Project will have a BDFL (Benevolent Dictator for Life), who is currently Wes McKinney. As Dictator, the BDFL has the authority to make all final @@ -103,8 +99,7 @@ vote. If no BDFL candidate receives 2/3 of the votes of the Core Team, the Core Team members shall propose the BDFL candidates to the Main NumFOCUS board, who will then make the final decision. -Core Team ---------- +### Core Team The Project's Core Team will consist of Project Contributors who have produced contributions that are substantial in quality and quantity, and sustained over @@ -238,8 +233,7 @@ interactions with NumFOCUS. 
employment or contracting work (including the reportee, i.e. the reportee + 1 is the max). This avoids effective majorities resting on one person. -Institutional Partners and Funding -================================== +## Institutional Partners and Funding The BDFL and Core Team are the primary leadership for the project. No outside institution, individual or legal entity has the ability to own, control, usurp @@ -300,23 +294,20 @@ Institutional Partners, with associated benefits: **Tier 2** = an institution with at least one Institutional Contributor -Breach -====== +## Breach Non-compliance with the terms of the governance documents shall be reported to the Core Team either through public or private channels as deemed appropriate. -Changing the Governance Documents -================================= +## Changing the Governance -Changes to the governance documents are submitted via a GitHub pull request to -The Project's governance documents GitHub repository at -[https://github.com/pydata/pandas-governance](https://github.com/pydata/pandas-governance). +Changes to the governance are submitted via a GitHub pull request to The Project's +[governance page](https://github.com/pandas-dev/pandas/blob/main/web/pandas/about/governance.md). The pull request is then refined in response to public comment and review, with the goal being consensus in the community. After this open period, a Core Team Member proposes to the Core Team that the changes be ratified and the pull request merged (accepting the proposed changes) or proposes that the pull -request be closed without merging (rejecting the proposed changes). The Member +request be closed without merging (rejecting the proposed changes). The Member should state the final commit hash in the pull request being proposed for acceptance or rejection and briefly summarize the pull request. A minimum of 80% of the Core Team must vote and at least 2/3 of the votes must be positive
Changing links and header formatting of the governance page. There are no material changes in this PR, I'll follow up with actual proposed updates to the governance.
https://api.github.com/repos/pandas-dev/pandas/pulls/47679
2022-07-12T10:57:09Z
2022-07-12T17:15:00Z
2022-07-12T17:15:00Z
2022-07-12T17:15:10Z
WEB: Update sponsors in website
diff --git a/web/pandas/config.yml b/web/pandas/config.yml index aeef826157b90..1330addf9a229 100644 --- a/web/pandas/config.yml +++ b/web/pandas/config.yml @@ -118,17 +118,27 @@ sponsors: url: https://www.twosigma.com/ logo: /static/img/partners/two_sigma.svg kind: partner - description: "Phillip Cloud, Jeff Reback" - - name: "Ursa Labs" - url: https://ursalabs.org/ - logo: /static/img/partners/ursa_labs.svg + description: "Jeff Reback" + - name: "Voltron Data" + url: https://voltrondata.com/ + logo: /static/img/partners/voltron_data.svg kind: partner - description: "Wes McKinney, Joris Van den Bossche" + description: "Joris Van den Bossche" - name: "d-fine GmbH" url: https://www.d-fine.com/en/ logo: /static/img/partners/dfine.svg kind: partner description: "Patrick Hoefler" + - name: "Quansight" + url: https://quansight.com/ + logo: /static/img/partners/quansight_labs.svg + kind: partner + description: "Marco Gorelli" + - name: "Nvidia" + url: https://www.nvidia.com + logo: /static/img/partners/nvidia.svg + kind: partner + description: "Matthew Roeschke" - name: "Tidelift" url: https://tidelift.com logo: /static/img/partners/tidelift.svg @@ -139,6 +149,11 @@ sponsors: logo: /static/img/partners/czi.svg kind: regular description: "<i>pandas</i> is funded by the Essential Open Source Software for Science program of the Chan Zuckerberg Initiative. The funding is used for general maintenance, improve extension types, and a efficient string type." 
+ - name: "Bodo" + url: https://www.bodo.ai/ + logo: /static/img/partners/bodo.svg + kind: regular + description: "Bodo's parallel computing platform uses pandas API, and Bodo financially supports pandas development to help improve pandas, in particular the pandas API" inkind: # not included in active so they don't appear in the home page - name: "OVH" url: https://us.ovhcloud.com/ @@ -152,10 +167,13 @@ sponsors: kind: partner - name: "Anaconda" url: https://www.anaconda.com/ - logo: /static/img/partners/anaconda.svg kind: partner - name: "RStudio" url: https://www.rstudio.com/ - logo: /static/img/partners/r_studio.svg kind: partner - description: "Wes McKinney" + - name: "Ursa Labs" + url: https://ursalabs.org/ + kind: partner + - name: "Gousto" + url: https://www.gousto.co.uk/ + kind: partner diff --git a/web/pandas/static/img/partners/anaconda.svg b/web/pandas/static/img/partners/anaconda.svg deleted file mode 100644 index fcddf72ebaa28..0000000000000 --- a/web/pandas/static/img/partners/anaconda.svg +++ /dev/null @@ -1,99 +0,0 @@ -<?xml version="1.0" encoding="UTF-8" standalone="no"?> -<svg - xmlns:dc="http://purl.org/dc/elements/1.1/" - xmlns:cc="http://creativecommons.org/ns#" - xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" - xmlns:svg="http://www.w3.org/2000/svg" - xmlns="http://www.w3.org/2000/svg" - viewBox="0 0 530.44 90.053329" - height="90.053329" - width="530.44" - xml:space="preserve" - id="svg2" - version="1.1"><metadata - id="metadata8"><rdf:RDF><cc:Work - rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type - rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><defs - id="defs6" /><g - transform="matrix(1.3333333,0,0,-1.3333333,0,90.053333)" - id="g10"><g - transform="scale(0.1)" - id="g12"><path - id="path14" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 958.313,274.5 53.637,120.406 h 1.64 L 1068.32,274.5 Z m 67.867,251.754 c -1.65,3.285 -3.83,6.027 -9.31,6.027 h -5.47 
c -4.93,0 -7.66,-2.742 -9.31,-6.027 L 831.887,157.93 c -3.282,-7.117 1.097,-14.231 9.304,-14.231 h 47.618 c 8.754,0 13.679,5.473 15.867,10.942 l 26.82,59.113 h 163.644 l 26.81,-59.113 c 3.83,-7.657 7.66,-10.942 15.88,-10.942 h 47.61 c 8.21,0 12.59,7.114 9.3,14.231 l -168.56,368.324" /><path - id="path16" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 1547.94,526.801 h -50.35 c -6.03,0 -10.4,-4.922 -10.4,-10.395 V 290.371 h -0.55 l -227.67,241.91 h -13.68 c -5.48,0 -10.4,-4.383 -10.4,-9.855 V 154.102 c 0,-5.481 4.92,-10.403 10.4,-10.403 h 49.8 c 6.02,0 10.4,4.922 10.4,10.403 v 235.332 h 0.54 L 1534.8,138.227 h 13.14 c 5.47,0 10.4,4.378 10.4,9.847 v 368.332 c 0,5.473 -4.93,10.395 -10.4,10.395" /><path - id="path18" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 1725.97,274.5 53.64,120.406 h 1.64 L 1835.98,274.5 Z m 67.87,251.754 c -1.64,3.285 -3.83,6.027 -9.31,6.027 h -5.47 c -4.93,0 -7.66,-2.742 -9.31,-6.027 L 1599.55,157.93 c -3.29,-7.117 1.09,-14.231 9.3,-14.231 h 47.62 c 8.75,0 13.68,5.473 15.87,10.942 l 26.82,59.113 h 163.64 l 26.81,-59.113 c 3.83,-7.657 7.67,-10.942 15.88,-10.942 h 47.61 c 8.21,0 12.59,7.114 9.3,14.231 l -168.56,368.324" /><path - id="path20" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 2261.6,241.117 c -3.29,3.285 -9.31,3.836 -13.69,0 -22.98,-18.605 -50.9,-31.191 -83.73,-31.191 -70.06,0 -122.6,58.008 -122.6,126.418 0,68.965 51.99,127.519 122.05,127.519 30.64,0 61.3,-12.039 84.28,-32.285 4.38,-4.379 9.85,-4.379 13.69,0 l 33.38,34.477 c 4.38,4.375 4.38,10.941 -0.55,15.328 -37.21,33.383 -77.17,50.898 -132.45,50.898 -109.45,0 -197.57,-88.117 -197.57,-197.574 0,-109.465 88.12,-196.48 197.57,-196.48 48.72,0 95.78,16.964 133,53.086 3.83,3.835 4.92,10.949 0.55,14.777 l -33.93,35.027" /><path - id="path22" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 2520.21,209.379 c -68.95,0 -125.33,56.371 -125.33,125.328 0,68.957 56.38,126.426 
125.33,126.426 68.96,0 125.88,-57.469 125.88,-126.426 0,-68.957 -56.92,-125.328 -125.88,-125.328 z m 0,322.902 c -109.46,0 -196.48,-88.117 -196.48,-197.574 0,-109.465 87.02,-196.48 196.48,-196.48 109.46,0 197.03,87.015 197.03,196.48 0,109.457 -87.57,197.574 -197.03,197.574" /><path - id="path24" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 3090.17,526.801 h -50.35 c -6.02,0 -10.4,-4.922 -10.4,-10.395 V 290.371 h -0.54 l -227.68,241.91 h -13.68 c -5.47,0 -10.4,-4.383 -10.4,-9.855 V 154.102 c 0,-5.481 4.93,-10.403 10.4,-10.403 h 49.8 c 6.02,0 10.4,4.922 10.4,10.403 v 235.332 h 0.55 l 228.77,-251.207 h 13.13 c 5.47,0 10.4,4.378 10.4,9.847 v 368.332 c 0,5.473 -4.93,10.395 -10.4,10.395" /><path - id="path26" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 3303.16,210.465 h -62.39 v 250.121 h 62.39 c 71.15,0 123.14,-53.641 123.14,-124.785 0,-71.696 -51.99,-125.336 -123.14,-125.336 z m 6.57,316.336 h -129.71 c -5.47,0 -9.85,-4.922 -9.85,-10.395 V 154.102 c 0,-5.481 4.38,-10.403 9.85,-10.403 h 129.71 c 105.63,0 192.1,85.926 192.1,192.102 0,105.082 -86.47,191 -192.1,191" /><path - id="path28" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 3631.32,274.5 53.64,120.406 h 1.64 L 3741.33,274.5 Z m 236.43,-116.57 -168.57,368.324 c -1.64,3.285 -3.82,6.027 -9.29,6.027 h -5.48 c -4.93,0 -7.67,-2.742 -9.3,-6.027 L 3504.9,157.93 c -3.29,-7.117 1.09,-14.231 9.3,-14.231 h 47.62 c 8.76,0 13.68,5.473 15.87,10.942 l 26.82,59.113 h 163.63 l 26.83,-59.113 c 3.82,-7.657 7.66,-10.942 15.86,-10.942 h 47.62 c 8.21,0 12.59,7.114 9.3,14.231" /><path - id="path30" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 3940.9,176.27 h 7.99 c 2.7,0 4.5,-1.793 4.5,-4.403 0,-2.422 -1.8,-4.394 -4.5,-4.394 h -7.99 z m -4.85,-26.582 h 3.33 c 0.99,0 1.7,0.808 1.7,1.707 v 10.148 h 5.57 l 4.49,-10.598 c 0.27,-0.629 0.9,-1.257 1.62,-1.257 h 4.04 c 1.26,0 2.16,1.257 1.53,2.425 -1.53,3.235 -3.15,6.645 
-4.76,9.969 2.69,0.984 6.82,3.5 6.82,9.879 0,6.824 -5.48,10.594 -11.04,10.594 h -13.3 c -0.98,0 -1.7,-0.809 -1.7,-1.703 v -29.457 c 0,-0.899 0.72,-1.707 1.7,-1.707" /><path - id="path32" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 3945.93,192.078 c 14.46,0 26.05,-11.586 26.05,-26.043 0,-14.371 -11.59,-26.047 -26.05,-26.047 -14.37,0 -26.04,11.676 -26.04,26.047 0,14.457 11.67,26.043 26.04,26.043 z m 0,-58.285 c 17.79,0 32.33,14.461 32.33,32.242 0,17.781 -14.54,32.328 -32.33,32.328 -17.78,0 -32.24,-14.547 -32.24,-32.328 0,-17.781 14.46,-32.242 32.24,-32.242" /><path - id="path34" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 125.527,158.422 0.051,2.484 c 0.414,19.649 1.977,39.149 4.684,57.961 l 0.254,1.77 -1.668,0.679 c -17.871,7.305 -35.4574,15.782 -52.2699,25.219 l -2.1172,1.184 -1.0742,-2.16 C 62.3164,223.238 52.9844,199.707 45.6836,175.602 l -0.7031,-2.254 2.2812,-0.629 C 72.0234,165.91 97.5195,161.184 123.051,158.66 l 2.476,-0.238" /><path - id="path36" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 177.781,500.941 c 0.032,0.196 0.063,0.395 0.094,0.59 -14.668,-0.258 -29.324,-1.265 -43.926,-2.965 1.891,-14.777 4.481,-29.437 7.828,-43.925 10.02,16.949 22.121,32.511 36.004,46.3" /><path - id="path38" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 125.527,140.855 -0.039,2.051 -2.043,0.199 c -21.406,2.02 -43.2223,5.661 -64.8278,10.821 l -5.668,1.355 3.211,-4.855 C 75.5742,121.098 99.3125,95.0195 126.73,72.9258 l 4.43,-3.5899 -0.719,5.668 c -2.906,22.6719 -4.554,44.8321 -4.914,65.8511" /><path - id="path40" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 230.566,657.227 c -26.32,-9.008 -51.164,-21.161 -74.101,-36.036 17.359,-3.07 34.469,-7.097 51.273,-12.027 6.696,16.375 14.297,32.426 22.828,48.063" /><path - id="path42" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 339.918,675.43 c -13.023,0 
-25.848,-0.813 -38.488,-2.25 17.925,-12.489 35.066,-26.145 51.238,-41.051 l 13.43,-12.391 -13.168,-12.672 c -10.899,-10.488 -21.559,-21.898 -31.688,-33.918 l -0.512,-0.585 c -0.117,-0.125 -2.003,-2.219 -5.152,-6.055 8,0.84 16.117,1.293 24.34,1.293 127.07,0 230.086,-103.016 230.086,-230.086 0,-127.074 -103.016,-230.086 -230.086,-230.086 -44.094,0 -85.277,12.426 -120.277,33.934 -17.27,-1.918 -34.629,-2.922 -52.012,-2.922 -8.074,0 -16.152,0.211 -24.227,0.629 0.524,-26.172 3.016,-53.3052 7.477,-81.438 C 204.82,21.3242 269.879,0 339.918,0 c 186.516,0 337.715,151.199 337.715,337.715 0,186.512 -151.199,337.715 -337.715,337.715" /><path - id="path44" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 295.145,595.602 c 6.726,7.968 13.671,15.695 20.765,23.101 -15.824,13.469 -32.531,25.758 -50.004,36.856 -10.742,-18.161 -20.09,-36.977 -28.093,-56.282 15.195,-5.574 30.066,-11.953 44.589,-19.031 6.711,8.617 11.399,13.883 12.743,15.356" /><path - id="path46" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 65.9219,402.934 1.289,-2.09 2.0118,1.433 c 15.6289,11.235 32.0823,21.594 48.9103,30.789 l 1.582,0.864 -0.449,1.738 c -5.028,19.227 -8.868,39.055 -11.414,58.941 l -0.305,2.399 -2.387,-0.434 C 80.168,492.027 55.4609,485.344 31.7383,476.703 l -2.2227,-0.816 0.8789,-2.188 c 9.7422,-24.562 21.6914,-48.363 35.5274,-70.765" /><path - id="path48" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="M 62.0469,370.18 60.125,368.629 C 41.9492,353.844 24.7266,337.414 8.93359,319.797 L 7.375,318.066 9.13281,316.531 C 26.6641,301.188 45.5547,287.094 65.2734,274.645 l 2.0274,-1.293 1.2031,2.097 c 8.8828,15.781 18.8945,31.356 29.7695,46.278 l 1.0938,1.503 -1.2383,1.383 c -12.3281,13.746 -23.9883,28.395 -34.668,43.547 l -1.414,2.02" /><path - id="path50" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 194.48,157.273 5.868,0.348 -4.559,3.723 c -17.976,14.715 -33.625,32.09 -46.453,51.656 l 
-0.106,0.621 -3.75,1.649 -0.433,-3.184 c -2.262,-16.856 -3.586,-34.566 -3.945,-52.625 l -0.039,-2.215 2.207,-0.129 c 8.003,-0.429 16.078,-0.644 24.171,-0.644 9.004,0 18.032,0.269 27.039,0.8" /><path - id="path52" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 183.219,530.238 c 3.633,16.649 8.109,33.121 13.511,49.317 -21.125,6.078 -42.769,10.617 -64.789,13.523 -1.867,-22.047 -2.082,-44.082 -0.707,-65.941 17.278,1.988 34.629,3.011 51.985,3.101" /><path - id="path54" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 215.813,531.414 c 14.707,9.441 30.539,17.266 47.281,23.195 -11.875,5.59 -24,10.661 -36.348,15.184 -4.219,-12.633 -7.863,-25.441 -10.933,-38.379" /><path - id="path56" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 58.6914,257.121 -1.7773,1.113 C 39.4922,269.16 22.6055,281.363 6.74609,294.496 l -4.51953,3.742 0.76953,-5.812 C 7.30078,260.039 16.2734,228.496 29.6406,198.684 l 2.3672,-5.278 1.9024,5.465 c 6.6406,19.125 14.6601,38.102 23.8281,56.387 l 0.9531,1.863" /><path - id="path58" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="M 102.133,577.48 C 81.9766,557.492 64.3555,534.969 49.7266,510.445 c 17.4804,5.215 35.1836,9.371 53.0194,12.528 -1.23,18.082 -1.465,36.273 -0.613,54.507" /><path - id="path60" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 112.121,340.762 0.234,5.824 c 0.79,20.598 4.309,40.855 10.461,60.195 l 1.793,5.653 -5.129,-2.961 c -13.152,-7.59 -26.1792,-16.012 -38.7222,-25.047 l -1.8281,-1.328 1.293,-1.86 c 8.6992,-12.406 18.1562,-24.535 28.0973,-36.062 l 3.801,-4.414" /><path - id="path62" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 114.383,305.906 -0.805,5.707 -3.34,-4.691 C 100.836,293.727 92.082,279.945 84.2227,265.961 l -1.1133,-1.992 1.9922,-1.133 c 14.1562,-7.984 29.0114,-15.305 44.1564,-21.762 l 5.402,-2.316 -2.406,5.363 c -8.863,19.668 -14.875,40.453 -17.871,61.785" 
/><path - id="path64" - style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none" - d="m 48.6602,386.676 1.5976,1.273 -1.0781,1.735 c -10.5859,16.918 -20.1836,34.707 -28.5469,52.867 l -2.457,5.355 -1.8125,-5.605 C 6.51172,411.789 1.05859,379.887 0.160156,347.473 L 0,341.523 4.10938,345.82 c 14.01172,14.598 28.99612,28.34 44.55082,40.856" /></g></g></svg> \ No newline at end of file diff --git a/web/pandas/static/img/partners/bodo.svg b/web/pandas/static/img/partners/bodo.svg new file mode 100644 index 0000000000000..9dc6cb47505a3 --- /dev/null +++ b/web/pandas/static/img/partners/bodo.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" width="930.84" height="267.43" viewBox="0 0 930.84 267.43"><defs><style>.cls-1{fill:#5e5e5e;}.cls-2{fill:#1db100;}</style></defs><title>bodo-grey-green</title><path class="cls-1" d="M85.73,93a83.24,83.24,0,0,1,33.14,6.64,85.67,85.67,0,0,1,0,158.11,83.45,83.45,0,0,1-33.14,6.67,83.06,83.06,0,0,1-33.34-6.67A87.46,87.46,0,0,1,6.67,212,83.23,83.23,0,0,1,0,178.67V11.44A10.82,10.82,0,0,1,3.43,3.25a11.3,11.3,0,0,1,8-3.24,10.66,10.66,0,0,1,7.81,3.24,11,11,0,0,1,3.24,8.19V121.17a85.33,85.33,0,0,1,23.24-17.91,86.89,86.89,0,0,1,19.47-7.64A81.71,81.71,0,0,1,85.77,93m0,149a62.21,62.21,0,0,0,24.57-5,63.2,63.2,0,0,0,33.72-33.53,64,64,0,0,0,0-49.34,62.65,62.65,0,0,0-33.72-33.72,63.48,63.48,0,0,0-49.15,0,62.72,62.72,0,0,0-33.72,33.72,64,64,0,0,0,0,49.34A63.25,63.25,0,0,0,61.19,237,62.19,62.19,0,0,0,85.77,242"/><path class="cls-1" d="M264.79,264.43a83.15,83.15,0,0,1-33.33-6.67,88.56,88.56,0,0,1-27.27-18.29,84.93,84.93,0,0,1-18.48-27.05,85.76,85.76,0,1,1,158.12,0,84.61,84.61,0,0,1-18.48,27.05,87.75,87.75,0,0,1-27.43,18.29,83.53,83.53,0,0,1-33.15,6.67m0-148.59a61.81,61.81,0,0,0-24.58,5,62.65,62.65,0,0,0-33.72,33.72,63.48,63.48,0,0,0,0,49.15,62.72,62.72,0,0,0,33.72,33.72,63.48,63.48,0,0,0,49.15,0,62.74,62.74,0,0,0,33.75-33.76,63.48,63.48,0,0,0,0-49.15,62.65,62.65,0,0,0-33.72-33.72,62,62,0,0,0-24.58-5"/><path 
class="cls-1" d="M443.48,93a85.82,85.82,0,0,1,63.24,28.19V11.06A10.66,10.66,0,0,1,510,3.25,11.31,11.31,0,0,1,529.19,11V178.7A83.06,83.06,0,0,1,522.52,212,85.76,85.76,0,1,1,410.19,99.62,82.89,82.89,0,0,1,443.53,93m63.24,85.72a61.86,61.86,0,0,0-4.95-24.57,62.65,62.65,0,0,0-33.72-33.72,63.48,63.48,0,0,0-49.15,0,62.74,62.74,0,0,0-33.76,33.75,64,64,0,0,0,0,49.34,63.39,63.39,0,0,0,116.59,0,61.48,61.48,0,0,0,5-24.77"/><path class="cls-1" d="M622.94,264.43a83.06,83.06,0,0,1-33.34-6.67,88.6,88.6,0,0,1-27.24-18.29,84.77,84.77,0,0,1-18.48-27.05,85.76,85.76,0,1,1,158.11,0,84.42,84.42,0,0,1-18.47,27.05,87.91,87.91,0,0,1-27.44,18.29,83.45,83.45,0,0,1-33.14,6.67m0-148.59a62,62,0,0,0-24.58,5,62.72,62.72,0,0,0-33.72,33.72,63.48,63.48,0,0,0,0,49.15,62.78,62.78,0,0,0,33.72,33.72,63.48,63.48,0,0,0,49.15,0,62.72,62.72,0,0,0,33.68-33.76,63.48,63.48,0,0,0,0-49.15,62.65,62.65,0,0,0-33.72-33.72,61.86,61.86,0,0,0-24.57-5"/><path class="cls-2" d="M850.77,254.14a86.3,86.3,0,0,1-19.24,7.62,81.2,81.2,0,0,1-20.76,2.67,83.23,83.23,0,0,1-33.34-6.67A87.46,87.46,0,0,1,731.71,212a85.77,85.77,0,1,1,158.12-66.51,83.44,83.44,0,0,1,6.66,33.14V253a11.31,11.31,0,0,1-3.23,8,10.84,10.84,0,0,1-8.2,3.43,10.38,10.38,0,0,1-7.81-3.43,11.34,11.34,0,0,1-3.24-8V236.23a81.85,81.85,0,0,1-23.24,17.91M747.52,178.7A63.44,63.44,0,0,0,786.19,237a63.45,63.45,0,0,0,49.14,0,62.65,62.65,0,0,0,33.72-33.72,63.48,63.48,0,0,0,0-49.15,62.59,62.59,0,0,0-33.72-33.72,63.45,63.45,0,0,0-49.14,0,63.4,63.4,0,0,0-38.72,58.29"/><path class="cls-2" d="M919.35,78.12a11.07,11.07,0,0,1-11-11V62.93a11.33,11.33,0,0,1,3.24-8,10.31,10.31,0,0,1,7.81-3.43,11.71,11.71,0,0,1,11.43,11.43v4.19a10.31,10.31,0,0,1-3.43,7.81,11.31,11.31,0,0,1-8,3.24m0,186.31a10.68,10.68,0,0,1-7.81-3.24,11.14,11.14,0,0,1-3.24-8.19V104.41a11.12,11.12,0,0,1,3.24-8.19A10.6,10.6,0,0,1,919.4,93a11.22,11.22,0,0,1,8,3.24,10.75,10.75,0,0,1,3.43,8.19V253a10.76,10.76,0,0,1-3.43,8.19,11.31,11.31,0,0,1-8,3.24"/><path class="cls-2" 
d="M731.81,234.54a19.27,19.27,0,1,1-27.25,0h0a19.26,19.26,0,0,1,27.25,0"/></svg> \ No newline at end of file diff --git a/web/pandas/static/img/partners/dfine.svg b/web/pandas/static/img/partners/dfine.svg old mode 100755 new mode 100644 diff --git a/web/pandas/static/img/partners/nvidia.svg b/web/pandas/static/img/partners/nvidia.svg new file mode 100644 index 0000000000000..59f9e19cf00ad --- /dev/null +++ b/web/pandas/static/img/partners/nvidia.svg @@ -0,0 +1,56 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + version="1.1" + id="svg2" + x="0px" + y="0px" + width="450" + height="340" + viewBox="35.188 31.512 450.00001 340" + enable-background="new 35.188 31.512 351.46 258.785" + xml:space="preserve" + sodipodi:docname="nvidia.svg" + inkscape:version="1.0.1 (3bc2e813f5, 2020-09-07)"><metadata + id="metadata11"><rdf:RDF><cc:Work + rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title>generated by pstoedit version:3.44 from NVBadge_2D.eps</dc:title></cc:Work></rdf:RDF></metadata><defs + id="defs9" /><sodipodi:namedview + pagecolor="#ffffff" + bordercolor="#666666" + borderopacity="1" + objecttolerance="10" + gridtolerance="10" + guidetolerance="10" + inkscape:pageopacity="0" + inkscape:pageshadow="2" + inkscape:window-width="1920" + inkscape:window-height="1051" + id="namedview7" + showgrid="false" + inkscape:zoom="2.2870273" + inkscape:cx="163.94912" + inkscape:cy="154.68953" + inkscape:window-x="0" + inkscape:window-y="0" + inkscape:window-maximized="1" + inkscape:current-layer="svg2" /> +<title + id="title4">generated by 
pstoedit version:3.44 from NVBadge_2D.eps</title> + + +<path + id="path17" + d="m 433.465,322.7165 c 0,3.771 -2.769,6.302 -6.047,6.302 v -0.023 c -3.371,0.023 -6.089,-2.508 -6.089,-6.278 0,-3.769 2.718,-6.293 6.089,-6.293 3.279,-0.001 6.047,2.523 6.047,6.292 z m 2.453,0 c 0,-5.175 -4.02,-8.179 -8.5,-8.179 -4.511,0 -8.531,3.004 -8.531,8.179 0,5.172 4.021,8.188 8.531,8.188 4.481,0 8.5,-3.016 8.5,-8.188 m -9.91,0.692 h 0.91 l 2.109,3.703 h 2.316 l -2.336,-3.859 c 1.207,-0.086 2.2,-0.661 2.2,-2.286 0,-2.019 -1.392,-2.668 -3.75,-2.668 h -3.411 v 8.813 h 1.961 v -3.703 m 10e-4,-1.492 v -2.122 h 1.364 c 0.742,0 1.753,0.06 1.753,0.965 0,0.985 -0.523,1.157 -1.398,1.157 h -1.719" /><path + id="path19" + d="m 378.676,277.6345 10.598,28.993 H 367.75 Z m -11.35,-11.289 -24.423,61.88 h 17.246 l 3.863,-10.934 h 28.903 l 3.656,10.934 h 18.722 l -24.605,-61.888 z m -49.033,61.903 h 17.497 v -61.922 l -17.5,-0.004 z m -121.467,-61.926 -14.598,49.078 -13.984,-49.074 -18.879,-0.004 19.972,61.926 h 25.207 l 20.133,-61.926 z m 70.725,13.484 h 7.52 c 10.91,0 17.966,4.898 17.966,17.609 0,12.714 -7.056,17.613 -17.966,17.613 h -7.52 z m -17.35,-13.484 v 61.926 h 28.366 c 15.113,0 20.048,-2.512 25.384,-8.148 3.769,-3.957 6.207,-12.641 6.207,-22.134 0,-8.707 -2.063,-16.468 -5.66,-21.304 -6.481,-8.649 -15.817,-10.34 -29.75,-10.34 z m -165.743,-0.086 v 62.012 h 17.645 v -47.086 l 13.672,0.004 c 4.527,0 7.754,1.128 9.934,3.457 2.765,2.945 3.894,7.699 3.894,16.395 v 27.23 h 17.098 v -34.262 c 0,-24.453 -15.586,-27.75 -30.836,-27.75 z m 137.583,0.086 0.007,61.926 h 17.489 v -61.926 z" /><path + id="path21" + fill="#77b900" + d="m 131.481,143.0215 c 0,0 22.504,-33.203 67.437,-36.638 v -12.046 c -49.769,3.997 -92.867,46.149 -92.867,46.149 0,0 24.41,70.565 92.867,77.026 v -12.804 c -50.237,-6.32 -67.437,-61.687 -67.437,-61.687 z m 67.437,36.223 v 11.726 c -37.968,-6.769 -48.507,-46.237 -48.507,-46.237 0,0 18.23,-20.195 48.507,-23.47 v 12.867 c -0.023,0 -0.039,-0.007 -0.058,-0.007 -15.891,-1.907 
-28.305,12.938 -28.305,12.938 0,0 6.958,24.991 28.363,32.183 m 0,-107.125 v 22.218 c 1.461,-0.112 2.922,-0.207 4.391,-0.257 56.582,-1.907 93.449,46.406 93.449,46.406 0,0 -42.343,51.488 -86.457,51.488 -4.043,0 -7.828,-0.375 -11.383,-1.005 v 13.739 c 3.04,0.386 6.192,0.613 9.481,0.613 41.051,0 70.738,-20.965 99.484,-45.778 4.766,3.817 24.278,13.103 28.289,17.168 -27.332,22.883 -91.031,41.329 -127.144,41.329 -3.481,0 -6.824,-0.211 -10.11,-0.528 v 19.306 H 354.95 V 72.1195 Z m 0,49.144 v -14.879 c 1.446,-0.101 2.903,-0.179 4.391,-0.226 40.688,-1.278 67.382,34.965 67.382,34.965 0,0 -28.832,40.043 -59.746,40.043 -4.449,0 -8.438,-0.715 -12.028,-1.922 v -45.114 c 15.84,1.914 19.028,8.911 28.551,24.786 l 21.18,-17.859 c 0,0 -15.461,-20.277 -41.524,-20.277 -2.833,-0.001 -5.544,0.198 -8.206,0.483" /> +</svg> diff --git a/web/pandas/static/img/partners/quansight_labs.svg b/web/pandas/static/img/partners/quansight_labs.svg new file mode 100644 index 0000000000000..d49ab1b7d39ec --- /dev/null +++ b/web/pandas/static/img/partners/quansight_labs.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 511.28 230.68"><defs><style>.cls-1{fill:#452391;}.cls-2{fill:#99c941;}.cls-3{fill:#973896;}</style></defs><path class="cls-1" d="M41.94,188.52c-2.37.51-7.48,2.71-12.51,5.54h3.46c5-2.71,10-5,14.08-6a15,15,0,0,0,1.62-.46Z"/><path class="cls-1" d="M21.51,200.85s9.82-6.64,19.13-6.73C52.76,194,64,212.43,78.34,212.43c13.42,0,19.3-9.22,22.1-16.83l-.58-.26c-5.22,7.72-10.57,10.32-19.14,10.32-15,0-20-15-40.06-14.73C30.05,191.06,21.41,199.35,21.51,200.85Z"/><path class="cls-1" 
d="M62.94,178.35c.13-.15.25-.31.37-.46a30.46,30.46,0,0,0,6.35-18.57,27.52,27.52,0,0,0-.42-4.5,30.27,30.27,0,0,0-3.75-10.91l.21.25C61,135.79,52.5,130.1,42.82,129.88h-1.3c-9.69.22-18.18,5.91-22.89,14.28l.21-.25A30.48,30.48,0,0,0,15,155.57a29.28,29.28,0,0,0-.31,3.85c.08,16.05,12.4,29.22,27.41,29.22A26.8,26.8,0,0,0,62.9,178.41ZM39.15,133.72l.38-.05.41,0,.69-.07.35,0,.44,0h1.5l.44,0,.34,0,.69.07.41,0,.38.05.43.08c11.63,2.15,16.16,15,16.16,25.45,0,6.22-1.59,13.22-5.3,18.35h0l-.33.45a16.86,16.86,0,0,1-13.87,6.76,18.31,18.31,0,0,1-2.76-.2c-.63-.05-1.26-.13-1.88-.22A16.13,16.13,0,0,1,32.19,182l.14,0c-6.93-4.77-9.77-14.46-9.77-22.76,0-10.48,4.53-23.3,16.16-25.45Z"/><path class="cls-1" d="M113.62,185.14c0,.43-.44.65-1.32.65H104a1.38,1.38,0,0,1-1.32-.9L101.38,182c-.28-.54-.55-.57-.82-.08q-3.54,4.68-10.27,4.68a10.93,10.93,0,0,1-8.42-3.4c-2.11-2.28-3.16-5.49-3.16-9.66v-16.5a3.43,3.43,0,0,0-2.71-3.7l-1.73-.49c-.87-.28-1.31-.58-1.31-.91s.44-.65,1.31-.65H85.61c.71,0,1.07.19,1.07.57a7.93,7.93,0,0,1-1,1.11c-.69.69-1,2.31-1,4.89v17q0,7.4,7.64,7.39A11.09,11.09,0,0,0,98,180.54c2.14-1.32,3.2-2.88,3.2-4.69v-18.8a3.43,3.43,0,0,0-2.71-3.7l-1.72-.49c-.88-.28-1.32-.58-1.32-.91s.44-.65,1.32-.65h9.61c.76,0,1.12.32,1.07,1l-.33,5.42v22.43a3.27,3.27,0,0,0,2.79,3.61l2.3.49C113.15,184.45,113.62,184.75,113.62,185.14Z"/><path class="cls-1" 
d="M149.59,185.14c0,.43-.44.65-1.31.65h-8.14a1.52,1.52,0,0,1-1.39-.9l-.66-1.15a1,1,0,0,0-.82-.58,1.9,1.9,0,0,0-1.07.41,17.43,17.43,0,0,1-10.26,3,11,11,0,0,1-7.15-2.21,8.23,8.23,0,0,1-3-6.66q0-5.91,5.67-8.37a31.87,31.87,0,0,1,5.42-1.56c3.07-.72,5.06-1.21,6-1.48,2.9-.88,4.35-2.27,4.35-4.19v-3.21a4.59,4.59,0,0,0-2.63-4.18,10,10,0,0,0-4.84-1.15c-3,0-5.25.65-6.7,2s-2.5,3.53-3.16,6.65a.59.59,0,0,1-.58.41.43.43,0,0,1-.49-.49l-.08-9.86c0-.49.27-.76.82-.82.88-.05,2.6-.25,5.18-.57a41.59,41.59,0,0,1,5.17-.42q6.57,0,9.69,2.55,3.54,3,3.53,9.69V180q0,3.21,2.63,3.7l2.47.49C149.15,184.4,149.59,184.7,149.59,185.14Zm-12.32-7.89v-8.13q0-1.06-.9-.66l-10.76,3.12q-4,1.15-4,4.77,0,7.14,6.41,7.14a11.85,11.85,0,0,0,6.57-1.76A5.21,5.21,0,0,0,137.27,177.25Z"/><path class="cls-1" d="M189.92,185.14c0,.43-.44.65-1.32.65H177.27c-.71,0-1.07-.19-1.07-.57a7.21,7.21,0,0,1,1-1.11c.68-.69,1-2.31,1-4.89v-17q0-7.4-7.63-7.39a11.26,11.26,0,0,0-5.67,1.72q-3.28,2.05-3.29,4.68V180a3.45,3.45,0,0,0,2.71,3.7l1.73.49c.87.28,1.31.58,1.31.91s-.44.65-1.31.65H151.4q-1.32,0-1.32-.57t1.32-1l1.72-.49a3.43,3.43,0,0,0,2.63-3.7V157c0-2.13-.93-3.34-2.79-3.61l-3.37-.49c-.88-.11-1.32-.41-1.32-.91s.44-.65,1.32-.65h9.28a1.36,1.36,0,0,1,1.31.9l1.32,2.87c.27.55.55.58.82.09q3.52-4.68,10.27-4.69a11,11,0,0,1,8.42,3.41q3.15,3.42,3.16,9.65V180a3.44,3.44,0,0,0,2.71,3.7l1.72.49C189.48,184.51,189.92,184.81,189.92,185.14Z"/><path class="cls-1" d="M218.58,176.18q0,10.44-12.73,10.43A43.59,43.59,0,0,1,194.27,185c-.71-.22-1.15-1-1.31-2.38,0-1,0-2-.09-3.08s-.19-2.51-.41-4.31c-.11-.66.08-1,.58-1s.79.3.9.91c1.15,3.28,2.77,5.53,4.85,6.73,1.8,1.1,4.49,1.64,8.05,1.64a8.34,8.34,0,0,0,4.6-1.31,4.68,4.68,0,0,0,2.38-4q0-3.53-5.34-5.83-4.84-2.06-9.78-4.19-5.34-3-5.33-8,0-9.69,11.41-9.7a86.77,86.77,0,0,1,11.17,1c.55.06.82.33.82.82l.17,8.13c0,.66-.17,1-.49,1s-.5-.25-.66-.74q-1.48-4.44-4.11-5.91c-1.53-.88-4-1.32-7.39-1.32q-6.4,0-6.41,4.93,0,2.79,5.34,5.09,9.45,4,10,4.36Q218.58,171,218.58,176.18Z"/><path class="cls-1" 
d="M242.07,185.22c0,.38-.44.57-1.31.57H223.51q-1.32,0-1.32-.57c0-.5.44-.82,1.32-1l2.54-.49q2.72-.5,2.71-3.7V157q0-3.12-2.71-3.61l-2.79-.49c-.87-.17-1.31-.47-1.31-.91s.44-.65,1.31-.65h10.6c.82,0,1.2.32,1.15,1l-.33,5.42V180c0,2.19.9,3.43,2.71,3.7l3.37.49C241.63,184.34,242.07,184.67,242.07,185.22ZM235,137.74a4.15,4.15,0,0,1-1.43,3.08,4.49,4.49,0,0,1-3.21,1.36,3.42,3.42,0,0,1-2.4-1,3.06,3.06,0,0,1-1-2.34,4.18,4.18,0,0,1,1.38-3.12,4.49,4.49,0,0,1,3.17-1.31Q235,134.46,235,137.74Z"/><path class="cls-1" d="M280.67,154c0,.54-.3.82-.9.82l-8.95.25a11.67,11.67,0,0,1-1.4,16.53A16,16,0,0,1,259,175h-2.38c-3.29,0-4.93,1.06-4.93,3.16q0,2.76,3.45,2.76c.33,0,1.67-.06,4-.17s4.25-.16,5.67-.16c4,0,6.95.63,8.87,1.89q3.38,2.3,3.37,7.8,0,6.74-6.74,10.18a27.43,27.43,0,0,1-12.73,2.72,17.13,17.13,0,0,1-9.11-2.22,8.74,8.74,0,0,1-4.36-7.89q0-5.74,5.42-8.7a4.85,4.85,0,0,1-2-4,5.83,5.83,0,0,1,3.78-5.91c.38-.16.58-.36.58-.57s-.17-.42-.51-.58c-4.86-2.14-7.3-5.69-7.3-10.68a10.82,10.82,0,0,1,4.25-8.83,16.31,16.31,0,0,1,10.52-3.41,18.24,18.24,0,0,1,7.18,1.4l13.7-.57c.6,0,.9.22.9.65Zm-8.21,37.61c0-2.13-.71-3.64-2.14-4.51s-3.86-1.32-7.31-1.32c-1.58,0-3.87,0-6.85.13l-5,.12q-2.55.83-2.55,5,0,3.85,4.35,5.91a18.64,18.64,0,0,0,7.89,1.56Q272.46,198.52,272.46,191.62Zm-4.76-28q0-9.94-9.33-9.94-8.09,0-8.09,8.13a11.06,11.06,0,0,0,2.4,7.47q2.38,2.81,7,2.8a7.56,7.56,0,0,0,5.94-2.3A8.88,8.88,0,0,0,267.7,163.62Z"/><path class="cls-1" 
d="M319.28,185.14c0,.43-.44.65-1.32.65H306.63c-.72,0-1.07-.19-1.07-.57a7.21,7.21,0,0,1,1-1.11q1-1,1-4.23V162.22q0-7.4-7.64-7.39a10.82,10.82,0,0,0-5.75,1.72q-3.2,2-3.2,4.68V180a3.44,3.44,0,0,0,2.71,3.7l1.73.49c.87.28,1.31.58,1.31.91s-.44.65-1.31.65H280.75c-.87,0-1.31-.19-1.31-.57s.44-.71,1.31-1l1.73-.49a3.43,3.43,0,0,0,2.63-3.7v-42c0-2.13-.91-3.34-2.71-3.61L279,134c-.88-.11-1.31-.41-1.31-.91s.43-.65,1.31-.65H290.2c.82,0,1.2.32,1.15,1l-.33,5.42v16a.39.39,0,0,0,.66.33q3.52-4.68,10.27-4.69a10.92,10.92,0,0,1,8.41,3.41q3.17,3.42,3.17,9.65V180a3.44,3.44,0,0,0,2.71,3.7l1.8.49C318.86,184.56,319.28,184.86,319.28,185.14Z"/><path class="cls-1" d="M343.34,183.71a.51.51,0,0,1-.16.65,11.25,11.25,0,0,1-6.74,1.84q-10.1,0-10.1-7.41V155.4h-6.49c-.55,0-.82-.25-.82-.77v-2.48c0-.57.27-.85.82-.85h4.76q1.39,0,2.22-1.89l3.86-9.2c.22-.49.49-.74.82-.74.5,0,.74.33.74,1V151.3h9.36c.55,0,.83.22.83.65v2.8c0,.43-.25.65-.74.65h-9.45v21.82q0,6.53,5.09,6.52a9.31,9.31,0,0,0,4.77-1.15c.33-.33.6-.3.82.08Z"/><path class="cls-1" d="M396,172.82l-2,12.07a1,1,0,0,1-1.15.9H360.92q-1.32,0-1.32-.57t1.32-1q2.54-.57,2.54-3.69V142.18q0-3.21-2.71-3.61l-3.37-.5c-.87-.11-1.31-.41-1.31-.9s.44-.66,1.31-.66h18.48q1.32,0,1.32.57c0,.5-.44.82-1.32,1l-2.71.49c-1.86.33-2.79,1.55-2.79,3.68v38.63c0,1,.58,1.47,1.73,1.47h10.26c3.95-.05,6.76-1,8.46-2.87q1.89-2,3.37-7.48c.22-.71.6-1.06,1.15-1.06S396.21,171.56,396,172.82Z"/><path class="cls-1" 
d="M431.47,185.14c0,.43-.44.65-1.32.65H422a1.52,1.52,0,0,1-1.39-.9l-.66-1.15a1,1,0,0,0-.82-.58,1.9,1.9,0,0,0-1.07.41,17.44,17.44,0,0,1-10.27,3,11,11,0,0,1-7.14-2.21,8.23,8.23,0,0,1-3-6.66q0-5.91,5.67-8.37a31.58,31.58,0,0,1,5.42-1.56q4.59-1.08,6-1.48c2.91-.88,4.36-2.27,4.36-4.19v-3.21a4.59,4.59,0,0,0-2.63-4.18,10,10,0,0,0-4.85-1.15c-3,0-5.24.65-6.69,2s-2.51,3.53-3.16,6.65a.59.59,0,0,1-.58.41.43.43,0,0,1-.49-.49l-.08-9.86c0-.49.27-.76.82-.82.87-.05,2.6-.25,5.17-.57a41.72,41.72,0,0,1,5.18-.42q6.57,0,9.69,2.55,3.53,3,3.53,9.69V180q0,3.21,2.63,3.7l2.46.49C431,184.4,431.47,184.7,431.47,185.14Zm-12.32-7.89v-8.13c0-.71-.3-.93-.91-.66l-10.75,3.12q-4,1.15-4,4.77,0,7.14,6.41,7.14a11.85,11.85,0,0,0,6.57-1.76A5.23,5.23,0,0,0,419.15,177.25Z"/><path class="cls-1" d="M465.88,168.79q0,8-4.35,12.9a13.71,13.71,0,0,1-10.68,4.92,26.07,26.07,0,0,1-6.82-.82,23.07,23.07,0,0,0-3.69-.82,5.45,5.45,0,0,0-2.47.44,14.15,14.15,0,0,1-1.56.79c-.65,0-1-.3-1-.9a2,2,0,0,1,.45-.86,2.8,2.8,0,0,0,.45-1.69V138.07c0-2.13-.9-3.34-2.71-3.61l-3.37-.49c-.87-.11-1.31-.41-1.31-.91s.44-.65,1.31-.65h11.17c.83,0,1.21.32,1.15,1l-.32,5.42v15.44c0,.28.1.44.32.5a.58.58,0,0,0,.58-.17,11.68,11.68,0,0,1,8.7-4.11,12.5,12.5,0,0,1,9.86,4.19Q465.89,159.52,465.88,168.79Zm-6.24,0q0-14.08-9.28-14.09-4.61,0-7.64,4.67a3.25,3.25,0,0,0-.57,1.72v14.17q0,4.42,1.8,6.31,1.89,2.13,6.57,2.13a7.52,7.52,0,0,0,6.66-4Q459.64,175.64,459.64,168.75Z"/><path class="cls-1" d="M496.6,176.18q0,10.44-12.73,10.43A43.48,43.48,0,0,1,472.29,185c-.71-.22-1.15-1-1.32-2.38,0-1,0-2-.08-3.08s-.19-2.51-.41-4.31c-.11-.66.08-1,.58-1s.79.3.9.91c1.15,3.28,2.76,5.53,4.85,6.73,1.8,1.1,4.48,1.64,8,1.64a8.31,8.31,0,0,0,4.6-1.31,4.67,4.67,0,0,0,2.39-4q0-3.53-5.34-5.83-4.85-2.06-9.78-4.19-5.34-3-5.34-8,0-9.69,11.42-9.7a86.77,86.77,0,0,1,11.17,1c.55.06.82.33.82.82l.17,8.13c0,.66-.17,1-.5,1s-.49-.25-.65-.74q-1.49-4.44-4.11-5.91c-1.53-.88-4-1.32-7.39-1.32q-6.4,0-6.41,4.93,0,2.79,5.34,5.09,9.45,4,10,4.36Q496.6,171,496.6,176.18Z"/><rect class="cls-1" x="179.9" y="34" 
width="154.24" height="4.36"/><path class="cls-1" d="M270.14,85.37,270.06,81c1.24,0,3.05,0,5.14,0,4.63,0,11,0,13.71-.28a14.89,14.89,0,0,0,8.51-4c3.41-3.08,5.1-7.06,5.16-12.18,0-1.64,0-3.28,0-4.92,0-2.58,0-5.25.09-7.9a17.67,17.67,0,0,1,17.56-17.56c3.14-.09,6.32-.08,9.4-.07h4v4.36c-1.33,0-2.66,0-4,0-3,0-6.19,0-9.27.07a13.26,13.26,0,0,0-13.32,13.33c-.08,2.58-.08,5.23-.09,7.78,0,1.66,0,3.31,0,5A20.12,20.12,0,0,1,300.34,80a19.34,19.34,0,0,1-10.94,5.06c-3,.34-9.21.33-14.21.31C273.12,85.36,271.33,85.35,270.14,85.37Z"/><path class="cls-1" d="M242.35,85.37c-1.22,0-2.51,0-3.89,0a82.68,82.68,0,0,1-9.55-.31A19.37,19.37,0,0,1,218,80c-4.3-3.88-6.52-9-6.6-15.36,0-1.65,0-3.31,0-5,0-2.56,0-5.2-.08-7.79a13.27,13.27,0,0,0-13.33-13.33c-3.13-.08-8.74-.08-13.24-.07-1.85,0-3.52,0-4.8,0l0-4.36h4.77c4.54,0,10.18,0,13.38.07a17.67,17.67,0,0,1,17.56,17.56c.07,2.65.08,5.32.09,7.91,0,1.64,0,3.28,0,4.91.07,5.12,1.76,9.1,5.17,12.18a14.86,14.86,0,0,0,8.51,4,84.4,84.4,0,0,0,9,.28c1.4,0,2.72,0,4,0Z"/><path class="cls-2" d="M256.86,97.52A14.47,14.47,0,0,1,242.39,82.9a14.26,14.26,0,0,1,14.67-14.32A14.46,14.46,0,0,1,271.33,83.1,14.69,14.69,0,0,1,256.86,97.52Z"/><path class="cls-1" d="M348.8,21.45A14.45,14.45,0,0,1,363.08,36a14.66,14.66,0,0,1-14.45,14.43,14.45,14.45,0,0,1-14.49-14.6A14.27,14.27,0,0,1,348.8,21.45Z"/><path class="cls-3" d="M164.39,20.16A15.75,15.75,0,0,1,179.9,36a16,16,0,0,1-15.78,15.67,15.75,15.75,0,0,1-15.74-16A15.55,15.55,0,0,1,164.39,20.16Z"/><path class="cls-1" d="M256.72,99.43a16.38,16.38,0,0,1,.08-32.76H257a16.37,16.37,0,0,1,16.16,16.45,16.54,16.54,0,0,1-16.38,16.31Zm.08-28.94a12.41,12.41,0,0,0-12.59,12.42,12.59,12.59,0,0,0,3.67,9,12.4,12.4,0,0,0,8.84,3.68v1.91l0-1.91a12.71,12.71,0,0,0,12.57-12.53A12.55,12.55,0,0,0,257,70.49Z"/><path class="cls-1" 
d="M348.46,52.3a16.13,16.13,0,0,1-11.53-4.8,16.38,16.38,0,0,1-4.79-11.72,16.19,16.19,0,0,1,16.39-16.24h.2A16.34,16.34,0,0,1,364.9,36,16.54,16.54,0,0,1,348.55,52.3Zm.07-28.94A12.41,12.41,0,0,0,336,35.8a12.57,12.57,0,0,0,3.68,9,12.34,12.34,0,0,0,8.82,3.67h.07a12.7,12.7,0,0,0,12.55-12.54,12.51,12.51,0,0,0-12.4-12.58Z"/><path class="cls-1" d="M164,53.59a17.44,17.44,0,0,1-12.46-5.18,17.71,17.71,0,0,1-5.16-12.69,17.49,17.49,0,0,1,17.7-17.47h.24A17.67,17.67,0,0,1,181.72,36,17.85,17.85,0,0,1,164,53.59Zm.08-31.52A13.7,13.7,0,0,0,150.2,35.75a13.89,13.89,0,0,0,4.05,10A13.63,13.63,0,0,0,164,49.77v1.91l0-1.91A14,14,0,0,0,177.9,36,13.59,13.59,0,0,0,174,26.27a13.74,13.74,0,0,0-9.72-4.2Z"/></svg> \ No newline at end of file diff --git a/web/pandas/static/img/partners/r_studio.svg b/web/pandas/static/img/partners/r_studio.svg deleted file mode 100644 index 15a1d2a30ff30..0000000000000 --- a/web/pandas/static/img/partners/r_studio.svg +++ /dev/null @@ -1,50 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<!-- Generator: Adobe Illustrator 22.1.0, SVG Export Plug-In . 
SVG Version: 6.00 Build 0) --> -<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 1784.1 625.9" style="enable-background:new 0 0 1784.1 625.9;" xml:space="preserve"> -<style type="text/css"> - .st0{fill:#75AADB;} - .st1{fill:#4D4D4D;} - .st2{fill:#FFFFFF;} - .st3{fill:url(#SVGID_1_);} - .st4{fill:url(#SVGID_2_);} - .st5{fill:url(#SVGID_3_);} - .st6{fill:url(#SVGID_4_);} - .st7{fill:url(#SVGID_5_);} - .st8{fill:url(#SVGID_6_);} - .st9{fill:url(#SVGID_7_);} - .st10{fill:url(#SVGID_8_);} - .st11{fill:url(#SVGID_9_);} - .st12{fill:url(#SVGID_10_);} - .st13{opacity:0.18;fill:url(#SVGID_11_);} - .st14{opacity:0.3;} -</style> -<g id="Gray_Logo"> -</g> -<g id="Black_Letters"> -</g> -<g id="Blue_Gradient_Letters"> - <g> - - <ellipse transform="matrix(0.7071 -0.7071 0.7071 0.7071 -127.9265 317.0317)" class="st0" cx="318.7" cy="312.9" rx="309.8" ry="309.8"/> - <g> - <path class="st1" d="M694.4,404.8c16.1,10.3,39.1,18.1,63.9,18.1c36.7,0,58.1-19.4,58.1-47.4c0-25.5-14.8-40.8-52.3-54.8 c-45.3-16.5-73.3-40.4-73.3-79.1c0-43.3,35.8-75.4,89.8-75.4c28,0,49,6.6,61,13.6l-9.9,29.3c-8.7-5.4-27.2-13.2-52.3-13.2 c-37.9,0-52.3,22.7-52.3,41.6c0,26,16.9,38.7,55.2,53.6c47,18.1,70.5,40.8,70.5,81.6c0,42.8-31.3,80.3-96.8,80.3 c-26.8,0-56-8.2-70.9-18.1L694.4,404.8z"/> - <path class="st1" d="M943.3,201.3v47.8h51.9v27.6h-51.9v107.5c0,24.7,7,38.7,27.2,38.7c9.9,0,15.7-0.8,21-2.5l1.6,27.6 c-7,2.5-18.1,4.9-32.1,4.9c-16.9,0-30.5-5.8-39.1-15.2c-9.9-11.1-14-28.8-14-52.3V276.7h-30.9v-27.6h30.9V212L943.3,201.3z"/> - <path class="st1" d="M1202.8,393.7c0,21,0.4,39.1,1.6,54.8h-32.1l-2.1-32.5h-0.8c-9.1,16.1-30.5,37.1-65.9,37.1 c-31.3,0-68.8-17.7-68.8-87.3V249.1h36.3v110c0,37.9,11.9,63.9,44.5,63.9c24.3,0,41.2-16.9,47.8-33.4c2.1-4.9,3.3-11.5,3.3-18.5 v-122h36.3V393.7z"/> - <path class="st1" d="M1434.8,156v241c0,17.7,0.8,37.9,1.6,51.5h-32.1l-1.6-34.6h-1.2c-10.7,22.2-34.6,39.1-67.2,39.1 
c-48.2,0-85.7-40.8-85.7-101.4c-0.4-66.3,41.2-106.7,89.4-106.7c30.9,0,51.1,14.4,60.2,30.1h0.8V156H1434.8z M1398.9,330.2 c0-4.5-0.4-10.7-1.6-15.2c-5.4-22.7-25.1-41.6-52.3-41.6c-37.5,0-59.7,33-59.7,76.6c0,40.4,20.2,73.8,58.9,73.8 c24.3,0,46.6-16.5,53.1-43.3c1.2-4.9,1.6-9.9,1.6-15.7V330.2z"/> - <path class="st1" d="M1535.7,193c0,12.4-8.7,22.2-23.1,22.2c-13.2,0-21.8-9.9-21.8-22.2c0-12.4,9.1-22.7,22.7-22.7 C1526.6,170.4,1535.7,180.3,1535.7,193z M1495.3,448.5V249.1h36.3v199.4H1495.3z"/> - <path class="st1" d="M1772.2,347.1c0,73.7-51.5,105.9-99.3,105.9c-53.6,0-95.6-39.6-95.6-102.6c0-66.3,44.1-105.5,98.9-105.5 C1733.5,245,1772.2,286.6,1772.2,347.1z M1614.4,349.2c0,43.7,24.7,76.6,60.2,76.6c34.6,0,60.6-32.5,60.6-77.5 c0-33.8-16.9-76.2-59.7-76.2C1632.9,272.1,1614.4,311.7,1614.4,349.2z"/> - </g> - <g> - <path class="st2" d="M424.7,411.8h33.6v26.1h-51.3L322,310.5h-45.3v101.3h44.3v26.1H209.5v-26.1h38.3V187.3l-38.3-4.7v-24.7 c14.5,3.3,27.1,5.6,42.9,5.6c23.8,0,48.1-5.6,71.9-5.6c46.2,0,89.1,21,89.1,72.3c0,39.7-23.8,64.9-60.7,75.6L424.7,411.8z M276.7,285.3l24.3,0.5c59.3,0.9,82.1-21.9,82.1-52.3c0-35.5-25.7-49.5-58.3-49.5c-15.4,0-31.3,1.4-48.1,3.3V285.3z"/> - </g> - <g> - <path class="st1" d="M1751.8,170.4c-12.9,0-23.4,10.5-23.4,23.4c0,12.9,10.5,23.4,23.4,23.4c12.9,0,23.4-10.5,23.4-23.4 C1775.2,180.9,1764.7,170.4,1751.8,170.4z M1771.4,193.8c0,10.8-8.8,19.5-19.5,19.5c-10.8,0-19.5-8.8-19.5-19.5 c0-10.8,8.8-19.5,19.5-19.5C1762.6,174.2,1771.4,183,1771.4,193.8z"/> - <path class="st1" d="M1760.1,203.3l-5.8-8.5c3.3-1.2,5-3.6,5-7c0-5.1-4.3-6.9-8.4-6.9c-1.1,0-2.2,0.1-3.2,0.3 c-1,0.1-2.1,0.2-3.1,0.2c-1.4,0-2.5-0.2-3.7-0.5l-0.6-0.1v3.3l3.4,0.4v18.8h-3.4v3.4h10.9v-3.4h-3.9v-7.9h3.2l7.3,11l0.2,0.2h5.3 v-3.4H1760.1z M1755.6,188.1c0,1.2-0.5,2.2-1.4,2.9c-1.1,0.8-2.8,1.2-5,1.2l-1.9,0v-7.7c1.4-0.1,2.6-0.2,3.7-0.2 C1753.1,184.3,1755.6,185,1755.6,188.1z"/> - </g> - </g> -</g> -<g id="White_Letters"> -</g> -<g id="R_Ball"> -</g> -</svg> \ No newline at end of file diff --git 
a/web/pandas/static/img/partners/ursa_labs.svg b/web/pandas/static/img/partners/ursa_labs.svg deleted file mode 100644 index cacc80e337d25..0000000000000 --- a/web/pandas/static/img/partners/ursa_labs.svg +++ /dev/null @@ -1,106 +0,0 @@ -<?xml version="1.0" encoding="utf-8"?> -<!-- Generator: Adobe Illustrator 23.0.3, SVG Export Plug-In . SVG Version: 6.00 Build 0) --> -<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" - viewBox="0 0 359 270" style="enable-background:new 0 0 359 270;" xml:space="preserve"> -<style type="text/css"> - .st0{fill-rule:evenodd;clip-rule:evenodd;fill:#404040;} - .st1{filter:url(#Adobe_OpacityMaskFilter);} - .st2{fill-rule:evenodd;clip-rule:evenodd;fill:#FFFFFF;} - .st3{mask:url(#mask-2_1_);} -</style> -<title>HOME 1 Copy 8</title> -<desc>Created with Sketch.</desc> -<g id="HOME-1-Copy-8"> - <g id="Group" transform="translate(20.000000, 20.000000)"> - <path id="URSA-LABS-Copy" class="st0" d="M0,158.4h9.1V214c0,0.3,0,0.7,0.1,1.1c0,0.3,0,0.9,0.1,1.6s0.2,1.5,0.6,2.3 - c0.3,0.8,0.9,1.5,1.6,2.1c0.7,0.6,1.8,0.9,3.3,0.9c0.3,0,0.9,0,1.6-0.1c0.7-0.1,1.4-0.4,2.1-0.9c1-0.9,1.6-2,1.8-3.3 - s0.3-3.2,0.4-5.5v-53.8h9.2v54.4c0,0.6,0,1.3-0.1,2.1c-0.1,0.8-0.2,1.7-0.3,2.6s-0.3,1.8-0.5,2.6c-0.7,2.3-1.7,4.1-3,5.4 - c-1.3,1.3-2.7,2.3-4.2,2.9c-1.5,0.7-2.9,1.1-4.2,1.2c-1.3,0.1-2.3,0.2-3,0.2c-0.6,0-1.5-0.1-2.7-0.2c-1.2-0.1-2.5-0.5-3.8-1 - s-2.6-1.4-3.8-2.5c-1.2-1.1-2.2-2.7-3-4.6c-0.4-1-0.7-2.1-0.9-3.3c-0.2-1.2-0.3-2.9-0.4-5V158.4z M44,158.4h17 - c0.6,0,1.2,0,1.7,0.1c0.6,0.1,1.3,0.2,2.2,0.3c0.9,0.1,1.7,0.4,2.6,0.8c0.8,0.4,1.6,1.1,2.3,2c0.7,0.9,1.2,2.1,1.6,3.7 - c0.4,1.8,0.6,5.1,0.6,10.1c0,1.3,0,2.7-0.1,4.1c0,1.4-0.1,2.8-0.2,4.2c-0.1,0.9-0.3,1.9-0.4,2.9s-0.4,1.9-0.7,2.7 - c-0.4,0.9-0.9,1.6-1.6,2.1s-1.3,0.8-2,1c-0.7,0.2-1.3,0.3-1.9,0.3H64v0.5c1.3,0.1,2.4,0.3,3.3,0.6c0.9,0.3,1.8,1,2.5,2.1 - 
c0.8,1.3,1.3,2.7,1.5,4.3c0.2,1.6,0.3,3.9,0.3,6.8v7.7c0,2,0,3.6,0.1,4.9c0.1,1.3,0.2,2.4,0.3,3.3c0.1,0.9,0.3,1.8,0.5,2.7 - c0.2,0.9,0.6,1.8,1,2.9h-9.7c-0.3-1.7-0.6-3-0.8-4.1s-0.3-2.2-0.4-3.2c-0.1-1-0.2-2.1-0.2-3.2c0-1.1-0.1-2.5-0.1-4.2v-5 - c-0.1-1.2-0.1-2.4-0.2-3.6c0-1.2-0.1-2.4-0.3-3.6c-0.1-0.9-0.3-1.7-0.5-2.5c-0.2-0.8-0.6-1.5-1.2-2c-0.5-0.3-1-0.5-1.5-0.6 - s-1-0.2-1.6-0.2h-3.8v32.4H44V158.4z M53.4,166.9v21.7h4.4c1.2,0,2.2-0.2,2.9-0.6c0.7-0.4,1.2-1.2,1.6-2.5 - c0.2-0.9,0.3-2.3,0.4-4.2s0.1-4.1,0.1-6.6c0-0.7,0-1.5-0.1-2.2c0-0.8-0.1-1.5-0.2-2.2c-0.1-1.4-0.4-2.3-1-2.8 - c-0.3-0.3-0.8-0.5-1.3-0.5c-0.5,0-1.2,0-2.2,0H53.4z M110.6,169.1v12.4h-8.5v-12.4c0-0.2,0-0.6-0.1-1.1c0-0.5-0.2-1.1-0.4-1.6 - c-0.2-0.5-0.6-1-1.1-1.4s-1.3-0.6-2.3-0.6c-1.1,0-2,0.2-2.6,0.6c-0.6,0.4-1.1,1-1.4,1.7c-0.3,0.7-0.5,1.5-0.6,2.3 - c-0.1,0.9-0.1,1.7-0.1,2.5c0,1.5,0.1,2.8,0.3,4c0.2,1.2,0.5,2.3,0.9,3.4s0.9,2.2,1.5,3.2s1.3,2.2,2.1,3.4c0.7,1.1,1.3,2.1,2,3.1 - c0.7,1,1.4,2,2.1,3.1c1,1.4,2,2.9,3.1,4.6c1.2,1.9,2.2,3.7,2.9,5.3c0.7,1.6,1.3,3.1,1.7,4.5c0.4,1.4,0.7,2.7,0.8,3.9 - c0.1,1.2,0.2,2.3,0.2,3.3c0,0.4,0,1.3-0.1,2.6c-0.1,1.3-0.4,2.8-0.9,4.4c-0.5,1.6-1.3,3.3-2.3,4.9c-1,1.6-2.6,2.9-4.6,3.7 - c-0.6,0.3-1.4,0.5-2.4,0.7c-1,0.2-2.3,0.3-3.8,0.3c-2.9,0-5.1-0.5-6.8-1.4s-2.8-1.9-3.6-2.8c-1.5-1.7-2.3-3.4-2.6-5.3 - s-0.4-3.8-0.5-5.9V203h8.6v12.8c0,1.7,0.2,3,0.5,3.8c0.3,0.8,0.8,1.5,1.6,2c0.2,0.1,0.5,0.3,1,0.5c0.5,0.2,1.1,0.3,1.8,0.3 - c1.1,0,2-0.3,2.7-0.8c0.6-0.6,1.1-1.3,1.4-2.1c0.3-0.8,0.4-1.7,0.5-2.7c0-1,0.1-1.8,0.1-2.6c0-2.5-0.3-4.6-0.8-6.4 - c-0.5-1.7-1.4-3.7-2.7-5.9c-1.3-2.3-2.8-4.5-4.3-6.6s-2.9-4.3-4.3-6.5c-0.4-0.6-0.9-1.4-1.5-2.4c-0.6-1-1.2-2.2-1.8-3.6 - c-0.6-1.4-1.1-3-1.5-4.7s-0.7-3.7-0.7-5.7c0-3.9,0.7-6.9,2.1-9s2.8-3.7,4.3-4.5c0.7-0.5,1.8-0.9,3.1-1.3c1.3-0.4,3-0.6,5-0.6 - c0.5,0,1.2,0,2.3,0.1c1,0.1,2.1,0.3,3.3,0.7c1.1,0.4,2.2,1.1,3.3,2c1.1,0.9,1.9,2.3,2.4,4c0.2,0.7,0.4,1.4,0.5,2.1 - C110.5,166.6,110.5,167.7,110.6,169.1z M140.1,158.4l10.9,70.3h-9.1l-1.8-12.9h-10.6l-1.6,12.9h-9.1l10-70.3H140.1z M133.5,183 - 
l-3,24.2h8.4l-3.5-24.4c-0.1-0.6-0.2-1.2-0.3-1.8c0-0.6-0.1-1.2-0.2-1.8c-0.1-1.3-0.1-2.6-0.1-3.8c0-1.3,0-2.5-0.1-3.8H134 - c-0.1,1.9-0.1,3.8-0.2,5.7C133.7,179.2,133.6,181.1,133.5,183z M190.2,158.4V220h15.4v8.7h-24.7v-70.3H190.2z M232,158.4 - l10.9,70.3h-9.1l-1.8-12.9h-10.6l-1.6,12.9h-9.1l10-70.3H232z M225.4,183l-3,24.2h8.4l-3.5-24.4c-0.1-0.6-0.2-1.2-0.3-1.8 - c0-0.6-0.1-1.2-0.2-1.8c-0.1-1.3-0.1-2.6-0.1-3.8c0-1.3,0-2.5-0.1-3.8h-0.8c-0.1,1.9-0.1,3.8-0.2,5.7 - C225.6,179.2,225.5,181.1,225.4,183z M251.9,158.4h16.5c1.5,0,2.9,0.1,4.4,0.2s2.8,0.8,3.9,1.8c1.3,1.2,2,2.7,2.2,4.5 - c0.2,1.8,0.3,4.3,0.4,7.4c0,0.6,0,1.2,0.1,1.8c0,0.6,0.1,1.2,0.1,1.8c0,1.1,0,2.2-0.1,3.3c0,1.1-0.1,2.2-0.2,3.3 - c0,0.2,0,0.9-0.1,2.1c-0.1,1.2-0.3,2.3-0.8,3.3c-0.4,0.7-1,1.3-1.7,1.8c-0.7,0.5-1.4,0.8-2.2,1c-0.4,0.1-0.8,0.2-1.3,0.2 - c-0.5,0-0.8,0-0.9,0.1v0.5c1.3,0.1,2.4,0.4,3.5,0.7c1,0.4,1.9,1.1,2.6,2.2c0.5,1,0.8,2.2,0.9,3.7c0.1,1.5,0.1,3.4,0.1,5.9 - c0.1,0.9,0.1,1.9,0.1,2.8v7c0,1.4-0.1,2.8-0.2,4.3c0,0.2,0,0.6-0.1,1.2c0,0.6-0.2,1.3-0.4,2.1c-0.2,0.8-0.5,1.6-0.9,2.5 - s-1,1.6-1.7,2.3c-1.4,1.1-3,1.8-4.9,1.9s-3.6,0.2-5.3,0.2h-14.2V158.4z M260.9,166.8v21.1h3.6c1.5-0.1,2.7-0.2,3.7-0.5 - c1-0.3,1.6-1.3,1.8-3c0.2-1.4,0.3-3.8,0.3-7.1c0-2.2-0.1-4.4-0.3-6.6c-0.1-1.7-0.4-2.8-1-3.3c-0.3-0.3-0.8-0.5-1.3-0.5 - c-0.5,0-1.2,0-2.1,0H260.9z M260.9,195.5V220h4.8c0.5,0,1,0,1.5,0c0.5,0,0.9-0.1,1.3-0.2c0.4-0.1,0.7-0.3,1-0.6 - c0.3-0.3,0.5-0.8,0.6-1.4c0-0.3,0-0.7,0.1-1.4c0-0.7,0.1-1.5,0.1-2.4c0-0.9,0.1-1.9,0.1-2.9c0-1.1,0.1-2.1,0.1-3.1 - c0-1.2,0-2.4-0.1-3.5c0-1.2-0.1-2.3-0.2-3.5c-0.1-0.7-0.2-1.4-0.3-2.3c-0.1-0.9-0.4-1.6-1-2.1c-0.4-0.3-0.9-0.5-1.4-0.6 - s-1-0.1-1.5-0.1H260.9z M318.4,169.1v12.4h-8.5v-12.4c0-0.2,0-0.6-0.1-1.1c0-0.5-0.2-1.1-0.4-1.6c-0.2-0.5-0.6-1-1.1-1.4 - c-0.5-0.4-1.3-0.6-2.3-0.6c-1.1,0-2,0.2-2.6,0.6s-1.1,1-1.4,1.7s-0.5,1.5-0.6,2.3c-0.1,0.9-0.1,1.7-0.1,2.5c0,1.5,0.1,2.8,0.3,4 - s0.5,2.3,0.9,3.4s0.9,2.2,1.5,3.2c0.6,1.1,1.3,2.2,2.1,3.4c0.7,1.1,1.3,2.1,2,3.1c0.7,1,1.4,2,2.1,3.1c1,1.4,2,2.9,3.1,4.6 - 
c1.2,1.9,2.2,3.7,2.9,5.3c0.7,1.6,1.3,3.1,1.7,4.5c0.4,1.4,0.7,2.7,0.8,3.9c0.1,1.2,0.2,2.3,0.2,3.3c0,0.4,0,1.3-0.1,2.6 - c-0.1,1.3-0.4,2.8-0.9,4.4c-0.5,1.6-1.3,3.3-2.3,4.9c-1,1.6-2.6,2.9-4.6,3.7c-0.6,0.3-1.4,0.5-2.4,0.7c-1,0.2-2.3,0.3-3.8,0.3 - c-2.9,0-5.1-0.5-6.8-1.4c-1.6-0.9-2.8-1.9-3.6-2.8c-1.5-1.7-2.3-3.4-2.6-5.3c-0.3-1.9-0.4-3.8-0.5-5.9V203h8.6v12.8 - c0,1.7,0.2,3,0.5,3.8c0.3,0.8,0.8,1.5,1.6,2c0.2,0.1,0.5,0.3,1,0.5c0.5,0.2,1.1,0.3,1.8,0.3c1.1,0,2-0.3,2.7-0.8 - c0.6-0.6,1.1-1.3,1.4-2.1c0.3-0.8,0.4-1.7,0.5-2.7c0-1,0.1-1.8,0.1-2.6c0-2.5-0.3-4.6-0.8-6.4c-0.5-1.7-1.4-3.7-2.7-5.9 - c-1.3-2.3-2.8-4.5-4.3-6.6c-1.5-2.1-2.9-4.3-4.3-6.5c-0.4-0.6-0.9-1.4-1.5-2.4c-0.6-1-1.2-2.2-1.8-3.6c-0.6-1.4-1.1-3-1.5-4.7 - c-0.4-1.8-0.7-3.7-0.7-5.7c0-3.9,0.7-6.9,2.1-9s2.8-3.7,4.3-4.5c0.7-0.5,1.8-0.9,3.1-1.3c1.3-0.4,3-0.6,5-0.6c0.5,0,1.2,0,2.3,0.1 - c1,0.1,2.1,0.3,3.3,0.7s2.2,1.1,3.3,2s1.9,2.3,2.4,4c0.2,0.7,0.4,1.4,0.5,2.1C318.2,166.6,318.3,167.7,318.4,169.1z"/> - <g id="Group-3-Copy" transform="translate(47.000000, 0.000000)"> - <g id="Clip-2"> - </g> - <defs> - <filter id="Adobe_OpacityMaskFilter" filterUnits="userSpaceOnUse" x="0" y="0" width="225.8" height="123.9"> - <feColorMatrix type="matrix" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0"/> - </filter> - </defs> - <mask maskUnits="userSpaceOnUse" x="0" y="0" width="225.8" height="123.9" id="mask-2_1_"> - <g class="st1"> - <polygon id="path-1_1_" class="st2" points="0,0 225.9,0 225.9,123.9 0,123.9 "/> - </g> - </mask> - <g id="Page-1" class="st3"> - <g id="Mask"> - <path class="st0" d="M177.2,54.3c6.1,21.2,19.4,48.5,24,54.7c5.3-1.2,9.1,1.2,12.4,5.1c-1.2,0.9-2.7,1.5-3.4,2.6 - c-2.7,4.4-6.9,3-10.7,3.2c-2.8,0.2-5.6,0.3-8.4,0.3c-0.9,0-1.8-0.3-2.7-0.5c-1-0.3-1.9-1-2.8-1c-2.5,0.1-4.7,0-7.1-1.1 - c-1-0.5-2.6,0.9-3.6-0.8c-1.1-1.8-2.2-3.6-3.4-5.5c-1.2,0.2-2.2,0.4-3.4,0.6c-2.4-3-3.4-14.8-6.1-17.7 - c-0.6-0.7-2.1-2.2-3.8-2.7c-0.3-0.9-5.4-7.2-5.9-8.7c-0.2-0.5-0.3-1.2-0.7-1.4c-3.1-2-4.2-4.9-4-8.5c0-0.4-0.2-0.7-0.4-1.7 - 
c-1.2,2.7-2.2,4.8-3.2,7.1c-0.6,1.4-1,2.9-1.8,4.3c-0.5,0.9-1.3,1.6-2,2.3c-2.4,2.2-1.8,0.9-3.2,3.6c-1.1,2-2,4-3,6.1 - c-0.5,1.1-0.9,2.2-1.1,3.3c-0.7,4.1-3.2,7.6-1.5,11.2c3,0.6,6.3,0.5,8.6,2c2.2,1.5,3.5,4.5,5,6.7c-3.1,0.5-5.9,1.2-8.7,1.4 - c-3.8,0.3-7.6,0.2-11.3,0.2c-5,0-10.1-0.1-15.1-0.1c-2.6,0-3.9-1.5-5.4-3.7c-2.1-3.1-1.1-6-0.8-9.1c0.1-0.8,0-3.3-0.1-4.2 - c-0.1-0.9-0.1-1.9,0-2.9c0.2-1.3,0.8-2.6,0.9-3.9c0.1-1.5-0.4-3-0.4-4.5c0-1.5,0.1-3.1,0.5-4.6c0.7-2.7-0.1,0,0.7-2.7 - c0.1-0.2,0-0.7,0-0.8c-0.9-3.6,1.8-6,2.8-8.8c0-0.1,0-0.1-0.1-0.5c-1.8,1.8-4.1,0.8-6.1,1.2c-2.9,0.6-5.7,2.1-8,3 - c-1.4-0.1-2.5-0.4-3.5-0.2c-2,0.5-3.9,1.1-6.2,0.9c-2.5-0.2-5.1,0.6-7.7,0.8c-2.2,0.2-4.8,0.9-6.5,0c-1.5-0.7-2.8-0.9-4.4-1 - c-1.6-0.1-2.4,0.7-2.6,2.1c-1.1,6.3-2.3,12.7-3.1,19.1c-0.4,3.3-0.2,6.6-0.2,9.9c0,1.5,0.6,2.5,1.9,3.5 - c1.5,1.1,2.6,2.7,3.6,4.3c0.8,1.3,0.6,2.6-1.5,2.7c-7.3,0.2-14.6,0.5-21.9,0.4c-2.1,0-4.2-1.5-6.2-2.5 - c-0.3-0.2-0.4-1.1-0.4-1.7c0-4.4,0-13.5,0-18.4c-1,0.6-1.3,0.8-1.6,1c-2.5,2.3-4.9,4.1-7.3,6.4c-1.9,1.8-1.6,3.3,0.2,5.4 - c2.4,2.7,4.4,5.7,4.4,9.5c0,2.5-2.2,3.2-3.8,3.3c-5.7,0.4-11.5,0.4-17.2,0.4c-2.8,0-3.8-1.5-4.4-4.2 - c-1.2-5.4-2.2-10.8-4.3-16.1c-1.6-4.1-2-8.9,1.5-13c5.1-5.9,9.5-12.3,12.8-19.5c1-2.2,1.4-3.8,0.4-6.1c-4.9-1-7.1-3.7-8.2-8.7 - c-1-4.6-0.2-8.9,1-13.2c2.3-7.8,4.1-11,8.4-18c5.6-9,13.4-15.5,22.8-20.2c11.3-5.6,23.3-5.5,35.3-4.2 - c16.2,1.6,32.4,3.6,48.6,5.3c1.3,0.1,2.9-0.2,4.1-0.8c7.7-3.9,15.5-4.2,23.6-1.4c5.6,1.9,11.4,3.6,17.1,5.2 - c2,0.6,4.1,0.8,6.2,1.1c5.7,0.9,11.5,1.8,17.3,2.4c2.9,0.3,5.9,0.1,8.8,0.3c0.7,0,1.5,0.3,2.1,0.7c2.6,1.8,5.1,3.7,7.5,5.6 - c1.6,1.2,3.2,2.3,4.5,3.8c0.6,0.7,0.7,1.9,0.9,2.9c0.3,1.1,0.3,2.6,0.9,3.4c2.6,3.1,5.3,6,8.1,8.9c0.9,1,1.1,1.7,0.3,2.9 - c-1.2,1.6-1.8,3.7-3.3,4.8c-3.1,2.2-6.3,4.3-10.7,3.2c-2.5-0.6-5.5,0.5-8.2,0.8c-2.1,0.3-4.3,0.2-6.2,0.9 - c-4.1,1.6-8.5,1.1-12.5,2.3c-1.5,0.4-2.8,1.2-4.3,1.6C179.2,54.8,178.3,54.5,177.2,54.3"/> - </g> - </g> - </g> - </g> -</g> -</svg> diff --git a/web/pandas/static/img/partners/voltron_data.svg 
b/web/pandas/static/img/partners/voltron_data.svg new file mode 100644 index 0000000000000..0fb7dfd850166 --- /dev/null +++ b/web/pandas/static/img/partners/voltron_data.svg @@ -0,0 +1,52 @@ +<?xml version="1.0" encoding="utf-8"?> +<!-- Generator: Adobe Illustrator 26.3.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) --> +<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" + viewBox="0 0 1887.7 876.4" style="enable-background:new 0 0 1887.7 876.4;" xml:space="preserve"> +<style type="text/css"> + .st0{fill:#005050;} +</style> +<g> + <g> + <g> + <path class="st0" d="M943.8,0L943.8,0H791.1H638.5l76.3,132.2l76.3,132.2l76.3,132.2l76.4,132.2l76.3-132.2l76.3-132.2 + l-76.3-132.2L943.8,0z M1000,385l-56.2,97.4L887.6,385L818,264.4l69.6-120.6l56.2-97.4l56.2,97.4l69.6,120.6L1000,385z"/> + <polygon class="st0" points="1082.5,0 1096,23.5 1208.4,23.5 1152.1,121.1 1165.6,144.6 1249.1,0 "/> + </g> + </g> + <g> + <g> + <path class="st0" d="M3.2,758.3c-1-2.1-0.3-3.6,1.7-3.6h14.7c1.2-0.2,2.5,0.1,3.5,0.7c1,0.7,1.8,1.7,2.2,2.8l38.2,78.4h0.3 + l38.3-78.4c0.4-1.2,1.2-2.2,2.2-2.8c1-0.7,2.3-0.9,3.5-0.7h14.7c2.1,0,2.9,1.5,1.7,3.6L66.8,873.9c-0.7,1.5-1.4,2-2.6,2h-1 + c-1.2,0-1.9-0.5-2.5-2L3.2,758.3z"/> + <path class="st0" d="M245.4,752.4c16.2,0.3,31.6,7,43,18.6c11.3,11.6,17.7,27.1,17.7,43.4c0,16.2-6.3,31.8-17.7,43.4 + c-11.3,11.6-26.8,18.3-43,18.6c-34.5,0-61.1-27.1-61.1-62C184.2,779.6,210.9,752.4,245.4,752.4z M245.4,858.4 + c24.9,0,41.7-20.6,41.7-44s-16.7-44.1-41.7-44.1c-24.9,0-41.5,20.5-41.5,44.1S220.6,858.5,245.4,858.4L245.4,858.4z"/> + <path class="st0" d="M454.2,856.8c2.9,0,3.4,0.7,3.4,3.4v10.4c0,2.9-0.7,3.6-3.4,3.6h-70c-2.6,0-3.4-0.9-3.4-3.6V758.3 + c0-2.7,0.8-3.6,3.4-3.6h12.6c2.5,0,3.4,0.9,3.4,3.6v98.5H454.2z"/> + <path class="st0" d="M593.9,754.7c2.9,0,3.6,0.9,3.6,3.6v10.9c0,2.9-0.9,3.6-3.6,3.6h-34.5v97.8c0,2.9-0.7,3.6-3.4,3.6h-12.6 + 
c-2.6,0-3.4-0.9-3.4-3.6v-97.8h-34.7c-2.5,0-3.4-0.9-3.4-3.6v-10.9c0-2.7,0.9-3.6,3.4-3.6H593.9z"/> + <path class="st0" d="M686,870.7c0,2.9-0.9,3.6-3.4,3.6H670c-2.6,0-3.4-0.9-3.4-3.6V758.3c0-2.7,0.8-3.6,3.4-3.6h44.6 + c20,0,42,9.9,42,37.5c0,21.9-13.8,32.6-29.6,36l28.5,41.5c1.9,2.9,0.7,4.5-2.6,4.5h-13.6c-1.2,0-2.4-0.4-3.5-1 + c-1-0.6-1.9-1.5-2.5-2.6l-28.4-41h-19.1L686,870.7z M712.7,811.8c18.4,0,24.2-8.6,24.2-19.8s-5.7-19.4-24.2-19.4h-26.6v39.3 + L712.7,811.8L712.7,811.8z"/> + <path class="st0" d="M885.7,752.4c16.2,0.3,31.6,7,43,18.6c11.3,11.6,17.7,27.1,17.7,43.4c0,16.2-6.3,31.8-17.7,43.4 + c-11.3,11.6-26.8,18.3-43,18.6c-34.5,0-61.1-27.1-61.1-62C824.5,779.6,851.2,752.4,885.7,752.4z M885.7,858.4 + c24.9,0,41.6-20.6,41.6-44s-16.7-44.1-41.6-44.1c-24.9,0-41.4,20.6-41.4,44.1C844.2,838.1,860.9,858.5,885.7,858.4L885.7,858.4z" + /> + <path class="st0" d="M1040.9,796.4h-0.3v74.2c0,2.9-0.9,3.6-3.4,3.6h-12.5c-2.9,0-3.6-0.9-3.6-3.6V754.7c0-1.2,0.5-1.7,1.9-1.7 + h0.9c0.6,0,1.1,0.1,1.6,0.4c0.5,0.3,0.9,0.7,1.2,1.1l76.1,78h0.3v-74.2c0-2.7,0.7-3.6,3.4-3.6h12.5c2.9,0,3.6,0.9,3.6,3.6v115.8 + c0,1.3-0.5,1.9-1.9,1.9h-0.9c-0.6,0-1.1-0.1-1.6-0.4c-0.5-0.3-0.9-0.6-1.2-1.1L1040.9,796.4z"/> + <path class="st0" d="M1335.7,754.7c34.5,0,60.3,24.9,60.3,59.8c0,34.8-25.7,59.8-60.3,59.8h-42.8c-2.6,0-3.4-0.9-3.4-3.6V758.3 + c0-2.7,0.8-3.6,3.4-3.6H1335.7z M1309.1,772.5v84h26.6c24.8,0,40.6-18.6,40.6-42s-15.9-42.1-40.6-42.1L1309.1,772.5z"/> + <path class="st0" d="M1577,870.7c1,2.1,0.3,3.6-1.7,3.6h-14.7c-1.2,0.1-2.4-0.1-3.5-0.8c-1-0.7-1.8-1.6-2.2-2.8l-10.9-22.5h-55 + l-10.9,22.5c-0.4,1.2-1.1,2.2-2.2,2.9c-1,0.7-2.3,0.9-3.5,0.7h-14.7c-2.1,0-2.9-1.5-1.7-3.6l57.4-115.6c0.7-1.5,1.4-2.1,2.6-2.1 + h1c1.2,0,1.9,0.5,2.6,2.1L1577,870.7z M1516.6,790.1l-19.8,40.1h39.6l-19.6-40.1H1516.6z"/> + <path class="st0" d="M1714.4,754.7c2.9,0,3.6,0.9,3.6,3.6v10.9c0,2.9-0.9,3.6-3.6,3.6h-34.5v97.8c0,2.9-0.7,3.6-3.4,3.6h-12.7 + c-2.5,0-3.4-0.9-3.4-3.6v-97.8h-34.7c-2.6,0-3.4-0.9-3.4-3.6v-10.9c0-2.7,0.9-3.6,3.4-3.6H1714.4z"/> + <path 
class="st0" d="M1884.4,870.7c1,2.1,0.3,3.6-1.7,3.6H1868c-1.2,0.1-2.4-0.1-3.5-0.8c-1-0.7-1.8-1.6-2.2-2.8l-10.9-22.5 + h-54.9l-10.9,22.5c-0.3,1.2-1.1,2.2-2.2,2.9s-2.3,0.9-3.5,0.7h-14.7c-2.1,0-2.9-1.5-1.7-3.6l57.4-115.6c0.7-1.5,1.4-2.1,2.6-2.1 + h1c1.2,0,1.9,0.5,2.6,2.1L1884.4,870.7z M1823.9,790.1l-19.8,40.1h39.6l-19.6-40.1H1823.9z"/> + </g> + </g> +</g> +</svg>
Still waiting for the logos of the new sponsors, and final confirmation that we want to replace Ursa Labs by Voltron, but opening early in case there is feedback.
https://api.github.com/repos/pandas-dev/pandas/pulls/47678
2022-07-12T09:42:04Z
2022-07-24T09:24:51Z
2022-07-24T09:24:51Z
2022-07-24T09:24:52Z
TYP: make na_value consistently a property
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 083acf16ec758..c9abef226770c 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -90,8 +90,11 @@ class StringDtype(StorageExtensionDtype): name = "string" - #: StringDtype.na_value uses pandas.NA - na_value = libmissing.NA + #: StringDtype().na_value uses pandas.NA + @property + def na_value(self) -> libmissing.NAType: + return libmissing.NA + _metadata = ("storage",) def __init__(self, storage=None) -> None: @@ -335,13 +338,11 @@ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False): na_values = scalars._mask result = scalars._data result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) - result[na_values] = StringDtype.na_value + result[na_values] = libmissing.NA else: - # convert non-na-likes to str, and nan-likes to StringDtype.na_value - result = lib.ensure_string_array( - scalars, na_value=StringDtype.na_value, copy=copy - ) + # convert non-na-likes to str, and nan-likes to StringDtype().na_value + result = lib.ensure_string_array(scalars, na_value=libmissing.NA, copy=copy) # Manually creating new array avoids the validation step in the __init__, so is # faster. Refactor need for validation? @@ -396,7 +397,7 @@ def __setitem__(self, key, value): # validate new items if scalar_value: if isna(value): - value = StringDtype.na_value + value = libmissing.NA elif not isinstance(value, str): raise ValueError( f"Cannot set non-string value '{value}' into a StringArray." 
@@ -497,7 +498,7 @@ def _cmp_method(self, other, op): if op.__name__ in ops.ARITHMETIC_BINOPS: result = np.empty_like(self._ndarray, dtype="object") - result[mask] = StringDtype.na_value + result[mask] = libmissing.NA result[valid] = op(self._ndarray[valid], other) return StringArray(result) else: @@ -512,7 +513,7 @@ def _cmp_method(self, other, op): # String methods interface # error: Incompatible types in assignment (expression has type "NAType", # base class "PandasArray" defined the type as "float") - _str_na_value = StringDtype.na_value # type: ignore[assignment] + _str_na_value = libmissing.NA # type: ignore[assignment] def _str_map( self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 3e3df5a3200c1..bb2fefabd6ae5 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -242,8 +242,9 @@ def astype(self, dtype, copy: bool = True): # ------------------------------------------------------------------------ # String methods interface - # error: Cannot determine type of 'na_value' - _str_na_value = StringDtype.na_value # type: ignore[has-type] + # error: Incompatible types in assignment (expression has type "NAType", + # base class "ObjectStringArrayMixin" defined the type as "float") + _str_na_value = libmissing.NA # type: ignore[assignment] def _str_map( self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 9683c1dd93645..99b2082d409a9 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -676,11 +676,14 @@ class DatetimeTZDtype(PandasExtensionDtype): kind: str_type = "M" num = 101 base = np.dtype("M8[ns]") # TODO: depend on reso? 
- na_value = NaT _metadata = ("unit", "tz") _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} + @property + def na_value(self) -> NaTType: + return NaT + @cache_readonly def str(self): return f"|M8[{self._unit}]" @@ -1450,7 +1453,9 @@ class BaseMaskedDtype(ExtensionDtype): base = None type: type - na_value = libmissing.NA + @property + def na_value(self) -> libmissing.NAType: + return libmissing.NA @cache_readonly def numpy_dtype(self) -> np.dtype: diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 56fcec751749b..5731d476cef10 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -409,7 +409,10 @@ class Float64Index(NumericIndex): __doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args _typ = "float64index" - _engine_type = libindex.Float64Engine _default_dtype = np.dtype(np.float64) _dtype_validation_metadata = (is_float_dtype, "float") _is_backward_compat_public_numeric_index: bool = False + + @property + def _engine_type(self) -> type[libindex.Float64Engine]: + return libindex.Float64Engine
Similar to `_engine_type`, `na_value` is sometimes a property and sometimes a class variable. There were multiple places that access `StringDtype.na_value` (works with class variables but not properties). I replaced these cases with the value of `StringDtype().na_value`. (and one `_engine_type` class variable I forgot in another PR)
https://api.github.com/repos/pandas-dev/pandas/pulls/47676
2022-07-12T01:21:53Z
2022-07-12T17:21:45Z
2022-07-12T17:21:45Z
2022-09-10T01:39:07Z
ENH: TDA+datetime_scalar support non-nano
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index a2251c49a2cc5..eadf47b36d7fc 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -73,7 +73,6 @@ from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( - DT64NS_DTYPE, is_all_strings, is_categorical_dtype, is_datetime64_any_dtype, @@ -1103,6 +1102,7 @@ def _add_datetimelike_scalar(self, other) -> DatetimeArray: self = cast("TimedeltaArray", self) from pandas.core.arrays import DatetimeArray + from pandas.core.arrays.datetimes import tz_to_dtype assert other is not NaT other = Timestamp(other) @@ -1113,10 +1113,17 @@ def _add_datetimelike_scalar(self, other) -> DatetimeArray: # Preserve our resolution return DatetimeArray._simple_new(result, dtype=result.dtype) + if self._reso != other._reso: + raise NotImplementedError( + "Addition between TimedeltaArray and Timestamp with mis-matched " + "resolutions is not yet supported." + ) + i8 = self.asi8 result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan) - dtype = DatetimeTZDtype(tz=other.tz) if other.tz else DT64NS_DTYPE - return DatetimeArray(result, dtype=dtype, freq=self.freq) + dtype = tz_to_dtype(tz=other.tz, unit=self._unit) + res_values = result.view(f"M8[{self._unit}]") + return DatetimeArray._simple_new(res_values, dtype=dtype, freq=self.freq) @final def _add_datetime_arraylike(self, other) -> DatetimeArray: diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index c9f5946c30c8c..106afcc3c12ea 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -91,22 +91,23 @@ _midnight = time(0, 0) -def tz_to_dtype(tz): +def tz_to_dtype(tz: tzinfo | None, unit: str = "ns"): """ Return a datetime64[ns] dtype appropriate for the given timezone. 
Parameters ---------- tz : tzinfo or None + unit : str, default "ns" Returns ------- np.dtype or Datetime64TZDType """ if tz is None: - return DT64NS_DTYPE + return np.dtype(f"M8[{unit}]") else: - return DatetimeTZDtype(tz=tz) + return DatetimeTZDtype(tz=tz, unit=unit) def _field_accessor(name: str, field: str, docstring=None): @@ -800,7 +801,7 @@ def tz_convert(self, tz) -> DatetimeArray: ) # No conversion since timestamps are all UTC to begin with - dtype = tz_to_dtype(tz) + dtype = tz_to_dtype(tz, unit=self._unit) return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq) @dtl.ravel_compat @@ -965,10 +966,14 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArr # Convert to UTC new_dates = tzconversion.tz_localize_to_utc( - self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent + self.asi8, + tz, + ambiguous=ambiguous, + nonexistent=nonexistent, + reso=self._reso, ) - new_dates = new_dates.view(DT64NS_DTYPE) - dtype = tz_to_dtype(tz) + new_dates = new_dates.view(f"M8[{self._unit}]") + dtype = tz_to_dtype(tz, unit=self._unit) freq = None if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])): diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index abc27469a5428..b3b79bd988ad8 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -92,6 +92,34 @@ def test_add_pdnat(self, tda): assert result._reso == tda._reso assert result.isna().all() + # TODO: 2022-07-11 this is the only test that gets to DTA.tz_convert + # or tz_localize with non-nano; implement tests specific to that. 
+ def test_add_datetimelike_scalar(self, tda, tz_naive_fixture): + ts = pd.Timestamp("2016-01-01", tz=tz_naive_fixture) + + msg = "with mis-matched resolutions" + with pytest.raises(NotImplementedError, match=msg): + # mismatched reso -> check that we don't give an incorrect result + tda + ts + with pytest.raises(NotImplementedError, match=msg): + # mismatched reso -> check that we don't give an incorrect result + ts + tda + + ts = ts._as_unit(tda._unit) + + exp_values = tda._ndarray + ts.asm8 + expected = ( + DatetimeArray._simple_new(exp_values, dtype=exp_values.dtype) + .tz_localize("UTC") + .tz_convert(ts.tz) + ) + + result = tda + ts + tm.assert_extension_array_equal(result, expected) + + result = ts + tda + tm.assert_extension_array_equal(result, expected) + def test_mul_scalar(self, tda): other = 2 result = tda * other
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47675
2022-07-11T23:31:51Z
2022-07-12T01:49:14Z
2022-07-12T01:49:14Z
2022-07-12T16:05:08Z
ENH: Move database error to error/__init__.py per GH27656
diff --git a/doc/source/reference/testing.rst b/doc/source/reference/testing.rst index e617712aa8f5e..338dd87aa8c62 100644 --- a/doc/source/reference/testing.rst +++ b/doc/source/reference/testing.rst @@ -29,6 +29,7 @@ Exceptions and warnings errors.AttributeConflictWarning errors.ClosedFileError errors.CSSWarning + errors.DatabaseError errors.DataError errors.DtypeWarning errors.DuplicateLabelError diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index 0e0409ccb0932..08ee5650e97a6 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -456,12 +456,26 @@ class AttributeConflictWarning(Warning): """ +class DatabaseError(OSError): + """ + Error is raised when executing sql with bad syntax or sql that throws an error. + + Examples + -------- + >>> from sqlite3 import connect + >>> conn = connect(':memory:') + >>> pd.read_sql('select * test', conn) # doctest: +SKIP + ... # DatabaseError: Execution failed on sql 'test': near "test": syntax error + """ + + __all__ = [ "AbstractMethodError", "AccessorRegistrationWarning", "AttributeConflictWarning", "ClosedFileError", "CSSWarning", + "DatabaseError", "DataError", "DtypeWarning", "DuplicateLabelError", diff --git a/pandas/io/sql.py b/pandas/io/sql.py index e4111f24ed295..f591e7b8676f6 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -31,7 +31,10 @@ DtypeArg, ) from pandas.compat._optional import import_optional_dependency -from pandas.errors import AbstractMethodError +from pandas.errors import ( + AbstractMethodError, + DatabaseError, +) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( @@ -56,10 +59,6 @@ from sqlalchemy import Table -class DatabaseError(OSError): - pass - - # ----------------------------------------------------------------------------- # -- Helper functions diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py index f003e1d07bca6..187d5399f5985 100644 --- a/pandas/tests/test_errors.py +++ 
b/pandas/tests/test_errors.py @@ -34,6 +34,7 @@ "PossibleDataLossError", "IncompatibilityWarning", "AttributeConflictWarning", + "DatabaseError", ], ) def test_exception_importable(exc):
- [x] xref #27656. this GitHub issue is being done in multiple parts - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/47674
2022-07-11T20:35:47Z
2022-07-14T16:35:35Z
2022-07-14T16:35:35Z
2022-08-03T04:21:56Z
Bug fix using GroupBy.resample produces inconsistent behavior when calling it over empty df #47705
diff --git a/doc/source/whatsnew/v1.6.0.rst b/doc/source/whatsnew/v1.6.0.rst index 098750aa3a2b2..6e435c8eaaf2f 100644 --- a/doc/source/whatsnew/v1.6.0.rst +++ b/doc/source/whatsnew/v1.6.0.rst @@ -244,6 +244,7 @@ Groupby/resample/rolling - Bug in :class:`.ExponentialMovingWindow` with ``online`` not raising a ``NotImplementedError`` for unsupported operations (:issue:`48834`) - Bug in :meth:`DataFrameGroupBy.sample` raises ``ValueError`` when the object is empty (:issue:`48459`) - Bug in :meth:`Series.groupby` raises ``ValueError`` when an entry of the index is equal to the name of the index (:issue:`48567`) +- Bug in :meth:`DataFrameGroupBy.resample` produces inconsistent results when passing empty DataFrame (:issue:`47705`) - Reshaping diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 0cf1aafed0f56..574c2e5e0f552 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -522,11 +522,21 @@ def _wrap_result(self, result): """ Potentially wrap any results. """ + # GH 47705 + obj = self.obj + if ( + isinstance(result, ABCDataFrame) + and result.empty + and not isinstance(result.index, PeriodIndex) + ): + result = result.set_index( + _asfreq_compat(obj.index[:0], freq=self.freq), append=True + ) + if isinstance(result, ABCSeries) and self._selection is not None: result.name = self._selection if isinstance(result, ABCSeries) and result.empty: - obj = self.obj # When index is all NaT, result is empty but index is not result.index = _asfreq_compat(obj.index[:0], freq=self.freq) result.name = getattr(obj, "name", None) diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index ceb9d6e2fda4d..7fe1e645aa141 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -435,7 +435,11 @@ def test_empty(keys): # GH 26411 df = DataFrame([], columns=["a", "b"], index=TimedeltaIndex([])) result = 
df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean() - expected = DataFrame(columns=["a", "b"]).set_index(keys, drop=False) + expected = ( + DataFrame(columns=["a", "b"]) + .set_index(keys, drop=False) + .set_index(TimedeltaIndex([]), append=True) + ) if len(keys) == 1: expected.index.name = keys[0] @@ -497,3 +501,19 @@ def test_groupby_resample_with_list_of_keys(): ), ) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("keys", [["a"], ["a", "b"]]) +def test_resample_empty_Dataframe(keys): + # GH 47705 + df = DataFrame([], columns=["a", "b", "date"]) + df["date"] = pd.to_datetime(df["date"]) + df = df.set_index("date") + result = df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean() + expected = DataFrame(columns=["a", "b", "date"]).set_index(keys, drop=False) + expected["date"] = pd.to_datetime(expected["date"]) + expected = expected.set_index("date", append=True, drop=True) + if len(keys) == 1: + expected.index.name = keys[0] + + tm.assert_frame_equal(result, expected)
- [X] closes #47705 - [X] Tests added and passed(https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47672
2022-07-11T19:16:15Z
2022-10-03T20:27:31Z
2022-10-03T20:27:31Z
2022-10-13T16:59:54Z
BUG #43767 GroupBy resambler fix
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index c70acc0a0b18c..1cb1156da379d 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -277,6 +277,7 @@ Other enhancements - Allow reading compressed SAS files with :func:`read_sas` (e.g., ``.sas7bdat.gz`` files) - :meth:`DatetimeIndex.astype` now supports casting timezone-naive indexes to ``datetime64[s]``, ``datetime64[ms]``, and ``datetime64[us]``, and timezone-aware indexes to the corresponding ``datetime64[unit, tzname]`` dtypes (:issue:`47579`) - :class:`Series` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) will now successfully operate when the dtype is numeric and ``numeric_only=True`` is provided; previously this would raise a ``NotImplementedError`` (:issue:`47500`) +- :meth:`DataFrame.compare` now accepts a ``suffixes`` to allow the user to specify the suffixes of both left and right DataFrame which are being compared. This is by default ``'self'`` and ``'other'`` (:issue:`44354`) - .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ead4ea744c647..ffa0b46896f98 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7776,6 +7776,14 @@ def __rdivmod__(self, other) -> tuple[DataFrame, DataFrame]: 0 a c NaN NaN 2 NaN NaN 3.0 4.0 +Assign suffixes + +>>> df.compare(df2, suffixes=("left", "right")) + col1 col3 + left right left right +0 a c NaN NaN +2 NaN NaN 3.0 4.0 + Stack the differences on rows >>> df.compare(df2, align_axis=0) @@ -7823,12 +7831,14 @@ def compare( align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, + suffixes: Suffixes = ("self", "other"), ) -> DataFrame: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, + suffixes=suffixes, ) def combine( diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ba3474a2513fb..327e0912ca291 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -58,6 +58,7 @@ Renamer, SortKind, StorageOptions, + Suffixes, T, TimedeltaConvertibleTypes, TimestampConvertibleTypes, @@ -8965,6 +8966,7 @@ def compare( align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, + suffixes: Suffixes = ("self", "other"), ): from pandas.core.reshape.concat import concat @@ -8975,7 +8977,6 @@ def compare( ) mask = ~((self == other) | (self.isna() & other.isna())) - keys = ["self", "other"] if not keep_equal: self = self.where(mask) @@ -8990,13 +8991,18 @@ def compare( else: self = self[mask] other = other[mask] + if not isinstance(suffixes, tuple): + raise TypeError( + f"Passing 'suffixes' as a {type(suffixes)}, is not " + "supported Provide 'suffixes' as a tuple instead." 
+ ) if align_axis in (1, "columns"): # This is needed for Series axis = 1 else: axis = self._get_axis_number(align_axis) - diff = concat([self, other], axis=axis, keys=keys) + diff = concat([self, other], axis=axis, keys=suffixes) if axis >= self.ndim: # No need to reorganize data if stacking on new axis diff --git a/pandas/core/series.py b/pandas/core/series.py index ef4ea0172c505..8116706963bc1 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -164,6 +164,7 @@ from pandas._typing import ( NumpySorter, NumpyValueArrayLike, + Suffixes, ) from pandas.core.frame import DataFrame @@ -3236,12 +3237,14 @@ def compare( align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, + suffixes: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, + suffixes=suffixes, ) def combine(self, other, func, fill_value=None) -> Series: diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 4b7a487e9472d..f5b3bff521f2e 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -75,6 +75,11 @@ keep_equal : bool, default False If true, the result keeps values that are equal. Otherwise, equal values are shown as NaNs. + +suffixes : tuple, default ('self', 'other') + Set the dataframes names in the comparison. + + .. 
versionadded:: 1.5.0 """ _shared_docs[ diff --git a/pandas/tests/frame/methods/test_compare.py b/pandas/tests/frame/methods/test_compare.py index 468811eba0d39..18106fa3c2496 100644 --- a/pandas/tests/frame/methods/test_compare.py +++ b/pandas/tests/frame/methods/test_compare.py @@ -180,3 +180,27 @@ def test_compare_unaligned_objects(): df1 = pd.DataFrame(np.ones((3, 3))) df2 = pd.DataFrame(np.zeros((2, 1))) df1.compare(df2) + + +def test_compare_suffixes(): + # 44354 + df1 = pd.DataFrame( + {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}, + ) + df2 = pd.DataFrame( + { + "col1": ["c", "b", "c"], + "col2": [1.0, 2.0, np.nan], + "col3": [1.0, 2.0, np.nan], + }, + ) + result = df1.compare(df2, suffixes=("left", "right")) + expected = pd.DataFrame( + { + ("col1", "left"): {0: "a", 2: np.nan}, + ("col1", "right"): {0: "c", 2: np.nan}, + ("col3", "left"): {0: np.nan, 2: 3.0}, + ("col3", "right"): {0: np.nan, 2: np.nan}, + } + ) + tm.assert_frame_equal(result, expected)
- [ ] closes #43767 - [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47671
2022-07-11T19:08:55Z
2022-07-11T19:10:41Z
null
2022-07-11T19:10:41Z
CI: Fix npdev build post Cython annotation change
diff --git a/pandas/_libs/arrays.pyx b/pandas/_libs/arrays.pyx index 8895a2bcfca89..f63d16e819c92 100644 --- a/pandas/_libs/arrays.pyx +++ b/pandas/_libs/arrays.pyx @@ -157,7 +157,7 @@ cdef class NDArrayBacked: return self._from_backing_data(res_values) # TODO: pass NPY_MAXDIMS equiv to axis=None? - def repeat(self, repeats, axis: int = 0): + def repeat(self, repeats, axis: int | np.integer = 0): if axis is None: axis = 0 res_values = cnp.PyArray_Repeat(self._ndarray, repeats, <int>axis) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index e187df6d6f627..3332628627739 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1643,7 +1643,7 @@ cdef class _Period(PeriodMixin): return freq @classmethod - def _from_ordinal(cls, ordinal: int, freq) -> "Period": + def _from_ordinal(cls, ordinal: int64_t, freq) -> "Period": """ Fast creation from an ordinal and freq that are already validated! """ diff --git a/pandas/tests/io/parser/test_quoting.py b/pandas/tests/io/parser/test_quoting.py index 456dd049d2f4a..a1aba949e74fe 100644 --- a/pandas/tests/io/parser/test_quoting.py +++ b/pandas/tests/io/parser/test_quoting.py @@ -38,7 +38,7 @@ def test_bad_quote_char(all_parsers, kwargs, msg): @pytest.mark.parametrize( "quoting,msg", [ - ("foo", '"quoting" must be an integer'), + ("foo", '"quoting" must be an integer|Argument'), (5, 'bad "quoting" value'), # quoting must be in the range [0, 3] ], )
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). Appears the new build failure was due to https://github.com/cython/cython/issues/4885 Major changes: 1. `Period._from_ordinal` is annotated `def _from_ordinal(oridinal: int, freq)` which now strictly only accepts Python ints. Need to change to `def _from_ordinal(oridinal: int64_t, freq)`
https://api.github.com/repos/pandas-dev/pandas/pulls/47670
2022-07-11T18:33:03Z
2022-07-13T20:58:30Z
2022-07-13T20:58:30Z
2022-07-13T20:58:45Z
ENH: Add reversed to Series so that it is also a Sequence
diff --git a/pandas/core/base.py b/pandas/core/base.py index 1fa840bcbd51f..be8530d3633c4 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -757,6 +757,25 @@ def __iter__(self): else: return map(self._values.item, range(self._values.size)) + def __reversed__(self): + """ + Return an iterator of the values in reverse order. + + These are each a scalar type, which is a Python scalar + (for str, int, float) or a pandas scalar + (for Timestamp/Timedelta/Interval/Period) + + Returns + ------- + iterator + """ + # We are explicitly making element iterators. + if not isinstance(self._values, np.ndarray): + # Check type instead of dtype to catch DTA/TDA + return reversed(self._values) + else: + return map(self._values.item, range(self._values.size - 1, -1, -1)) + @cache_readonly def hasnans(self) -> bool: """
Add Reversed so that it fulfills all properties of Sequence - [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47669
2022-07-11T18:13:14Z
2022-07-12T07:31:52Z
null
2022-07-12T07:31:52Z
ENH: TDA.__mul__ support non-nano
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index fbd27aa026a37..5f227cb45a65b 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -398,7 +398,7 @@ def __mul__(self, other) -> TimedeltaArray: freq = None if self.freq is not None and not isna(other): freq = self.freq * other - return type(self)(result, freq=freq) + return type(self)._simple_new(result, dtype=result.dtype, freq=freq) if not hasattr(other, "dtype"): # list, tuple @@ -412,13 +412,14 @@ def __mul__(self, other) -> TimedeltaArray: # this multiplication will succeed only if all elements of other # are int or float scalars, so we will end up with # timedelta64[ns]-dtyped result - result = [self[n] * other[n] for n in range(len(self))] + arr = self._ndarray + result = [arr[n] * other[n] for n in range(len(self))] result = np.array(result) - return type(self)(result) + return type(self)._simple_new(result, dtype=result.dtype) # numpy will accept float or int dtype, raise TypeError for others result = self._ndarray * other - return type(self)(result) + return type(self)._simple_new(result, dtype=result.dtype) __rmul__ = __mul__ @@ -446,7 +447,8 @@ def __truediv__(self, other): if self.freq is not None: # Tick division is not implemented, so operate on Timedelta freq = self.freq.delta / other - return type(self)(result, freq=freq) + freq = to_offset(freq) + return type(self)._simple_new(result, dtype=result.dtype, freq=freq) if not hasattr(other, "dtype"): # e.g. 
list, tuple @@ -462,6 +464,7 @@ def __truediv__(self, other): elif is_object_dtype(other.dtype): # We operate on raveled arrays to avoid problems in inference # on NaT + # TODO: tests with non-nano srav = self.ravel() orav = other.ravel() result_list = [srav[n] / orav[n] for n in range(len(srav))] @@ -488,7 +491,7 @@ def __truediv__(self, other): else: result = self._ndarray / other - return type(self)(result) + return type(self)._simple_new(result, dtype=result.dtype) @unpack_zerodim_and_defer("__rtruediv__") def __rtruediv__(self, other): diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index 36acb8f0fe389..abc27469a5428 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -1,3 +1,5 @@ +from datetime import timedelta + import numpy as np import pytest @@ -90,6 +92,53 @@ def test_add_pdnat(self, tda): assert result._reso == tda._reso assert result.isna().all() + def test_mul_scalar(self, tda): + other = 2 + result = tda * other + expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._reso == tda._reso + + def test_mul_listlike(self, tda): + other = np.arange(len(tda)) + result = tda * other + expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._reso == tda._reso + + def test_mul_listlike_object(self, tda): + other = np.arange(len(tda)) + result = tda * other.astype(object) + expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._reso == tda._reso + + def test_div_numeric_scalar(self, tda): + other = 2 + result = tda / other + expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._reso == tda._reso + + def 
test_div_td_scalar(self, tda): + other = timedelta(seconds=1) + result = tda / other + expected = tda._ndarray / np.timedelta64(1, "s") + tm.assert_numpy_array_equal(result, expected) + + def test_div_numeric_array(self, tda): + other = np.arange(len(tda)) + result = tda / other + expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._reso == tda._reso + + def test_div_td_array(self, tda): + other = tda._ndarray + tda._ndarray[-1] + result = tda / other + expected = tda._ndarray / other + tm.assert_numpy_array_equal(result, expected) + class TestTimedeltaArray: @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47668
2022-07-11T14:27:37Z
2022-07-11T22:36:14Z
2022-07-11T22:36:14Z
2022-07-11T23:29:01Z
Code review from #46759 : moved strftime benchmark file outside of tslibs dir
diff --git a/asv_bench/benchmarks/tslibs/strftime.py b/asv_bench/benchmarks/strftime.py similarity index 100% rename from asv_bench/benchmarks/tslibs/strftime.py rename to asv_bench/benchmarks/strftime.py
Minor : suggested by https://github.com/pandas-dev/pandas/pull/46759#discussion_r915263401
https://api.github.com/repos/pandas-dev/pandas/pulls/47665
2022-07-11T07:53:59Z
2022-07-11T17:02:04Z
2022-07-11T17:02:04Z
2022-07-11T17:02:11Z
TYP: make _engine_type consistently a property
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c8123f90ab3a3..06025c730700f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -93,7 +93,7 @@ repos: types: [python] stages: [manual] additional_dependencies: &pyright_dependencies - - pyright@1.1.253 + - pyright@1.1.258 - repo: local hooks: - id: pyright_reportGeneralTypeIssues diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index fc5fcaeab7d2a..58b4d82bcbe5f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -404,9 +404,12 @@ def _outer_indexer( # associated code in pandas 2.0. _is_backward_compat_public_numeric_index: bool = False - _engine_type: type[libindex.IndexEngine] | type[ - libindex.ExtensionEngine - ] = libindex.ObjectEngine + @property + def _engine_type( + self, + ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]: + return libindex.ObjectEngine + # whether we support partial string indexing. Overridden # in DatetimeIndex and PeriodIndex _supports_partial_string_indexing = False diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 9a70a4a1aa615..c1ae3cb1b16ea 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -192,7 +192,7 @@ def _should_fallback_to_positional(self) -> bool: _values: Categorical @property - def _engine_type(self): + def _engine_type(self) -> type[libindex.IndexEngine]: # self.codes can have dtype int8, int16, int32 or int64, so we need # to return the corresponding engine type (libindex.Int8Engine, etc.). 
return { diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6aa2ff91ba933..f776585926024 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -252,9 +252,12 @@ class DatetimeIndex(DatetimeTimedeltaMixin): _typ = "datetimeindex" _data_cls = DatetimeArray - _engine_type = libindex.DatetimeEngine _supports_partial_string_indexing = True + @property + def _engine_type(self) -> type[libindex.DatetimeEngine]: + return libindex.DatetimeEngine + _data: DatetimeArray inferred_freq: str | None tz: tzinfo | None diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index f270a6e8b555f..56fcec751749b 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -106,7 +106,7 @@ class NumericIndex(Index): } @property - def _engine_type(self): + def _engine_type(self) -> type[libindex.IndexEngine]: # error: Invalid index type "Union[dtype[Any], ExtensionDtype]" for # "Dict[dtype[Any], Type[IndexEngine]]"; expected type "dtype[Any]" return self._engine_types[self.dtype] # type: ignore[index] @@ -373,10 +373,13 @@ class Int64Index(IntegerIndex): __doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args _typ = "int64index" - _engine_type = libindex.Int64Engine _default_dtype = np.dtype(np.int64) _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer") + @property + def _engine_type(self) -> type[libindex.Int64Engine]: + return libindex.Int64Engine + class UInt64Index(IntegerIndex): _index_descr_args = { @@ -388,10 +391,13 @@ class UInt64Index(IntegerIndex): __doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args _typ = "uint64index" - _engine_type = libindex.UInt64Engine _default_dtype = np.dtype(np.uint64) _dtype_validation_metadata = (is_unsigned_integer_dtype, "unsigned integer") + @property + def _engine_type(self) -> type[libindex.UInt64Engine]: + return libindex.UInt64Engine + class Float64Index(NumericIndex): 
_index_descr_args = { diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index e3ab5e8624585..c034d9416eae7 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -159,9 +159,12 @@ class PeriodIndex(DatetimeIndexOpsMixin): dtype: PeriodDtype _data_cls = PeriodArray - _engine_type = libindex.PeriodEngine _supports_partial_string_indexing = True + @property + def _engine_type(self) -> type[libindex.PeriodEngine]: + return libindex.PeriodEngine + @cache_readonly # Signature of "_resolution_obj" incompatible with supertype "DatetimeIndexOpsMixin" def _resolution_obj(self) -> Resolution: # type: ignore[override] diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 12a995c7de99a..376c98b6e176f 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -104,11 +104,14 @@ class RangeIndex(NumericIndex): """ _typ = "rangeindex" - _engine_type = libindex.Int64Engine _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer") _range: range _is_backward_compat_public_numeric_index: bool = False + @property + def _engine_type(self) -> type[libindex.Int64Engine]: + return libindex.Int64Engine + # -------------------------------------------------------------------- # Constructors diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index cdf09bbc3b78c..095c5d1b1ba03 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -101,7 +101,10 @@ class TimedeltaIndex(DatetimeTimedeltaMixin): _typ = "timedeltaindex" _data_cls = TimedeltaArray - _engine_type = libindex.TimedeltaEngine + + @property + def _engine_type(self) -> type[libindex.TimedeltaEngine]: + return libindex.TimedeltaEngine _data: TimedeltaArray diff --git a/pyright_reportGeneralTypeIssues.json b/pyright_reportGeneralTypeIssues.json index 98da481a6d80f..c482aa32600fb 100644 --- a/pyright_reportGeneralTypeIssues.json +++ 
b/pyright_reportGeneralTypeIssues.json @@ -15,7 +15,6 @@ "pandas/io/clipboard", "pandas/util/version", # and all files that currently don't pass - "pandas/_config/config.py", "pandas/_testing/__init__.py", "pandas/core/algorithms.py", "pandas/core/apply.py", @@ -58,7 +57,6 @@ "pandas/core/indexes/multi.py", "pandas/core/indexes/numeric.py", "pandas/core/indexes/period.py", - "pandas/core/indexes/range.py", "pandas/core/indexing.py", "pandas/core/internals/api.py", "pandas/core/internals/array_manager.py", @@ -80,7 +78,6 @@ "pandas/core/tools/datetimes.py", "pandas/core/tools/timedeltas.py", "pandas/core/util/hashing.py", - "pandas/core/util/numba_.py", "pandas/core/window/ewm.py", "pandas/core/window/rolling.py", "pandas/io/common.py",
`_engine_type` was sometimes a property and sometimes a class variable.
https://api.github.com/repos/pandas-dev/pandas/pulls/47664
2022-07-11T01:34:19Z
2022-07-11T16:49:49Z
2022-07-11T16:49:49Z
2022-09-21T15:28:31Z
Enh move pytable errors and warnings
diff --git a/doc/source/reference/testing.rst b/doc/source/reference/testing.rst index 249c2c56cfe57..e617712aa8f5e 100644 --- a/doc/source/reference/testing.rst +++ b/doc/source/reference/testing.rst @@ -26,11 +26,14 @@ Exceptions and warnings errors.AbstractMethodError errors.AccessorRegistrationWarning + errors.AttributeConflictWarning + errors.ClosedFileError errors.CSSWarning errors.DataError errors.DtypeWarning errors.DuplicateLabelError errors.EmptyDataError + errors.IncompatibilityWarning errors.IndexingError errors.InvalidIndexError errors.IntCastingNaNError @@ -44,6 +47,7 @@ Exceptions and warnings errors.ParserError errors.ParserWarning errors.PerformanceWarning + errors.PossibleDataLossError errors.PyperclipException errors.PyperclipWindowsException errors.SettingWithCopyError diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index 47819ae5fad23..0e0409ccb0932 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -415,14 +415,58 @@ class CSSWarning(UserWarning): """ +class PossibleDataLossError(Exception): + """ + Exception is raised when trying to open a HDFStore file when the file is already + opened. + + Examples + -------- + >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP + >>> store.open("w") # doctest: +SKIP + ... # PossibleDataLossError: Re-opening the file [my-store] with mode [a]... + """ + + +class ClosedFileError(Exception): + """ + Exception is raised when trying to perform an operation on a closed HDFStore file. + + Examples + -------- + >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP + >>> store.close() # doctest: +SKIP + >>> store.keys() # doctest: +SKIP + ... # ClosedFileError: my-store file is not open! + """ + + +class IncompatibilityWarning(Warning): + """ + Warning is raised when trying to use where criteria on an incompatible + HDF5 file. 
+ """ + + +class AttributeConflictWarning(Warning): + """ + Warning is raised when attempting to append an index with a different + name than the existing index on an HDFStore or attempting to append an index with a + different frequency than the existing index on an HDFStore. + """ + + __all__ = [ "AbstractMethodError", "AccessorRegistrationWarning", + "AttributeConflictWarning", + "ClosedFileError", "CSSWarning", "DataError", "DtypeWarning", "DuplicateLabelError", "EmptyDataError", + "IncompatibilityWarning", "IntCastingNaNError", "InvalidIndexError", "IndexingError", @@ -436,6 +480,7 @@ class CSSWarning(UserWarning): "ParserError", "ParserWarning", "PerformanceWarning", + "PossibleDataLossError", "PyperclipException", "PyperclipWindowsException", "SettingWithCopyError", diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index b96fa4a57f188..52a2883e70f93 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -48,7 +48,13 @@ ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle -from pandas.errors import PerformanceWarning +from pandas.errors import ( + AttributeConflictWarning, + ClosedFileError, + IncompatibilityWarning, + PerformanceWarning, + PossibleDataLossError, +) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level @@ -169,43 +175,17 @@ def _ensure_term(where, scope_level: int): return where if where is None or len(where) else None -class PossibleDataLossError(Exception): - pass - - -class ClosedFileError(Exception): - pass - - -class IncompatibilityWarning(Warning): - pass - - incompatibility_doc = """ where criteria is being ignored as this version [%s] is too old (or not-defined), read the file in and write it out to a new file to upgrade (with the copy_to method) """ - -class AttributeConflictWarning(Warning): - pass - - attribute_conflict_doc = """ the [%s] attribute of the existing index is [%s] which conflicts with 
the new [%s], resetting the attribute to None """ - -class DuplicateWarning(Warning): - pass - - -duplicate_doc = """ -duplicate entries in table, taking most recently appended -""" - performance_doc = """ your performance may suffer as PyTables will pickle object types that it cannot map directly to c-types [inferred_type->%s,key->%s] [items->%s] @@ -3550,7 +3530,7 @@ def get_attrs(self) -> None: def validate_version(self, where=None) -> None: """are we trying to operate on an old version?""" if where is not None: - if self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1: + if self.is_old_version: ws = incompatibility_doc % ".".join([str(x) for x in self.version]) warnings.warn(ws, IncompatibilityWarning) diff --git a/pandas/tests/io/pytables/test_file_handling.py b/pandas/tests/io/pytables/test_file_handling.py index 9fde65e3a1a43..13b6b94dda8d4 100644 --- a/pandas/tests/io/pytables/test_file_handling.py +++ b/pandas/tests/io/pytables/test_file_handling.py @@ -4,6 +4,10 @@ import pytest from pandas.compat import is_platform_little_endian +from pandas.errors import ( + ClosedFileError, + PossibleDataLossError, +) from pandas import ( DataFrame, @@ -20,11 +24,7 @@ ) from pandas.io import pytables as pytables -from pandas.io.pytables import ( - ClosedFileError, - PossibleDataLossError, - Term, -) +from pandas.io.pytables import Term pytestmark = pytest.mark.single_cpu diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index 3a6f699cce94e..e8f4e7ee92fc3 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -589,7 +589,6 @@ def test_store_series_name(setup_path): tm.assert_series_equal(recons, series) -@pytest.mark.filterwarnings("ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning") def test_overwrite_node(setup_path): with ensure_clean_store(setup_path) as store: diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py index 
177ff566e347a..f003e1d07bca6 100644 --- a/pandas/tests/test_errors.py +++ b/pandas/tests/test_errors.py @@ -30,6 +30,10 @@ "IndexingError", "PyperclipException", "CSSWarning", + "ClosedFileError", + "PossibleDataLossError", + "IncompatibilityWarning", + "AttributeConflictWarning", ], ) def test_exception_importable(exc):
- [x] xref #27656. this GitHub issue is being done in multiple parts - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). It looked like `DuplicateWarning` wasn't used anywhere, so I removed it. Also, the if condition for `validate_version` seemed duplicated. Since it inherits from `Fixed`, I replaced it with the already existing property
https://api.github.com/repos/pandas-dev/pandas/pulls/47662
2022-07-10T20:42:40Z
2022-07-10T23:06:44Z
2022-07-10T23:06:44Z
2022-07-10T23:06:53Z
DOC: Fixed CoC broken link
diff --git a/README.md b/README.md index fc3f988dc6809..aaf63ead9c416 100644 --- a/README.md +++ b/README.md @@ -169,4 +169,4 @@ Or maybe through using pandas you have an idea of your own or are looking for so Feel free to ask questions on the [mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata) or on [Gitter](https://gitter.im/pydata/pandas). -As contributors and maintainers to this project, you are expected to abide by pandas' code of conduct. More information can be found at: [Contributor Code of Conduct](https://github.com/pandas-dev/pandas/blob/main/.github/CODE_OF_CONDUCT.md) +As contributors and maintainers to this project, you are expected to abide by pandas' code of conduct. More information can be found at: [Contributor Code of Conduct](https://github.com/pandas-dev/.github/blob/master/CODE_OF_CONDUCT.md)
Link to Code of Conduct in README.md broke after moving markdown files to `.github` organization level repository (#47412). Redirected link to new location
https://api.github.com/repos/pandas-dev/pandas/pulls/47661
2022-07-10T19:28:28Z
2022-07-10T21:43:07Z
2022-07-10T21:43:07Z
2022-07-10T21:43:12Z
WEB: Add governance page to the website
diff --git a/web/pandas/about/governance.md b/web/pandas/about/governance.md new file mode 100644 index 0000000000000..56ca0a2aac3db --- /dev/null +++ b/web/pandas/about/governance.md @@ -0,0 +1,326 @@ +# Main Governance Document + +The official version of this document, along with a list of +individuals and institutions in the roles defined in the governance +section below, is contained in The Project Governance Repository at: + +[https://github.com/pydata/pandas-governance](https://github.com/pydata/pandas-governance) + +The Project +=========== + +The pandas Project (The Project) is an open source software project affiliated +with the 501(c)3 NumFOCUS Foundation. The goal of The Project is to develop open +source software for data ingest, data preparation, data analysis, and data +visualization for the Python programming language. The Software developed by +The Project is released under the BSD (or similar) open source license, +developed openly and hosted in public GitHub repositories under the [PyData +GitHub organization](https://github.com/pydata). Examples of Project Software +include the main pandas code repository, pandas-website, and the +pandas-datareader add-on library. + +Through its affiliation with NumFOCUS, The Project has the right to receive +tax-deductible donations in the United States of America. + +The Project is developed by a team of distributed developers, called +Contributors. Contributors are individuals who have contributed code, +documentation, designs or other work to one or more Project repositories. +Anyone can be a Contributor. Contributors can be affiliated with any legal +entity or none. Contributors participate in the project by submitting, +reviewing and discussing GitHub Pull Requests and Issues and participating in +open and public Project discussions on GitHub, mailing lists, and +elsewhere. The foundation of Project participation is openness and +transparency. 
+ +Here is a list of the current Contributors to the main pandas repository: + +[https://github.com/pydata/pandas/graphs/contributors](https://github.com/pydata/pandas/graphs/contributors) + +There are also many other Contributors listed in the logs of other repositories of +the pandas project. + +The Project Community consists of all Contributors and Users of the Project. +Contributors work on behalf of and are responsible to the larger Project +Community and we strive to keep the barrier between Contributors and Users as +low as possible. + +The Project is formally affiliated with the 501(c)3 NumFOCUS Foundation +([http://numfocus.org](http://numfocus.org)), which serves as its fiscal +sponsor, may hold project trademarks and other intellectual property, helps +manage project donations and acts as a parent legal entity. NumFOCUS is the +only legal entity that has a formal relationship with the project (see +Institutional Partners section below). + +Governance +========== + +This section describes the governance and leadership model of The Project. + +The foundations of Project governance are: + +- Openness & Transparency +- Active Contribution +- Institutional Neutrality + +Traditionally, Project leadership was provided by a BDFL (Wes McKinney) and +subset of Contributors, called the Core Team, whose active and consistent +contributions have been recognized by their receiving “commit rights” to the +Project GitHub repositories. In general all Project decisions are made through +consensus among the Core Team with input from the Community. The BDFL can, but +rarely chooses to, override the Core Team and make a final decision on a +matter. + +While this approach has served us well, as the Project grows and faces more +legal and financial decisions and interacts with other institutions, we see a +need for a more formal governance model. Moving forward The Project leadership +will consist of a BDFL and Core Team. 
We view this governance model as the +formalization of what we are already doing, rather than a change in direction. + +BDFL +---- + +The Project will have a BDFL (Benevolent Dictator for Life), who is currently +Wes McKinney. As Dictator, the BDFL has the authority to make all final +decisions for The Project. As Benevolent, the BDFL, in practice chooses to +defer that authority to the consensus of the community discussion channels and +the Core Team. It is expected, and in the past has been the case, that the BDFL +will only rarely assert his/her final authority. Because it is rarely used, we +refer to BDFL’s final authority as a “special” or “overriding” vote. When it +does occur, the BDFL override typically happens in situations where there is a +deadlock in the Core Team or if the Core Team ask the BDFL to make a decision +on a specific matter. To ensure the benevolence of the BDFL, The Project +encourages others to fork the project if they disagree with the overall +direction the BDFL is taking. The BDFL is chair of the Core Team (see below) +and may delegate his/her authority on a particular decision or set of decisions +to any other Core Team Member at his/her discretion. + +The BDFL can appoint his/her successor, but it is expected that the Core Team +would be consulted on this decision. If the BDFL is unable to appoint a +successor (e.g. due to death or illness), the Core Team will choose a successor +by voting with at least 2/3 of the Core Team members voting in favor of the +chosen successor. At least 80% of the Core Team must participate in the +vote. If no BDFL candidate receives 2/3 of the votes of the Core Team, the Core +Team members shall propose the BDFL candidates to the Main NumFOCUS board, who +will then make the final decision. + +Core Team +--------- + +The Project's Core Team will consist of Project Contributors who have produced +contributions that are substantial in quality and quantity, and sustained over +at least one year. 
The overall role of the Core Team is to ensure, through +working with the BDFL and taking input from the Community, the long-term +well-being of the project, both technically and as a community. + +During the everyday project activities, Core Team participate in all +discussions, code review and other project activities as peers with all other +Contributors and the Community. In these everyday activities, Core Team do not +have any special power or privilege through their membership on the Core +Team. However, it is expected that because of the quality and quantity of their +contributions and their expert knowledge of the Project Software that the Core +Team will provide useful guidance, both technical and in terms of project +direction, to potentially less experienced contributors. + +The Core Team and its Members play a special role in certain situations. +In particular, the Core Team may: + +- Make decisions about the overall scope, vision and direction of the + project. +- Make decisions about strategic collaborations with other organizations or + individuals. +- Make decisions about specific technical issues, features, bugs and pull + requests. They are the primary mechanism of guiding the code review process + and merging pull requests. +- Make decisions about the Services that are run by The Project and manage + those Services for the benefit of the Project and Community. +- Make decisions when regular community discussion doesn’t produce consensus + on an issue in a reasonable time frame. + +### Core Team membership + +To become eligible for being a Core Team Member an individual must be a Project +Contributor who has produced contributions that are substantial in quality and +quantity, and sustained over at least one year. Potential Core Team Members are +nominated by existing Core members and voted upon by the existing Core Team +after asking if the potential Member is interested and willing to serve in that +capacity. 
The Core Team will be initially formed from the set of existing +Contributors who have been granted commit rights as of late 2015. + +When considering potential Members, the Core Team will look at candidates with +a comprehensive view of their contributions. This will include but is not +limited to code, code review, infrastructure work, mailing list and chat +participation, community help/building, education and outreach, design work, +etc. We are deliberately not setting arbitrary quantitative metrics (like “100 +commits in this repo”) to avoid encouraging behavior that plays to the metrics +rather than the project’s overall well-being. We want to encourage a diverse +array of backgrounds, viewpoints and talents in our team, which is why we +explicitly do not define code as the sole metric on which Core Team membership +will be evaluated. + +If a Core Team member becomes inactive in the project for a period of one year, +they will be considered for removal from the Core Team. Before removal, +inactive Member will be approached by the BDFL to see if they plan on returning +to active participation. If not they will be removed immediately upon a Core +Team vote. If they plan on returning to active participation soon, they will be +given a grace period of one year. If they don’t return to active participation +within that time period they will be removed by vote of the Core Team without +further grace period. All former Core Team members can be considered for +membership again at any time in the future, like any other Project Contributor. +Retired Core Team members will be listed on the project website, acknowledging +the period during which they were active in the Core Team. + +The Core Team reserves the right to eject current Members, other than the BDFL, +if they are deemed to be actively harmful to the project’s well-being, and +attempts at communication and conflict resolution have failed. 
+ +### Conflict of interest + +It is expected that the BDFL and Core Team Members will be employed at a wide +range of companies, universities and non-profit organizations. Because of this, +it is possible that Members will have conflict of interests. Such conflict of +interests include, but are not limited to: + +- Financial interests, such as investments, employment or contracting work, + outside of The Project that may influence their work on The Project. +- Access to proprietary information of their employer that could potentially + leak into their work with the Project. + +All members of the Core Team, BDFL included, shall disclose to the rest of the +Core Team any conflict of interest they may have. Members with a conflict of +interest in a particular issue may participate in Core Team discussions on that +issue, but must recuse themselves from voting on the issue. If the BDFL has +recused his/herself for a particular decision, they will appoint a substitute +BDFL for that decision. + +### Private communications of the Core Team + +Unless specifically required, all Core Team discussions and activities will be +public and done in collaboration and discussion with the Project Contributors +and Community. The Core Team will have a private mailing list that will be used +sparingly and only when a specific matter requires privacy. When private +communications and decisions are needed, the Core Team will do its best to +summarize those to the Community after eliding personal/private/sensitive +information that should not be posted to the public internet. + +### Subcommittees + +The Core Team can create subcommittees that provide leadership and guidance for +specific aspects of the project. Like the Core Team as a whole, subcommittees +should conduct their business in an open and public manner unless privacy is +specifically called for. 
Private subcommittee communications should happen on +the main private mailing list of the Core Team unless specifically called for. + +Question: if the BDFL is not on a subcommittee, do they still have override +authority? + +Suggestion: they do, but they should appoint a delegate who plays that role +most of the time, and explicit BDFL intervention is sought only if the +committee disagrees with that delegate’s decision and no resolution is possible +within the team. This is different from a BDFL delegate for a specific decision +(or a recusal situation), where the BDFL is literally giving up his/her +authority to someone else in full. It’s more like what Linus Torvalds uses with his +“lieutenants” model. + +### NumFOCUS Subcommittee + +The Core Team will maintain one narrowly focused subcommittee to manage its +interactions with NumFOCUS. + +- The NumFOCUS Subcommittee is comprised of at least 5 persons who manage + project funding that comes through NumFOCUS. It is expected that these funds + will be spent in a manner that is consistent with the non-profit mission of + NumFOCUS and the direction of the Project as determined by the full Core + Team. +- This Subcommittee shall NOT make decisions about the direction, scope or + technical direction of the Project. +- This Subcommittee will have at least 5 members. No more than 2 Subcommitee + Members can report to one person (either directly or indirectly) through + employment or contracting work (including the reportee, i.e. the reportee + 1 + is the max). This avoids effective majorities resting on one person. + +Institutional Partners and Funding +================================== + +The BDFL and Core Team are the primary leadership for the project. No outside +institution, individual or legal entity has the ability to own, control, usurp +or influence the project other than by participating in the Project as +Contributors and Core Team. 
However, because institutions are the primary +funding mechanism for the project, it is important to formally acknowledge +institutional participation in the project. These are Institutional Partners. + +An Institutional Contributor is any individual Project Contributor who +contributes to the project as part of their official duties at an Institutional +Partner. Likewise, an Institutional Core Team Member is any Core Team Member +who contributes to the project as part of their official duties at an +Institutional Partner. + +With these definitions, an Institutional Partner is any recognized legal entity +in the United States or elsewhere that employs at least one Institutional +Contributor or Institutional Core Team Member. Institutional Partners can be +for-profit or non-profit entities. + +Institutions become eligible to become an Institutional Partner by employing +individuals who actively contribute to The Project as part of their official +duties. To state this another way, the only way for an Institutional Partner to +influence the project is by actively contributing to the open development of +the project, on equal terms with any other member of the community of +Contributors and Core Team Members. Merely using pandas Software or Services in +an institutional context does not allow an entity to become an Institutional +Partner. Financial gifts do not enable an entity to become an Institutional +Partner. Once an institution becomes eligible for Institutional Partnership, +the Core Team must nominate and approve the Partnership. + +If an existing Institutional Partner no longer has a contributing employee, +they will be given a one-year grace period for other employees to begin +contributing. + +An Institutional Partner is free to pursue funding for their work on The +Project through any legal means. 
This could involve a non-profit organization +raising money from private foundations and donors or a for-profit company +building proprietary products and services that leverage Project Software and +Services. Funding acquired by Institutional Partners to work on The Project is +called Institutional Funding. However, no funding obtained by an Institutional +Partner can override The Project BDFL and Core Team. If a Partner has funding +to do pandas work and the Core Team decides to not pursue that work as a +project, the Partner is free to pursue it on their own. However in this +situation, that part of the Partner’s work will not be under the pandas +umbrella and cannot use the Project trademarks in a way that suggests a formal +relationship. + +To acknowledge institutional contributions, there are two levels of +Institutional Partners, with associated benefits: + +**Tier 1** = an institution with at least one Institutional Core Team Member + +- Acknowledged on the pandas website, in talks and T-shirts. +- Ability to acknowledge their own funding sources on the pandas website, in + talks and T-shirts. +- Ability to influence the project through the participation of their Core Team + Member. + +**Tier 2** = an institution with at least one Institutional Contributor + +Breach +====== + +Non-compliance with the terms of the governance documents shall be reported to +the Core Team either through public or private channels as deemed appropriate. + +Changing the Governance Documents +================================= + +Changes to the governance documents are submitted via a GitHub pull request to +The Project's governance documents GitHub repository at +[https://github.com/pydata/pandas-governance](https://github.com/pydata/pandas-governance). +The pull request is then refined in response to public comment and review, with +the goal being consensus in the community. 
After this open period, a Core Team +Member proposes to the Core Team that the changes be ratified and the pull +request merged (accepting the proposed changes) or proposes that the pull +request be closed without merging (rejecting the proposed changes). The Member +should state the final commit hash in the pull request being proposed for +acceptance or rejection and briefly summarize the pull request. A minimum of +80% of the Core Team must vote and at least 2/3 of the votes must be positive +to carry out the proposed action (fractions of a vote rounded up to the nearest +integer). Since the BDFL holds ultimate authority in The Project, the BDFL has +authority to act alone in accepting or rejecting changes or overriding Core +Team decisions. diff --git a/web/pandas/about/team.md b/web/pandas/about/team.md index c8318dd8758ed..2982105616f47 100644 --- a/web/pandas/about/team.md +++ b/web/pandas/about/team.md @@ -42,7 +42,7 @@ If you want to support pandas development, you can find information in the [dona Wes McKinney is the Benevolent Dictator for Life (BDFL). -The project governance is available in the [project governance documents](https://github.com/pandas-dev/pandas-governance). +The project governance is available in the [project governance page](governance.html). ## Code of conduct committee diff --git a/web/pandas/community/blog/extension-arrays.md b/web/pandas/community/blog/extension-arrays.md index 61a77738a259c..80a187bb3fc3c 100644 --- a/web/pandas/community/blog/extension-arrays.md +++ b/web/pandas/community/blog/extension-arrays.md @@ -212,7 +212,7 @@ partners][partners] involved in the pandas community. 
[ml]: https://mail.python.org/mailman/listinfo/pandas-dev [twitter]: https://twitter.com/pandas_dev [tracker]: https://github.com/pandas-dev/pandas/issues -[partners]: https://github.com/pandas-dev/pandas-governance/blob/master/people.md +[partners]: https://pandas.pydata.org/about/sponsors.html [eco]: http://pandas.pydata.org/pandas-docs/stable/ecosystem.html#extension-data-types [whatsnew]: http://pandas.pydata.org/pandas-docs/version/0.24/whatsnew/v0.24.0.html [geopandas]: https://github.com/geopandas/geopandas diff --git a/web/pandas/community/blog/pandas-1.0.md b/web/pandas/community/blog/pandas-1.0.md index b07c34a4ab6b5..d190ed6e897b3 100644 --- a/web/pandas/community/blog/pandas-1.0.md +++ b/web/pandas/community/blog/pandas-1.0.md @@ -19,7 +19,7 @@ We're [working with those projects](https://datapythonista.me/blog/dataframe-sum ## Community and Project Health -This release cycle is the first to involve any kind of grant funding for pandas. [Pandas received funding](https://chanzuckerberg.com/eoss/proposals/) as part of the CZI’s [*Essential Open Source Software for Science*](https://medium.com/@cziscience/the-invisible-foundations-of-biomedicine-4ab7f8d4f5dd) [program](https://medium.com/@cziscience/the-invisible-foundations-of-biomedicine-4ab7f8d4f5dd). The pandas project relies overwhelmingly on volunteer contributors. These volunteer contributions are shepherded and augmented by some maintainers who are given time from their employers — our [institutional partners](https://github.com/pandas-dev/pandas-governance/blob/master/people.md#institutional-partners). The largest work item in our grant award was library maintenance, which specifically includes working with community members to address our large backlog of open issues and pull requests. +This release cycle is the first to involve any kind of grant funding for pandas. 
[Pandas received funding](https://chanzuckerberg.com/eoss/proposals/) as part of the CZI’s [*Essential Open Source Software for Science*](https://medium.com/@cziscience/the-invisible-foundations-of-biomedicine-4ab7f8d4f5dd) [program](https://medium.com/@cziscience/the-invisible-foundations-of-biomedicine-4ab7f8d4f5dd). The pandas project relies overwhelmingly on volunteer contributors. These volunteer contributions are shepherded and augmented by some maintainers who are given time from their employers — our [institutional partners](../about/sponsors.html). The largest work item in our grant award was library maintenance, which specifically includes working with community members to address our large backlog of open issues and pull requests. While a “1.0.0” version might seem arbitrary or anti-climactic (given that pandas as a codebase is nearly 12 years old), we see it as a symbolic milestone celebrating the growth of our core developer team and depth of our contributor base. Few open source projects are ever truly “done” and pandas is no different. We recognize the essential role that pandas now occupies, and we intend to continue to evolve the project and adapt to the needs of the world’s data wranglers. diff --git a/web/pandas/config.yml b/web/pandas/config.yml index 5bb0cbc7557f8..aeef826157b90 100644 --- a/web/pandas/config.yml +++ b/web/pandas/config.yml @@ -27,6 +27,8 @@ navbar: target: /about/ - name: "Project roadmap" target: /about/roadmap.html + - name: "Governance" + target: /about/governance.html - name: "Team" target: /about/team.html - name: "Sponsors"
I was having a look at the [pandas governance repo](https://github.com/pandas-dev/pandas-governance) and seems most things there are duplicated from the website. The list of core developers, the code of conduct, the sponsors, are all in the website, and in my opinion presented in a clearer and more accessible way. The only thing that is missing is the governance document itself, moving it we can remote that repo. I think it makes more sense to move it here, for few reasons: - To avoid duplication and inconsistencies, and having to maintain things in two different places - To have more visibility and easier to access by having it in our public web - We may want to update the governance document, I think it makes things easier to have the PRs with ammendments to the doc here with the rest of the PRs of the project, instead of in a separate repo For now I move the `governance.md` file without changes. I think the file can benefit from few changes (updating the title, links...), but I prefer to do it in follow up PRs, so there is visibility of what's being changed. Somehow related to this PR: @MarcoGorelli, I see in the governance repo we've got Gousto as a pandas sponsor, but it's not in the website (and the logo is not in the home page). Should it be added?
https://api.github.com/repos/pandas-dev/pandas/pulls/47660
2022-07-10T17:20:44Z
2022-07-11T16:54:28Z
2022-07-11T16:54:28Z
2022-07-11T16:54:36Z
ENH: GH19708 - Index now has diff and round methods
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index fc5fcaeab7d2a..941485a6e15eb 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6933,6 +6933,63 @@ def drop( indexer = indexer[~mask] return self.delete(indexer) + def round(self, decimals=0, *args, **kwargs) -> Index: + """ + Round each value in the index. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + decimals : int, default 0 + Number of decimal places to round to. If decimals is negative, + it specifies the number of positions to the left of the decimal point. + *args, kwargs + Additional arguments and keywords have no effect but might be + accepted for compatibility with NumPy. + + Returns + ------- + Index + Rounded values of the Index. + + See Also + -------- + numpy.around : Round values of an np.array. + Series.round : Round values of a Series. + DataFrame.round : Round values of a DataFrame. + + """ + + return Index(self.to_series().round(decimals, args, kwargs)) + + def diff(self, periods: int = 1) -> Index: + """ + First discrete difference of element. + + Calculates the difference of a Index element compared with another + element in the Index (default is element in previous row). + + .. versionadded:: 1.5.0 + + Parameters + ---------- + periods : int, default 1 + Periods to shift for calculating difference, accepts negative + values. + Returns + ------- + Index + First differences of the Index. + + See Also + -------- + Series.diff: First discrete difference for a Series. + DataFrame.diff: First discrete difference of object. 
+ + """ + return Index(self.to_series().diff(periods)) + # -------------------------------------------------------------------- # Generated Arithmetic, Comparison, and Unary Methods diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 5d7fc23feb5a8..b757adde1388b 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1223,6 +1223,26 @@ def test_sortlevel(self): result = index.sortlevel(ascending=False) tm.assert_index_equal(result[0], expected) + def test_diff(self): + # GH#19708 + index = Index([1, 2, 3, 4]) + + result = index.diff() + expected = Index([np.nan, 1, 1, 1]) + tm.assert_index_equal(result, expected) + + result = index.diff(periods=-1) + expected = Index([-1, -1, -1, np.nan]) + tm.assert_index_equal(result, expected) + + def test_round(self): + # GH#19708 + value = 1.2345987654321 + + result = Index([value]).round(2) + expected = Index([1.23]) + tm.assert_index_equal(result, expected) + class TestMixedIntIndex(Base): # Mostly the tests from common.py for which the results differ
- [X] closes #19708 - [ ] [Tests added and passed] - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [X] Added [type annotations]to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Was unsure how I should add the entry in the whatsnew file, whether in the "Other enhancements" section or if I should create a new separate enhancement entry w/ examples.
https://api.github.com/repos/pandas-dev/pandas/pulls/47658
2022-07-10T01:55:12Z
2022-08-22T18:04:20Z
null
2022-08-22T18:04:21Z
ENH - Add index parameter do df.to_dict()
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 7aa1c1e84aa09..e38b12757e37e 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -278,6 +278,7 @@ Other enhancements - :meth:`DatetimeIndex.astype` now supports casting timezone-naive indexes to ``datetime64[s]``, ``datetime64[ms]``, and ``datetime64[us]``, and timezone-aware indexes to the corresponding ``datetime64[unit, tzname]`` dtypes (:issue:`47579`) - :class:`Series` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) will now successfully operate when the dtype is numeric and ``numeric_only=True`` is provided; previously this would raise a ``NotImplementedError`` (:issue:`47500`) - :meth:`RangeIndex.union` now can return a :class:`RangeIndex` instead of a :class:`Int64Index` if the resulting values are equally spaced (:issue:`47557`, :issue:`43885`) +- :meth:`DataFrame.to_dict` now has a ``index`` parameter with default value True that can be set to False to exclude the index from the dictionary if orient is split or tight (:issue:`46398`) .. --------------------------------------------------------------------------- .. _whatsnew_150.notable_bug_fixes: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ead4ea744c647..c63636299be92 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1811,7 +1811,7 @@ def to_numpy( return result - def to_dict(self, orient: str = "dict", into=dict): + def to_dict(self, orient: str = "dict", into=dict, index=True): """ Convert the DataFrame to a dictionary. @@ -1847,6 +1847,12 @@ def to_dict(self, orient: str = "dict", into=dict): instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. + index : bool, default True + When set to False, method returns a dict without an index key + (and index_names if using orient='tight'). + + .. 
versionadded:: 1.5.0 + Returns ------- dict, list or collections.abc.Mapping @@ -1958,36 +1964,66 @@ def to_dict(self, orient: str = "dict", into=dict): ) elif orient == "split": - return into_c( - ( - ("index", self.index.tolist()), - ("columns", self.columns.tolist()), + + if not index: + return into_c( ( - "data", - [ - list(map(maybe_box_native, t)) - for t in self.itertuples(index=False, name=None) - ], - ), + ("columns", self.columns.tolist()), + ( + "data", + [ + list(map(maybe_box_native, t)) + for t in self.itertuples(index=False, name=None) + ], + ), + ) + ) + else: + return into_c( + ( + ("index", self.index.tolist()), + ("columns", self.columns.tolist()), + ( + "data", + [ + list(map(maybe_box_native, t)) + for t in self.itertuples(index=False, name=None) + ], + ), + ) ) - ) elif orient == "tight": - return into_c( - ( - ("index", self.index.tolist()), - ("columns", self.columns.tolist()), + if not index: + return into_c( + ( + ("columns", self.columns.tolist()), + ( + "data", + [ + list(map(maybe_box_native, t)) + for t in self.itertuples(index=False, name=None) + ], + ), + ("column_names", list(self.columns.names)), + ) + ) + else: + return into_c( ( - "data", - [ - list(map(maybe_box_native, t)) - for t in self.itertuples(index=False, name=None) - ], - ), - ("index_names", list(self.index.names)), - ("column_names", list(self.columns.names)), + ("index", self.index.tolist()), + ("columns", self.columns.tolist()), + ( + "data", + [ + list(map(maybe_box_native, t)) + for t in self.itertuples(index=False, name=None) + ], + ), + ("index_names", list(self.index.names)), + ("column_names", list(self.columns.names)), + ) ) - ) elif orient == "series": return into_c((k, v) for k, v in self.items()) diff --git a/pandas/tests/frame/methods/test_to_dict.py b/pandas/tests/frame/methods/test_to_dict.py index 6d5c32cae7368..b3a39c720fc4e 100644 --- a/pandas/tests/frame/methods/test_to_dict.py +++ b/pandas/tests/frame/methods/test_to_dict.py @@ -421,3 +421,24 @@ 
def test_to_dict_returns_native_types(self, orient, data, expected_types): for i, key, value in assertion_iterator: assert value == data[key][i] assert type(value) is expected_types[key][i] + + def test_to_dict_index_orient_split(self): + # GH#46398 + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["row1", "row2", "row3"]) + result = df.to_dict(orient="split", index=False) + expected = { + "columns": ["A", "B"], + "data": [[1, 4], [2, 5], [3, 6]], + } + tm.assert_dict_equal(result, expected) + + def test_to_dict_index_orient_tight(self): + # GH#46398 + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["row1", "row2", "row3"]) + result = df.to_dict(orient="tight", index=False) + expected = { + "columns": ["A", "B"], + "data": [[1, 4], [2, 5], [3, 6]], + "column_names": [None], + } + tm.assert_dict_equal(result, expected)
- [x] closes #46398 - [x] [Tests added and passed] - [x] All [code checks passed] - [x] Added [type annotations] - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file
https://api.github.com/repos/pandas-dev/pandas/pulls/47657
2022-07-10T00:27:11Z
2022-09-01T21:24:52Z
null
2022-09-01T21:24:52Z
SAS7BDAT parser: Improve subheader lookup performance
diff --git a/pandas/io/sas/_sas.pyi b/pandas/io/sas/_sas.pyi index 527193dd71e57..5d65e2b56b591 100644 --- a/pandas/io/sas/_sas.pyi +++ b/pandas/io/sas/_sas.pyi @@ -3,3 +3,5 @@ from pandas.io.sas.sas7bdat import SAS7BDATReader class Parser: def __init__(self, parser: SAS7BDATReader) -> None: ... def read(self, nrows: int) -> None: ... + +def get_subheader_index(signature: bytes) -> int: ... diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx index 8065859844b30..9406900b69998 100644 --- a/pandas/io/sas/sas.pyx +++ b/pandas/io/sas/sas.pyx @@ -6,6 +6,8 @@ from libc.stdint cimport ( int64_t, uint8_t, uint16_t, + uint32_t, + uint64_t, ) from libc.stdlib cimport ( calloc, @@ -17,6 +19,9 @@ import numpy as np import pandas.io.sas.sas_constants as const +cdef object np_nan = np.nan + + cdef struct Buffer: # Convenience wrapper for uint8_t data to allow fast and safe reads and writes. # We use this as a replacement for np.array(..., dtype=np.uint8) because it's @@ -53,9 +58,6 @@ cdef inline buf_free(Buffer buf): if buf.data != NULL: free(buf.data) - -cdef object np_nan = np.nan - # rle_decompress decompresses data using a Run Length Encoding # algorithm. It is partially documented here: # @@ -231,7 +233,7 @@ cdef enum ColumnTypes: column_type_string = 2 -# type the page_data types +# Const aliases assert len(const.page_meta_types) == 2 cdef: int page_meta_types_0 = const.page_meta_types[0] @@ -240,6 +242,53 @@ cdef: int page_data_type = const.page_data_type int subheader_pointers_offset = const.subheader_pointers_offset + # Copy of subheader_signature_to_index that allows for much faster lookups. + # Lookups are done in get_subheader_index. The C structures are initialized + # in _init_subheader_signatures(). 
+ uint32_t subheader_signatures_32bit[13] + int subheader_indices_32bit[13] + uint64_t subheader_signatures_64bit[17] + int subheader_indices_64bit[17] + int data_subheader_index = const.SASIndex.data_subheader_index + + +def _init_subheader_signatures(): + subheaders_32bit = [(sig, idx) for sig, idx in const.subheader_signature_to_index.items() if len(sig) == 4] + subheaders_64bit = [(sig, idx) for sig, idx in const.subheader_signature_to_index.items() if len(sig) == 8] + assert len(subheaders_32bit) == 13 + assert len(subheaders_64bit) == 17 + assert len(const.subheader_signature_to_index) == 13 + 17 + for i, (signature, idx) in enumerate(subheaders_32bit): + subheader_signatures_32bit[i] = (<uint32_t *><char *>signature)[0] + subheader_indices_32bit[i] = idx + for i, (signature, idx) in enumerate(subheaders_64bit): + subheader_signatures_64bit[i] = (<uint64_t *><char *>signature)[0] + subheader_indices_64bit[i] = idx + + +_init_subheader_signatures() + + +def get_subheader_index(bytes signature): + """Fast version of 'subheader_signature_to_index.get(signature)'.""" + cdef: + uint32_t sig32 + uint64_t sig64 + Py_ssize_t i + assert len(signature) in (4, 8) + if len(signature) == 4: + sig32 = (<uint32_t *><char *>signature)[0] + for i in range(len(subheader_signatures_32bit)): + if subheader_signatures_32bit[i] == sig32: + return subheader_indices_32bit[i] + else: + sig64 = (<uint64_t *><char *>signature)[0] + for i in range(len(subheader_signatures_64bit)): + if subheader_signatures_64bit[i] == sig64: + return subheader_indices_64bit[i] + + return data_subheader_index + cdef class Parser: @@ -355,7 +404,7 @@ cdef class Parser: cdef bint readline(self) except? 
True: cdef: - int offset, bit_offset, align_correction + int offset, length, bit_offset, align_correction int subheader_pointer_length, mn bint done, flag @@ -379,12 +428,10 @@ cdef class Parser: if done: return True continue - current_subheader_pointer = ( - self.parser._current_page_data_subheader_pointers[ - self.current_row_on_page_index]) - self.process_byte_array_with_data( - current_subheader_pointer.offset, - current_subheader_pointer.length) + offset, length = self.parser._current_page_data_subheader_pointers[ + self.current_row_on_page_index + ] + self.process_byte_array_with_data(offset, length) return False elif self.current_page_type == page_mix_type: align_correction = ( diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 91c5e6b227c35..27bd0378e374a 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -42,7 +42,10 @@ ) from pandas.io.common import get_handle -from pandas.io.sas._sas import Parser +from pandas.io.sas._sas import ( + Parser, + get_subheader_index, +) import pandas.io.sas.sas_constants as const from pandas.io.sas.sasreader import ReaderBase @@ -87,19 +90,6 @@ def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series: return s_series -class _SubheaderPointer: - offset: int - length: int - compression: int - ptype: int - - def __init__(self, offset: int, length: int, compression: int, ptype: int) -> None: - self.offset = offset - self.length = length - self.compression = compression - self.ptype = ptype - - class _Column: col_id: int name: str | bytes @@ -189,7 +179,7 @@ def __init__( self.column_formats: list[str | bytes] = [] self.columns: list[_Column] = [] - self._current_page_data_subheader_pointers: list[_SubheaderPointer] = [] + self._current_page_data_subheader_pointers: list[tuple[int, int]] = [] self._cached_page = None self._column_data_lengths: list[int] = [] self._column_data_offsets: list[int] = [] @@ -205,6 +195,19 @@ def __init__( self._path_or_buf = 
self.handles.handle + # Same order as const.SASIndex + self._subheader_processors = [ + self._process_rowsize_subheader, + self._process_columnsize_subheader, + self._process_subheader_counts, + self._process_columntext_subheader, + self._process_columnname_subheader, + self._process_columnattributes_subheader, + self._process_format_subheader, + self._process_columnlist_subheader, + None, # Data + ] + try: self._get_properties() self._parse_metadata() @@ -426,89 +429,47 @@ def _process_page_metadata(self) -> None: bit_offset = self._page_bit_offset for i in range(self._current_page_subheaders_count): - pointer = self._process_subheader_pointers( - const.subheader_pointers_offset + bit_offset, i - ) - if pointer.length == 0: - continue - if pointer.compression == const.truncated_subheader_id: - continue - subheader_signature = self._read_subheader_signature(pointer.offset) - subheader_index = self._get_subheader_index( - subheader_signature, pointer.compression, pointer.ptype - ) - self._process_subheader(subheader_index, pointer) - - def _get_subheader_index(self, signature: bytes, compression, ptype) -> int: - # TODO: return here could be made an enum - index = const.subheader_signature_to_index.get(signature) - if index is None: - f1 = (compression == const.compressed_subheader_id) or (compression == 0) - f2 = ptype == const.compressed_subheader_type - if (self.compression != b"") and f1 and f2: - index = const.SASIndex.data_subheader_index - else: - self.close() - raise ValueError("Unknown subheader signature") - return index - - def _process_subheader_pointers( - self, offset: int, subheader_pointer_index: int - ) -> _SubheaderPointer: - - subheader_pointer_length = self._subheader_pointer_length - total_offset = offset + subheader_pointer_length * subheader_pointer_index + offset = const.subheader_pointers_offset + bit_offset + total_offset = offset + self._subheader_pointer_length * i - subheader_offset = self._read_int(total_offset, self._int_length) - 
total_offset += self._int_length + subheader_offset = self._read_int(total_offset, self._int_length) + total_offset += self._int_length - subheader_length = self._read_int(total_offset, self._int_length) - total_offset += self._int_length + subheader_length = self._read_int(total_offset, self._int_length) + total_offset += self._int_length - subheader_compression = self._read_int(total_offset, 1) - total_offset += 1 - - subheader_type = self._read_int(total_offset, 1) - - x = _SubheaderPointer( - subheader_offset, subheader_length, subheader_compression, subheader_type - ) + subheader_compression = self._read_int(total_offset, 1) + total_offset += 1 - return x + subheader_type = self._read_int(total_offset, 1) - def _read_subheader_signature(self, offset: int) -> bytes: - subheader_signature = self._read_bytes(offset, self._int_length) - return subheader_signature - - def _process_subheader( - self, subheader_index: int, pointer: _SubheaderPointer - ) -> None: - offset = pointer.offset - length = pointer.length - - if subheader_index == const.SASIndex.row_size_index: - processor = self._process_rowsize_subheader - elif subheader_index == const.SASIndex.column_size_index: - processor = self._process_columnsize_subheader - elif subheader_index == const.SASIndex.column_text_index: - processor = self._process_columntext_subheader - elif subheader_index == const.SASIndex.column_name_index: - processor = self._process_columnname_subheader - elif subheader_index == const.SASIndex.column_attributes_index: - processor = self._process_columnattributes_subheader - elif subheader_index == const.SASIndex.format_and_label_index: - processor = self._process_format_subheader - elif subheader_index == const.SASIndex.column_list_index: - processor = self._process_columnlist_subheader - elif subheader_index == const.SASIndex.subheader_counts_index: - processor = self._process_subheader_counts - elif subheader_index == const.SASIndex.data_subheader_index: - 
self._current_page_data_subheader_pointers.append(pointer) - return - else: - raise ValueError("unknown subheader index") + if ( + subheader_length == 0 + or subheader_compression == const.truncated_subheader_id + ): + continue - processor(offset, length) + subheader_signature = self._read_bytes(subheader_offset, self._int_length) + subheader_index = get_subheader_index(subheader_signature) + subheader_processor = self._subheader_processors[subheader_index] + + if subheader_processor is None: + f1 = ( + subheader_compression == const.compressed_subheader_id + or subheader_compression == 0 + ) + f2 = subheader_type == const.compressed_subheader_type + if self.compression and f1 and f2: + self._current_page_data_subheader_pointers.append( + (subheader_offset, subheader_length) + ) + else: + self.close() + raise ValueError( + f"Unknown subheader signature {subheader_signature}" + ) + else: + subheader_processor(subheader_offset, subheader_length) def _process_rowsize_subheader(self, offset: int, length: int) -> None: @@ -523,10 +484,12 @@ def _process_rowsize_subheader(self, offset: int, length: int) -> None: lcp_offset += 378 self.row_length = self._read_int( - offset + const.row_length_offset_multiplier * int_len, int_len + offset + const.row_length_offset_multiplier * int_len, + int_len, ) self.row_count = self._read_int( - offset + const.row_count_offset_multiplier * int_len, int_len + offset + const.row_count_offset_multiplier * int_len, + int_len, ) self.col_count_p1 = self._read_int( offset + const.col_count_p1_multiplier * int_len, int_len
Avoid constructing `_SubheaderPointer` objects and make dictionary lookups in C rather than in Python. Speedup relative to current `main`: ``` <main> <sas/shlookup~1> - 8.32±0.07ms 7.51±0.06ms 0.90 io.sas.SAS.time_test_meta2_pagesas7bdat - 82.8±0.5ms 73.6±0.5ms 0.89 io.sas.SAS.time_read_sas7bdat_2_chunked before after ratio ``` Will extend what's new from https://github.com/pandas-dev/pandas/pull/47404 once that's merged. - [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47656
2022-07-09T21:40:23Z
2022-10-04T17:56:17Z
2022-10-04T17:56:17Z
2022-10-13T16:59:53Z
CLN: Rename private variables to inclusive
diff --git a/pandas/_libs/interval.pyi b/pandas/_libs/interval.pyi index ba0a339fa93dd..bad0f2bab93d8 100644 --- a/pandas/_libs/interval.pyi +++ b/pandas/_libs/interval.pyi @@ -17,7 +17,7 @@ from pandas._typing import ( Timestamp, ) -VALID_CLOSED: frozenset[str] +VALID_INCLUSIVE: frozenset[str] _OrderableScalarT = TypeVar("_OrderableScalarT", int, float) _OrderableTimesT = TypeVar("_OrderableTimesT", Timestamp, Timedelta) @@ -52,7 +52,9 @@ class IntervalMixin: def open_right(self) -> bool: ... @property def is_empty(self) -> bool: ... - def _check_closed_matches(self, other: IntervalMixin, name: str = ...) -> None: ... + def _check_inclusive_matches( + self, other: IntervalMixin, name: str = ... + ) -> None: ... def _warning_interval( inclusive, closed @@ -150,7 +152,7 @@ class Interval(IntervalMixin, Generic[_OrderableT]): def overlaps(self: Interval[_OrderableT], other: Interval[_OrderableT]) -> bool: ... def intervals_to_interval_bounds( - intervals: np.ndarray, validate_closed: bool = ... + intervals: np.ndarray, validate_inclusive: bool = ... ) -> tuple[np.ndarray, np.ndarray, IntervalInclusiveType]: ... class IntervalTree(IntervalMixin): diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 79b3c0d056735..bc0a63c5c5a33 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -56,7 +56,7 @@ from pandas._libs.tslibs.util cimport ( is_timedelta64_object, ) -VALID_CLOSED = frozenset(['both', 'neither', 'left', 'right']) +VALID_INCLUSIVE = frozenset(['both', 'neither', 'left', 'right']) cdef class IntervalMixin: @@ -85,7 +85,7 @@ cdef class IntervalMixin: Returns ------- bool - True if the Interval is closed on the left-side. + True if the Interval is closed on the right-side. """ return self.inclusive in ('right', 'both') @@ -99,7 +99,7 @@ cdef class IntervalMixin: Returns ------- bool - True if the Interval is closed on the left-side. + True if the Interval is not closed on the left-side. 
""" return not self.closed_left @@ -113,7 +113,7 @@ cdef class IntervalMixin: Returns ------- bool - True if the Interval is closed on the left-side. + True if the Interval is not closed on the right-side. """ return not self.closed_right @@ -188,7 +188,7 @@ cdef class IntervalMixin: """ return (self.right == self.left) & (self.inclusive != 'both') - def _check_closed_matches(self, other, name='other'): + def _check_inclusive_matches(self, other, name='other'): """ Check if the inclusive attribute of `other` matches. @@ -203,7 +203,7 @@ cdef class IntervalMixin: Raises ------ ValueError - When `other` is not closed exactly the same as self. + When `other` is not inclusive exactly the same as self. """ if self.inclusive != other.inclusive: raise ValueError(f"'{name}.inclusive' is {repr(other.inclusive)}, " @@ -259,14 +259,14 @@ cdef class Interval(IntervalMixin): .. deprecated:: 1.5.0 inclusive : {'both', 'neither', 'left', 'right'}, default 'both' - Whether the interval is closed on the left-side, right-side, both or + Whether the interval is inclusive on the left-side, right-side, both or neither. See the Notes for more detailed explanation. .. versionadded:: 1.5.0 See Also -------- - IntervalIndex : An Index of Interval objects that are all closed on the + IntervalIndex : An Index of Interval objects that are all inclusive on the same side. cut : Convert continuous data into discrete bins (Categorical of Interval objects). @@ -279,13 +279,13 @@ cdef class Interval(IntervalMixin): The parameters `left` and `right` must be from the same type, you must be able to compare them and they must satisfy ``left <= right``. - A closed interval (in mathematics denoted by square brackets) contains - its endpoints, i.e. the closed interval ``[0, 5]`` is characterized by the + A inclusive interval (in mathematics denoted by square brackets) contains + its endpoints, i.e. the inclusive interval ``[0, 5]`` is characterized by the conditions ``0 <= x <= 5``. 
This is what ``inclusive='both'`` stands for. An open interval (in mathematics denoted by parentheses) does not contain its endpoints, i.e. the open interval ``(0, 5)`` is characterized by the conditions ``0 < x < 5``. This is what ``inclusive='neither'`` stands for. - Intervals can also be half-open or half-closed, i.e. ``[0, 5)`` is + Intervals can also be half-open or half-inclusive, i.e. ``[0, 5)`` is described by ``0 <= x < 5`` (``inclusive='left'``) and ``(0, 5]`` is described by ``0 < x <= 5`` (``inclusive='right'``). @@ -352,7 +352,7 @@ cdef class Interval(IntervalMixin): cdef readonly str inclusive """ - Whether the interval is closed on the left-side, right-side, both or + Whether the interval is inclusive on the left-side, right-side, both or neither. """ @@ -368,7 +368,7 @@ cdef class Interval(IntervalMixin): if inclusive is None: inclusive = "right" - if inclusive not in VALID_CLOSED: + if inclusive not in VALID_INCLUSIVE: raise ValueError(f"invalid option for 'inclusive': {inclusive}") if not left <= right: raise ValueError("left side of interval must be <= right side") @@ -522,7 +522,7 @@ cdef class Interval(IntervalMixin): """ Check whether two Interval objects overlap. - Two intervals overlap if they share a common point, including closed + Two intervals overlap if they share a common point, including inclusive endpoints. Intervals that only have an open endpoint in common do not overlap. 
@@ -551,7 +551,7 @@ cdef class Interval(IntervalMixin): >>> i1.overlaps(i3) False - Intervals that share closed endpoints overlap: + Intervals that share inclusive endpoints overlap: >>> i4 = pd.Interval(0, 1, inclusive='both') >>> i5 = pd.Interval(1, 2, inclusive='both') @@ -568,7 +568,7 @@ cdef class Interval(IntervalMixin): raise TypeError("`other` must be an Interval, " f"got {type(other).__name__}") - # equality is okay if both endpoints are closed (overlap at a point) + # equality is okay if both endpoints are inclusive (overlap at a point) op1 = le if (self.closed_left and other.closed_right) else lt op2 = le if (other.closed_left and self.closed_right) else lt @@ -580,16 +580,16 @@ cdef class Interval(IntervalMixin): @cython.wraparound(False) @cython.boundscheck(False) -def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True): +def intervals_to_interval_bounds(ndarray intervals, bint validate_inclusive=True): """ Parameters ---------- intervals : ndarray Object array of Intervals / nulls. - validate_closed: bool, default True - Boolean indicating if all intervals must be closed on the same side. - Mismatching closed will raise if True, else return None for closed. + validate_inclusive: bool, default True + Boolean indicating if all intervals must be inclusive on the same side. + Mismatching inclusive will raise if True, else return None for inclusive. 
Returns ------- @@ -602,7 +602,7 @@ def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True): object inclusive = None, interval Py_ssize_t i, n = len(intervals) ndarray left, right - bint seen_closed = False + bint seen_inclusive = False left = np.empty(n, dtype=intervals.dtype) right = np.empty(n, dtype=intervals.dtype) @@ -620,13 +620,13 @@ def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True): left[i] = interval.left right[i] = interval.right - if not seen_closed: - seen_closed = True + if not seen_inclusive: + seen_inclusive = True inclusive = interval.inclusive elif inclusive != interval.inclusive: inclusive = None - if validate_closed: - raise ValueError("intervals must all be closed on the same side") + if validate_inclusive: + raise ValueError("intervals must all be inclusive on the same side") return left, right, inclusive diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py index 5ed10661e8983..79b79a8ae8ff1 100644 --- a/pandas/core/arrays/arrow/_arrow_utils.py +++ b/pandas/core/arrays/arrow/_arrow_utils.py @@ -11,7 +11,7 @@ from pandas.util._decorators import deprecate_kwarg from pandas.util._exceptions import find_stack_level -from pandas.core.arrays.interval import VALID_CLOSED +from pandas.core.arrays.interval import VALID_INCLUSIVE def fallback_performancewarning(version: str | None = None) -> None: @@ -111,8 +111,8 @@ class ArrowIntervalType(pyarrow.ExtensionType): def __init__(self, subtype, inclusive: IntervalInclusiveType) -> None: # attributes need to be set first before calling # super init (as that calls serialize) - assert inclusive in VALID_CLOSED - self._closed: IntervalInclusiveType = inclusive + assert inclusive in VALID_INCLUSIVE + self._inclusive: IntervalInclusiveType = inclusive if not isinstance(subtype, pyarrow.DataType): subtype = pyarrow.type_for_alias(str(subtype)) self._subtype = subtype @@ -126,7 +126,7 @@ def subtype(self): @property def 
inclusive(self) -> IntervalInclusiveType: - return self._closed + return self._inclusive @property def closed(self) -> IntervalInclusiveType: @@ -135,7 +135,7 @@ def closed(self) -> IntervalInclusiveType: FutureWarning, stacklevel=find_stack_level(), ) - return self._closed + return self._inclusive def __arrow_ext_serialize__(self) -> bytes: metadata = {"subtype": str(self.subtype), "inclusive": self.inclusive} diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index ea0e7a769c25e..6469dccf6e2d5 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -23,7 +23,7 @@ from pandas._libs import lib from pandas._libs.interval import ( - VALID_CLOSED, + VALID_INCLUSIVE, Interval, IntervalMixin, intervals_to_interval_bounds, @@ -130,7 +130,7 @@ Array-like containing Interval objects from which to build the %(klass)s. inclusive : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both or + Whether the intervals are inclusive on the left-side, right-side, both or neither. dtype : dtype or None, default None If None, dtype will be inferred. 
@@ -185,7 +185,8 @@ _interval_shared_docs["class"] % { "klass": "IntervalArray", - "summary": "Pandas array for interval data that are closed on the same side.", + "summary": "Pandas array for interval data that are inclusive on the same " + "side.", "versionadded": "0.24.0", "name": "", "extra_attributes": "", @@ -254,13 +255,13 @@ def __new__( # might need to convert empty or purely na data data = _maybe_convert_platform_interval(data) - left, right, infer_closed = intervals_to_interval_bounds( - data, validate_closed=inclusive is None + left, right, infer_inclusive = intervals_to_interval_bounds( + data, validate_inclusive=inclusive is None ) if left.dtype == object: left = lib.maybe_convert_objects(left) right = lib.maybe_convert_objects(right) - inclusive = inclusive or infer_closed + inclusive = inclusive or infer_inclusive return cls._simple_new( left, @@ -389,7 +390,7 @@ def _from_factorized( breaks : array-like (1-dimensional) Left and right bounds for each interval. inclusive : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both + Whether the intervals are inclusive on the left-side, right-side, both or neither. copy : bool, default False Copy the data. @@ -455,7 +456,7 @@ def from_breaks( right : array-like (1-dimensional) Right bounds for each interval. inclusive : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both + Whether the intervals are inclusive on the left-side, right-side, both or neither. copy : bool, default False Copy the data. @@ -542,7 +543,7 @@ def from_arrays( data : array-like (1-dimensional) Array of tuples. inclusive : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both + Whether the intervals are inclusive on the left-side, right-side, both or neither. 
copy : bool, default False By-default copy the data, this is compat only and ignored. @@ -629,7 +630,7 @@ def _validate(self): * left and right have the same missing values * left is always below right """ - if self.inclusive not in VALID_CLOSED: + if self.inclusive not in VALID_INCLUSIVE: msg = f"invalid option for 'inclusive': {self.inclusive}" raise ValueError(msg) if len(self._left) != len(self._right): @@ -745,7 +746,7 @@ def _cmp_method(self, other, op): # for categorical defer to categories for dtype other_dtype = other.categories.dtype - # extract intervals if we have interval categories with matching closed + # extract intervals if we have interval categories with matching inclusive if is_interval_dtype(other_dtype): if self.inclusive != other.categories.inclusive: return invalid_comparison(self, other, op) @@ -754,7 +755,7 @@ def _cmp_method(self, other, op): other.codes, allow_fill=True, fill_value=other.categories._na_value ) - # interval-like -> need same closed and matching endpoints + # interval-like -> need same inclusive and matching endpoints if is_interval_dtype(other_dtype): if self.inclusive != other.inclusive: return invalid_comparison(self, other, op) @@ -994,7 +995,7 @@ def _concat_same_type( """ inclusive_set = {interval.inclusive for interval in to_concat} if len(inclusive_set) != 1: - raise ValueError("Intervals must all be closed on the same side.") + raise ValueError("Intervals must all be inclusive on the same side.") inclusive = inclusive_set.pop() left = np.concatenate([interval.left for interval in to_concat]) @@ -1120,7 +1121,7 @@ def _validate_listlike(self, value): # list-like of intervals try: array = IntervalArray(value) - self._check_closed_matches(array, name="value") + self._check_inclusive_matches(array, name="value") value_left, value_right = array.left, array.right except TypeError as err: # wrong type: not interval or NA @@ -1140,7 +1141,7 @@ def _validate_listlike(self, value): def _validate_scalar(self, value): if 
isinstance(value, Interval): - self._check_closed_matches(value, name="value") + self._check_inclusive_matches(value, name="value") left, right = value.left, value.right # TODO: check subdtype match like _validate_setitem_value? elif is_valid_na_for_dtype(value, self.left.dtype): @@ -1166,7 +1167,7 @@ def _validate_setitem_value(self, value): elif isinstance(value, Interval): # scalar - self._check_closed_matches(value, name="value") + self._check_inclusive_matches(value, name="value") value_left, value_right = value.left, value.right self.left._validate_fill_value(value_left) self.left._validate_fill_value(value_right) @@ -1352,7 +1353,7 @@ def overlaps(self, other): msg = f"`other` must be Interval-like, got {type(other).__name__}" raise TypeError(msg) - # equality is okay if both endpoints are closed (overlap at a point) + # equality is okay if both endpoints are inclusive (overlap at a point) op1 = le if (self.closed_left and other.closed_right) else lt op2 = le if (other.closed_left and self.closed_right) else lt @@ -1366,7 +1367,7 @@ def overlaps(self, other): @property def inclusive(self) -> IntervalInclusiveType: """ - Whether the intervals are closed on the left-side, right-side, both or + Whether the intervals are inclusive on the left-side, right-side, both or neither. 
""" return self.dtype.inclusive @@ -1482,7 +1483,7 @@ def set_closed( def set_inclusive( self: IntervalArrayT, inclusive: IntervalInclusiveType ) -> IntervalArrayT: - if inclusive not in VALID_CLOSED: + if inclusive not in VALID_INCLUSIVE: msg = f"invalid option for 'inclusive': {inclusive}" raise ValueError(msg) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 78096d836f5b0..9683c1dd93645 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -1124,7 +1124,7 @@ def __new__( # generally for pickle compat u = object.__new__(cls) u._subtype = None - u._closed = inclusive + u._inclusive = inclusive return u elif isinstance(subtype, str) and subtype.lower() == "interval": subtype = None @@ -1166,7 +1166,7 @@ def __new__( except KeyError: u = object.__new__(cls) u._subtype = subtype - u._closed = inclusive + u._inclusive = inclusive cls._cache_dtypes[key] = u return u @@ -1184,7 +1184,7 @@ def _can_hold_na(self) -> bool: @property def inclusive(self): - return self._closed + return self._inclusive @property def closed(self): @@ -1193,7 +1193,7 @@ def closed(self): FutureWarning, stacklevel=find_stack_level(), ) - return self._closed + return self._inclusive @property def subtype(self): @@ -1274,7 +1274,7 @@ def __setstate__(self, state) -> None: # pickle -> need to set the settable private ones here (see GH26067) self._subtype = state["subtype"] # backward-compat older pickles won't have "inclusive" key - self._closed = state.pop("inclusive", None) + self._inclusive = state.pop("inclusive", None) @classmethod def is_dtype(cls, dtype: object) -> bool: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index ced675fe9a3cf..23f2e724e208c 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -153,7 +153,7 @@ def _new_IntervalIndex(cls, d): _interval_shared_docs["class"] % { "klass": "IntervalIndex", - "summary": "Immutable index of intervals that are closed on 
the same side.", + "summary": "Immutable index of intervals that are inclusive on the same side.", "name": _index_doc_kwargs["name"], "versionadded": "0.20.0", "extra_attributes": "is_overlapping\nvalues\n", @@ -473,7 +473,7 @@ def is_overlapping(self) -> bool: >>> index.is_overlapping True - Intervals that share closed endpoints overlap: + Intervals that share inclusive endpoints overlap: >>> index = pd.interval_range(0, 3, inclusive='both') >>> index @@ -1009,7 +1009,7 @@ def interval_range( name : str, default None Name of the resulting IntervalIndex. inclusive : {"both", "neither", "left", "right"}, default "both" - Include boundaries; Whether to set each bound as closed or open. + Include boundaries; Whether to set each bound as inclusive or not. .. versionadded:: 1.5.0 closed : {'left', 'right', 'both', 'neither'}, default 'right' @@ -1026,7 +1026,7 @@ def interval_range( See Also -------- - IntervalIndex : An Index of intervals that are all closed on the same side. + IntervalIndex : An Index of intervals that are all inclusive on the same side. Notes ----- @@ -1079,7 +1079,7 @@ def interval_range( dtype='interval[float64, right]') The ``inclusive`` parameter specifies which endpoints of the individual - intervals within the ``IntervalIndex`` are closed. + intervals within the ``IntervalIndex`` are inclusive. 
>>> pd.interval_range(end=5, periods=4, inclusive='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index e127fe27b6209..695b06690b358 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -826,7 +826,7 @@ def test_unpickling_without_closed(self): # GH#38394 dtype = IntervalDtype("interval") - assert dtype._closed is None + assert dtype._inclusive is None tm.round_trip_pickle(dtype) diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py index 1966f344356a3..8c8998a8e4be9 100644 --- a/pandas/tests/indexes/interval/test_constructors.py +++ b/pandas/tests/indexes/interval/test_constructors.py @@ -401,7 +401,7 @@ def test_constructor_string(self): def test_constructor_errors(self, constructor): # mismatched closed within intervals with no constructor override ivs = [Interval(0, 1, inclusive="right"), Interval(2, 3, inclusive="left")] - msg = "intervals must all be closed on the same side" + msg = "intervals must all be inclusive on the same side" with pytest.raises(ValueError, match=msg): constructor(ivs)
null
https://api.github.com/repos/pandas-dev/pandas/pulls/47655
2022-07-09T21:00:51Z
2022-07-10T00:30:31Z
2022-07-10T00:30:31Z
2022-07-18T00:24:22Z
TYP: fix some of the __hash__ methods
diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py index 5ed10661e8983..3b8333fdb410a 100644 --- a/pandas/core/arrays/arrow/_arrow_utils.py +++ b/pandas/core/arrays/arrow/_arrow_utils.py @@ -92,7 +92,7 @@ def __eq__(self, other): else: return NotImplemented - def __hash__(self): + def __hash__(self) -> int: return hash((str(self), self.freq)) def to_pandas_dtype(self): @@ -158,7 +158,7 @@ def __eq__(self, other): else: return NotImplemented - def __hash__(self): + def __hash__(self) -> int: return hash((str(self), str(self.subtype), self.inclusive)) def to_pandas_dtype(self): diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 882cc76cf2d77..6c9b7adadb7b0 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -14,6 +14,7 @@ TYPE_CHECKING, Any, Callable, + ClassVar, Iterator, Literal, Sequence, @@ -1442,7 +1443,7 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs): # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") - __hash__: None # type: ignore[assignment] + __hash__: ClassVar[None] # type: ignore[assignment] # ------------------------------------------------------------------------ # Non-Optimized Default Methods; in the case of the private methods here, diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py index 859995cb3c230..eaed6257736ba 100644 --- a/pandas/core/arrays/sparse/dtype.py +++ b/pandas/core/arrays/sparse/dtype.py @@ -99,7 +99,7 @@ def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None: self._fill_value = fill_value self._check_fill_value() - def __hash__(self): + def __hash__(self) -> int: # Python3 doesn't inherit __hash__ when a base class overrides # __eq__, so we explicitly do it here. 
return super().__hash__() diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bd8e04df7594f..e392802bdb5ea 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -13,6 +13,7 @@ TYPE_CHECKING, Any, Callable, + ClassVar, Hashable, Literal, Mapping, @@ -1882,7 +1883,7 @@ def _drop_labels_or_levels(self, keys, axis: int = 0): # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") - __hash__: None # type: ignore[assignment] + __hash__: ClassVar[None] # type: ignore[assignment] def __iter__(self): """ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 667ce4664c359..fc5fcaeab7d2a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -8,6 +8,7 @@ TYPE_CHECKING, Any, Callable, + ClassVar, Hashable, Iterable, Literal, @@ -5296,7 +5297,7 @@ def __contains__(self, key: Any) -> bool: # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") - __hash__: None # type: ignore[assignment] + __hash__: ClassVar[None] # type: ignore[assignment] @final def __setitem__(self, key, value): diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py index ed5cf047ab59f..deb6ac2c80a81 100644 --- a/pandas/core/indexes/frozen.py +++ b/pandas/core/indexes/frozen.py @@ -89,7 +89,8 @@ def __mul__(self, other): def __reduce__(self): return type(self), (list(self),) - def __hash__(self): + # error: Signature of "__hash__" incompatible with supertype "list" + def __hash__(self) -> int: # type: ignore[override] return hash(tuple(self)) def _disabled(self, *args, **kwargs):
`__hash__` is `None` for DataFrame/Series/Index but I cannot find the line that sets `__hash__ = None`
https://api.github.com/repos/pandas-dev/pandas/pulls/47654
2022-07-09T18:27:01Z
2022-07-10T00:32:30Z
2022-07-10T00:32:30Z
2022-09-21T15:28:31Z
ci: add GitHub token permissions for workflows
diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml index be894e6a5a63e..e091160c952f8 100644 --- a/.github/workflows/32-bit-linux.yml +++ b/.github/workflows/32-bit-linux.yml @@ -12,6 +12,9 @@ on: paths-ignore: - "doc/**" +permissions: + contents: read + jobs: pytest: runs-on: ubuntu-latest diff --git a/.github/workflows/assign.yml b/.github/workflows/assign.yml index a1812843b1a8f..b7bb8db549f86 100644 --- a/.github/workflows/assign.yml +++ b/.github/workflows/assign.yml @@ -3,8 +3,14 @@ on: issue_comment: types: created +permissions: + contents: read + jobs: issue_assign: + permissions: + issues: write + pull-requests: write runs-on: ubuntu-latest steps: - if: github.event.comment.body == 'take' diff --git a/.github/workflows/asv-bot.yml b/.github/workflows/asv-bot.yml index dbf0ab0acb9ec..abb19a95315b6 100644 --- a/.github/workflows/asv-bot.yml +++ b/.github/workflows/asv-bot.yml @@ -9,8 +9,15 @@ env: ENV_FILE: environment.yml COMMENT: ${{github.event.comment.body}} +permissions: + contents: read + jobs: autotune: + permissions: + contents: read + issues: write + pull-requests: write name: "Run benchmarks" # TODO: Support more benchmarking options later, against different branches, against self, etc if: startsWith(github.event.comment.body, '@github-actions benchmark') diff --git a/.github/workflows/autoupdate-pre-commit-config.yml b/.github/workflows/autoupdate-pre-commit-config.yml index d2eac234ca361..9a41871c26062 100644 --- a/.github/workflows/autoupdate-pre-commit-config.yml +++ b/.github/workflows/autoupdate-pre-commit-config.yml @@ -5,8 +5,14 @@ on: - cron: "0 7 1 * *" # At 07:00 on 1st of every month. 
workflow_dispatch: +permissions: + contents: read + jobs: update-pre-commit: + permissions: + contents: write # for technote-space/create-pr-action to push code + pull-requests: write # for technote-space/create-pr-action to create a PR if: github.repository_owner == 'pandas-dev' name: Autoupdate pre-commit config runs-on: ubuntu-latest diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml index 8031aaf22981f..09c603f347d4c 100644 --- a/.github/workflows/code-checks.yml +++ b/.github/workflows/code-checks.yml @@ -14,6 +14,9 @@ env: ENV_FILE: environment.yml PANDAS_CI: 1 +permissions: + contents: read + jobs: pre_commit: name: pre-commit diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml index f9a941b87387c..626bf7828e032 100644 --- a/.github/workflows/docbuild-and-upload.yml +++ b/.github/workflows/docbuild-and-upload.yml @@ -14,6 +14,9 @@ env: ENV_FILE: environment.yml PANDAS_CI: 1 +permissions: + contents: read + jobs: web_and_docs: name: Doc Build and Upload diff --git a/.github/workflows/macos-windows.yml b/.github/workflows/macos-windows.yml index cf9a59400bc92..e9503a2486560 100644 --- a/.github/workflows/macos-windows.yml +++ b/.github/workflows/macos-windows.yml @@ -18,6 +18,9 @@ env: PATTERN: "not slow and not db and not network and not single_cpu" +permissions: + contents: read + jobs: pytest: defaults: diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml index 09639acafbba1..d93b92a9662ec 100644 --- a/.github/workflows/python-dev.yml +++ b/.github/workflows/python-dev.yml @@ -27,6 +27,9 @@ env: COVERAGE: true PYTEST_TARGET: pandas +permissions: + contents: read + jobs: build: if: false # Comment this line out to "unfreeze" diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index 89312cdaaa80a..2e1ffe6d0d17e 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -13,6 +13,9 @@ on: paths-ignore: - "doc/**" 
+permissions: + contents: read + jobs: build: if: ${{ github.event.label.name == 'Build' || contains(github.event.pull_request.labels.*.name, 'Build') || github.event_name == 'push'}} diff --git a/.github/workflows/stale-pr.yml b/.github/workflows/stale-pr.yml index b97b60717a2b8..69656be18a8b1 100644 --- a/.github/workflows/stale-pr.yml +++ b/.github/workflows/stale-pr.yml @@ -4,8 +4,13 @@ on: # * is a special character in YAML so you have to quote this string - cron: "0 0 * * *" +permissions: + contents: read + jobs: stale: + permissions: + pull-requests: write runs-on: ubuntu-latest steps: - uses: actions/stale@v4 diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 8d6cae6278dcf..a759280c74521 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -15,6 +15,9 @@ on: env: PANDAS_CI: 1 +permissions: + contents: read + jobs: pytest: runs-on: ubuntu-latest
This PR adds minimum token permissions for the GITHUB_TOKEN using https://github.com/step-security/secure-workflows. GitHub recommends defining minimum GITHUB_TOKEN permissions for securing GitHub Actions workflows - https://github.blog/changelog/2021-04-20-github-actions-control-permissions-for-github_token/ - https://docs.github.com/en/actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token - The Open Source Security Foundation (OpenSSF) [Scorecards](https://github.com/ossf/scorecard) treats not setting token permissions as a high-risk issue This project is part of the top 100 critical projects as per OpenSSF (https://github.com/ossf/wg-securing-critical-projects), so fixing the token permissions to improve security. Signed-off-by: Varun Sharma <varunsh@stepsecurity.io> - [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/47652
2022-07-09T14:59:00Z
2022-07-11T16:57:29Z
2022-07-11T16:57:29Z
2022-07-11T16:57:37Z
WEB: Update NumFOCUS committee members
diff --git a/web/pandas/config.yml b/web/pandas/config.yml index 9165456d55897..5bb0cbc7557f8 100644 --- a/web/pandas/config.yml +++ b/web/pandas/config.yml @@ -101,11 +101,11 @@ maintainers: - Camille Scott - Nathaniel Smith numfocus: - - Phillip Cloud - - Stephan Hoyer - Wes McKinney - Jeff Reback - Joris Van den Bossche + - Tom Augspurger + - Matthew Roeschke sponsors: active: - name: "NumFOCUS"
After initial discussion in the mailing list about updating the NumFOCUS committee, I did some research, and based on the [governance docs](https://github.com/pandas-dev/pandas-governance/blob/master/governance.md#numfocus-subcommittee) the main goal of the committee is to manage the funds coming from NumFOCUS. So, I guess it makes sense that the list matches the approvers of NumFOCUS funds previously discussed. The governance also mentions that the committee must have at least 5 members. Leaving Wes in the list even if he's not one of the 4 approvers. Being the BDFL of the project I guess it makes sense to keep. We can always continue the discussion, but for now I think the new list makes more sense than the previous. CC: @wesm @cpcloud @shoyer @TomAugspurger @mroeschke
https://api.github.com/repos/pandas-dev/pandas/pulls/47650
2022-07-09T07:46:15Z
2022-07-10T07:21:08Z
2022-07-10T07:21:08Z
2022-07-10T07:21:08Z
BUG: #47350 if else added to add NaT for missing time values
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 038e4afdbd767..7f599635fcd5c 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -821,14 +821,37 @@ def apply( # This calls DataSplitter.__iter__ zipped = zip(group_keys, splitter) + i = 0 for key, group in zipped: + # BUG:47350 if replaced 1 by hamedgibago + # if key not in data.index and is_datetime64_any_dtype(data.index): + # #or (key not in data.index and f.__name__ in ['idxmax','idxmin']) : + # ser=Series(i,[key]) + # res = None + # else: + # res = f(group) + try: + res = f(group) + except (ValueError, AttributeError): + # except ValueError: + res = None + object.__setattr__(group, "name", key) # group might be modified group_axes = group.axes - res = f(group) + if not mutated and not _is_indexed_like(res, group_axes, axis): mutated = True + + i = i + 1 + + # BUG:47350 if added by hamedgibago + # if key in data.index: + # result_values.append(res) + # else: + # result_values.append(np.nan) + result_values.append(res) # getattr pattern for __name__ is needed for functools.partial objects diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 7e2a9184f04d9..3e48075962fe2 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -45,8 +45,8 @@ tz_compare, ) from pandas._typing import ( + AnyArrayLike, ArrayLike, - Axes, Dtype, DtypeObj, F, @@ -261,10 +261,6 @@ def _new_Index(cls, d): # GH#23752 "labels" kwarg has been replaced with "codes" d["codes"] = d.pop("labels") - # Since this was a valid MultiIndex at pickle-time, we don't need to - # check validty at un-pickle time. - d["verify_integrity"] = False - elif "dtype" not in d and "data" in d: # Prevent Index.__new__ from conducting inference; # "data" key not in RangeIndex @@ -277,9 +273,8 @@ def _new_Index(cls, d): class Index(IndexOpsMixin, PandasObject): """ - Immutable sequence used for indexing and alignment. 
- - The basic object storing axis labels for all pandas objects. + Immutable sequence used for indexing and alignment. The basic object + storing axis labels for all pandas objects. Parameters ---------- @@ -2297,7 +2292,8 @@ def is_monotonic(self) -> bool: @property def is_monotonic_increasing(self) -> bool: """ - Return a boolean if the values are equal or increasing. + Return if the index is monotonic increasing (only equal or + increasing) values. Examples -------- @@ -2313,7 +2309,8 @@ def is_monotonic_increasing(self) -> bool: @property def is_monotonic_decreasing(self) -> bool: """ - Return a boolean if the values are equal or decreasing. + Return if the index is monotonic decreasing (only equal or + decreasing) values. Examples -------- @@ -3815,9 +3812,8 @@ def get_loc(self, key, method=None, tolerance=None): _index_shared_docs[ "get_indexer" ] = """ - Compute indexer and mask for new index given the current index. - - The indexer should be then used as an input to ndarray.take to align the + Compute indexer and mask for new index given the current index. The + indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters @@ -4586,7 +4582,8 @@ def join( sort: bool = False, ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ - Compute join_index and indexers to conform data structures to the new index. + Compute join_index and indexers to conform data + structures to the new index. 
Parameters ---------- @@ -4687,7 +4684,6 @@ def join( not isinstance(self, ABCMultiIndex) or not any(is_categorical_dtype(dtype) for dtype in self.dtypes) ) - and not is_categorical_dtype(self.dtype) ): # Categorical is monotonic if data are ordered as categories, but join can # not handle this in case of not lexicographically monotonic GH#38502 @@ -5983,9 +5979,8 @@ def set_value(self, arr, key, value) -> None: _index_shared_docs[ "get_indexer_non_unique" ] = """ - Compute indexer and mask for new index given the current index. - - The indexer should be then used as an input to ndarray.take to align the + Compute indexer and mask for new index given the current index. The + indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters @@ -7283,7 +7278,7 @@ def ensure_index_from_sequences(sequences, names=None) -> Index: return MultiIndex.from_arrays(sequences, names=names) -def ensure_index(index_like: Axes, copy: bool = False) -> Index: +def ensure_index(index_like: AnyArrayLike | Sequence, copy: bool = False) -> Index: """ Ensure that we have an index from some index-like object. diff --git a/pandas/core/resample.py b/pandas/core/resample.py index e3d81e01ac94c..d3138c0ad682a 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1982,6 +1982,12 @@ def _get_timestamp_range_edges( ------- A tuple of length 2, containing the adjusted pd.Timestamp objects. 
""" + if isinstance(origin, Timestamp): + first, last = _adjust_dates_anchored( + first, last, freq, closed=closed, origin=origin, offset=offset + ) + return first, last + if isinstance(freq, Tick): index_tz = first.tz if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None): @@ -2116,7 +2122,10 @@ def _adjust_dates_anchored( origin_nanos = origin.value elif origin in ["end", "end_day"]: origin = last if origin == "end" else last.ceil("D") - sub_freq_times = (origin.value - first.value) // freq.nanos + if isinstance(freq, Tick): + sub_freq_times = (origin.value - first.value) // freq.nanos + else: + sub_freq_times = origin.value - first.value if closed == "left": sub_freq_times += 1 first = origin - sub_freq_times * freq @@ -2133,19 +2142,29 @@ def _adjust_dates_anchored( if last_tzinfo is not None: last = last.tz_convert("UTC") - foffset = (first.value - origin_nanos) % freq.nanos - loffset = (last.value - origin_nanos) % freq.nanos + if isinstance(freq, Tick): + foffset = (first.value - origin_nanos) % freq.nanos + loffset = (last.value - origin_nanos) % freq.nanos + else: + foffset = first.value - origin_nanos + loffset = last.value - origin_nanos if closed == "right": if foffset > 0: # roll back fresult_int = first.value - foffset else: - fresult_int = first.value - freq.nanos + if isinstance(freq, Tick): + fresult_int = first.value - freq.nanos + else: + fresult_int = first.value if loffset > 0: - # roll forward - lresult_int = last.value + (freq.nanos - loffset) + if isinstance(freq, Tick): + # roll forward + lresult_int = last.value + (freq.nanos - loffset) + else: + lresult_int = last.value - loffset else: # already the end of the road lresult_int = last.value @@ -2157,10 +2176,16 @@ def _adjust_dates_anchored( fresult_int = first.value if loffset > 0: - # roll forward - lresult_int = last.value + (freq.nanos - loffset) + if isinstance(freq, Tick): + # roll forward + lresult_int = last.value + (freq.nanos - loffset) + else: + 
lresult_int = last.value - loffset else: - lresult_int = last.value + freq.nanos + if isinstance(freq, Tick): + lresult_int = last.value + freq.nanos + else: + lresult_int = last.value fresult = Timestamp(fresult_int) lresult = Timestamp(lresult_int) if first_tzinfo is not None: diff --git a/pandas/core/series.py b/pandas/core/series.py index 20f0ecd06fbd1..9e9643f9ebe42 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2175,7 +2175,7 @@ def unique(self) -> ArrayLike: Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() - array([2, 1, 3]) + array([2, 1, 3], dtype=int64) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index d290aada18293..74b01327cb28e 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -39,6 +39,31 @@ def test_repr(): assert result == expected +def test_origin_param_no_effect(): + # GH 47653 + df = DataFrame( + [ + {"A": A, "datadate": datadate} + for A in range(1, 3) + for datadate in date_range(start="1/2/2022", end="2/1/2022", freq="D") + ] + ) + + result = df.groupby(["A", Grouper(key="datadate", freq="W", origin="start")]) + + # for i, dfg in result: + # print(dfg[["A", "datadate"]]).. 
+ # print("-----------------------") + + expected = df.groupby(["A", Grouper(key="datadate", freq="W", origin="1/5/2022")]) + + # for i, dfg in expected: + # print(dfg[["A", "datadate"]]) + # print("-----------------------") + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"]) def test_basic(dtype): diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index 8aff217cca5c1..148d01662bf27 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -45,6 +45,17 @@ async def test_tab_complete_ipython6_warning(ip): list(ip.Completer.completions("rs.", 1)) +def test_dataframe_missing_a_day(): + # GH 47350 + dates = pd.DatetimeIndex(["2022-01-01", "2022-01-02", "2022-01-04"]) + df = DataFrame([0, 1, 2], index=dates) + result = df.resample("D")[0].idxmax() # raises value error + + expected = df.resample("D")[0].apply(lambda x: x.idxmax() if len(x) else None) + + tm.assert_series_equal(result, expected) + + def test_deferred_with_groupby(): # GH 12486
If else added to add NaT rows for missing time values.
https://api.github.com/repos/pandas-dev/pandas/pulls/47647
2022-07-08T20:23:17Z
2022-10-31T19:14:07Z
null
2022-10-31T19:14:07Z
TYP: Improve typing interval inclusive
diff --git a/pandas/_libs/interval.pyi b/pandas/_libs/interval.pyi index 3bd5dd2042e69..ba0a339fa93dd 100644 --- a/pandas/_libs/interval.pyi +++ b/pandas/_libs/interval.pyi @@ -12,7 +12,7 @@ import numpy.typing as npt from pandas._libs import lib from pandas._typing import ( - IntervalClosedType, + IntervalInclusiveType, Timedelta, Timestamp, ) @@ -56,7 +56,7 @@ class IntervalMixin: def _warning_interval( inclusive, closed -) -> tuple[IntervalClosedType, lib.NoDefault]: ... +) -> tuple[IntervalInclusiveType, lib.NoDefault]: ... class Interval(IntervalMixin, Generic[_OrderableT]): @property @@ -64,17 +64,17 @@ class Interval(IntervalMixin, Generic[_OrderableT]): @property def right(self: Interval[_OrderableT]) -> _OrderableT: ... @property - def inclusive(self) -> IntervalClosedType: ... + def inclusive(self) -> IntervalInclusiveType: ... @property - def closed(self) -> IntervalClosedType: ... + def closed(self) -> IntervalInclusiveType: ... mid: _MidDescriptor length: _LengthDescriptor def __init__( self, left: _OrderableT, right: _OrderableT, - inclusive: IntervalClosedType = ..., - closed: IntervalClosedType = ..., + inclusive: IntervalInclusiveType = ..., + closed: IntervalInclusiveType = ..., ) -> None: ... def __hash__(self) -> int: ... @overload @@ -151,14 +151,14 @@ class Interval(IntervalMixin, Generic[_OrderableT]): def intervals_to_interval_bounds( intervals: np.ndarray, validate_closed: bool = ... -) -> tuple[np.ndarray, np.ndarray, str]: ... +) -> tuple[np.ndarray, np.ndarray, IntervalInclusiveType]: ... class IntervalTree(IntervalMixin): def __init__( self, left: np.ndarray, right: np.ndarray, - inclusive: IntervalClosedType = ..., + inclusive: IntervalInclusiveType = ..., leaf_size: int = ..., ) -> None: ... 
@property diff --git a/pandas/_typing.py b/pandas/_typing.py index ac1237f8841be..4bc5f75400455 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -314,7 +314,7 @@ def closed(self) -> bool: # Interval closed type IntervalLeftRight = Literal["left", "right"] -IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]] +IntervalInclusiveType = Union[IntervalLeftRight, Literal["both", "neither"]] # datetime and NaTType DatetimeNaTType = Union[datetime, "NaTType"] diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py index 5893ca77193c4..5ed10661e8983 100644 --- a/pandas/core/arrays/arrow/_arrow_utils.py +++ b/pandas/core/arrays/arrow/_arrow_utils.py @@ -6,6 +6,7 @@ import numpy as np import pyarrow +from pandas._typing import IntervalInclusiveType from pandas.errors import PerformanceWarning from pandas.util._decorators import deprecate_kwarg from pandas.util._exceptions import find_stack_level @@ -107,11 +108,11 @@ def to_pandas_dtype(self): class ArrowIntervalType(pyarrow.ExtensionType): @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") - def __init__(self, subtype, inclusive: str) -> None: + def __init__(self, subtype, inclusive: IntervalInclusiveType) -> None: # attributes need to be set first before calling # super init (as that calls serialize) assert inclusive in VALID_CLOSED - self._closed = inclusive + self._closed: IntervalInclusiveType = inclusive if not isinstance(subtype, pyarrow.DataType): subtype = pyarrow.type_for_alias(str(subtype)) self._subtype = subtype @@ -124,11 +125,11 @@ def subtype(self): return self._subtype @property - def inclusive(self) -> str: + def inclusive(self) -> IntervalInclusiveType: return self._closed @property - def closed(self): + def closed(self) -> IntervalInclusiveType: warnings.warn( "Attribute `closed` is deprecated in favor of `inclusive`.", FutureWarning, diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 
4320c862fbc41..ea0e7a769c25e 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -32,7 +32,7 @@ from pandas._typing import ( ArrayLike, Dtype, - IntervalClosedType, + IntervalInclusiveType, NpDtype, PositionalIndexer, ScalarIndexer, @@ -230,7 +230,7 @@ def ndim(self) -> Literal[1]: def __new__( cls: type[IntervalArrayT], data, - inclusive: str | None = None, + inclusive: IntervalInclusiveType | None = None, dtype: Dtype | None = None, copy: bool = False, verify_integrity: bool = True, @@ -277,7 +277,7 @@ def _simple_new( cls: type[IntervalArrayT], left, right, - inclusive=None, + inclusive: IntervalInclusiveType | None = None, copy: bool = False, dtype: Dtype | None = None, verify_integrity: bool = True, @@ -431,7 +431,7 @@ def _from_factorized( def from_breaks( cls: type[IntervalArrayT], breaks, - inclusive: IntervalClosedType | None = None, + inclusive: IntervalInclusiveType | None = None, copy: bool = False, dtype: Dtype | None = None, ) -> IntervalArrayT: @@ -513,7 +513,7 @@ def from_arrays( cls: type[IntervalArrayT], left, right, - inclusive: IntervalClosedType | None = None, + inclusive: IntervalInclusiveType | None = None, copy: bool = False, dtype: Dtype | None = None, ) -> IntervalArrayT: @@ -586,7 +586,7 @@ def from_arrays( def from_tuples( cls: type[IntervalArrayT], data, - inclusive=None, + inclusive: IntervalInclusiveType | None = None, copy: bool = False, dtype: Dtype | None = None, ) -> IntervalArrayT: @@ -1364,7 +1364,7 @@ def overlaps(self, other): # --------------------------------------------------------------------- @property - def inclusive(self) -> IntervalClosedType: + def inclusive(self) -> IntervalInclusiveType: """ Whether the intervals are closed on the left-side, right-side, both or neither. 
@@ -1372,7 +1372,7 @@ def inclusive(self) -> IntervalClosedType: return self.dtype.inclusive @property - def closed(self) -> IntervalClosedType: + def closed(self) -> IntervalInclusiveType: """ Whether the intervals are closed on the left-side, right-side, both or neither. @@ -1426,7 +1426,9 @@ def closed(self) -> IntervalClosedType: ), } ) - def set_closed(self: IntervalArrayT, closed: IntervalClosedType) -> IntervalArrayT: + def set_closed( + self: IntervalArrayT, closed: IntervalInclusiveType + ) -> IntervalArrayT: warnings.warn( "set_closed is deprecated and will be removed in a future version. " "Use set_inclusive instead.", @@ -1478,7 +1480,7 @@ def set_closed(self: IntervalArrayT, closed: IntervalClosedType) -> IntervalArra } ) def set_inclusive( - self: IntervalArrayT, inclusive: IntervalClosedType + self: IntervalArrayT, inclusive: IntervalInclusiveType ) -> IntervalArrayT: if inclusive not in VALID_CLOSED: msg = f"invalid option for 'inclusive': {inclusive}" diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 16e7559e4d153..78096d836f5b0 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -38,6 +38,7 @@ from pandas._typing import ( Dtype, DtypeObj, + IntervalInclusiveType, Ordered, npt, type_t, @@ -1091,7 +1092,7 @@ class IntervalDtype(PandasExtensionDtype): def __new__( cls, subtype=None, - inclusive: str_type | None = None, + inclusive: IntervalInclusiveType | None = None, closed: None | lib.NoDefault = lib.no_default, ): from pandas.core.dtypes.common import ( @@ -1140,7 +1141,11 @@ def __new__( "'inclusive' keyword does not match value " "specified in dtype string" ) - inclusive = gd["inclusive"] + # Incompatible types in assignment (expression has type + # "Union[str, Any]", variable has type + # "Optional[Union[Literal['left', 'right'], + # Literal['both', 'neither']]]") + inclusive = gd["inclusive"] # type: ignore[assignment] try: subtype = pandas_dtype(subtype) diff --git 
a/pandas/core/generic.py b/pandas/core/generic.py index 8089fc58db07d..e2f4574abe5a0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -48,7 +48,7 @@ IgnoreRaise, IndexKeyFunc, IndexLabel, - IntervalClosedType, + IntervalInclusiveType, JSONSerializable, Level, Manager, @@ -8066,7 +8066,7 @@ def between_time( end_time, include_start: bool_t | lib.NoDefault = lib.no_default, include_end: bool_t | lib.NoDefault = lib.no_default, - inclusive: IntervalClosedType | None = None, + inclusive: IntervalInclusiveType | None = None, axis=None, ) -> NDFrameT: """ @@ -8172,7 +8172,7 @@ def between_time( left = True if include_start is lib.no_default else include_start right = True if include_end is lib.no_default else include_end - inc_dict: dict[tuple[bool_t, bool_t], IntervalClosedType] = { + inc_dict: dict[tuple[bool_t, bool_t], IntervalInclusiveType] = { (True, True): "both", (True, False): "left", (False, True): "right", diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 18c0d56abbeb4..6aa2ff91ba933 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -35,7 +35,7 @@ from pandas._typing import ( Dtype, DtypeObj, - IntervalClosedType, + IntervalInclusiveType, IntervalLeftRight, npt, ) @@ -920,7 +920,7 @@ def date_range( normalize: bool = False, name: Hashable = None, closed: Literal["left", "right"] | None | lib.NoDefault = lib.no_default, - inclusive: IntervalClosedType | None = None, + inclusive: IntervalInclusiveType | None = None, **kwargs, ) -> DatetimeIndex: """ @@ -1126,7 +1126,7 @@ def bdate_range( weekmask=None, holidays=None, closed: IntervalLeftRight | lib.NoDefault | None = lib.no_default, - inclusive: IntervalClosedType | None = None, + inclusive: IntervalInclusiveType | None = None, **kwargs, ) -> DatetimeIndex: """ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index b1f839daa694d..ced675fe9a3cf 100644 --- a/pandas/core/indexes/interval.py 
+++ b/pandas/core/indexes/interval.py @@ -30,7 +30,7 @@ from pandas._typing import ( Dtype, DtypeObj, - IntervalClosedType, + IntervalInclusiveType, npt, ) from pandas.errors import InvalidIndexError @@ -198,7 +198,7 @@ class IntervalIndex(ExtensionIndex): _typ = "intervalindex" # annotate properties pinned via inherit_names - inclusive: IntervalClosedType + inclusive: IntervalInclusiveType is_non_overlapping_monotonic: bool closed_left: bool closed_right: bool @@ -217,7 +217,7 @@ class IntervalIndex(ExtensionIndex): def __new__( cls, data, - inclusive=None, + inclusive: IntervalInclusiveType | None = None, dtype: Dtype | None = None, copy: bool = False, name: Hashable = None, @@ -266,7 +266,7 @@ def closed(self): def from_breaks( cls, breaks, - inclusive=None, + inclusive: IntervalInclusiveType | None = None, name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, @@ -302,7 +302,7 @@ def from_arrays( cls, left, right, - inclusive=None, + inclusive: IntervalInclusiveType | None = None, name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, @@ -337,7 +337,7 @@ def from_arrays( def from_tuples( cls, data, - inclusive=None, + inclusive: IntervalInclusiveType | None = None, name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, @@ -989,7 +989,7 @@ def interval_range( periods=None, freq=None, name: Hashable = None, - inclusive: IntervalClosedType | None = None, + inclusive: IntervalInclusiveType | None = None, ) -> IntervalIndex: """ Return a fixed frequency IntervalIndex. 
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 10b607da45ca8..0461fbfc6faa8 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -25,6 +25,7 @@ Axis, FilePath, IndexLabel, + IntervalInclusiveType, Level, QuantileInterpolation, Scalar, @@ -3479,7 +3480,7 @@ def highlight_between( axis: Axis | None = 0, left: Scalar | Sequence | None = None, right: Scalar | Sequence | None = None, - inclusive: str = "both", + inclusive: IntervalInclusiveType = "both", props: str | None = None, ) -> Styler: """ @@ -3584,7 +3585,7 @@ def highlight_quantile( q_left: float = 0.0, q_right: float = 1.0, interpolation: QuantileInterpolation = "linear", - inclusive: str = "both", + inclusive: IntervalInclusiveType = "both", props: str | None = None, ) -> Styler: """ @@ -3969,7 +3970,7 @@ def _highlight_between( props: str, left: Scalar | Sequence | np.ndarray | NDFrame | None = None, right: Scalar | Sequence | np.ndarray | NDFrame | None = None, - inclusive: bool | str = True, + inclusive: bool | IntervalInclusiveType = True, ) -> np.ndarray: """ Return an array of css props based on condition of data values within given range. diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index caa191dc78493..3676e6eb0091e 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -14,6 +14,7 @@ import numpy as np +from pandas._typing import IntervalInclusiveType from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( @@ -487,7 +488,7 @@ def validate_endpoints(closed: str | None) -> tuple[bool, bool]: return left_closed, right_closed -def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]: +def validate_inclusive(inclusive: IntervalInclusiveType | None) -> tuple[bool, bool]: """ Check that the `inclusive` argument is among {"both", "neither", "left", "right"}.
This makes the usage of IntervalInclusiveType consistent. Also renamed for consistency with new argument
https://api.github.com/repos/pandas-dev/pandas/pulls/47646
2022-07-08T20:03:59Z
2022-07-09T14:36:07Z
2022-07-09T14:36:07Z
2022-07-09T20:47:33Z
ENH/TST: Add TestBaseArithmeticOps tests for ArrowExtensionArray #47601
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 2ab710a5762d3..5db859897b663 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -27,6 +27,7 @@ pa_version_under5p0, pa_version_under6p0, pa_version_under7p0, + pa_version_under8p0, ) if TYPE_CHECKING: @@ -158,4 +159,5 @@ def get_lzma_file() -> type[lzma.LZMAFile]: "pa_version_under5p0", "pa_version_under6p0", "pa_version_under7p0", + "pa_version_under8p0", ] diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 92aedbb836b38..07b09d78016fd 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -57,6 +57,76 @@ "ge": pc.greater_equal, } + ARROW_LOGICAL_FUNCS = { + "and": NotImplemented if pa_version_under2p0 else pc.and_kleene, + "rand": NotImplemented + if pa_version_under2p0 + else lambda x, y: pc.and_kleene(y, x), + "or": NotImplemented if pa_version_under2p0 else pc.or_kleene, + "ror": NotImplemented + if pa_version_under2p0 + else lambda x, y: pc.or_kleene(y, x), + "xor": NotImplemented if pa_version_under2p0 else pc.xor, + "rxor": NotImplemented if pa_version_under2p0 else lambda x, y: pc.xor(y, x), + } + + def cast_for_truediv( + arrow_array: pa.ChunkedArray, pa_object: pa.Array | pa.Scalar + ) -> pa.ChunkedArray: + # Ensure int / int -> float mirroring Python/Numpy behavior + # as pc.divide_checked(int, int) -> int + if pa.types.is_integer(arrow_array.type) and pa.types.is_integer( + pa_object.type + ): + return arrow_array.cast(pa.float64()) + return arrow_array + + def floordiv_compat( + left: pa.ChunkedArray | pa.Array | pa.Scalar, + right: pa.ChunkedArray | pa.Array | pa.Scalar, + ) -> pa.ChunkedArray: + # Ensure int // int -> int mirroring Python/Numpy behavior + # as pc.floor(pc.divide_checked(int, int)) -> float + result = pc.floor(pc.divide_checked(left, right)) + if pa.types.is_integer(left.type) and pa.types.is_integer(right.type): + result = result.cast(left.type) + return result + + 
ARROW_ARITHMETIC_FUNCS = { + "add": NotImplemented if pa_version_under2p0 else pc.add_checked, + "radd": NotImplemented + if pa_version_under2p0 + else lambda x, y: pc.add_checked(y, x), + "sub": NotImplemented if pa_version_under2p0 else pc.subtract_checked, + "rsub": NotImplemented + if pa_version_under2p0 + else lambda x, y: pc.subtract_checked(y, x), + "mul": NotImplemented if pa_version_under2p0 else pc.multiply_checked, + "rmul": NotImplemented + if pa_version_under2p0 + else lambda x, y: pc.multiply_checked(y, x), + "truediv": NotImplemented + if pa_version_under2p0 + else lambda x, y: pc.divide_checked(cast_for_truediv(x, y), y), + "rtruediv": NotImplemented + if pa_version_under2p0 + else lambda x, y: pc.divide_checked(y, cast_for_truediv(x, y)), + "floordiv": NotImplemented + if pa_version_under2p0 + else lambda x, y: floordiv_compat(x, y), + "rfloordiv": NotImplemented + if pa_version_under2p0 + else lambda x, y: floordiv_compat(y, x), + "mod": NotImplemented, + "rmod": NotImplemented, + "divmod": NotImplemented, + "rdivmod": NotImplemented, + "pow": NotImplemented if pa_version_under2p0 else pc.power_checked, + "rpow": NotImplemented + if pa_version_under2p0 + else lambda x, y: pc.power_checked(y, x), + } + if TYPE_CHECKING: from pandas import Series @@ -74,6 +144,7 @@ def to_pyarrow_type( elif isinstance(dtype, pa.DataType): pa_dtype = dtype elif dtype: + # Accepts python types too pa_dtype = pa.from_numpy_dtype(dtype) else: pa_dtype = None @@ -263,6 +334,28 @@ def _cmp_method(self, other, op): result = result.to_numpy() return BooleanArray._from_sequence(result) + def _evaluate_op_method(self, other, op, arrow_funcs): + pc_func = arrow_funcs[op.__name__] + if pc_func is NotImplemented: + raise NotImplementedError(f"{op.__name__} not implemented.") + if isinstance(other, ArrowExtensionArray): + result = pc_func(self._data, other._data) + elif isinstance(other, (np.ndarray, list)): + result = pc_func(self._data, pa.array(other, from_pandas=True)) + elif 
is_scalar(other): + result = pc_func(self._data, pa.scalar(other)) + else: + raise NotImplementedError( + f"{op.__name__} not implemented for {type(other)}" + ) + return type(self)(result) + + def _logical_method(self, other, op): + return self._evaluate_op_method(other, op, ARROW_LOGICAL_FUNCS) + + def _arith_method(self, other, op): + return self._evaluate_op_method(other, op, ARROW_ARITHMETIC_FUNCS) + def equals(self, other) -> bool: if not isinstance(other, ArrowExtensionArray): return False diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py index 7421645baa463..f884264e9ab75 100644 --- a/pandas/core/strings/object_array.py +++ b/pandas/core/strings/object_array.py @@ -360,7 +360,7 @@ def _str_get_dummies(self, sep="|"): arr = Series(self).fillna("") try: arr = sep + arr + sep - except TypeError: + except (TypeError, NotImplementedError): arr = sep + arr.astype(str) + sep tags: set[str] = set() diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index b563f84207b22..a5eb6189db6f1 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -101,7 +101,7 @@ def test_add(dtype, request): "unsupported operand type(s) for +: 'ArrowStringArray' and " "'ArrowStringArray'" ) - mark = pytest.mark.xfail(raises=TypeError, reason=reason) + mark = pytest.mark.xfail(raises=NotImplementedError, reason=reason) request.node.add_marker(mark) a = pd.Series(["a", "b", "c", None, None], dtype=dtype) @@ -142,7 +142,7 @@ def test_add_2d(dtype, request): def test_add_sequence(dtype, request): if dtype.storage == "pyarrow": reason = "unsupported operand type(s) for +: 'ArrowStringArray' and 'list'" - mark = pytest.mark.xfail(raises=TypeError, reason=reason) + mark = pytest.mark.xfail(raises=NotImplementedError, reason=reason) request.node.add_marker(mark) a = pd.array(["a", "b", None, None], dtype=dtype) @@ -160,7 +160,7 @@ def test_add_sequence(dtype, 
request): def test_mul(dtype, request): if dtype.storage == "pyarrow": reason = "unsupported operand type(s) for *: 'ArrowStringArray' and 'int'" - mark = pytest.mark.xfail(raises=TypeError, reason=reason) + mark = pytest.mark.xfail(raises=NotImplementedError, reason=reason) request.node.add_marker(mark) a = pd.array(["a", "b", None], dtype=dtype) diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index a1d232b737da7..569782e55fd72 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -67,10 +67,10 @@ class BaseArithmeticOpsTests(BaseOpsUtil): * divmod_exc = TypeError """ - series_scalar_exc: type[TypeError] | None = TypeError - frame_scalar_exc: type[TypeError] | None = TypeError - series_array_exc: type[TypeError] | None = TypeError - divmod_exc: type[TypeError] | None = TypeError + series_scalar_exc: type[Exception] | None = TypeError + frame_scalar_exc: type[Exception] | None = TypeError + series_array_exc: type[Exception] | None = TypeError + divmod_exc: type[Exception] | None = TypeError def test_arith_series_with_scalar(self, data, all_arithmetic_operators): # series & scalar diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 7e0792a6010a7..ef576692c83b6 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -24,6 +24,7 @@ from pandas.compat import ( pa_version_under2p0, pa_version_under3p0, + pa_version_under8p0, ) import pandas as pd @@ -179,6 +180,16 @@ def data_missing_for_sorting(data_for_grouping): ) +@pytest.fixture +def data_for_twos(data): + """Length-100 array in which all the elements are two.""" + pa_dtype = data.dtype.pyarrow_dtype + if pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype): + return pd.array([2] * 100, dtype=data.dtype) + # tests will be xfailed where 2 is not a valid scalar for pa_dtype + return data + + @pytest.fixture def na_value(): """The scalar missing value 
for this type. Default 'None'""" @@ -1211,6 +1222,20 @@ def test_EA_types(self, engine, data, request): class TestBaseMethods(base.BaseMethodsTests): + @pytest.mark.parametrize("periods", [1, -2]) + def test_diff(self, data, periods, request): + pa_dtype = data.dtype.pyarrow_dtype + if pa.types.is_unsigned_integer(pa_dtype) and periods == 1: + request.node.add_marker( + pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason=( + f"diff with {pa_dtype} and periods={periods} will overflow" + ), + ) + ) + super().test_diff(data, periods) + @pytest.mark.parametrize("dropna", [True, False]) def test_value_counts(self, all_data, dropna, request): pa_dtype = all_data.dtype.pyarrow_dtype @@ -1491,6 +1516,325 @@ def test_where_series(self, data, na_value, as_frame, request, using_array_manag super().test_where_series(data, na_value, as_frame) +class TestBaseArithmeticOps(base.BaseArithmeticOpsTests): + + divmod_exc = NotImplementedError + + def _patch_combine(self, obj, other, op): + # BaseOpsUtil._combine can upcast expected dtype + # (because it generates expected on python scalars) + # while ArrowExtensionArray maintains original type + expected = base.BaseArithmeticOpsTests._combine(self, obj, other, op) + was_frame = False + if isinstance(expected, pd.DataFrame): + was_frame = True + expected_data = expected.iloc[:, 0] + original_dtype = obj.iloc[:, 0].dtype + else: + expected_data = expected + original_dtype = obj.dtype + pa_array = pa.array(expected_data._values).cast(original_dtype.pyarrow_dtype) + pd_array = type(expected_data._values)(pa_array) + if was_frame: + expected = pd.DataFrame( + pd_array, index=expected.index, columns=expected.columns + ) + else: + expected = pd.Series(pd_array) + return expected + + def test_arith_series_with_scalar( + self, data, all_arithmetic_operators, request, monkeypatch + ): + pa_dtype = data.dtype.pyarrow_dtype + + arrow_temporal_supported = not pa_version_under8p0 and ( + all_arithmetic_operators in ("__add__", "__radd__") + and 
pa.types.is_duration(pa_dtype) + or all_arithmetic_operators in ("__sub__", "__rsub__") + and pa.types.is_temporal(pa_dtype) + ) + if ( + all_arithmetic_operators + in { + "__mod__", + "__rmod__", + } + or pa_version_under2p0 + ): + self.series_scalar_exc = NotImplementedError + elif arrow_temporal_supported: + self.series_scalar_exc = None + elif not ( + pa.types.is_floating(pa_dtype) + or pa.types.is_integer(pa_dtype) + or arrow_temporal_supported + ): + self.series_scalar_exc = pa.ArrowNotImplementedError + else: + self.series_scalar_exc = None + if ( + all_arithmetic_operators == "__rpow__" + and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)) + and not pa_version_under2p0 + ): + request.node.add_marker( + pytest.mark.xfail( + reason=( + f"GH 29997: 1**pandas.NA == 1 while 1**pyarrow.NA == NULL " + f"for {pa_dtype}" + ) + ) + ) + elif arrow_temporal_supported: + request.node.add_marker( + pytest.mark.xfail( + raises=TypeError, + reason=( + f"{all_arithmetic_operators} not supported between" + f"pd.NA and {pa_dtype} Python scalar" + ), + ) + ) + elif ( + all_arithmetic_operators in {"__rtruediv__", "__rfloordiv__"} + and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)) + and not pa_version_under2p0 + ): + request.node.add_marker( + pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason="divide by 0", + ) + ) + if all_arithmetic_operators == "__floordiv__" and pa.types.is_integer(pa_dtype): + # BaseOpsUtil._combine always returns int64, while ArrowExtensionArray does + # not upcast + monkeypatch.setattr(TestBaseArithmeticOps, "_combine", self._patch_combine) + super().test_arith_series_with_scalar(data, all_arithmetic_operators) + + def test_arith_frame_with_scalar( + self, data, all_arithmetic_operators, request, monkeypatch + ): + pa_dtype = data.dtype.pyarrow_dtype + + arrow_temporal_supported = not pa_version_under8p0 and ( + all_arithmetic_operators in ("__add__", "__radd__") + and pa.types.is_duration(pa_dtype) + or 
all_arithmetic_operators in ("__sub__", "__rsub__") + and pa.types.is_temporal(pa_dtype) + ) + if ( + all_arithmetic_operators + in { + "__mod__", + "__rmod__", + } + or pa_version_under2p0 + ): + self.frame_scalar_exc = NotImplementedError + elif arrow_temporal_supported: + self.frame_scalar_exc = None + elif not (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)): + self.frame_scalar_exc = pa.ArrowNotImplementedError + else: + self.frame_scalar_exc = None + if ( + all_arithmetic_operators == "__rpow__" + and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)) + and not pa_version_under2p0 + ): + request.node.add_marker( + pytest.mark.xfail( + reason=( + f"GH 29997: 1**pandas.NA == 1 while 1**pyarrow.NA == NULL " + f"for {pa_dtype}" + ) + ) + ) + elif arrow_temporal_supported: + request.node.add_marker( + pytest.mark.xfail( + raises=TypeError, + reason=( + f"{all_arithmetic_operators} not supported between" + f"pd.NA and {pa_dtype} Python scalar" + ), + ) + ) + elif ( + all_arithmetic_operators in {"__rtruediv__", "__rfloordiv__"} + and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)) + and not pa_version_under2p0 + ): + request.node.add_marker( + pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason="divide by 0", + ) + ) + if all_arithmetic_operators == "__floordiv__" and pa.types.is_integer(pa_dtype): + # BaseOpsUtil._combine always returns int64, while ArrowExtensionArray does + # not upcast + monkeypatch.setattr(TestBaseArithmeticOps, "_combine", self._patch_combine) + super().test_arith_frame_with_scalar(data, all_arithmetic_operators) + + def test_arith_series_with_array( + self, data, all_arithmetic_operators, request, monkeypatch + ): + pa_dtype = data.dtype.pyarrow_dtype + + arrow_temporal_supported = not pa_version_under8p0 and ( + all_arithmetic_operators in ("__add__", "__radd__") + and pa.types.is_duration(pa_dtype) + or all_arithmetic_operators in ("__sub__", "__rsub__") + and 
pa.types.is_temporal(pa_dtype) + ) + if ( + all_arithmetic_operators + in { + "__mod__", + "__rmod__", + } + or pa_version_under2p0 + ): + self.series_array_exc = NotImplementedError + elif arrow_temporal_supported: + self.series_array_exc = None + elif not (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)): + self.series_array_exc = pa.ArrowNotImplementedError + else: + self.series_array_exc = None + if ( + all_arithmetic_operators == "__rpow__" + and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)) + and not pa_version_under2p0 + ): + request.node.add_marker( + pytest.mark.xfail( + reason=( + f"GH 29997: 1**pandas.NA == 1 while 1**pyarrow.NA == NULL " + f"for {pa_dtype}" + ) + ) + ) + elif ( + all_arithmetic_operators + in ( + "__sub__", + "__rsub__", + ) + and pa.types.is_unsigned_integer(pa_dtype) + and not pa_version_under2p0 + ): + request.node.add_marker( + pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason=( + f"Implemented pyarrow.compute.subtract_checked " + f"which raises on overflow for {pa_dtype}" + ), + ) + ) + elif arrow_temporal_supported: + request.node.add_marker( + pytest.mark.xfail( + raises=TypeError, + reason=( + f"{all_arithmetic_operators} not supported between" + f"pd.NA and {pa_dtype} Python scalar" + ), + ) + ) + elif ( + all_arithmetic_operators in {"__rtruediv__", "__rfloordiv__"} + and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)) + and not pa_version_under2p0 + ): + request.node.add_marker( + pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason="divide by 0", + ) + ) + op_name = all_arithmetic_operators + ser = pd.Series(data) + # pd.Series([ser.iloc[0]] * len(ser)) may not return ArrowExtensionArray + # since ser.iloc[0] is a python scalar + other = pd.Series(pd.array([ser.iloc[0]] * len(ser), dtype=data.dtype)) + if pa.types.is_floating(pa_dtype) or ( + pa.types.is_integer(pa_dtype) and all_arithmetic_operators != "__truediv__" + ): + monkeypatch.setattr(TestBaseArithmeticOps, 
"_combine", self._patch_combine) + self.check_opname(ser, op_name, other, exc=self.series_array_exc) + + def test_add_series_with_extension_array(self, data, request): + pa_dtype = data.dtype.pyarrow_dtype + if ( + not ( + pa.types.is_integer(pa_dtype) + or pa.types.is_floating(pa_dtype) + or (not pa_version_under8p0 and pa.types.is_duration(pa_dtype)) + ) + or pa_version_under2p0 + ): + request.node.add_marker( + pytest.mark.xfail( + raises=NotImplementedError, + reason=f"add_checked not implemented for {pa_dtype}", + ) + ) + super().test_add_series_with_extension_array(data) + + +class TestBaseComparisonOps(base.BaseComparisonOpsTests): + def assert_series_equal(self, left, right, *args, **kwargs): + # Series.combine for "expected" retains bool[pyarrow] dtype + # While "result" return "boolean" dtype + right = pd.Series(right._values.to_numpy(), dtype="boolean") + super().assert_series_equal(left, right, *args, **kwargs) + + def test_compare_array(self, data, comparison_op, na_value, request): + pa_dtype = data.dtype.pyarrow_dtype + ser = pd.Series(data) + # pd.Series([ser.iloc[0]] * len(ser)) may not return ArrowExtensionArray + # since ser.iloc[0] is a python scalar + other = pd.Series(pd.array([ser.iloc[0]] * len(ser), dtype=data.dtype)) + if comparison_op.__name__ in ["eq", "ne"]: + # comparison should match point-wise comparisons + result = comparison_op(ser, other) + # Series.combine does not calculate the NA mask correctly + # when comparing over an array + assert result[8] is na_value + assert result[97] is na_value + expected = ser.combine(other, comparison_op) + expected[8] = na_value + expected[97] = na_value + self.assert_series_equal(result, expected) + + else: + exc = None + try: + result = comparison_op(ser, other) + except Exception as err: + exc = err + + if exc is None: + # Didn't error, then should match point-wise behavior + if pa.types.is_temporal(pa_dtype): + # point-wise comparison with pd.NA raises TypeError + assert result[8] is na_value 
+ assert result[97] is na_value + result = result.drop([8, 97]).reset_index(drop=True) + ser = ser.drop([8, 97]) + other = other.drop([8, 97]) + expected = ser.combine(other, comparison_op) + self.assert_series_equal(result, expected) + else: + with pytest.raises(type(exc)): + ser.combine(other, comparison_op) + + def test_arrowdtype_construct_from_string_type_with_unsupported_parameters(): with pytest.raises(NotImplementedError, match="Passing pyarrow type"): ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]") diff --git a/pandas/tests/strings/test_api.py b/pandas/tests/strings/test_api.py index 974ecc152f17b..d76ed65be9e1b 100644 --- a/pandas/tests/strings/test_api.py +++ b/pandas/tests/strings/test_api.py @@ -132,7 +132,7 @@ def test_api_for_categorical(any_string_method, any_string_dtype, request): any_string_dtype == "string" and get_option("string_storage") == "pyarrow" ): # unsupported operand type(s) for +: 'ArrowStringArray' and 'str' - mark = pytest.mark.xfail(raises=TypeError, reason="Not Implemented") + mark = pytest.mark.xfail(raises=NotImplementedError, reason="Not Implemented") request.node.add_marker(mark) s = Series(list("aabb"), dtype=any_string_dtype)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). Generally the xfails correspond to: * pyarrow < 2 not having the *_checked ops * pyarrow < 8 not supporting arithmetic with some temporal types * pyarrow not having mod/rmod compute functions * `1**pandas.NA == 1` while `1**pyarrow.NA == NULL`
https://api.github.com/repos/pandas-dev/pandas/pulls/47645
2022-07-08T19:59:06Z
2022-07-16T02:18:52Z
2022-07-16T02:18:52Z
2022-07-17T17:30:59Z