title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
TST/CI: xfail test_round_sanity for 32 bit | diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index b6559385e1597..0dd3a88670ece 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -14,6 +14,7 @@
iNaT,
)
from pandas._libs.tslibs.dtypes import NpyDatetimeUnit
+from pandas.compat import IS64
from pandas.errors import OutOfBoundsTimedelta
import pandas as pd
@@ -690,6 +691,7 @@ def test_round_implementation_bounds(self):
with pytest.raises(OverflowError, match=msg):
Timedelta.max.ceil("s")
+ @pytest.mark.xfail(not IS64, reason="Failing on 32 bit build", strict=False)
@given(val=st.integers(min_value=iNaT + 1, max_value=lib.i8max))
@pytest.mark.parametrize(
"method", [Timedelta.round, Timedelta.floor, Timedelta.ceil]
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index 2146e32a437a9..cc11037660ad2 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -21,6 +21,7 @@
)
from pandas._libs.tslibs.dtypes import NpyDatetimeUnit
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
+from pandas.compat import IS64
import pandas.util._test_decorators as td
import pandas._testing as tm
@@ -297,6 +298,7 @@ def test_round_implementation_bounds(self):
with pytest.raises(OverflowError, match=msg):
Timestamp.max.ceil("s")
+ @pytest.mark.xfail(not IS64, reason="Failing on 32 bit build", strict=False)
@given(val=st.integers(iNaT + 1, lib.i8max))
@pytest.mark.parametrize(
"method", [Timestamp.round, Timestamp.floor, Timestamp.ceil]
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
xfailing just to get the CI to green. Might be addressable in the future cc @jbrockmendel
| https://api.github.com/repos/pandas-dev/pandas/pulls/47803 | 2022-07-20T16:32:48Z | 2022-07-22T19:38:10Z | 2022-07-22T19:38:10Z | 2022-07-23T11:06:04Z |
TST: Test series construct of dtype timedelta64 #35465 | diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 4e4ee4fd12d5f..de9a682acdfd6 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1879,6 +1879,28 @@ def test_constructor_bool_dtype_missing_values(self):
expected = Series(True, index=[0], dtype="bool")
tm.assert_series_equal(result, expected)
+ def test_constructor_dtype_timedelta_alternative_construct(self):
+ # GH#35465
+ result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]")
+ expected = Series(pd.to_timedelta([1000000, 200000, 3000000], unit="ns"))
+ tm.assert_series_equal(result, expected)
+
+ def test_constructor_dtype_timedelta_ns_s(self):
+ # GH#35465
+ result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]")
+ expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]")
+ tm.assert_series_equal(result, expected)
+
+ def test_constructor_dtype_timedelta_ns_s_astype_int64(self):
+ # GH#35465
+ result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]").astype(
+ "int64"
+ )
+ expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]").astype(
+ "int64"
+ )
+ tm.assert_series_equal(result, expected)
+
@pytest.mark.filterwarnings(
"ignore:elementwise comparison failed:DeprecationWarning"
)
| - [x] closes #35465
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47801 | 2022-07-20T15:25:24Z | 2022-07-27T16:58:17Z | 2022-07-27T16:58:17Z | 2022-07-27T16:58:23Z |
DEPS: drop np19 support | diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml
index 2e1ffe6d0d17e..1a06ea31ccbb8 100644
--- a/.github/workflows/sdist.yml
+++ b/.github/workflows/sdist.yml
@@ -79,9 +79,9 @@ jobs:
run: |
case "${{matrix.python-version}}" in
3.8)
- pip install numpy==1.19.5 ;;
+ pip install numpy==1.20.3 ;;
3.9)
- pip install numpy==1.19.5 ;;
+ pip install numpy==1.20.3 ;;
3.10)
pip install numpy==1.21.2 ;;
esac
diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml
index 3ab27830060b2..89ebabbbc7469 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-38-minimum_versions.yaml
@@ -17,7 +17,7 @@ dependencies:
# required dependencies
- python-dateutil=2.8.1
- - numpy=1.19.5
+ - numpy=1.20.3
- pytz=2020.1
# optional dependencies
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index 5d9bfd97030b5..605a69b26a646 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -235,7 +235,7 @@ Dependencies
================================================================ ==========================
Package Minimum supported version
================================================================ ==========================
-`NumPy <https://numpy.org>`__ 1.19.5
+`NumPy <https://numpy.org>`__ 1.20.3
`python-dateutil <https://dateutil.readthedocs.io/en/stable/>`__ 2.8.1
`pytz <https://pypi.org/project/pytz/>`__ 2020.1
================================================================ ==========================
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 4674f28744f7e..1572fdebe2643 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -416,7 +416,7 @@ If installed, we now require:
+-----------------+-----------------+----------+---------+
| Package | Minimum Version | Required | Changed |
+=================+=================+==========+=========+
-| numpy | 1.19.5 | X | X |
+| numpy | 1.20.3 | X | X |
+-----------------+-----------------+----------+---------+
| mypy (dev) | 0.971 | | X |
+-----------------+-----------------+----------+---------+
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 147134afd70c3..91d05ea66402b 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -17,7 +17,7 @@
from pandas._typing import F
from pandas.compat.numpy import (
is_numpy_dev,
- np_version_under1p20,
+ np_version_under1p21,
)
from pandas.compat.pyarrow import (
pa_version_under1p01,
@@ -152,7 +152,7 @@ def get_lzma_file() -> type[lzma.LZMAFile]:
__all__ = [
"is_numpy_dev",
- "np_version_under1p20",
+ "np_version_under1p21",
"pa_version_under1p01",
"pa_version_under2p0",
"pa_version_under3p0",
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 803f495b311b9..60ec74553a207 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -6,12 +6,11 @@
# numpy versioning
_np_version = np.__version__
_nlv = Version(_np_version)
-np_version_under1p20 = _nlv < Version("1.20")
+np_version_under1p21 = _nlv < Version("1.21")
np_version_under1p22 = _nlv < Version("1.22")
np_version_gte1p22 = _nlv >= Version("1.22")
is_numpy_dev = _nlv.dev is not None
-_min_numpy_ver = "1.19.5"
-is_numpy_min = _nlv == Version(_min_numpy_ver)
+_min_numpy_ver = "1.20.3"
if is_numpy_dev or not np_version_under1p22:
np_percentile_argname = "method"
diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py
index 84160344437b5..17622e78d1b12 100644
--- a/pandas/core/array_algos/putmask.py
+++ b/pandas/core/array_algos/putmask.py
@@ -12,7 +12,7 @@
ArrayLike,
npt,
)
-from pandas.compat import np_version_under1p20
+from pandas.compat import np_version_under1p21
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import is_list_like
@@ -66,7 +66,7 @@ def putmask_without_repeat(
mask : np.ndarray[bool]
new : Any
"""
- if np_version_under1p20:
+ if np_version_under1p21:
new = setitem_datetimelike_compat(values, mask.sum(), new)
if getattr(new, "ndim", 0) >= 1:
@@ -78,7 +78,6 @@ def putmask_without_repeat(
shape = np.shape(new)
# np.shape compat for if setitem_datetimelike_compat
# changed arraylike to list e.g. test_where_dt64_2d
-
if nlocs == shape[-1]:
# GH#30567
# If length of ``new`` is less than the length of ``values``,
diff --git a/pandas/tests/arrays/floating/test_construction.py b/pandas/tests/arrays/floating/test_construction.py
index ebce80cba237d..2dcd54f443029 100644
--- a/pandas/tests/arrays/floating/test_construction.py
+++ b/pandas/tests/arrays/floating/test_construction.py
@@ -1,8 +1,6 @@
import numpy as np
import pytest
-from pandas.compat import np_version_under1p20
-
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
@@ -54,11 +52,6 @@ def test_floating_array_disallows_float16():
def test_floating_array_disallows_Float16_dtype(request):
# GH#44715
- if np_version_under1p20:
- # https://github.com/numpy/numpy/issues/20512
- mark = pytest.mark.xfail(reason="numpy does not raise on np.dtype('Float16')")
- request.node.add_marker(mark)
-
with pytest.raises(TypeError, match="data type 'Float16' not understood"):
pd.array([1.0, 2.0], dtype="Float16")
diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py
index 9593152735ed6..1a32c995f4afa 100644
--- a/pandas/tests/arrays/sparse/test_arithmetics.py
+++ b/pandas/tests/arrays/sparse/test_arithmetics.py
@@ -3,11 +3,8 @@
import numpy as np
import pytest
-from pandas.compat import np_version_under1p20
-
import pandas as pd
import pandas._testing as tm
-from pandas.core import ops
from pandas.core.arrays.sparse import (
SparseArray,
SparseDtype,
@@ -121,19 +118,7 @@ def test_float_scalar(
self, kind, mix, all_arithmetic_functions, fill_value, scalar, request
):
op = all_arithmetic_functions
-
- if np_version_under1p20:
- if op in [operator.floordiv, ops.rfloordiv]:
- if op is operator.floordiv and scalar != 0:
- pass
- elif op is ops.rfloordiv and scalar == 0:
- pass
- else:
- mark = pytest.mark.xfail(raises=AssertionError, reason="GH#38172")
- request.node.add_marker(mark)
-
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
-
a = SparseArray(values, kind=kind, fill_value=fill_value)
self._check_numeric_ops(a, scalar, values, scalar, mix, op)
@@ -171,14 +156,6 @@ def test_float_same_index_with_nans(
):
# when sp_index are the same
op = all_arithmetic_functions
-
- if (
- np_version_under1p20
- and op is ops.rfloordiv
- and not (mix and kind == "block")
- ):
- mark = pytest.mark.xfail(raises=AssertionError, reason="GH#38172")
- request.node.add_marker(mark)
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
@@ -353,13 +330,7 @@ def test_bool_array_logical(self, kind, fill_value):
def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions, request):
op = all_arithmetic_functions
-
- if np_version_under1p20 and op in [operator.floordiv, ops.rfloordiv] and mix:
- mark = pytest.mark.xfail(raises=AssertionError, reason="GH#38172")
- request.node.add_marker(mark)
-
rdtype = "int64"
-
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
diff --git a/pandas/tests/extension/base/casting.py b/pandas/tests/extension/base/casting.py
index 4987751f31dac..0eb8123e6bdb8 100644
--- a/pandas/tests/extension/base/casting.py
+++ b/pandas/tests/extension/base/casting.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from pandas.compat import np_version_under1p20
+from pandas.compat import np_version_under1p21
import pandas.util._test_decorators as td
import pandas as pd
@@ -32,8 +32,7 @@ def test_astype_object_frame(self, all_data):
assert result._mgr.arrays[0].dtype == np.dtype(object)
# earlier numpy raises TypeError on e.g. np.dtype(np.int64) == "Int64"
- # instead of returning False
- if not np_version_under1p20:
+ if not np_version_under1p21:
# check that we can compare the dtypes
comp = result.dtypes == df.dtypes
assert not comp.any()
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index d13f6dab1cc9b..60eef0d8097e4 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -17,7 +17,6 @@
import numpy as np
import pytest
-from pandas.compat import np_version_under1p20
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import is_object_dtype
@@ -415,12 +414,9 @@ def test_astype_object_frame(self, all_data):
result = df.astype(object)
assert is_object_dtype(result._mgr.arrays[0].dtype)
- # earlier numpy raises TypeError on e.g. np.dtype(np.int64) == "Int64"
- # instead of returning False
- if not np_version_under1p20:
- # check that we can compare the dtypes
- comp = result.dtypes == df.dtypes
- assert not comp.any()
+ # check that we can compare the dtypes
+ comp = result.dtypes == df.dtypes
+ assert not comp.any()
def test_astype_str(self, data):
with tm.assert_produces_warning(FutureWarning, match="astype from Sparse"):
diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py
index 5b9883f3866e7..aa55a7c91d0e6 100644
--- a/pandas/tests/frame/indexing/test_where.py
+++ b/pandas/tests/frame/indexing/test_where.py
@@ -4,8 +4,6 @@
import numpy as np
import pytest
-from pandas.compat import np_version_under1p20
-
from pandas.core.dtypes.common import is_scalar
import pandas as pd
@@ -1006,7 +1004,6 @@ def _check_where_equivalences(df, mask, other, expected):
tm.assert_frame_equal(df, expected)
-@pytest.mark.xfail(np_version_under1p20, reason="failed on Numpy 1.19.5")
def test_where_dt64_2d():
dti = date_range("2016-01-01", periods=6)
dta = dti._data.reshape(3, 2)
diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py
index 798212f957e3c..16b82727fd069 100644
--- a/pandas/tests/frame/methods/test_quantile.py
+++ b/pandas/tests/frame/methods/test_quantile.py
@@ -1,7 +1,10 @@
import numpy as np
import pytest
-from pandas.compat.numpy import np_percentile_argname
+from pandas.compat.numpy import (
+ np_percentile_argname,
+ np_version_under1p21,
+)
import pandas as pd
from pandas import (
@@ -655,7 +658,7 @@ def compute_quantile(self, obj, qs):
result = obj.quantile(qs, numeric_only=False)
return result
- def test_quantile_ea(self, obj, index):
+ def test_quantile_ea(self, request, obj, index):
# result should be invariant to shuffling
indexer = np.arange(len(index), dtype=np.intp)
@@ -665,6 +668,11 @@ def test_quantile_ea(self, obj, index):
qs = [0.5, 0, 1]
result = self.compute_quantile(obj, qs)
+ if np_version_under1p21 and index.dtype == "timedelta64[ns]":
+ msg = "failed on Numpy 1.20.3; TypeError: data type 'Int64' not understood"
+ mark = pytest.mark.xfail(reason=msg, raises=TypeError)
+ request.node.add_marker(mark)
+
exp_dtype = index.dtype
if index.dtype == "Int64":
# match non-nullable casting behavior
@@ -700,7 +708,7 @@ def test_quantile_ea_with_na(self, obj, index):
# TODO(GH#39763): filtering can be removed after GH#39763 is fixed
@pytest.mark.filterwarnings("ignore:Using .astype to convert:FutureWarning")
- def test_quantile_ea_all_na(self, obj, index):
+ def test_quantile_ea_all_na(self, request, obj, index):
obj.iloc[:] = index._na_value
# TODO(ArrayManager): this casting should be unnecessary after GH#39763 is fixed
@@ -715,6 +723,11 @@ def test_quantile_ea_all_na(self, obj, index):
qs = [0.5, 0, 1]
result = self.compute_quantile(obj, qs)
+ if np_version_under1p21 and index.dtype == "timedelta64[ns]":
+ msg = "failed on Numpy 1.20.3; TypeError: data type 'Int64' not understood"
+ mark = pytest.mark.xfail(reason=msg, raises=TypeError)
+ request.node.add_marker(mark)
+
expected = index.take([-1, -1, -1], allow_fill=True, fill_value=index._na_value)
expected = Series(expected, index=qs, name="A")
if expected.dtype == "Int64":
@@ -722,7 +735,7 @@ def test_quantile_ea_all_na(self, obj, index):
expected = type(obj)(expected)
tm.assert_equal(result, expected)
- def test_quantile_ea_scalar(self, obj, index):
+ def test_quantile_ea_scalar(self, request, obj, index):
# scalar qs
# result should be invariant to shuffling
@@ -733,6 +746,11 @@ def test_quantile_ea_scalar(self, obj, index):
qs = 0.5
result = self.compute_quantile(obj, qs)
+ if np_version_under1p21 and index.dtype == "timedelta64[ns]":
+ msg = "failed on Numpy 1.20.3; TypeError: data type 'Int64' not understood"
+ mark = pytest.mark.xfail(reason=msg, raises=TypeError)
+ request.node.add_marker(mark)
+
exp_dtype = index.dtype
if index.dtype == "Int64":
exp_dtype = "Float64"
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index f7504e9173bf5..555d24e747f44 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -6,8 +6,6 @@
import numpy as np
import pytest
-from pandas.compat import np_version_under1p20
-
import pandas as pd
from pandas import (
DataFrame,
@@ -1316,12 +1314,6 @@ def test_replace_commutative(self, df, to_replace, exp):
)
def test_replace_replacer_dtype(self, request, replacer):
# GH26632
- if np.isscalar(replacer) and replacer.dtype.itemsize < 8:
- request.node.add_marker(
- pytest.mark.xfail(
- np_version_under1p20, reason="np.putmask doesn't coerce dtype"
- )
- )
df = DataFrame(["a"])
result = df.replace({"a": replacer, "b": replacer})
expected = DataFrame([replacer])
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 2a116c992231b..fdf741040407f 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -10,7 +10,6 @@
import numpy as np
import pytest
-from pandas.compat.numpy import is_numpy_min
from pandas.errors import IndexingError
import pandas.util._test_decorators as td
@@ -1199,7 +1198,6 @@ def test_iloc_getitem_int_single_ea_block_view(self):
arr[2] = arr[-1]
assert ser[0] == arr[-1]
- @pytest.mark.xfail(is_numpy_min, reason="Column A gets coerced to integer type")
def test_iloc_setitem_multicolumn_to_datetime(self, using_array_manager):
# GH#20511
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index de9a682acdfd6..fc17f0b942d09 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -13,7 +13,6 @@
iNaT,
lib,
)
-from pandas.compat.numpy import np_version_under1p20
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
@@ -1904,9 +1903,6 @@ def test_constructor_dtype_timedelta_ns_s_astype_int64(self):
@pytest.mark.filterwarnings(
"ignore:elementwise comparison failed:DeprecationWarning"
)
- @pytest.mark.xfail(
- np_version_under1p20, reason="np.array([td64nat, float, float]) raises"
- )
@pytest.mark.parametrize("func", [Series, DataFrame, Index, pd.array])
def test_constructor_mismatched_null_nullable_dtype(
self, func, any_numeric_ea_dtype
diff --git a/setup.cfg b/setup.cfg
index b191930acf4c5..8f7cfc288ecdb 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -31,9 +31,7 @@ project_urls =
[options]
packages = find:
install_requires =
- numpy>=1.18.5; platform_machine!='aarch64' and platform_machine!='arm64' and python_version<'3.10'
- numpy>=1.19.2; platform_machine=='aarch64' and python_version<'3.10'
- numpy>=1.20.0; platform_machine=='arm64' and python_version<'3.10'
+ numpy>=1.20.3; python_version<'3.10'
numpy>=1.21.0; python_version>='3.10'
python-dateutil>=2.8.1
pytz>=2020.1
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/47796 | 2022-07-20T05:59:17Z | 2022-08-01T23:31:34Z | 2022-08-01T23:31:34Z | 2022-11-18T02:18:01Z |
Backport PR #47792 on branch 1.4.x (DOC: Fix versionadded for callable in on_bad_lines) | diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index 3e792786b863a..d3a8b01e1da7b 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -371,6 +371,8 @@
.. versionadded:: 1.3.0
+ .. versionadded:: 1.4.0
+
- callable, function with signature
``(bad_line: list[str]) -> list[str] | None`` that will process a single
bad line. ``bad_line`` is a list of strings split by the ``sep``.
@@ -379,8 +381,6 @@
expected, a ``ParserWarning`` will be emitted while dropping extra elements.
Only supported when ``engine="python"``
- .. versionadded:: 1.4.0
-
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
| Backport PR #47792 | https://api.github.com/repos/pandas-dev/pandas/pulls/47795 | 2022-07-20T00:38:36Z | 2022-07-20T13:36:35Z | 2022-07-20T13:36:35Z | 2022-08-01T18:08:46Z |
BUG: fixed OutOfBoundsDatetime exception when errors=coerce #45319 | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index a0d33cb513722..3a9c24251b12f 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -897,6 +897,7 @@ Datetimelike
- Bug in :meth:`DatetimeIndex.resolution` incorrectly returning "day" instead of "nanosecond" for nanosecond-resolution indexes (:issue:`46903`)
- Bug in :class:`Timestamp` with an integer or float value and ``unit="Y"`` or ``unit="M"`` giving slightly-wrong results (:issue:`47266`)
- Bug in :class:`.DatetimeArray` construction when passed another :class:`.DatetimeArray` and ``freq=None`` incorrectly inferring the freq from the given array (:issue:`47296`)
+- Bug in :func:`to_datetime` where ``OutOfBoundsDatetime`` would be thrown even if ``errors=coerce`` if there were more than 50 rows (:issue:`45319`)
- Bug when adding a :class:`DateOffset` to a :class:`Series` would not add the ``nanoseconds`` field (:issue:`47856`)
-
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 7ec4bc1016a9d..4739fb49e0d07 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -227,7 +227,11 @@ def _maybe_cache(
unique_dates = unique(arg)
if len(unique_dates) < len(arg):
cache_dates = convert_listlike(unique_dates, format)
- cache_array = Series(cache_dates, index=unique_dates)
+ # GH#45319
+ try:
+ cache_array = Series(cache_dates, index=unique_dates)
+ except OutOfBoundsDatetime:
+ return cache_array
# GH#39882 and GH#35888 in case of None and NaT we get duplicates
if not cache_array.index.is_unique:
cache_array = cache_array[~cache_array.index.duplicated()]
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index b128838318ac0..aa5c3324fe0f0 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -2777,3 +2777,34 @@ def test_to_datetime_monotonic_increasing_index(cache):
result = to_datetime(times.iloc[:, 0], cache=cache)
expected = times.iloc[:, 0]
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "series_length",
+ [40, start_caching_at, (start_caching_at + 1), (start_caching_at + 5)],
+)
+def test_to_datetime_cache_coerce_50_lines_outofbounds(series_length):
+ # GH#45319
+ s = Series(
+ [datetime.fromisoformat("1446-04-12 00:00:00+00:00")]
+ + ([datetime.fromisoformat("1991-10-20 00:00:00+00:00")] * series_length)
+ )
+ result1 = to_datetime(s, errors="coerce", utc=True)
+
+ expected1 = Series(
+ [NaT] + ([Timestamp("1991-10-20 00:00:00+00:00")] * series_length)
+ )
+
+ tm.assert_series_equal(result1, expected1)
+
+ result2 = to_datetime(s, errors="ignore", utc=True)
+
+ expected2 = Series(
+ [datetime.fromisoformat("1446-04-12 00:00:00+00:00")]
+ + ([datetime.fromisoformat("1991-10-20 00:00:00+00:00")] * series_length)
+ )
+
+ tm.assert_series_equal(result2, expected2)
+
+ with pytest.raises(OutOfBoundsDatetime, match="Out of bounds nanosecond timestamp"):
+ to_datetime(s, errors="raise", utc=True)
| - [ ] closes #45319
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47794 | 2022-07-19T23:56:13Z | 2022-08-15T16:49:43Z | 2022-08-15T16:49:43Z | 2022-08-15T16:49:51Z |
DOC: Fix versionadded for callable in on_bad_lines | diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index 35227dcf6a82d..4858d56d71c42 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -381,6 +381,8 @@
.. versionadded:: 1.3.0
+ .. versionadded:: 1.4.0
+
- callable, function with signature
``(bad_line: list[str]) -> list[str] | None`` that will process a single
bad line. ``bad_line`` is a list of strings split by the ``sep``.
@@ -389,8 +391,6 @@
expected, a ``ParserWarning`` will be emitted while dropping extra elements.
Only supported when ``engine="python"``
- .. versionadded:: 1.4.0
-
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
| - [x] closes #47788 (Replace xxxx with the Github issue number)
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
would backport this
| https://api.github.com/repos/pandas-dev/pandas/pulls/47792 | 2022-07-19T18:34:43Z | 2022-07-20T00:35:30Z | 2022-07-20T00:35:29Z | 2022-07-20T00:39:09Z |
Backport PR #47763 on branch 1.4.x (BUG: fix regression in Series[string] setitem setting a scalar with a mask) | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index 6ee140f59e096..6bd7378e05404 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`)
+- Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 919b882f22ecb..655ccb3a474ae 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -17,6 +17,7 @@
from pandas._typing import (
Dtype,
Scalar,
+ npt,
type_t,
)
from pandas.compat import pa_version_under1p01
@@ -413,6 +414,12 @@ def __setitem__(self, key, value):
super().__setitem__(key, value)
+ def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
+ # the super() method NDArrayBackedExtensionArray._putmask uses
+ # np.putmask which doesn't properly handle None/pd.NA, so using the
+ # base class implementation that uses __setitem__
+ ExtensionArray._putmask(self, mask, value)
+
def astype(self, dtype, copy: bool = True):
dtype = pandas_dtype(dtype)
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index b5b4007798135..24bb9df296a03 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -553,3 +553,23 @@ def test_isin(dtype, request, fixed_now_ts):
result = s.isin(["a", fixed_now_ts])
expected = pd.Series([True, False, False])
tm.assert_series_equal(result, expected)
+
+
+def test_setitem_scalar_with_mask_validation(dtype):
+ # https://github.com/pandas-dev/pandas/issues/47628
+ # setting None with a boolean mask (through _putmaks) should still result
+ # in pd.NA values in the underlying array
+ ser = pd.Series(["a", "b", "c"], dtype=dtype)
+ mask = np.array([False, True, False])
+
+ ser[mask] = None
+ assert ser.array[1] is pd.NA
+
+ # for other non-string we should also raise an error
+ ser = pd.Series(["a", "b", "c"], dtype=dtype)
+ if type(ser.array) is pd.arrays.StringArray:
+ msg = "Cannot set non-string value"
+ else:
+ msg = "Scalar must be NA or str"
+ with pytest.raises(ValueError, match=msg):
+ ser[mask] = 1
| Backport PR #47763: BUG: fix regression in Series[string] setitem setting a scalar with a mask | https://api.github.com/repos/pandas-dev/pandas/pulls/47784 | 2022-07-18T22:29:38Z | 2022-07-19T14:10:03Z | 2022-07-19T14:10:03Z | 2022-07-19T14:10:03Z |
BUG: PeriodIndex + TimedeltaArray-with-NaT | diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd
index 6e41a55f30929..352680143113d 100644
--- a/pandas/_libs/tslibs/dtypes.pxd
+++ b/pandas/_libs/tslibs/dtypes.pxd
@@ -3,7 +3,7 @@ from numpy cimport int64_t
from pandas._libs.tslibs.np_datetime cimport NPY_DATETIMEUNIT
-cdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit)
+cpdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit)
cdef NPY_DATETIMEUNIT abbrev_to_npy_unit(str abbrev)
cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) nogil
cpdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=*) except? -1
diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi
index 041c51533d8da..82f62e16c4205 100644
--- a/pandas/_libs/tslibs/dtypes.pyi
+++ b/pandas/_libs/tslibs/dtypes.pyi
@@ -8,6 +8,7 @@ _period_code_map: dict[str, int]
def periods_per_day(reso: int) -> int: ...
def periods_per_second(reso: int) -> int: ...
def is_supported_unit(reso: int) -> bool: ...
+def npy_unit_to_abbrev(reso: int) -> str: ...
class PeriodDtypeBase:
_dtype_code: int # PeriodDtypeCode
diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index a0a7ab90ebb30..c09ac2a686d5c 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -289,7 +289,7 @@ def is_supported_unit(NPY_DATETIMEUNIT reso):
)
-cdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit):
+cpdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit):
if unit == NPY_DATETIMEUNIT.NPY_FR_ns or unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC:
# generic -> default to nanoseconds
return "ns"
diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd
index 420d83909a78d..bfccedba9431e 100644
--- a/pandas/_libs/tslibs/np_datetime.pxd
+++ b/pandas/_libs/tslibs/np_datetime.pxd
@@ -101,6 +101,7 @@ cpdef cnp.ndarray astype_overflowsafe(
cnp.ndarray values, # ndarray[datetime64[anyunit]]
cnp.dtype dtype, # ndarray[datetime64[anyunit]]
bint copy=*,
+ bint round_ok=*,
)
cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1
diff --git a/pandas/_libs/tslibs/np_datetime.pyi b/pandas/_libs/tslibs/np_datetime.pyi
index 757165fbad268..d80d26375412b 100644
--- a/pandas/_libs/tslibs/np_datetime.pyi
+++ b/pandas/_libs/tslibs/np_datetime.pyi
@@ -9,7 +9,10 @@ class OutOfBoundsTimedelta(ValueError): ...
def py_get_unit_from_dtype(dtype: np.dtype): ...
def py_td64_to_tdstruct(td64: int, unit: int) -> dict: ...
def astype_overflowsafe(
- arr: np.ndarray, dtype: np.dtype, copy: bool = ...
+ arr: np.ndarray,
+ dtype: np.dtype,
+ copy: bool = ...,
+ round_ok: bool = ...,
) -> np.ndarray: ...
def is_unitless(dtype: np.dtype) -> bool: ...
def compare_mismatched_resolutions(
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 494eb5da7e107..5f8bad8398076 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -282,6 +282,7 @@ cpdef ndarray astype_overflowsafe(
ndarray values,
cnp.dtype dtype,
bint copy=True,
+ bint round_ok=True,
):
"""
Convert an ndarray with datetime64[X] to datetime64[Y]
@@ -314,10 +315,6 @@ cpdef ndarray astype_overflowsafe(
"datetime64/timedelta64 values and dtype must have a unit specified"
)
- if (<object>values).dtype.byteorder == ">":
- # GH#29684 we incorrectly get OutOfBoundsDatetime if we dont swap
- values = values.astype(values.dtype.newbyteorder("<"))
-
if from_unit == to_unit:
# Check this before allocating result for perf, might save some memory
if copy:
@@ -325,9 +322,17 @@ cpdef ndarray astype_overflowsafe(
return values
elif from_unit > to_unit:
- # e.g. ns -> us, so there is no risk of overflow, so we can use
- # numpy's astype safely. Note there _is_ risk of truncation.
- return values.astype(dtype)
+ if round_ok:
+ # e.g. ns -> us, so there is no risk of overflow, so we can use
+ # numpy's astype safely. Note there _is_ risk of truncation.
+ return values.astype(dtype)
+ else:
+ iresult2 = astype_round_check(values.view("i8"), from_unit, to_unit)
+ return iresult2.view(dtype)
+
+ if (<object>values).dtype.byteorder == ">":
+ # GH#29684 we incorrectly get OutOfBoundsDatetime if we dont swap
+ values = values.astype(values.dtype.newbyteorder("<"))
cdef:
ndarray i8values = values.view("i8")
@@ -356,10 +361,11 @@ cpdef ndarray astype_overflowsafe(
check_dts_bounds(&dts, to_unit)
except OutOfBoundsDatetime as err:
if is_td:
- tdval = np.timedelta64(value).view(values.dtype)
+ from_abbrev = np.datetime_data(values.dtype)[0]
+ np_val = np.timedelta64(value, from_abbrev)
msg = (
- "Cannot convert {tdval} to {dtype} without overflow"
- .format(tdval=str(tdval), dtype=str(dtype))
+ "Cannot convert {np_val} to {dtype} without overflow"
+ .format(np_val=str(np_val), dtype=str(dtype))
)
raise OutOfBoundsTimedelta(msg) from err
else:
@@ -453,6 +459,52 @@ cdef int op_to_op_code(op):
return Py_GT
+cdef ndarray astype_round_check(
+ ndarray i8values,
+ NPY_DATETIMEUNIT from_unit,
+ NPY_DATETIMEUNIT to_unit
+):
+ # cases with from_unit > to_unit, e.g. ns->us, raise if the conversion
+ # involves truncation, e.g. 1500ns->1us
+ cdef:
+ Py_ssize_t i, N = i8values.size
+
+ # equiv: iresult = np.empty((<object>i8values).shape, dtype="i8")
+ ndarray iresult = cnp.PyArray_EMPTY(
+ i8values.ndim, i8values.shape, cnp.NPY_INT64, 0
+ )
+ cnp.broadcast mi = cnp.PyArray_MultiIterNew2(iresult, i8values)
+
+ # Note the arguments to_unit, from unit are swapped vs how they
+ # are passed when going to a higher-frequency reso.
+ int64_t mult = get_conversion_factor(to_unit, from_unit)
+ int64_t value, mod
+
+ for i in range(N):
+ # Analogous to: item = i8values[i]
+ value = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 1))[0]
+
+ if value == NPY_DATETIME_NAT:
+ new_value = NPY_DATETIME_NAT
+ else:
+ new_value, mod = divmod(value, mult)
+ if mod != 0:
+ # TODO: avoid runtime import
+ from pandas._libs.tslibs.dtypes import npy_unit_to_abbrev
+ from_abbrev = npy_unit_to_abbrev(from_unit)
+ to_abbrev = npy_unit_to_abbrev(to_unit)
+ raise ValueError(
+ f"Cannot losslessly cast '{value} {from_abbrev}' to {to_abbrev}"
+ )
+
+ # Analogous to: iresult[i] = new_value
+ (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = new_value
+
+ cnp.PyArray_MultiIter_NEXT(mi)
+
+ return iresult
+
+
@cython.overflowcheck(True)
cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1:
"""
diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi
index ed6ddf7b02be1..c885b869f983a 100644
--- a/pandas/_libs/tslibs/offsets.pyi
+++ b/pandas/_libs/tslibs/offsets.pyi
@@ -111,6 +111,8 @@ def to_offset(freq: timedelta | str) -> BaseOffset: ...
class Tick(SingleConstructorOffset):
_reso: int
+ _prefix: str
+ _td64_unit: str
def __init__(self, n: int = ..., normalize: bool = ...) -> None: ...
@property
def delta(self) -> Timedelta: ...
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 81b59db6f0e18..5f4f6b998a60a 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -796,6 +796,7 @@ cdef class SingleConstructorOffset(BaseOffset):
cdef class Tick(SingleConstructorOffset):
_adjust_dst = False
_prefix = "undefined"
+ _td64_unit = "undefined"
_attributes = tuple(["n", "normalize"])
def __init__(self, n=1, normalize=False):
@@ -968,6 +969,7 @@ cdef class Tick(SingleConstructorOffset):
cdef class Day(Tick):
_nanos_inc = 24 * 3600 * 1_000_000_000
_prefix = "D"
+ _td64_unit = "D"
_period_dtype_code = PeriodDtypeCode.D
_reso = NPY_DATETIMEUNIT.NPY_FR_D
@@ -975,6 +977,7 @@ cdef class Day(Tick):
cdef class Hour(Tick):
_nanos_inc = 3600 * 1_000_000_000
_prefix = "H"
+ _td64_unit = "h"
_period_dtype_code = PeriodDtypeCode.H
_reso = NPY_DATETIMEUNIT.NPY_FR_h
@@ -982,6 +985,7 @@ cdef class Hour(Tick):
cdef class Minute(Tick):
_nanos_inc = 60 * 1_000_000_000
_prefix = "T"
+ _td64_unit = "m"
_period_dtype_code = PeriodDtypeCode.T
_reso = NPY_DATETIMEUNIT.NPY_FR_m
@@ -989,6 +993,7 @@ cdef class Minute(Tick):
cdef class Second(Tick):
_nanos_inc = 1_000_000_000
_prefix = "S"
+ _td64_unit = "s"
_period_dtype_code = PeriodDtypeCode.S
_reso = NPY_DATETIMEUNIT.NPY_FR_s
@@ -996,6 +1001,7 @@ cdef class Second(Tick):
cdef class Milli(Tick):
_nanos_inc = 1_000_000
_prefix = "L"
+ _td64_unit = "ms"
_period_dtype_code = PeriodDtypeCode.L
_reso = NPY_DATETIMEUNIT.NPY_FR_ms
@@ -1003,6 +1009,7 @@ cdef class Milli(Tick):
cdef class Micro(Tick):
_nanos_inc = 1000
_prefix = "U"
+ _td64_unit = "us"
_period_dtype_code = PeriodDtypeCode.U
_reso = NPY_DATETIMEUNIT.NPY_FR_us
@@ -1010,6 +1017,7 @@ cdef class Micro(Tick):
cdef class Nano(Tick):
_nanos_inc = 1
_prefix = "N"
+ _td64_unit = "ns"
_period_dtype_code = PeriodDtypeCode.N
_reso = NPY_DATETIMEUNIT.NPY_FR_ns
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 6e6de8399cc38..2d676f94c6a64 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -72,10 +72,7 @@
ABCSeries,
ABCTimedeltaArray,
)
-from pandas.core.dtypes.missing import (
- isna,
- notna,
-)
+from pandas.core.dtypes.missing import notna
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
@@ -792,20 +789,30 @@ def _add_timedelta_arraylike(
-------
result : ndarray[int64]
"""
- if not isinstance(self.freq, Tick):
+ freq = self.freq
+ if not isinstance(freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
raise TypeError(
f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}"
)
- if not np.all(isna(other)):
- delta = self._check_timedeltalike_freq_compat(other)
- else:
- # all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT
- return self + np.timedelta64("NaT")
+ dtype = np.dtype(f"m8[{freq._td64_unit}]")
+
+ try:
+ delta = astype_overflowsafe(
+ np.asarray(other), dtype=dtype, copy=False, round_ok=False
+ )
+ except ValueError as err:
+ # TODO: not actually a great exception message in this case
+ raise raise_on_incompatible(self, other) from err
+
+ b_mask = np.isnat(delta)
- ordinals = self._addsub_int_array_or_scalar(delta, operator.add).asi8
- return type(self)(ordinals, dtype=self.dtype)
+ res_values = algos.checked_add_with_arr(
+ self.asi8, delta.view("i8"), arr_mask=self._isnan, b_mask=b_mask
+ )
+ np.putmask(res_values, self._isnan | b_mask, iNaT)
+ return type(self)(res_values, freq=self.freq)
def _check_timedeltalike_freq_compat(self, other):
"""
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index 7adc407fd5de1..50f5ab8aee9dd 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -1243,6 +1243,21 @@ def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other):
with pytest.raises(TypeError, match=msg):
other - obj
+ # some but not *all* NaT
+ other = other.copy()
+ other[0] = np.timedelta64(0, "ns")
+ expected = PeriodIndex([pi[0]] + ["NaT"] * 8, freq="19D")
+ expected = tm.box_expected(expected, box_with_array)
+
+ result = obj + other
+ tm.assert_equal(result, expected)
+ result = other + obj
+ tm.assert_equal(result, expected)
+ result = obj - other
+ tm.assert_equal(result, expected)
+ with pytest.raises(TypeError, match=msg):
+ other - obj
+
# ---------------------------------------------------------------
# Unsorted
diff --git a/pandas/tests/tslibs/test_np_datetime.py b/pandas/tests/tslibs/test_np_datetime.py
index cc09f0fc77039..02edf1a093877 100644
--- a/pandas/tests/tslibs/test_np_datetime.py
+++ b/pandas/tests/tslibs/test_np_datetime.py
@@ -208,3 +208,15 @@ def test_astype_overflowsafe_td64(self):
result = astype_overflowsafe(arr, dtype2)
expected = arr.astype(dtype2)
tm.assert_numpy_array_equal(result, expected)
+
+ def test_astype_overflowsafe_disallow_rounding(self):
+ arr = np.array([-1500, 1500], dtype="M8[ns]")
+ dtype = np.dtype("M8[us]")
+
+ msg = "Cannot losslessly cast '-1500 ns' to us"
+ with pytest.raises(ValueError, match=msg):
+ astype_overflowsafe(arr, dtype, round_ok=False)
+
+ result = astype_overflowsafe(arr, dtype, round_ok=True)
+ expected = arr.astype(dtype)
+ tm.assert_numpy_array_equal(result, expected)
| Implements the same round_ok behavior in astype_overflowsafe that we use in Timestamp/Timedelta _as_reso, i.e. this will lead to more code-sharing in follow-ups. | https://api.github.com/repos/pandas-dev/pandas/pulls/47783 | 2022-07-18T21:23:11Z | 2022-07-20T00:10:24Z | 2022-07-20T00:10:24Z | 2022-07-20T16:43:19Z |
PERF: Bypass chunking/validation logic in StringDtype__from_arrow__ | diff --git a/doc/source/whatsnew/v2.1.0.rst b/doc/source/whatsnew/v2.1.0.rst
index f6a6c81bfe25d..3b24310014ff8 100644
--- a/doc/source/whatsnew/v2.1.0.rst
+++ b/doc/source/whatsnew/v2.1.0.rst
@@ -102,6 +102,7 @@ Performance improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- Performance improvement in :meth:`DataFrame.first_valid_index` and :meth:`DataFrame.last_valid_index` for extension array dtypes (:issue:`51549`)
- Performance improvement in :meth:`DataFrame.clip` and :meth:`Series.clip` (:issue:`51472`)
+- Performance improvement in :func:`read_parquet` on string columns when using ``use_nullable_dtypes=True`` (:issue:`47345`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index d3a64055f6c10..30b18bac7b73b 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -203,16 +203,19 @@ def __from_arrow__(
# pyarrow.ChunkedArray
chunks = array.chunks
- results = []
- for arr in chunks:
- # using _from_sequence to ensure None is converted to NA
- str_arr = StringArray._from_sequence(np.array(arr))
- results.append(str_arr)
-
- if results:
- return StringArray._concat_same_type(results)
+ if len(chunks) == 0:
+ arr = np.array([], dtype=object)
else:
- return StringArray(np.array([], dtype="object"))
+ arr = pyarrow.concat_arrays(chunks).to_numpy(zero_copy_only=False)
+ arr = lib.convert_nans_to_NA(arr)
+ # Bypass validation inside StringArray constructor, see GH#47781
+ new_string_array = StringArray.__new__(StringArray)
+ NDArrayBacked.__init__(
+ new_string_array,
+ arr,
+ StringDtype(storage="python"),
+ )
+ return new_string_array
class BaseStringArray(ExtensionArray):
| Instead of converting each chunk to a StringArray after casting to array and then concatenating, instead use pyarrow to concatenate chunks and convert to numpy.
Finally, bypass validation logic (unneeded as validated on parquet write) by initializing NDArrayBacked instead of StringArray.
This removes most of the performance overhead seen in #47345. There is still a slight overhead when comparing to `object` string arrays because of None -> NA conversion. I found that leaving that out still results in NA types in the example I gave (and would actually improve performance over the `object` case), but this is not consistent and thus conversion is left in.
- [x] closes #47345
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47781 | 2022-07-18T19:25:46Z | 2023-02-24T18:08:10Z | 2023-02-24T18:08:10Z | 2023-02-24T18:08:24Z |
BUG: PeriodIndex fails to handle NA, rather than putting NaT in its place (#46673) | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 7f07187e34c78..b9f2bf00355f0 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -908,6 +908,7 @@ Indexing
- Bug in :meth:`NDFrame.xs`, :meth:`DataFrame.iterrows`, :meth:`DataFrame.loc` and :meth:`DataFrame.iloc` not always propagating metadata (:issue:`28283`)
- Bug in :meth:`DataFrame.sum` min_count changes dtype if input contains NaNs (:issue:`46947`)
- Bug in :class:`IntervalTree` that lead to an infinite recursion. (:issue:`46658`)
+- Bug in :class:`PeriodIndex` raising ``AttributeError`` when indexing on ``NA``, rather than putting ``NaT`` in its place. (:issue:`46673`)
-
Missing
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 3332628627739..9b01bbc433b3b 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -43,6 +43,7 @@ from libc.time cimport (
import_datetime()
cimport pandas._libs.tslibs.util as util
+from pandas._libs.missing cimport C_NA
from pandas._libs.tslibs.np_datetime cimport (
NPY_DATETIMEUNIT,
NPY_FR_D,
@@ -1470,7 +1471,7 @@ cdef inline int64_t _extract_ordinal(object item, str freqstr, freq) except? -1:
cdef:
int64_t ordinal
- if checknull_with_nat(item):
+ if checknull_with_nat(item) or item is C_NA:
ordinal = NPY_NAT
elif util.is_integer_object(item):
if item == NPY_NAT:
diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py
index 73ac51c258a94..940aa5ffff040 100644
--- a/pandas/tests/arrays/categorical/test_indexing.py
+++ b/pandas/tests/arrays/categorical/test_indexing.py
@@ -1,12 +1,16 @@
+import math
+
import numpy as np
import pytest
from pandas import (
+ NA,
Categorical,
CategoricalIndex,
Index,
Interval,
IntervalIndex,
+ NaT,
PeriodIndex,
Series,
Timedelta,
@@ -194,6 +198,17 @@ def test_categories_assignments(self):
tm.assert_numpy_array_equal(cat.__array__(), exp)
tm.assert_index_equal(cat.categories, Index([1, 2, 3]))
+ @pytest.mark.parametrize(
+ "null_val",
+ [None, np.nan, NaT, NA, math.nan, "NaT", "nat", "NAT", "nan", "NaN", "NAN"],
+ )
+ def test_periodindex_on_null_types(self, null_val):
+ # GH 46673
+ result = PeriodIndex(["2022-04-06", "2022-04-07", null_val], freq="D")
+ expected = PeriodIndex(["2022-04-06", "2022-04-07", "NaT"], dtype="period[D]")
+ assert result[2] is NaT
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]])
def test_categories_assignments_wrong_length_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"])
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index 6bdf93c43c986..47d6cad0e1743 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -446,7 +446,7 @@ def test_astype_string_to_extension_dtype_roundtrip(
self, data, dtype, request, nullable_string_dtype
):
if dtype == "boolean" or (
- dtype in ("period[M]", "datetime64[ns]", "timedelta64[ns]") and NaT in data
+ dtype in ("datetime64[ns]", "timedelta64[ns]") and NaT in data
):
mark = pytest.mark.xfail(
reason="TODO StringArray.astype() with missing values #GH40566"
| - [x] closes #46673
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47780 | 2022-07-18T18:21:57Z | 2022-08-01T20:08:54Z | 2022-08-01T20:08:53Z | 2022-08-01T20:09:02Z |
PERF: efficient argmax/argmin for SparseArray | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index f313b49cd198d..7a90d96926475 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -802,6 +802,7 @@ Performance improvements
- Performance improvement in datetime arrays string formatting when one of the default strftime formats ``"%Y-%m-%d %H:%M:%S"`` or ``"%Y-%m-%d %H:%M:%S.%f"`` is used. (:issue:`44764`)
- Performance improvement in :meth:`Series.to_sql` and :meth:`DataFrame.to_sql` (:class:`SQLiteTable`) when processing time arrays. (:issue:`44764`)
- Performance improvements to :func:`read_sas` (:issue:`47403`, :issue:`47404`, :issue:`47405`)
+- Performance improvement in ``argmax`` and ``argmin`` for :class:`arrays.SparseArray` (:issue:`34197`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 5653d87a4570b..bf65da0412642 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -42,7 +42,10 @@
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
from pandas.util._exceptions import find_stack_level
-from pandas.util._validators import validate_insert_loc
+from pandas.util._validators import (
+ validate_bool_kwarg,
+ validate_insert_loc,
+)
from pandas.core.dtypes.astype import astype_nansafe
from pandas.core.dtypes.cast import (
@@ -1636,6 +1639,45 @@ def _min_max(self, kind: Literal["min", "max"], skipna: bool) -> Scalar:
else:
return na_value_for_dtype(self.dtype.subtype, compat=False)
+ def _argmin_argmax(self, kind: Literal["argmin", "argmax"]) -> int:
+
+ values = self._sparse_values
+ index = self._sparse_index.indices
+ mask = np.asarray(isna(values))
+ func = np.argmax if kind == "argmax" else np.argmin
+
+ idx = np.arange(values.shape[0])
+ non_nans = values[~mask]
+ non_nan_idx = idx[~mask]
+
+ _candidate = non_nan_idx[func(non_nans)]
+ candidate = index[_candidate]
+
+ if isna(self.fill_value):
+ return candidate
+ if kind == "argmin" and self[candidate] < self.fill_value:
+ return candidate
+ if kind == "argmax" and self[candidate] > self.fill_value:
+ return candidate
+ _loc = self._first_fill_value_loc()
+ if _loc == -1:
+ # fill_value doesn't exist
+ return candidate
+ else:
+ return _loc
+
+ def argmax(self, skipna: bool = True) -> int:
+ validate_bool_kwarg(skipna, "skipna")
+ if not skipna and self._hasna:
+ raise NotImplementedError
+ return self._argmin_argmax("argmax")
+
+ def argmin(self, skipna: bool = True) -> int:
+ validate_bool_kwarg(skipna, "skipna")
+ if not skipna and self._hasna:
+ raise NotImplementedError
+ return self._argmin_argmax("argmin")
+
# ------------------------------------------------------------------------
# Ufuncs
# ------------------------------------------------------------------------
diff --git a/pandas/tests/arrays/sparse/test_reductions.py b/pandas/tests/arrays/sparse/test_reductions.py
index a33a282bb4869..2dd80c52f1419 100644
--- a/pandas/tests/arrays/sparse/test_reductions.py
+++ b/pandas/tests/arrays/sparse/test_reductions.py
@@ -268,3 +268,41 @@ def test_na_value_if_no_valid_values(self, func, data, dtype, expected):
assert result is NaT or np.isnat(result)
else:
assert np.isnan(result)
+
+
+class TestArgmaxArgmin:
+ @pytest.mark.parametrize(
+ "arr,argmax_expected,argmin_expected",
+ [
+ (SparseArray([1, 2, 0, 1, 2]), 1, 2),
+ (SparseArray([-1, -2, 0, -1, -2]), 2, 1),
+ (SparseArray([np.nan, 1, 0, 0, np.nan, -1]), 1, 5),
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2]), 5, 2),
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=-1), 5, 2),
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=0), 5, 2),
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=1), 5, 2),
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=2), 5, 2),
+ (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=3), 5, 2),
+ (SparseArray([0] * 10 + [-1], fill_value=0), 0, 10),
+ (SparseArray([0] * 10 + [-1], fill_value=-1), 0, 10),
+ (SparseArray([0] * 10 + [-1], fill_value=1), 0, 10),
+ (SparseArray([-1] + [0] * 10, fill_value=0), 1, 0),
+ (SparseArray([1] + [0] * 10, fill_value=0), 0, 1),
+ (SparseArray([-1] + [0] * 10, fill_value=-1), 1, 0),
+ (SparseArray([1] + [0] * 10, fill_value=1), 0, 1),
+ ],
+ )
+ def test_argmax_argmin(self, arr, argmax_expected, argmin_expected):
+ argmax_result = arr.argmax()
+ argmin_result = arr.argmin()
+ assert argmax_result == argmax_expected
+ assert argmin_result == argmin_expected
+
+ @pytest.mark.parametrize(
+ "arr,method",
+ [(SparseArray([]), "argmax"), (SparseArray([]), "argmin")],
+ )
+ def test_empty_array(self, arr, method):
+ msg = f"attempt to get {method} of an empty sequence"
+ with pytest.raises(ValueError, match=msg):
+ arr.argmax() if method == "argmax" else arr.argmin()
| - [ ] partially closes #34197
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Thanks for the review. Currently, only argmax/argmin are implemented since argsort has many annoying corner cases and I have to spent more time on the correctness and take the follow-up in a separate PR. Simple benchmark:
```
>>> val = np.random.rand(1000000)
>>> mask = val < 0.99
>>> val[mask] = np.nan
>>> arr = SparseArray(val)
>>> %timeit arr.argmax()
8.6 ms ± 343 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) <- master
44.3 µs ± 934 ns per loop (mean ± std. dev. of 7 runs, 10,000 loops each) <- this pr
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/47779 | 2022-07-18T18:04:22Z | 2022-07-27T16:40:20Z | 2022-07-27T16:40:20Z | 2022-07-27T17:03:36Z |
Specify that both ``by`` and ``level`` should not be specified in ``groupby`` - GH40378 | diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 5d8ef7ce02097..34244a8edcbfa 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -345,6 +345,17 @@ Index level names may be supplied as keys.
More on the ``sum`` function and aggregation later.
+When using ``.groupby()`` on a DatFrame with a MultiIndex, do not specify both ``by`` and ``level``.
+The argument validation should be done in ``.groupby()``, using the name of the specific index.
+
+.. ipython:: python
+
+ df = pd.DataFrame({"col1": ["a", "b", "c"]})
+ df.index = pd.MultiIndex.from_arrays([["a", "a", "b"],
+ [1, 2, 1]],
+ names=["x", "y"])
+ df.groupby(["col1", "x"])
+
Grouping DataFrame with Index levels and columns
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A DataFrame may be grouped by a combination of columns and index levels by
| - [ ] closes #40378 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47778 | 2022-07-18T14:07:41Z | 2022-07-18T16:59:08Z | 2022-07-18T16:59:07Z | 2022-07-22T20:50:36Z |
Cln tests interval wrt inclusive | diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py
index f7f015cbe4a23..79e73fec706f1 100644
--- a/pandas/tests/arrays/test_array.py
+++ b/pandas/tests/arrays/test_array.py
@@ -298,7 +298,7 @@ def test_array_inference(data, expected):
[
# mix of frequencies
[pd.Period("2000", "D"), pd.Period("2001", "A")],
- # mix of closed
+ # mix of inclusive
[pd.Interval(0, 1, "left"), pd.Interval(1, 2, "right")],
# Mix of timezones
[pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000", tz="UTC")],
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 695b06690b358..64849c4223486 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -593,13 +593,13 @@ def test_construction_string_regex(self, subtype):
@pytest.mark.parametrize(
"subtype", ["interval[int64]", "Interval[int64]", "int64", np.dtype("int64")]
)
- def test_construction_allows_closed_none(self, subtype):
+ def test_construction_allows_inclusive_none(self, subtype):
# GH#38394
dtype = IntervalDtype(subtype)
assert dtype.inclusive is None
- def test_closed_mismatch(self):
+ def test_inclusive_mismatch(self):
msg = "'inclusive' keyword does not match value specified in dtype string"
with pytest.raises(ValueError, match=msg):
IntervalDtype("interval[int64, left]", "right")
@@ -638,7 +638,7 @@ def test_construction_errors(self, subtype):
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
- def test_closed_must_match(self):
+ def test_inclusive_must_match(self):
# GH#37933
dtype = IntervalDtype(np.float64, "left")
@@ -646,7 +646,7 @@ def test_closed_must_match(self):
with pytest.raises(ValueError, match=msg):
IntervalDtype(dtype, inclusive="both")
- def test_closed_invalid(self):
+ def test_inclusive_invalid(self):
with pytest.raises(ValueError, match="inclusive must be one of"):
IntervalDtype(np.float64, "foo")
@@ -822,7 +822,7 @@ def test_not_string(self):
# GH30568: though IntervalDtype has object kind, it cannot be string
assert not is_string_dtype(IntervalDtype())
- def test_unpickling_without_closed(self):
+ def test_unpickling_without_inclusive(self):
# GH#38394
dtype = IntervalDtype("interval")
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py
index 8c8998a8e4be9..a23f66d241cd9 100644
--- a/pandas/tests/indexes/interval/test_constructors.py
+++ b/pandas/tests/indexes/interval/test_constructors.py
@@ -104,8 +104,8 @@ def test_constructor_dtype(self, constructor, breaks, subtype):
timedelta_range("1 day", periods=5),
],
)
- def test_constructor_pass_closed(self, constructor, breaks):
- # not passing closed to IntervalDtype, but to IntervalArray constructor
+ def test_constructor_pass_inclusive(self, constructor, breaks):
+ # not passing inclusive to IntervalDtype, but to IntervalArray constructor
warn = None
if isinstance(constructor, partial) and constructor.func is Index:
# passing kwargs to Index is deprecated
@@ -193,7 +193,7 @@ def test_generic_errors(self, constructor):
# filler input data to be used when supplying invalid kwargs
filler = self.get_kwargs_from_breaks(range(10))
- # invalid closed
+ # invalid inclusive
msg = "inclusive must be one of 'right', 'left', 'both', 'neither'"
with pytest.raises(ValueError, match=msg):
constructor(inclusive="invalid", **filler)
@@ -399,7 +399,7 @@ def test_constructor_string(self):
pass
def test_constructor_errors(self, constructor):
- # mismatched closed within intervals with no constructor override
+ # mismatched inclusive within intervals with no constructor override
ivs = [Interval(0, 1, inclusive="right"), Interval(2, 3, inclusive="left")]
msg = "intervals must all be inclusive on the same side"
with pytest.raises(ValueError, match=msg):
@@ -420,7 +420,7 @@ def test_constructor_errors(self, constructor):
@pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning")
@pytest.mark.parametrize(
- "data, closed",
+ "data, inclusive",
[
([], "both"),
([np.nan, np.nan], "neither"),
@@ -438,14 +438,14 @@ def test_constructor_errors(self, constructor):
(IntervalIndex.from_breaks(range(5), inclusive="both"), "right"),
],
)
- def test_override_inferred_closed(self, constructor, data, closed):
+ def test_override_inferred_inclusive(self, constructor, data, inclusive):
# GH 19370
if isinstance(data, IntervalIndex):
tuples = data.to_tuples()
else:
tuples = [(iv.left, iv.right) if notna(iv) else iv for iv in data]
- expected = IntervalIndex.from_tuples(tuples, inclusive=closed)
- result = constructor(data, inclusive=closed)
+ expected = IntervalIndex.from_tuples(tuples, inclusive=inclusive)
+ result = constructor(data, inclusive=inclusive)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
@@ -460,7 +460,7 @@ def test_index_object_dtype(self, values_constructor):
assert type(result) is Index
tm.assert_numpy_array_equal(result.values, np.array(values))
- def test_index_mixed_closed(self):
+ def test_index_mixed_inclusive(self):
# GH27172
intervals = [
Interval(0, 1, inclusive="left"),
@@ -473,8 +473,8 @@ def test_index_mixed_closed(self):
tm.assert_index_equal(result, expected)
-def test_dtype_closed_mismatch():
- # GH#38394 closed specified in both dtype and IntervalIndex constructor
+def test_dtype_inclusive_mismatch():
+ # GH#38394
dtype = IntervalDtype(np.int64, "left")
diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py
index 4cf754a7e52e0..e05cb73cfe446 100644
--- a/pandas/tests/indexes/interval/test_indexing.py
+++ b/pandas/tests/indexes/interval/test_indexing.py
@@ -76,12 +76,12 @@ def test_get_loc_length_one_scalar(self, scalar, closed):
with pytest.raises(KeyError, match=str(scalar)):
index.get_loc(scalar)
- @pytest.mark.parametrize("other_closed", ["left", "right", "both", "neither"])
+ @pytest.mark.parametrize("other_inclusive", ["left", "right", "both", "neither"])
@pytest.mark.parametrize("left, right", [(0, 5), (-1, 4), (-1, 6), (6, 7)])
- def test_get_loc_length_one_interval(self, left, right, closed, other_closed):
+ def test_get_loc_length_one_interval(self, left, right, closed, other_inclusive):
# GH 20921
index = IntervalIndex.from_tuples([(0, 5)], inclusive=closed)
- interval = Interval(left, right, inclusive=other_closed)
+ interval = Interval(left, right, inclusive=other_inclusive)
if interval == index[0]:
result = index.get_loc(interval)
assert result == 0
@@ -89,7 +89,7 @@ def test_get_loc_length_one_interval(self, left, right, closed, other_closed):
with pytest.raises(
KeyError,
match=re.escape(
- f"Interval({left}, {right}, inclusive='{other_closed}')"
+ f"Interval({left}, {right}, inclusive='{other_inclusive}')"
),
):
index.get_loc(interval)
diff --git a/pandas/tests/indexes/interval/test_pickle.py b/pandas/tests/indexes/interval/test_pickle.py
index 7f5784b6d76b9..ef6db9c8a0513 100644
--- a/pandas/tests/indexes/interval/test_pickle.py
+++ b/pandas/tests/indexes/interval/test_pickle.py
@@ -1,13 +1,10 @@
-import pytest
-
from pandas import IntervalIndex
import pandas._testing as tm
class TestPickle:
- @pytest.mark.parametrize("inclusive", ["left", "right", "both"])
- def test_pickle_round_trip_closed(self, inclusive):
+ def test_pickle_round_trip_inclusive(self, closed):
# https://github.com/pandas-dev/pandas/issues/35658
- idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], inclusive=inclusive)
+ idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], inclusive=closed)
result = tm.round_trip_pickle(idx)
tm.assert_index_equal(result, idx)
diff --git a/pandas/tests/indexes/interval/test_setops.py b/pandas/tests/indexes/interval/test_setops.py
index 5933961cc0f9d..2e1f6f7925374 100644
--- a/pandas/tests/indexes/interval/test_setops.py
+++ b/pandas/tests/indexes/interval/test_setops.py
@@ -10,22 +10,22 @@
import pandas._testing as tm
-def monotonic_index(start, end, dtype="int64", closed="right"):
+def monotonic_index(start, end, dtype="int64", inclusive="right"):
return IntervalIndex.from_breaks(
- np.arange(start, end, dtype=dtype), inclusive=closed
+ np.arange(start, end, dtype=dtype), inclusive=inclusive
)
-def empty_index(dtype="int64", closed="right"):
- return IntervalIndex(np.array([], dtype=dtype), inclusive=closed)
+def empty_index(dtype="int64", inclusive="right"):
+ return IntervalIndex(np.array([], dtype=dtype), inclusive=inclusive)
class TestIntervalIndex:
def test_union(self, closed, sort):
- index = monotonic_index(0, 11, closed=closed)
- other = monotonic_index(5, 13, closed=closed)
+ index = monotonic_index(0, 11, inclusive=closed)
+ other = monotonic_index(5, 13, inclusive=closed)
- expected = monotonic_index(0, 13, closed=closed)
+ expected = monotonic_index(0, 13, inclusive=closed)
result = index[::-1].union(other, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
@@ -41,12 +41,12 @@ def test_union(self, closed, sort):
def test_union_empty_result(self, closed, sort):
# GH 19101: empty result, same dtype
- index = empty_index(dtype="int64", closed=closed)
+ index = empty_index(dtype="int64", inclusive=closed)
result = index.union(index, sort=sort)
tm.assert_index_equal(result, index)
# GH 19101: empty result, different numeric dtypes -> common dtype is f8
- other = empty_index(dtype="float64", closed=closed)
+ other = empty_index(dtype="float64", inclusive=closed)
result = index.union(other, sort=sort)
expected = other
tm.assert_index_equal(result, expected)
@@ -54,7 +54,7 @@ def test_union_empty_result(self, closed, sort):
other = index.union(index, sort=sort)
tm.assert_index_equal(result, expected)
- other = empty_index(dtype="uint64", closed=closed)
+ other = empty_index(dtype="uint64", inclusive=closed)
result = index.union(other, sort=sort)
tm.assert_index_equal(result, expected)
@@ -62,10 +62,10 @@ def test_union_empty_result(self, closed, sort):
tm.assert_index_equal(result, expected)
def test_intersection(self, closed, sort):
- index = monotonic_index(0, 11, closed=closed)
- other = monotonic_index(5, 13, closed=closed)
+ index = monotonic_index(0, 11, inclusive=closed)
+ other = monotonic_index(5, 13, inclusive=closed)
- expected = monotonic_index(5, 11, closed=closed)
+ expected = monotonic_index(5, 11, inclusive=closed)
result = index[::-1].intersection(other, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
@@ -100,21 +100,21 @@ def test_intersection(self, closed, sort):
tm.assert_index_equal(result, expected)
def test_intersection_empty_result(self, closed, sort):
- index = monotonic_index(0, 11, closed=closed)
+ index = monotonic_index(0, 11, inclusive=closed)
# GH 19101: empty result, same dtype
- other = monotonic_index(300, 314, closed=closed)
- expected = empty_index(dtype="int64", closed=closed)
+ other = monotonic_index(300, 314, inclusive=closed)
+ expected = empty_index(dtype="int64", inclusive=closed)
result = index.intersection(other, sort=sort)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different numeric dtypes -> common dtype is float64
- other = monotonic_index(300, 314, dtype="float64", closed=closed)
+ other = monotonic_index(300, 314, dtype="float64", inclusive=closed)
result = index.intersection(other, sort=sort)
expected = other[:0]
tm.assert_index_equal(result, expected)
- other = monotonic_index(300, 314, dtype="uint64", closed=closed)
+ other = monotonic_index(300, 314, dtype="uint64", inclusive=closed)
result = index.intersection(other, sort=sort)
tm.assert_index_equal(result, expected)
@@ -136,7 +136,7 @@ def test_difference(self, closed, sort):
# GH 19101: empty result, same dtype
result = index.difference(index, sort=sort)
- expected = empty_index(dtype="int64", closed=closed)
+ expected = empty_index(dtype="int64", inclusive=closed)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different dtypes
@@ -147,7 +147,7 @@ def test_difference(self, closed, sort):
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self, closed, sort):
- index = monotonic_index(0, 11, closed=closed)
+ index = monotonic_index(0, 11, inclusive=closed)
result = index[1:].symmetric_difference(index[:-1], sort=sort)
expected = IntervalIndex([index[0], index[-1]])
if sort is None:
@@ -156,7 +156,7 @@ def test_symmetric_difference(self, closed, sort):
# GH 19101: empty result, same dtype
result = index.symmetric_difference(index, sort=sort)
- expected = empty_index(dtype="int64", closed=closed)
+ expected = empty_index(dtype="int64", inclusive=closed)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
@@ -166,7 +166,7 @@ def test_symmetric_difference(self, closed, sort):
index.left.astype("float64"), index.right, inclusive=closed
)
result = index.symmetric_difference(other, sort=sort)
- expected = empty_index(dtype="float64", closed=closed)
+ expected = empty_index(dtype="float64", inclusive=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.filterwarnings("ignore:'<' not supported between:RuntimeWarning")
@@ -174,7 +174,7 @@ def test_symmetric_difference(self, closed, sort):
"op_name", ["union", "intersection", "difference", "symmetric_difference"]
)
def test_set_incompatible_types(self, closed, op_name, sort):
- index = monotonic_index(0, 11, closed=closed)
+ index = monotonic_index(0, 11, inclusive=closed)
set_op = getattr(index, op_name)
# TODO: standardize return type of non-union setops type(self vs other)
@@ -187,8 +187,8 @@ def test_set_incompatible_types(self, closed, op_name, sort):
tm.assert_index_equal(result, expected)
# mixed closed -> cast to object
- for other_closed in {"right", "left", "both", "neither"} - {closed}:
- other = monotonic_index(0, 11, closed=other_closed)
+ for other_inclusive in {"right", "left", "both", "neither"} - {closed}:
+ other = monotonic_index(0, 11, inclusive=other_inclusive)
expected = getattr(index.astype(object), op_name)(other, sort=sort)
if op_name == "difference":
expected = index
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index cec06d054d766..4e4ee4fd12d5f 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1200,8 +1200,8 @@ def test_constructor_infer_interval(self, data_constructor):
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
- def test_constructor_interval_mixed_closed(self, data_constructor):
- # GH 23563: mixed closed results in object dtype (not interval dtype)
+ def test_constructor_interval_mixed_inclusive(self, data_constructor):
+ # GH 23563: mixed inclusive results in object dtype (not interval dtype)
data = [Interval(0, 1, inclusive="both"), Interval(0, 2, inclusive="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
| I think this is the last one of those
| https://api.github.com/repos/pandas-dev/pandas/pulls/47775 | 2022-07-18T00:48:10Z | 2022-07-18T17:00:23Z | 2022-07-18T17:00:23Z | 2022-07-20T00:39:38Z |
BUG: Fix fillna on multi indexed Dataframe doesn't work | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index dbdeebad06af0..3d0a6e01826f8 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -14,6 +14,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
+- Fixed regression in :meth:`DataFrame.fillna` not working :class:`DataFrame` with :class:`MultiIndex` (:issue:`47649`)
- Fixed regression in taking NULL :class:`objects` from a :class:`DataFrame` causing a segmentation violation. These NULL values are created by :meth:`numpy.empty_like` (:issue:`46848`)
- Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`)
- Fixed regression in calling bitwise numpy ufuncs (for example, ``np.bitwise_and``) on Index objects (:issue:`46769`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8b1a427e1658a..abab32ae145bd 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6861,6 +6861,7 @@ def fillna(
for k, v in value.items():
if k not in result:
continue
+
# error: Item "None" of "Optional[Dict[Any, Any]]" has no
# attribute "get"
downcast_k = (
@@ -6868,9 +6869,14 @@ def fillna(
if not is_dict
else downcast.get(k) # type: ignore[union-attr]
)
- result.loc[:, k] = result[k].fillna(
- v, limit=limit, downcast=downcast_k
+ # GH47649
+ result.loc[:, k] = (
+ result[k].fillna(v, limit=limit, downcast=downcast_k).values
)
+ # TODO: result.loc[:, k] = result.loc[:, k].fillna(
+ # v, limit=limit, downcast=downcast_k
+ # )
+ # Revert when GH45751 is fixed
return result if not inplace else None
elif not is_list_like(value):
diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py
index 20e59ed72666a..8355502c47c61 100644
--- a/pandas/tests/frame/methods/test_fillna.py
+++ b/pandas/tests/frame/methods/test_fillna.py
@@ -715,6 +715,34 @@ def test_single_block_df_with_horizontal_axis(self):
)
tm.assert_frame_equal(result, expected)
+ def test_fillna_with_multi_index_frame(self):
+ # GH 47649
+ pdf = DataFrame(
+ {
+ ("x", "a"): [np.nan, 2.0, 3.0],
+ ("x", "b"): [1.0, 2.0, np.nan],
+ ("y", "c"): [1.0, 2.0, np.nan],
+ }
+ )
+ expected = DataFrame(
+ {
+ ("x", "a"): [-1.0, 2.0, 3.0],
+ ("x", "b"): [1.0, 2.0, -1.0],
+ ("y", "c"): [1.0, 2.0, np.nan],
+ }
+ )
+ tm.assert_frame_equal(pdf.fillna({"x": -1}), expected)
+ tm.assert_frame_equal(pdf.fillna({"x": -1, ("x", "b"): -2}), expected)
+
+ expected = DataFrame(
+ {
+ ("x", "a"): [-1.0, 2.0, 3.0],
+ ("x", "b"): [1.0, 2.0, -2.0],
+ ("y", "c"): [1.0, 2.0, np.nan],
+ }
+ )
+ tm.assert_frame_equal(pdf.fillna({("x", "b"): -2, "x": -1}), expected)
+
def test_fillna_nonconsolidated_frame():
# https://github.com/pandas-dev/pandas/issues/36495
| - [ ] closes #47649 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47774 | 2022-07-17T23:34:50Z | 2022-08-23T12:10:45Z | 2022-08-23T12:10:45Z | 2022-08-23T16:29:32Z |
REGR: fix pd.cut with datetime IntervalIndex as bins | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index 96e4ad4321c60..0393879a766a5 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -16,6 +16,7 @@ Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in taking NULL :class:`objects` from a :class:`DataFrame` causing a segmentation violation. These NULL values are created by :meth:`numpy.empty_like` (:issue:`46848`)
- Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`)
+- Fixed regression in :func:`cut` using a ``datetime64`` IntervalIndex as bins (:issue:`46218`)
- Fixed regression in :meth:`DataFrame.loc` not updating the cache correctly after values were set (:issue:`47867`)
- Fixed regression in :meth:`DataFrame.loc` not aligning index in some cases when setting a :class:`DataFrame` (:issue:`47578`)
- Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 816260c8a6d2d..1e8ba81c877ac 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3987,8 +3987,14 @@ def _should_partial_index(self, target: Index) -> bool:
Should we attempt partial-matching indexing?
"""
if is_interval_dtype(self.dtype):
+ if is_interval_dtype(target.dtype):
+ return False
+ # See https://github.com/pandas-dev/pandas/issues/47772 the commented
+ # out code can be restored (instead of hardcoding `return True`)
+ # once that issue if fixed
# "Index" has no attribute "left"
- return self.left._should_compare(target) # type: ignore[attr-defined]
+ # return self.left._should_compare(target) # type: ignore[attr-defined]
+ return True
return False
@final
diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py
index 9b4afcc9c00b8..4653981a1285d 100644
--- a/pandas/tests/indexes/interval/test_indexing.py
+++ b/pandas/tests/indexes/interval/test_indexing.py
@@ -8,6 +8,7 @@
from pandas import (
NA,
CategoricalIndex,
+ DatetimeIndex,
Index,
Interval,
IntervalIndex,
@@ -302,6 +303,20 @@ def test_get_indexer_categorical_with_nans(self):
expected = np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
+ def test_get_indexer_datetime(self):
+ ii = IntervalIndex.from_breaks(date_range("2018-01-01", periods=4))
+ result = ii.get_indexer(DatetimeIndex(["2018-01-02"]))
+ expected = np.array([0], dtype=np.intp)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = ii.get_indexer(DatetimeIndex(["2018-01-02"]).astype(str))
+ tm.assert_numpy_array_equal(result, expected)
+
+ # TODO this should probably be deprecated?
+ # https://github.com/pandas-dev/pandas/issues/47772
+ result = ii.get_indexer(DatetimeIndex(["2018-01-02"]).asi8)
+ tm.assert_numpy_array_equal(result, expected)
+
@pytest.mark.parametrize(
"tuples, closed",
[
diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py
index 1425686f027e4..3b9ab6a83a575 100644
--- a/pandas/tests/reshape/test_cut.py
+++ b/pandas/tests/reshape/test_cut.py
@@ -14,6 +14,7 @@
Timestamp,
cut,
date_range,
+ interval_range,
isna,
qcut,
timedelta_range,
@@ -734,3 +735,12 @@ def test_cut_with_timestamp_tuple_labels():
expected = Categorical.from_codes([0, 1, 2], labels, ordered=True)
tm.assert_categorical_equal(result, expected)
+
+
+def test_cut_bins_datetime_intervalindex():
+ # https://github.com/pandas-dev/pandas/issues/46218
+ bins = interval_range(Timestamp("2022-02-25"), Timestamp("2022-02-27"), freq="1D")
+ # passing Series instead of list is important to trigger bug
+ result = cut(Series([Timestamp("2022-02-26")]), bins=bins)
+ expected = Categorical.from_codes([0], bins, ordered=True)
+ tm.assert_categorical_equal(result.array, expected)
| Closes #46218
xref https://github.com/pandas-dev/pandas/issues/46218#issuecomment-1073362376 for the actual reason that causes `cut` to fail: inside the implementation, we convert the actual timestamp data to floats (to pass to the underlying algorithm), but then when passing those values to `IntervalIndex.get_indexer`, those numeric values no longer "match" with the datetime64 interval dtype. And in theory, `get_indexer` should then fail (return -1 for not finding the target values), but until https://github.com/pandas-dev/pandas/pull/42227 this actually happily worked (and therefore also let `cut` with datetime64 interval bins work).
This PR doesn't solve the root cause (we should change the logic inside `cut` so that we don't create this mismatch in values vs bins), but it is a short-term fix of the regression. It basically reverts the (unintended, I think) behaviour change introduced by https://github.com/pandas-dev/pandas/pull/42227, but without actually reverting that PR (I am keeping the refactor introducing `_should_partial_index` of that PR, but I am only changing `_should_partial_index` itself a little bit to match better with what happened before in practice).
I will open a separate issue about the issue in `cut` and that we should make `IntervalIndex.from_indexer` more strict. (opened -> https://github.com/pandas-dev/pandas/issues/47772) | https://api.github.com/repos/pandas-dev/pandas/pulls/47771 | 2022-07-17T19:52:47Z | 2022-08-19T19:31:05Z | 2022-08-19T19:31:05Z | 2022-08-20T06:51:44Z |
REF: de-duplicate get_conversion_factor | diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd
index 356bd9dc3d7a0..6e41a55f30929 100644
--- a/pandas/_libs/tslibs/dtypes.pxd
+++ b/pandas/_libs/tslibs/dtypes.pxd
@@ -8,7 +8,6 @@ cdef NPY_DATETIMEUNIT abbrev_to_npy_unit(str abbrev)
cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) nogil
cpdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=*) except? -1
cpdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1
-cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1
cdef dict attrname_to_abbrevs
diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index 01616666bba3f..a0a7ab90ebb30 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -4,7 +4,10 @@ cimport cython
from enum import Enum
-from pandas._libs.tslibs.np_datetime cimport NPY_DATETIMEUNIT
+from pandas._libs.tslibs.np_datetime cimport (
+ NPY_DATETIMEUNIT,
+ get_conversion_factor,
+)
cdef class PeriodDtypeBase:
@@ -386,83 +389,11 @@ cpdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns)
"""
How many of the given time units fit into a single day?
"""
- cdef:
- int64_t day_units
-
- if reso == NPY_DATETIMEUNIT.NPY_FR_ps:
- # pico is the smallest unit for which we don't overflow, so
- # we exclude femto and atto
- day_units = 24 * 3600 * 1_000_000_000_000
- elif reso == NPY_DATETIMEUNIT.NPY_FR_ns:
- day_units = 24 * 3600 * 1_000_000_000
- elif reso == NPY_DATETIMEUNIT.NPY_FR_us:
- day_units = 24 * 3600 * 1_000_000
- elif reso == NPY_DATETIMEUNIT.NPY_FR_ms:
- day_units = 24 * 3600 * 1_000
- elif reso == NPY_DATETIMEUNIT.NPY_FR_s:
- day_units = 24 * 3600
- elif reso == NPY_DATETIMEUNIT.NPY_FR_m:
- day_units = 24 * 60
- elif reso == NPY_DATETIMEUNIT.NPY_FR_h:
- day_units = 24
- elif reso == NPY_DATETIMEUNIT.NPY_FR_D:
- day_units = 1
- else:
- raise NotImplementedError(reso)
- return day_units
+ return get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_D, reso)
cpdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1:
- if reso == NPY_DATETIMEUNIT.NPY_FR_ns:
- return 1_000_000_000
- elif reso == NPY_DATETIMEUNIT.NPY_FR_us:
- return 1_000_000
- elif reso == NPY_DATETIMEUNIT.NPY_FR_ms:
- return 1_000
- elif reso == NPY_DATETIMEUNIT.NPY_FR_s:
- return 1
- else:
- raise NotImplementedError(reso)
-
-
-@cython.overflowcheck(True)
-cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1:
- """
- Find the factor by which we need to multiply to convert from from_unit to to_unit.
- """
- if (
- from_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC
- or to_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC
- ):
- raise ValueError("unit-less resolutions are not supported")
- if from_unit > to_unit:
- raise ValueError
-
- if from_unit == to_unit:
- return 1
-
- if from_unit == NPY_DATETIMEUNIT.NPY_FR_W:
- return 7 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_D, to_unit)
- elif from_unit == NPY_DATETIMEUNIT.NPY_FR_D:
- return 24 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_h, to_unit)
- elif from_unit == NPY_DATETIMEUNIT.NPY_FR_h:
- return 60 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_m, to_unit)
- elif from_unit == NPY_DATETIMEUNIT.NPY_FR_m:
- return 60 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_s, to_unit)
- elif from_unit == NPY_DATETIMEUNIT.NPY_FR_s:
- return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ms, to_unit)
- elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ms:
- return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_us, to_unit)
- elif from_unit == NPY_DATETIMEUNIT.NPY_FR_us:
- return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ns, to_unit)
- elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ns:
- return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ps, to_unit)
- elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ps:
- return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_fs, to_unit)
- elif from_unit == NPY_DATETIMEUNIT.NPY_FR_fs:
- return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_as, to_unit)
- else:
- raise ValueError(from_unit, to_unit)
+ return get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_s, reso)
cdef dict _reso_str_map = {
diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd
index 290483a741fe7..420d83909a78d 100644
--- a/pandas/_libs/tslibs/np_datetime.pxd
+++ b/pandas/_libs/tslibs/np_datetime.pxd
@@ -102,6 +102,7 @@ cpdef cnp.ndarray astype_overflowsafe(
cnp.dtype dtype, # ndarray[datetime64[anyunit]]
bint copy=*,
)
+cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1
cdef bint cmp_dtstructs(npy_datetimestruct* left, npy_datetimestruct* right, int op)
cdef get_implementation_bounds(
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 692b4430fa577..494eb5da7e107 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -1,3 +1,4 @@
+cimport cython
from cpython.datetime cimport (
PyDateTime_DATE_GET_HOUR,
PyDateTime_DATE_GET_MICROSECOND,
@@ -450,3 +451,43 @@ cdef int op_to_op_code(op):
return Py_GE
if op is operator.gt:
return Py_GT
+
+
+@cython.overflowcheck(True)
+cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1:
+ """
+ Find the factor by which we need to multiply to convert from from_unit to to_unit.
+ """
+ if (
+ from_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC
+ or to_unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC
+ ):
+ raise ValueError("unit-less resolutions are not supported")
+ if from_unit > to_unit:
+ raise ValueError
+
+ if from_unit == to_unit:
+ return 1
+
+ if from_unit == NPY_DATETIMEUNIT.NPY_FR_W:
+ return 7 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_D, to_unit)
+ elif from_unit == NPY_DATETIMEUNIT.NPY_FR_D:
+ return 24 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_h, to_unit)
+ elif from_unit == NPY_DATETIMEUNIT.NPY_FR_h:
+ return 60 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_m, to_unit)
+ elif from_unit == NPY_DATETIMEUNIT.NPY_FR_m:
+ return 60 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_s, to_unit)
+ elif from_unit == NPY_DATETIMEUNIT.NPY_FR_s:
+ return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ms, to_unit)
+ elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ms:
+ return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_us, to_unit)
+ elif from_unit == NPY_DATETIMEUNIT.NPY_FR_us:
+ return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ns, to_unit)
+ elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ns:
+ return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_ps, to_unit)
+ elif from_unit == NPY_DATETIMEUNIT.NPY_FR_ps:
+ return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_fs, to_unit)
+ elif from_unit == NPY_DATETIMEUNIT.NPY_FR_fs:
+ return 1000 * get_conversion_factor(NPY_DATETIMEUNIT.NPY_FR_as, to_unit)
+ else:
+ raise ValueError(from_unit, to_unit)
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index fef2a317a4f26..c64a9fb4d9c36 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -35,10 +35,7 @@ from pandas._libs.tslibs.conversion cimport (
cast_from_unit,
precision_from_unit,
)
-from pandas._libs.tslibs.dtypes cimport (
- get_conversion_factor,
- npy_unit_to_abbrev,
-)
+from pandas._libs.tslibs.dtypes cimport npy_unit_to_abbrev
from pandas._libs.tslibs.nattype cimport (
NPY_NAT,
c_NaT as NaT,
@@ -50,6 +47,7 @@ from pandas._libs.tslibs.np_datetime cimport (
NPY_FR_ns,
cmp_dtstructs,
cmp_scalar,
+ get_conversion_factor,
get_datetime64_unit,
get_timedelta64_value,
get_unit_from_dtype,
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index ae3ce46cbc3c8..3cf9c9bcda538 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -54,7 +54,6 @@ from pandas._libs.tslibs.conversion cimport (
maybe_localize_tso,
)
from pandas._libs.tslibs.dtypes cimport (
- get_conversion_factor,
npy_unit_to_abbrev,
periods_per_day,
periods_per_second,
@@ -83,6 +82,7 @@ from pandas._libs.tslibs.np_datetime cimport (
NPY_FR_ns,
cmp_dtstructs,
cmp_scalar,
+ get_conversion_factor,
get_datetime64_unit,
get_datetime64_value,
get_unit_from_dtype,
| Moving it to np_datetime bc we'll end up using it in astype_overflowsafe | https://api.github.com/repos/pandas-dev/pandas/pulls/47770 | 2022-07-17T19:35:54Z | 2022-07-18T17:14:44Z | 2022-07-18T17:14:44Z | 2022-07-18T18:46:39Z |
STYLE add future annotations where possible | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 06025c730700f..92f3b3ce83297 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -94,8 +94,6 @@ repos:
stages: [manual]
additional_dependencies: &pyright_dependencies
- pyright@1.1.258
-- repo: local
- hooks:
- id: pyright_reportGeneralTypeIssues
name: pyright reportGeneralTypeIssues
entry: pyright --skipunannotated -p pyright_reportGeneralTypeIssues.json
@@ -105,8 +103,6 @@ repos:
types: [python]
stages: [manual]
additional_dependencies: *pyright_dependencies
-- repo: local
- hooks:
- id: mypy
name: mypy
entry: mypy
@@ -115,8 +111,6 @@ repos:
pass_filenames: false
types: [python]
stages: [manual]
-- repo: local
- hooks:
- id: flake8-rst
name: flake8-rst
description: Run flake8 on code snippets in docstrings or RST files
@@ -237,3 +231,15 @@ repos:
additional_dependencies:
- flake8==4.0.1
- flake8-pyi==22.5.1
+ - id: future-annotations
+ name: import annotations from __future__
+ entry: 'from __future__ import annotations'
+ language: pygrep
+ args: [--negate]
+ files: ^pandas/
+ types: [python]
+ exclude: |
+ (?x)
+ /(__init__\.py)|(api\.py)|(_version\.py)|(testing\.py)|(conftest\.py)$
+ |/tests/
+ |/_testing/
diff --git a/pandas/_config/dates.py b/pandas/_config/dates.py
index 5bf2b49ce5904..b37831f96eb73 100644
--- a/pandas/_config/dates.py
+++ b/pandas/_config/dates.py
@@ -1,6 +1,8 @@
"""
config for datetime formatting
"""
+from __future__ import annotations
+
from pandas._config import config as cf
pc_date_dayfirst_doc = """
diff --git a/pandas/compat/chainmap.py b/pandas/compat/chainmap.py
index 9af7962fe4ad0..5bec8e5fa1913 100644
--- a/pandas/compat/chainmap.py
+++ b/pandas/compat/chainmap.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from typing import (
ChainMap,
TypeVar,
diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py
index eef2bb6639c36..833cda20368a2 100644
--- a/pandas/compat/pyarrow.py
+++ b/pandas/compat/pyarrow.py
@@ -1,5 +1,7 @@
""" support pyarrow compatibility across versions """
+from __future__ import annotations
+
from pandas.util.version import Version
try:
diff --git a/pandas/core/_numba/kernels/shared.py b/pandas/core/_numba/kernels/shared.py
index ec25e78a8d897..6e6bcef590d06 100644
--- a/pandas/core/_numba/kernels/shared.py
+++ b/pandas/core/_numba/kernels/shared.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import numba
import numpy as np
diff --git a/pandas/core/array_algos/transforms.py b/pandas/core/array_algos/transforms.py
index 27aebb9911e83..93b029c21760e 100644
--- a/pandas/core/array_algos/transforms.py
+++ b/pandas/core/array_algos/transforms.py
@@ -2,6 +2,8 @@
transforms.py is for shape-preserving functions.
"""
+from __future__ import annotations
+
import numpy as np
diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py
index d2875be0f58cd..280a599de84ed 100644
--- a/pandas/core/arraylike.py
+++ b/pandas/core/arraylike.py
@@ -4,6 +4,8 @@
Index
ExtensionArray
"""
+from __future__ import annotations
+
import operator
from typing import Any
import warnings
diff --git a/pandas/core/computation/check.py b/pandas/core/computation/check.py
index 7be617de63a40..3221b158241f5 100644
--- a/pandas/core/computation/check.py
+++ b/pandas/core/computation/check.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from pandas.compat._optional import import_optional_dependency
ne = import_optional_dependency("numexpr", errors="warn")
diff --git a/pandas/core/computation/common.py b/pandas/core/computation/common.py
index ebf4d4ea9154e..a1ac3dfa06ee0 100644
--- a/pandas/core/computation/common.py
+++ b/pandas/core/computation/common.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from functools import reduce
import numpy as np
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index a49e35539656f..8c1a3fece255e 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -9,6 +9,8 @@
module is imported, register them here rather than in the module.
"""
+from __future__ import annotations
+
import os
from typing import Callable
import warnings
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index f47aeb16e19f1..893e4a9be58ef 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -1,5 +1,7 @@
""" basic inference routines """
+from __future__ import annotations
+
from collections import abc
from numbers import Number
import re
diff --git a/pandas/core/exchange/buffer.py b/pandas/core/exchange/buffer.py
index 098c596bff4cd..a3b05a0c5d24a 100644
--- a/pandas/core/exchange/buffer.py
+++ b/pandas/core/exchange/buffer.py
@@ -1,7 +1,4 @@
-from typing import (
- Optional,
- Tuple,
-)
+from __future__ import annotations
import numpy as np
from packaging import version
@@ -60,7 +57,7 @@ def __dlpack__(self):
return self._x.__dlpack__()
raise NotImplementedError("__dlpack__")
- def __dlpack_device__(self) -> Tuple[DlpackDeviceType, Optional[int]]:
+ def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
"""
Device type and device ID for where the data in the buffer resides.
"""
diff --git a/pandas/core/exchange/dataframe_protocol.py b/pandas/core/exchange/dataframe_protocol.py
index ee2ae609e73f9..367b906332741 100644
--- a/pandas/core/exchange/dataframe_protocol.py
+++ b/pandas/core/exchange/dataframe_protocol.py
@@ -2,6 +2,8 @@
A verbatim copy (vendored) of the spec from https://github.com/data-apis/dataframe-api
"""
+from __future__ import annotations
+
from abc import (
ABC,
abstractmethod,
@@ -9,11 +11,8 @@
import enum
from typing import (
Any,
- Dict,
Iterable,
- Optional,
Sequence,
- Tuple,
TypedDict,
)
@@ -90,18 +89,18 @@ class ColumnNullType(enum.IntEnum):
class ColumnBuffers(TypedDict):
# first element is a buffer containing the column data;
# second element is the data buffer's associated dtype
- data: Tuple["Buffer", Any]
+ data: tuple[Buffer, Any]
# first element is a buffer containing mask values indicating missing data;
# second element is the mask value buffer's associated dtype.
# None if the null representation is not a bit or byte mask
- validity: Optional[Tuple["Buffer", Any]]
+ validity: tuple[Buffer, Any] | None
# first element is a buffer containing the offset values for
# variable-size binary data (e.g., variable-length strings);
# second element is the offsets buffer's associated dtype.
# None if the data buffer does not have an associated offsets buffer
- offsets: Optional[Tuple["Buffer", Any]]
+ offsets: tuple[Buffer, Any] | None
class CategoricalDescription(TypedDict):
@@ -111,7 +110,7 @@ class CategoricalDescription(TypedDict):
is_dictionary: bool
# Python-level only (e.g. ``{int: str}``).
# None if not a dictionary-style categorical.
- mapping: Optional[dict]
+ mapping: dict | None
class Buffer(ABC):
@@ -161,7 +160,7 @@ def __dlpack__(self):
raise NotImplementedError("__dlpack__")
@abstractmethod
- def __dlpack_device__(self) -> Tuple[DlpackDeviceType, Optional[int]]:
+ def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
"""
Device type and device ID for where the data in the buffer resides.
Uses device type codes matching DLPack.
@@ -239,7 +238,7 @@ def offset(self) -> int:
@property
@abstractmethod
- def dtype(self) -> Tuple[DtypeKind, int, str, str]:
+ def dtype(self) -> tuple[DtypeKind, int, str, str]:
"""
Dtype description as a tuple ``(kind, bit-width, format string, endianness)``.
@@ -293,7 +292,7 @@ def describe_categorical(self) -> CategoricalDescription:
@property
@abstractmethod
- def describe_null(self) -> Tuple[ColumnNullType, Any]:
+ def describe_null(self) -> tuple[ColumnNullType, Any]:
"""
Return the missing value (or "null") representation the column dtype
uses, as a tuple ``(kind, value)``.
@@ -306,7 +305,7 @@ def describe_null(self) -> Tuple[ColumnNullType, Any]:
@property
@abstractmethod
- def null_count(self) -> Optional[int]:
+ def null_count(self) -> int | None:
"""
Number of null elements, if known.
@@ -316,7 +315,7 @@ def null_count(self) -> Optional[int]:
@property
@abstractmethod
- def metadata(self) -> Dict[str, Any]:
+ def metadata(self) -> dict[str, Any]:
"""
The metadata for the column. See `DataFrame.metadata` for more details.
"""
@@ -330,7 +329,7 @@ def num_chunks(self) -> int:
pass
@abstractmethod
- def get_chunks(self, n_chunks: Optional[int] = None) -> Iterable["Column"]:
+ def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:
"""
Return an iterator yielding the chunks.
@@ -395,7 +394,7 @@ def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):
@property
@abstractmethod
- def metadata(self) -> Dict[str, Any]:
+ def metadata(self) -> dict[str, Any]:
"""
The metadata for the data frame, as a dictionary with string keys. The
contents of `metadata` may be anything, they are meant for a library
@@ -415,7 +414,7 @@ def num_columns(self) -> int:
pass
@abstractmethod
- def num_rows(self) -> Optional[int]:
+ def num_rows(self) -> int | None:
# TODO: not happy with Optional, but need to flag it may be expensive
# why include it if it may be None - what do we expect consumers
# to do here?
@@ -460,21 +459,21 @@ def get_columns(self) -> Iterable[Column]:
pass
@abstractmethod
- def select_columns(self, indices: Sequence[int]) -> "DataFrame":
+ def select_columns(self, indices: Sequence[int]) -> DataFrame:
"""
Create a new DataFrame by selecting a subset of columns by index.
"""
pass
@abstractmethod
- def select_columns_by_name(self, names: Sequence[str]) -> "DataFrame":
+ def select_columns_by_name(self, names: Sequence[str]) -> DataFrame:
"""
Create a new DataFrame by selecting a subset of columns by name.
"""
pass
@abstractmethod
- def get_chunks(self, n_chunks: Optional[int] = None) -> Iterable["DataFrame"]:
+ def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]:
"""
Return an iterator yielding the chunks.
diff --git a/pandas/core/exchange/from_dataframe.py b/pandas/core/exchange/from_dataframe.py
index cb1967b5701a0..a33e47ba3b68e 100644
--- a/pandas/core/exchange/from_dataframe.py
+++ b/pandas/core/exchange/from_dataframe.py
@@ -1,13 +1,8 @@
+from __future__ import annotations
+
import ctypes
import re
-from typing import (
- Any,
- Dict,
- List,
- Optional,
- Tuple,
- Union,
-)
+from typing import Any
import numpy as np
@@ -24,7 +19,7 @@
Endianness,
)
-_NP_DTYPES: Dict[DtypeKind, Dict[int, Any]] = {
+_NP_DTYPES: dict[DtypeKind, dict[int, Any]] = {
DtypeKind.INT: {8: np.int8, 16: np.int16, 32: np.int32, 64: np.int64},
DtypeKind.UINT: {8: np.uint8, 16: np.uint16, 32: np.uint32, 64: np.uint64},
DtypeKind.FLOAT: {32: np.float32, 64: np.float64},
@@ -108,7 +103,7 @@ def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame:
"""
# We need a dict of columns here, with each column being a NumPy array (at
# least for now, deal with non-NumPy dtypes later).
- columns: Dict[str, Any] = {}
+ columns: dict[str, Any] = {}
buffers = [] # hold on to buffers, keeps memory alive
for name in df.column_names():
if not isinstance(name, str):
@@ -140,7 +135,7 @@ def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame:
return pandas_df
-def primitive_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]:
+def primitive_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
"""
Convert a column holding one of the primitive dtypes to a NumPy array.
@@ -165,7 +160,7 @@ def primitive_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]:
return data, buffers
-def categorical_column_to_series(col: Column) -> Tuple[pd.Series, Any]:
+def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]:
"""
Convert a column holding categorical data to a pandas Series.
@@ -205,7 +200,7 @@ def categorical_column_to_series(col: Column) -> Tuple[pd.Series, Any]:
return data, buffers
-def string_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]:
+def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
"""
Convert a column holding string data to a NumPy array.
@@ -268,7 +263,7 @@ def string_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]:
null_pos = ~null_pos
# Assemble the strings from the code units
- str_list: List[Union[None, float, str]] = [None] * col.size
+ str_list: list[None | float | str] = [None] * col.size
for i in range(col.size):
# Check for missing values
if null_pos is not None and null_pos[i]:
@@ -324,7 +319,7 @@ def parse_datetime_format_str(format_str, data):
raise NotImplementedError(f"DateTime kind is not supported: {format_str}")
-def datetime_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]:
+def datetime_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:
"""
Convert a column holding DateTime data to a NumPy array.
@@ -362,9 +357,9 @@ def datetime_column_to_ndarray(col: Column) -> Tuple[np.ndarray, Any]:
def buffer_to_ndarray(
buffer: Buffer,
- dtype: Tuple[DtypeKind, int, str, str],
+ dtype: tuple[DtypeKind, int, str, str],
offset: int = 0,
- length: Optional[int] = None,
+ length: int | None = None,
) -> np.ndarray:
"""
Build a NumPy array from the passed buffer.
@@ -470,9 +465,9 @@ def bitmask_to_bool_ndarray(
def set_nulls(
- data: Union[np.ndarray, pd.Series],
+ data: np.ndarray | pd.Series,
col: Column,
- validity: Optional[Tuple[Buffer, Tuple[DtypeKind, int, str, str]]],
+ validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None,
allow_modify_inplace: bool = True,
):
"""
diff --git a/pandas/core/exchange/utils.py b/pandas/core/exchange/utils.py
index 0c746113babee..2cc5126591718 100644
--- a/pandas/core/exchange/utils.py
+++ b/pandas/core/exchange/utils.py
@@ -2,6 +2,8 @@
Utility functions and objects for implementing the exchange API.
"""
+from __future__ import annotations
+
import re
import typing
diff --git a/pandas/core/flags.py b/pandas/core/flags.py
index b4e1039e216c0..f07c6917d91e5 100644
--- a/pandas/core/flags.py
+++ b/pandas/core/flags.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import weakref
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index 2caaadbc05cff..6a1c586d90b6e 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -2,6 +2,8 @@
Functions for arithmetic and comparison operations on NumPy arrays and
ExtensionArrays.
"""
+from __future__ import annotations
+
import datetime
from functools import partial
import operator
diff --git a/pandas/core/ops/common.py b/pandas/core/ops/common.py
index b883fe7751daa..f0e6aa3750cee 100644
--- a/pandas/core/ops/common.py
+++ b/pandas/core/ops/common.py
@@ -1,6 +1,8 @@
"""
Boilerplate functions used in defining binary operations.
"""
+from __future__ import annotations
+
from functools import wraps
from typing import Callable
diff --git a/pandas/core/ops/dispatch.py b/pandas/core/ops/dispatch.py
index bfd4afe0de86f..2f500703ccfb3 100644
--- a/pandas/core/ops/dispatch.py
+++ b/pandas/core/ops/dispatch.py
@@ -1,6 +1,8 @@
"""
Functions for defining unary operations.
"""
+from __future__ import annotations
+
from typing import Any
from pandas._typing import ArrayLike
diff --git a/pandas/core/ops/invalid.py b/pandas/core/ops/invalid.py
index e069c765d5299..eb27cf7450119 100644
--- a/pandas/core/ops/invalid.py
+++ b/pandas/core/ops/invalid.py
@@ -1,6 +1,8 @@
"""
Templates for invalid operations.
"""
+from __future__ import annotations
+
import operator
import numpy as np
diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py
index d1f704635ba64..e8a930083a778 100644
--- a/pandas/core/ops/methods.py
+++ b/pandas/core/ops/methods.py
@@ -1,6 +1,8 @@
"""
Functions to generate methods and pin them to the appropriate classes.
"""
+from __future__ import annotations
+
import operator
from pandas.core.dtypes.generic import (
diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py
index 8d5f7fb8de758..850ca44e996c4 100644
--- a/pandas/core/ops/missing.py
+++ b/pandas/core/ops/missing.py
@@ -21,6 +21,8 @@
3) divmod behavior consistent with 1) and 2).
"""
+from __future__ import annotations
+
import operator
import numpy as np
diff --git a/pandas/core/roperator.py b/pandas/core/roperator.py
index 15b16b6fa976a..2f320f4e9c6b9 100644
--- a/pandas/core/roperator.py
+++ b/pandas/core/roperator.py
@@ -2,6 +2,8 @@
Reversed Operations not available in the stdlib operator module.
Defining these instead of using lambdas allows us to reference them by name.
"""
+from __future__ import annotations
+
import operator
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 15144116fa924..ed2a4002f5ce7 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -1,4 +1,6 @@
"""Common utility functions for rolling operations"""
+from __future__ import annotations
+
from collections import defaultdict
from typing import cast
diff --git a/pandas/core/window/doc.py b/pandas/core/window/doc.py
index 61cfa29ffc481..4fe08e2fa20b3 100644
--- a/pandas/core/window/doc.py
+++ b/pandas/core/window/doc.py
@@ -1,4 +1,6 @@
"""Any shareable docstring components for rolling/expanding/ewm"""
+from __future__ import annotations
+
from textwrap import dedent
from pandas.core.shared_docs import _shared_docs
diff --git a/pandas/core/window/online.py b/pandas/core/window/online.py
index bb973f05687e2..2e25bdd12d3e0 100644
--- a/pandas/core/window/online.py
+++ b/pandas/core/window/online.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from typing import TYPE_CHECKING
import numpy as np
diff --git a/pandas/io/formats/_color_data.py b/pandas/io/formats/_color_data.py
index e5b72b2befa4f..2e7cb7f29646e 100644
--- a/pandas/io/formats/_color_data.py
+++ b/pandas/io/formats/_color_data.py
@@ -3,6 +3,8 @@
# This data has been copied here, instead of being imported from matplotlib,
# not to have ``to_excel`` methods require matplotlib.
# source: matplotlib._color_data (3.3.3)
+from __future__ import annotations
+
CSS4_COLORS = {
"aliceblue": "F0F8FF",
"antiquewhite": "FAEBD7",
diff --git a/pandas/io/sas/sas_constants.py b/pandas/io/sas/sas_constants.py
index 979b2cacbf706..366e6924a1e16 100644
--- a/pandas/io/sas/sas_constants.py
+++ b/pandas/io/sas/sas_constants.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
magic = (
b"\x00\x00\x00\x00\x00\x00\x00\x00"
+ b"\x00\x00\x00\x00\xc2\xea\x81\x60"
diff --git a/pandas/plotting/_matplotlib/compat.py b/pandas/plotting/_matplotlib/compat.py
index c731c40f10a05..6015662999a7d 100644
--- a/pandas/plotting/_matplotlib/compat.py
+++ b/pandas/plotting/_matplotlib/compat.py
@@ -1,4 +1,6 @@
# being a bit too dynamic
+from __future__ import annotations
+
import operator
from pandas.util.version import Version
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index b995c6ac78b80..169c9cc18a7fd 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from pandas._libs.tslibs.offsets import (
FY5253,
BaseOffset,
| - [x] closes #41901 (Replace xxxx with the Github issue number)
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47769 | 2022-07-17T19:24:02Z | 2022-07-18T17:16:24Z | 2022-07-18T17:16:24Z | 2022-07-19T14:52:33Z |
TYP: Appender also works with properties | diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index cec4ee40a8c7a..f8359edaa8d44 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -12,7 +12,10 @@
import warnings
from pandas._libs.properties import cache_readonly
-from pandas._typing import F
+from pandas._typing import (
+ F,
+ T,
+)
from pandas.util._exceptions import find_stack_level
@@ -485,7 +488,7 @@ def __init__(self, addendum: str | None, join: str = "", indents: int = 0) -> No
self.addendum = addendum
self.join = join
- def __call__(self, func: F) -> F:
+ def __call__(self, func: T) -> T:
func.__doc__ = func.__doc__ if func.__doc__ else ""
self.addendum = self.addendum if self.addendum else ""
docitems = [func.__doc__, self.addendum]
| Technically, `Appender` works with literally any `object` as all of them have `__doc__`.
This change helps pyright in this case https://github.com/pandas-dev/pandas/blob/bdd9314c7006611021bab2b7adf7210cd874a0c2/pandas/core/series.py#L738
Mypy still needs the ignore because it doesn't support decorating a property. | https://api.github.com/repos/pandas-dev/pandas/pulls/47768 | 2022-07-17T19:16:53Z | 2022-07-18T17:17:17Z | 2022-07-18T17:17:17Z | 2022-09-10T01:39:01Z |
PERF: operations with zoneinfo tzinfos | diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index 2b7f9b9659354..4487136aa7fb8 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -642,9 +642,7 @@ cdef int64_t _tz_localize_using_tzinfo_api(
if not to_utc:
# tz.utcoffset only makes sense if datetime
# is _wall time_, so if val is a UTC timestamp convert to wall time
- dt = datetime_new(dts.year, dts.month, dts.day, dts.hour,
- dts.min, dts.sec, dts.us, utc_pytz)
- dt = dt.astimezone(tz)
+ dt = _astimezone(dts, tz)
if fold is not NULL:
# NB: fold is only passed with to_utc=False
@@ -658,6 +656,27 @@ cdef int64_t _tz_localize_using_tzinfo_api(
return delta
+cdef datetime _astimezone(npy_datetimestruct dts, tzinfo tz):
+ """
+ Optimized equivalent to:
+
+ dt = datetime(dts.year, dts.month, dts.day, dts.hour,
+ dts.min, dts.sec, dts.us, utc_pytz)
+ dt = dt.astimezone(tz)
+
+ Derived from the datetime.astimezone implementation at
+ https://github.com/python/cpython/blob/main/Modules/_datetimemodule.c#L6187
+
+ NB: we are assuming tz is not None.
+ """
+ cdef:
+ datetime result
+
+ result = datetime_new(dts.year, dts.month, dts.day, dts.hour,
+ dts.min, dts.sec, dts.us, tz)
+ return tz.fromutc(result)
+
+
# NB: relies on dateutil internals, subject to change.
@cython.boundscheck(False)
@cython.wraparound(False)
| ```
import zoneinfo
import pandas as pd
tz = zoneinfo.ZoneInfo("US/Pacific")
dti = pd.date_range("2016-01-01", periods=10**5, freq="s", tz=tz)
In [3]: %timeit dti.normalize()
142 ms ± 3.44 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) # <- main
90.4 ms ± 4.34 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) # <- PR
```
The corresponding timing with pytz is 49.9 ms. @pganssle any thoughts on getting to near-parity (so we can drop pytz xref #46463)? Is there any prospect of exposing zoneinfo_fromutc in the C-API? | https://api.github.com/repos/pandas-dev/pandas/pulls/47767 | 2022-07-17T17:59:21Z | 2022-07-18T17:19:28Z | 2022-07-18T17:19:28Z | 2022-07-18T18:19:52Z |
add ignore for new mypy error 'type-var' | diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 83626a42134d6..917382544199a 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -205,7 +205,7 @@ def __getattr__(self, attr: str):
# error: Signature of "obj" incompatible with supertype "BaseGroupBy"
@property
- def obj(self) -> NDFrameT: # type: ignore[override]
+ def obj(self) -> NDFrame: # type: ignore[override]
# error: Incompatible return value type (got "Optional[Any]",
# expected "NDFrameT")
return self.groupby.obj # type: ignore[return-value]
| A new error introduced to mypy is detected in pandas' repository. This PR adds comment to ignore that error.
> pandas (https://github.com/pandas-dev/pandas)
> + pandas/core/resample.py:208: error: A function returning TypeVar should receive at least one argument containing the same Typevar [type-var]
> + pandas/core/resample.py:208: note: Error code "type-var" not covered by "type: ignore" comment
Reference:
https://github.com/python/mypy/pull/13166
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. | https://api.github.com/repos/pandas-dev/pandas/pulls/47766 | 2022-07-17T17:08:29Z | 2022-07-17T23:29:07Z | 2022-07-17T23:29:07Z | 2022-07-18T11:51:05Z |
TST: misplaced string array test | diff --git a/pandas/tests/arrays/numpy_/test_indexing.py b/pandas/tests/arrays/numpy_/test_indexing.py
index f92411efe774c..225d64ad7d258 100644
--- a/pandas/tests/arrays/numpy_/test_indexing.py
+++ b/pandas/tests/arrays/numpy_/test_indexing.py
@@ -7,6 +7,17 @@
class TestSearchsorted:
+ def test_searchsorted_string(self, string_dtype):
+ arr = pd.array(["a", "b", "c"], dtype=string_dtype)
+
+ result = arr.searchsorted("a", side="left")
+ assert is_scalar(result)
+ assert result == 0
+
+ result = arr.searchsorted("a", side="right")
+ assert is_scalar(result)
+ assert result == 1
+
def test_searchsorted_numeric_dtypes_scalar(self, any_real_numpy_dtype):
arr = pd.array([1, 3, 90], dtype=any_real_numpy_dtype)
result = arr.searchsorted(30)
diff --git a/pandas/tests/arrays/string_/test_indexing.py b/pandas/tests/arrays/string_/test_indexing.py
deleted file mode 100644
index 41466c43288c3..0000000000000
--- a/pandas/tests/arrays/string_/test_indexing.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from pandas.core.dtypes.common import is_scalar
-
-import pandas as pd
-
-
-class TestSearchsorted:
- def test_searchsorted(self, string_dtype):
- arr = pd.array(["a", "b", "c"], dtype=string_dtype)
-
- result = arr.searchsorted("a", side="left")
- assert is_scalar(result)
- assert result == 0
-
- result = arr.searchsorted("a", side="right")
- assert is_scalar(result)
- assert result == 1
| Just encountered this because I wanted to add a test there for another PR.
Those tests were moved and split in https://github.com/pandas-dev/pandas/pull/46136, but the "string" tests are not actually about StringArray, they are about PandasArray with string numpy dtype (`np.dtype("U")`), and thus belong with the others in `tests/arrays/numpy_`, since `tests/arrays/string_` is for StringArray
TYP: a few mismatches found by stubtest | diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi
index 29d1365cad6fc..f55ff0ae8b574 100644
--- a/pandas/_libs/algos.pyi
+++ b/pandas/_libs/algos.pyi
@@ -42,7 +42,7 @@ def groupsort_indexer(
np.ndarray, # ndarray[int64_t, ndim=1]
]: ...
def kth_smallest(
- a: np.ndarray, # numeric[:]
+ arr: np.ndarray, # numeric[:]
k: int,
) -> Any: ... # numeric
diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi
index 2f0c3980c0c02..c7cb9705d7cb9 100644
--- a/pandas/_libs/groupby.pyi
+++ b/pandas/_libs/groupby.pyi
@@ -105,8 +105,9 @@ def group_last(
values: np.ndarray, # ndarray[rank_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
mask: npt.NDArray[np.bool_] | None,
- result_mask: npt.NDArray[np.bool_] | None,
+ result_mask: npt.NDArray[np.bool_] | None = ...,
min_count: int = ..., # Py_ssize_t
+ is_datetimelike: bool = ...,
) -> None: ...
def group_nth(
out: np.ndarray, # rank_t[:, ::1]
@@ -114,9 +115,10 @@ def group_nth(
values: np.ndarray, # ndarray[rank_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
mask: npt.NDArray[np.bool_] | None,
- result_mask: npt.NDArray[np.bool_] | None,
+ result_mask: npt.NDArray[np.bool_] | None = ...,
min_count: int = ..., # int64_t
rank: int = ..., # int64_t
+ is_datetimelike: bool = ...,
) -> None: ...
def group_rank(
out: np.ndarray, # float64_t[:, ::1]
@@ -124,7 +126,7 @@ def group_rank(
labels: np.ndarray, # const int64_t[:]
ngroups: int,
is_datetimelike: bool,
- ties_method: Literal["aveage", "min", "max", "first", "dense"] = ...,
+ ties_method: Literal["average", "min", "max", "first", "dense"] = ...,
ascending: bool = ...,
pct: bool = ...,
na_option: Literal["keep", "top", "bottom"] = ...,
@@ -136,6 +138,7 @@ def group_max(
values: np.ndarray, # ndarray[groupby_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
min_count: int = ...,
+ is_datetimelike: bool = ...,
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
) -> None: ...
@@ -145,6 +148,7 @@ def group_min(
values: np.ndarray, # ndarray[groupby_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
min_count: int = ...,
+ is_datetimelike: bool = ...,
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
) -> None: ...
@@ -154,6 +158,9 @@ def group_cummin(
labels: np.ndarray, # const int64_t[:]
ngroups: int,
is_datetimelike: bool,
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+ skipna: bool = ...,
) -> None: ...
def group_cummax(
out: np.ndarray, # groupby_t[:, ::1]
@@ -161,4 +168,7 @@ def group_cummax(
labels: np.ndarray, # const int64_t[:]
ngroups: int,
is_datetimelike: bool,
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+ skipna: bool = ...,
) -> None: ...
diff --git a/pandas/_libs/internals.pyi b/pandas/_libs/internals.pyi
index 6a90fbc729580..201c7b7b565cc 100644
--- a/pandas/_libs/internals.pyi
+++ b/pandas/_libs/internals.pyi
@@ -32,7 +32,7 @@ def update_blklocs_and_blknos(
loc: int,
nblocks: int,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
-
+@final
class BlockPlacement:
def __init__(self, val: int | slice | np.ndarray): ...
@property
diff --git a/pandas/_libs/join.pyi b/pandas/_libs/join.pyi
index a5e91e2ce83eb..8d02f8f57dee1 100644
--- a/pandas/_libs/join.pyi
+++ b/pandas/_libs/join.pyi
@@ -56,6 +56,7 @@ def asof_join_backward_on_X_by_Y(
right_by_values: np.ndarray, # by_t[:]
allow_exact_matches: bool = ...,
tolerance: np.number | int | float | None = ...,
+ use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def asof_join_forward_on_X_by_Y(
left_values: np.ndarray, # asof_t[:]
@@ -64,6 +65,7 @@ def asof_join_forward_on_X_by_Y(
right_by_values: np.ndarray, # by_t[:]
allow_exact_matches: bool = ...,
tolerance: np.number | int | float | None = ...,
+ use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def asof_join_nearest_on_X_by_Y(
left_values: np.ndarray, # asof_t[:]
@@ -72,22 +74,5 @@ def asof_join_nearest_on_X_by_Y(
right_by_values: np.ndarray, # by_t[:]
allow_exact_matches: bool = ...,
tolerance: np.number | int | float | None = ...,
-) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
-def asof_join_backward(
- left_values: np.ndarray, # asof_t[:]
- right_values: np.ndarray, # asof_t[:]
- allow_exact_matches: bool = ...,
- tolerance: np.number | int | float | None = ...,
-) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
-def asof_join_forward(
- left_values: np.ndarray, # asof_t[:]
- right_values: np.ndarray, # asof_t[:]
- allow_exact_matches: bool = ...,
- tolerance: np.number | int | float | None = ...,
-) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
-def asof_join_nearest(
- left_values: np.ndarray, # asof_t[:]
- right_values: np.ndarray, # asof_t[:]
- allow_exact_matches: bool = ...,
- tolerance: np.number | int | float | None = ...,
+ use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
diff --git a/pandas/_libs/missing.pyi b/pandas/_libs/missing.pyi
index 3a4cc9def07bd..27f227558dee5 100644
--- a/pandas/_libs/missing.pyi
+++ b/pandas/_libs/missing.pyi
@@ -1,7 +1,8 @@
import numpy as np
from numpy import typing as npt
-class NAType: ...
+class NAType:
+ def __new__(cls, *args, **kwargs): ...
NA: NAType
diff --git a/pandas/_libs/parsers.pyi b/pandas/_libs/parsers.pyi
index 01f5d5802ccd5..6b0bbf183f07e 100644
--- a/pandas/_libs/parsers.pyi
+++ b/pandas/_libs/parsers.pyi
@@ -63,7 +63,6 @@ class TextReader:
skip_blank_lines: bool = ...,
encoding_errors: bytes | str = ...,
): ...
- def set_error_bad_lines(self, status: int) -> None: ...
def set_noconvert(self, i: int) -> None: ...
def remove_noconvert(self, i: int) -> None: ...
def close(self) -> None: ...
diff --git a/pandas/_libs/tslibs/ccalendar.pyi b/pandas/_libs/tslibs/ccalendar.pyi
index 5d5b935ffa54b..993f18a61d74a 100644
--- a/pandas/_libs/tslibs/ccalendar.pyi
+++ b/pandas/_libs/tslibs/ccalendar.pyi
@@ -8,7 +8,5 @@ def get_firstbday(year: int, month: int) -> int: ...
def get_lastbday(year: int, month: int) -> int: ...
def get_day_of_year(year: int, month: int, day: int) -> int: ...
def get_iso_calendar(year: int, month: int, day: int) -> tuple[int, int, int]: ...
-def is_leapyear(year: int) -> bool: ...
def get_week_of_year(year: int, month: int, day: int) -> int: ...
def get_days_in_month(year: int, month: int) -> int: ...
-def dayofweek(y: int, m: int, d: int) -> int: ...
diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi
index dd439ebfc4798..041c51533d8da 100644
--- a/pandas/_libs/tslibs/dtypes.pyi
+++ b/pandas/_libs/tslibs/dtypes.pyi
@@ -14,10 +14,12 @@ class PeriodDtypeBase:
# actually __cinit__
def __new__(cls, code: int): ...
+ @property
def _freq_group_code(self) -> int: ...
@property
def _resolution_obj(self) -> Resolution: ...
def _get_to_timestamp_base(self) -> int: ...
+ @property
def _freqstr(self) -> str: ...
class FreqGroup(Enum):
diff --git a/pandas/_libs/tslibs/nattype.pyi b/pandas/_libs/tslibs/nattype.pyi
index e5a7e0223e534..0aa80330b15bc 100644
--- a/pandas/_libs/tslibs/nattype.pyi
+++ b/pandas/_libs/tslibs/nattype.pyi
@@ -12,8 +12,6 @@ NaT: NaTType
iNaT: int
nat_strings: set[str]
-def is_null_datetimelike(val: object, inat_is_null: bool = ...) -> bool: ...
-
_NaTComparisonTypes = datetime | timedelta | Period | np.datetime64 | np.timedelta64
class _NatComparison:
@@ -21,6 +19,7 @@ class _NatComparison:
class NaTType:
value: np.int64
+ @property
def asm8(self) -> np.datetime64: ...
def to_datetime64(self) -> np.datetime64: ...
def to_numpy(
diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi
index 12b113f0b73b1..1fe92e2870400 100644
--- a/pandas/_libs/tslibs/offsets.pyi
+++ b/pandas/_libs/tslibs/offsets.pyi
@@ -80,6 +80,7 @@ class BaseOffset:
def name(self) -> str: ...
@property
def rule_code(self) -> str: ...
+ @property
def freqstr(self) -> str: ...
def apply_index(self, dtindex: DatetimeIndex) -> DatetimeIndex: ...
def _apply_array(self, dtarr) -> None: ...
diff --git a/pandas/_libs/tslibs/timezones.pyi b/pandas/_libs/tslibs/timezones.pyi
index 20c403e93b149..d241a35f21cca 100644
--- a/pandas/_libs/tslibs/timezones.pyi
+++ b/pandas/_libs/tslibs/timezones.pyi
@@ -6,8 +6,6 @@ from typing import Callable
import numpy as np
-from pandas._typing import npt
-
# imported from dateutil.tz
dateutil_gettz: Callable[[str], tzinfo]
@@ -17,9 +15,6 @@ def infer_tzinfo(
start: datetime | None,
end: datetime | None,
) -> tzinfo | None: ...
-def get_dst_info(
- tz: tzinfo,
-) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64], str]: ...
def maybe_get_tz(tz: str | int | np.int64 | tzinfo | None) -> tzinfo | None: ...
def get_timezone(tz: tzinfo) -> tzinfo | str: ...
def is_utc(tz: tzinfo | None) -> bool: ...
| xref #47760 | https://api.github.com/repos/pandas-dev/pandas/pulls/47764 | 2022-07-17T14:19:18Z | 2022-07-18T17:20:26Z | 2022-07-18T17:20:26Z | 2022-07-18T17:20:34Z |
BUG: fix regression in Series[string] setitem setting a scalar with a mask | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index 6ee140f59e096..6bd7378e05404 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`)
+- Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index c9abef226770c..c68ffec600c8a 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -14,6 +14,7 @@
from pandas._typing import (
Dtype,
Scalar,
+ npt,
type_t,
)
from pandas.compat import pa_version_under1p01
@@ -410,6 +411,12 @@ def __setitem__(self, key, value):
super().__setitem__(key, value)
+ def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
+ # the super() method NDArrayBackedExtensionArray._putmask uses
+ # np.putmask which doesn't properly handle None/pd.NA, so using the
+ # base class implementation that uses __setitem__
+ ExtensionArray._putmask(self, mask, value)
+
def astype(self, dtype, copy: bool = True):
dtype = pandas_dtype(dtype)
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index a5eb6189db6f1..4376a0de37a8c 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -588,3 +588,23 @@ def test_isin(dtype, fixed_now_ts):
result = s.isin(["a", fixed_now_ts])
expected = pd.Series([True, False, False])
tm.assert_series_equal(result, expected)
+
+
+def test_setitem_scalar_with_mask_validation(dtype):
+ # https://github.com/pandas-dev/pandas/issues/47628
+ # setting None with a boolean mask (through _putmaks) should still result
+ # in pd.NA values in the underlying array
+ ser = pd.Series(["a", "b", "c"], dtype=dtype)
+ mask = np.array([False, True, False])
+
+ ser[mask] = None
+ assert ser.array[1] is pd.NA
+
+ # for other non-string we should also raise an error
+ ser = pd.Series(["a", "b", "c"], dtype=dtype)
+ if type(ser.array) is pd.arrays.StringArray:
+ msg = "Cannot set non-string value"
+ else:
+ msg = "Scalar must be NA or str"
+ with pytest.raises(ValueError, match=msg):
+ ser[mask] = 1
| - [x] closes #47628
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47763 | 2022-07-17T13:47:12Z | 2022-07-18T22:29:09Z | 2022-07-18T22:29:08Z | 2022-07-19T06:13:31Z |
REGR: preserve reindexed array object (instead of creating new array) for concat with all-NA array | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index e03e6cd41ebd3..2ce4d4b37f922 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -17,6 +17,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.fillna` not working :class:`DataFrame` with :class:`MultiIndex` (:issue:`47649`)
- Fixed regression in taking NULL :class:`objects` from a :class:`DataFrame` causing a segmentation violation. These NULL values are created by :meth:`numpy.empty_like` (:issue:`46848`)
- Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`)
+- Fixed regression in :func:`concat` or :func:`merge` handling of all-NaN ExtensionArrays with custom attributes (:issue:`47762`)
- Fixed regression in calling bitwise numpy ufuncs (for example, ``np.bitwise_and``) on Index objects (:issue:`46769`)
- Fixed regression in :func:`cut` using a ``datetime64`` IntervalIndex as bins (:issue:`46218`)
- Fixed regression in :meth:`DataFrame.select_dtypes` where ``include="number"`` included :class:`BooleanDtype` (:issue:`46870`)
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 77197dac3363b..0df8aa5a055b0 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -476,16 +476,21 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
return DatetimeArray(i8values, dtype=empty_dtype)
elif is_1d_only_ea_dtype(empty_dtype):
- empty_dtype = cast(ExtensionDtype, empty_dtype)
- cls = empty_dtype.construct_array_type()
-
- missing_arr = cls._from_sequence([], dtype=empty_dtype)
- ncols, nrows = self.shape
- assert ncols == 1, ncols
- empty_arr = -1 * np.ones((nrows,), dtype=np.intp)
- return missing_arr.take(
- empty_arr, allow_fill=True, fill_value=fill_value
- )
+ if is_dtype_equal(blk_dtype, empty_dtype) and self.indexers:
+ # avoid creating new empty array if we already have an array
+ # with correct dtype that can be reindexed
+ pass
+ else:
+ empty_dtype = cast(ExtensionDtype, empty_dtype)
+ cls = empty_dtype.construct_array_type()
+
+ missing_arr = cls._from_sequence([], dtype=empty_dtype)
+ ncols, nrows = self.shape
+ assert ncols == 1, ncols
+ empty_arr = -1 * np.ones((nrows,), dtype=np.intp)
+ return missing_arr.take(
+ empty_arr, allow_fill=True, fill_value=fill_value
+ )
elif isinstance(empty_dtype, ExtensionDtype):
# TODO: no tests get here, a handful would if we disabled
# the dt64tz special-case above (which is faster)
diff --git a/pandas/tests/extension/array_with_attr/__init__.py b/pandas/tests/extension/array_with_attr/__init__.py
new file mode 100644
index 0000000000000..49da6af024a31
--- /dev/null
+++ b/pandas/tests/extension/array_with_attr/__init__.py
@@ -0,0 +1,6 @@
+from pandas.tests.extension.array_with_attr.array import (
+ FloatAttrArray,
+ FloatAttrDtype,
+)
+
+__all__ = ["FloatAttrArray", "FloatAttrDtype"]
diff --git a/pandas/tests/extension/array_with_attr/array.py b/pandas/tests/extension/array_with_attr/array.py
new file mode 100644
index 0000000000000..d9327ca9f2f3f
--- /dev/null
+++ b/pandas/tests/extension/array_with_attr/array.py
@@ -0,0 +1,84 @@
+"""
+Test extension array that has custom attribute information (not stored on the dtype).
+
+"""
+from __future__ import annotations
+
+import numbers
+
+import numpy as np
+
+from pandas._typing import type_t
+
+from pandas.core.dtypes.base import ExtensionDtype
+
+import pandas as pd
+from pandas.core.arrays import ExtensionArray
+
+
+class FloatAttrDtype(ExtensionDtype):
+ type = float
+ name = "float_attr"
+ na_value = np.nan
+
+ @classmethod
+ def construct_array_type(cls) -> type_t[FloatAttrArray]:
+ """
+ Return the array type associated with this dtype.
+
+ Returns
+ -------
+ type
+ """
+ return FloatAttrArray
+
+
+class FloatAttrArray(ExtensionArray):
+ dtype = FloatAttrDtype()
+ __array_priority__ = 1000
+
+ def __init__(self, values, attr=None) -> None:
+ if not isinstance(values, np.ndarray):
+ raise TypeError("Need to pass a numpy array of float64 dtype as values")
+ if not values.dtype == "float64":
+ raise TypeError("Need to pass a numpy array of float64 dtype as values")
+ self.data = values
+ self.attr = attr
+
+ @classmethod
+ def _from_sequence(cls, scalars, dtype=None, copy=False):
+ data = np.array(scalars, dtype="float64", copy=copy)
+ return cls(data)
+
+ def __getitem__(self, item):
+ if isinstance(item, numbers.Integral):
+ return self.data[item]
+ else:
+ # slice, list-like, mask
+ item = pd.api.indexers.check_array_indexer(self, item)
+ return type(self)(self.data[item], self.attr)
+
+ def __len__(self) -> int:
+ return len(self.data)
+
+ def isna(self):
+ return np.isnan(self.data)
+
+ def take(self, indexer, allow_fill=False, fill_value=None):
+ from pandas.api.extensions import take
+
+ data = self.data
+ if allow_fill and fill_value is None:
+ fill_value = self.dtype.na_value
+
+ result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)
+ return type(self)(result, self.attr)
+
+ def copy(self):
+ return type(self)(self.data.copy(), self.attr)
+
+ @classmethod
+ def _concat_same_type(cls, to_concat):
+ data = np.concatenate([x.data for x in to_concat])
+ attr = to_concat[0].attr if len(to_concat) else None
+ return cls(data, attr)
diff --git a/pandas/tests/extension/array_with_attr/test_array_with_attr.py b/pandas/tests/extension/array_with_attr/test_array_with_attr.py
new file mode 100644
index 0000000000000..3735fe40a0d67
--- /dev/null
+++ b/pandas/tests/extension/array_with_attr/test_array_with_attr.py
@@ -0,0 +1,33 @@
+import numpy as np
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.tests.extension.array_with_attr import FloatAttrArray
+
+
+def test_concat_with_all_na():
+ # https://github.com/pandas-dev/pandas/pull/47762
+ # ensure that attribute of the column array is preserved (when it gets
+ # preserved in reindexing the array) during merge/concat
+ arr = FloatAttrArray(np.array([np.nan, np.nan], dtype="float64"), attr="test")
+
+ df1 = pd.DataFrame({"col": arr, "key": [0, 1]})
+ df2 = pd.DataFrame({"key": [0, 1], "col2": [1, 2]})
+ result = pd.merge(df1, df2, on="key")
+ expected = pd.DataFrame({"col": arr, "key": [0, 1], "col2": [1, 2]})
+ tm.assert_frame_equal(result, expected)
+ assert result["col"].array.attr == "test"
+
+ df1 = pd.DataFrame({"col": arr, "key": [0, 1]})
+ df2 = pd.DataFrame({"key": [0, 2], "col2": [1, 2]})
+ result = pd.merge(df1, df2, on="key")
+ expected = pd.DataFrame({"col": arr.take([0]), "key": [0], "col2": [1]})
+ tm.assert_frame_equal(result, expected)
+ assert result["col"].array.attr == "test"
+
+ result = pd.concat([df1.set_index("key"), df2.set_index("key")], axis=1)
+ expected = pd.DataFrame(
+ {"col": arr.take([0, 1, -1]), "col2": [1, np.nan, 2], "key": [0, 1, 2]}
+ ).set_index("key")
+ tm.assert_frame_equal(result, expected)
+ assert result["col"].array.attr == "test"
| Originally reported in GeoPandas: https://github.com/geopandas/geopandas/issues/2493
This worked in pandas 1.3.5, was then originally "broken" by https://github.com/pandas-dev/pandas/pull/43043 (as an unintended side effect of some of the (proper) changes in that PR) somewhere between 1.3 and 1.4.0 but got fixed again before the final 1.4.0 because of subsequent refactoring in the internal concat code. Some of those refactors were then reverted in 1.4.3 for being able to revert the all-NA change (https://github.com/pandas-dev/pandas/pull/47372), surfacing the "bug" again.
This is not necessarily strictly a "bug" in pandas (I also don't know if we make any guarantees about preserving array objects, let alone its attributes), but I think this is a decent change nevertheless.
Currently, when concatting/merging a column with all-NA data, we create the "empty" (all-NA) array for the result from scratch. While if there is no dtype change, and so the original column already has the correct dtype, we can actually just reindex that array instead of creating a new all-NA array. For builtin dtypes that might not matter much, but for custom ExtensionArrays that avoids going through some complex code to create this new all-NA array:
https://github.com/pandas-dev/pandas/blob/fc68a9a290fc314c090e037597138c74fa23ee6d/pandas/core/internals/concat.py#L479-L488
In GeoPandas, our ExtensionArray holds some optional attribute information (eg the coordinate reference system, a spatial index, ..) that is not part of the dtype. And so in that case, reindexing the original array lets geopandas preserve that information (if appropriate), while if creating an empty array from scratch with the code above, this information is definitely lost.
Apart from the two-line actual code change, this PR adds a new, minimal test ExtensionArray that exhibits this behaviour of having an attribute, to be able to test this without dependency on GeoPandas. | https://api.github.com/repos/pandas-dev/pandas/pulls/47762 | 2022-07-17T12:04:03Z | 2022-08-30T12:16:17Z | 2022-08-30T12:16:17Z | 2022-09-06T17:59:48Z |
DEPR: returning tuple when grouping by a list containing single element | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index b69cf415ac21e..b5a6cf50fcb6a 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -776,6 +776,7 @@ Other Deprecations
- Deprecated argument ``errors`` for :meth:`Series.mask`, :meth:`Series.where`, :meth:`DataFrame.mask`, and :meth:`DataFrame.where` as ``errors`` had no effect on this methods (:issue:`47728`)
- Deprecated arguments ``*args`` and ``**kwargs`` in :class:`Rolling`, :class:`Expanding`, and :class:`ExponentialMovingWindow` ops. (:issue:`47836`)
- Deprecated unused arguments ``encoding`` and ``verbose`` in :meth:`Series.to_excel` and :meth:`DataFrame.to_excel` (:issue:`47912`)
+- Deprecated producing a single element when iterating over a :class:`DataFrameGroupBy` or a :class:`SeriesGroupBy` that has been grouped by a list of length 1; A tuple of length one will be returned instead (:issue:`42795`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.performance:
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 9e26598d85e74..631f70f390319 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -465,7 +465,9 @@ def _transform_general(self, func: Callable, *args, **kwargs) -> Series:
klass = type(self.obj)
results = []
- for name, group in self:
+ for name, group in self.grouper.get_iterator(
+ self._selected_obj, axis=self.axis
+ ):
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 28e1b2b388035..8e0ed959fabc3 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -645,6 +645,7 @@ class BaseGroupBy(PandasObject, SelectionMixin[NDFrameT], GroupByIndexingMixin):
axis: int
grouper: ops.BaseGrouper
+ keys: _KeysArgType | None = None
group_keys: bool | lib.NoDefault
@final
@@ -821,6 +822,19 @@ def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]:
Generator yielding sequence of (name, subsetted object)
for each group
"""
+ keys = self.keys
+ if isinstance(keys, list) and len(keys) == 1:
+ warnings.warn(
+ (
+ "In a future version of pandas, a length 1 "
+ "tuple will be returned when iterating over a "
+ "a groupby with a grouper equal to a list of "
+ "length 1. Don't supply a list with a single grouper "
+ "to avoid this warning."
+ ),
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
return self.grouper.get_iterator(self._selected_obj, axis=self.axis)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 6ce5ffac9de52..e06a288c1eb38 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -150,7 +150,7 @@ def _groupby_and_merge(by, left: DataFrame, right: DataFrame, merge_pieces):
if all(item in right.columns for item in by):
rby = right.groupby(by, sort=False)
- for key, lhs in lby:
+ for key, lhs in lby.grouper.get_iterator(lby._selected_obj, axis=lby.axis):
if rby is None:
rhs = right
diff --git a/pandas/plotting/_matplotlib/groupby.py b/pandas/plotting/_matplotlib/groupby.py
index 4f1cd3f38343a..17a214292608b 100644
--- a/pandas/plotting/_matplotlib/groupby.py
+++ b/pandas/plotting/_matplotlib/groupby.py
@@ -16,6 +16,8 @@
concat,
)
+from pandas.plotting._matplotlib.misc import unpack_single_str_list
+
def create_iter_data_given_by(
data: DataFrame, kind: str = "hist"
@@ -108,7 +110,8 @@ def reconstruct_data_with_by(
1 3.0 4.0 NaN NaN
2 NaN NaN 5.0 6.0
"""
- grouped = data.groupby(by)
+ by_modified = unpack_single_str_list(by)
+ grouped = data.groupby(by_modified)
data_list = []
for key, group in grouped:
diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py
index 3b151d67c70be..62242a4a2ddab 100644
--- a/pandas/plotting/_matplotlib/hist.py
+++ b/pandas/plotting/_matplotlib/hist.py
@@ -33,6 +33,7 @@
create_iter_data_given_by,
reformat_hist_y_given_by,
)
+from pandas.plotting._matplotlib.misc import unpack_single_str_list
from pandas.plotting._matplotlib.tools import (
create_subplots,
flatten_axes,
@@ -67,7 +68,8 @@ def _args_adjust(self):
# where subplots are created based on by argument
if is_integer(self.bins):
if self.by is not None:
- grouped = self.data.groupby(self.by)[self.columns]
+ by_modified = unpack_single_str_list(self.by)
+ grouped = self.data.groupby(by_modified)[self.columns]
self.bins = [self._calculate_bins(group) for key, group in grouped]
else:
self.bins = self._calculate_bins(self.data)
diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py
index e2a0d50544f22..4b74b067053a6 100644
--- a/pandas/plotting/_matplotlib/misc.py
+++ b/pandas/plotting/_matplotlib/misc.py
@@ -475,3 +475,11 @@ def r(h):
ax.legend()
ax.grid()
return ax
+
+
+def unpack_single_str_list(keys):
+ # GH 42795
+ if isinstance(keys, list):
+ if len(keys) == 1 and isinstance(keys[0], str):
+ keys = keys[0]
+ return keys
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 920b869ef799b..73aeb17d8c274 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2795,3 +2795,19 @@ def test_groupby_none_column_name():
result = df.groupby(by=[None]).sum()
expected = DataFrame({"b": [2, 5], "c": [9, 13]}, index=Index([1, 2], name=None))
tm.assert_frame_equal(result, expected)
+
+
+def test_single_element_list_grouping():
+ # GH 42795
+ df = DataFrame(
+ {"a": [np.nan, 1], "b": [np.nan, 5], "c": [np.nan, 2]}, index=["x", "y"]
+ )
+ msg = (
+ "In a future version of pandas, a length 1 "
+ "tuple will be returned when iterating over a "
+ "a groupby with a grouper equal to a list of "
+ "length 1. Don't supply a list with a single grouper "
+ "to avoid this warning."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ values, _ = next(iter(df.groupby(["a"])))
| - [X] closes #42795 (Replace xxxx with the Github issue number)
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
- [x] Applied the removal in #47719
| https://api.github.com/repos/pandas-dev/pandas/pulls/47761 | 2022-07-17T03:32:25Z | 2022-08-01T20:15:10Z | 2022-08-01T20:15:10Z | 2022-12-05T03:22:33Z |
TYP: reflect ensure_* function removals | diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi
index 0cc9209fbdfc5..29d1365cad6fc 100644
--- a/pandas/_libs/algos.pyi
+++ b/pandas/_libs/algos.pyi
@@ -129,18 +129,11 @@ def diff_2d(
) -> None: ...
def ensure_platform_int(arr: object) -> npt.NDArray[np.intp]: ...
def ensure_object(arr: object) -> npt.NDArray[np.object_]: ...
-def ensure_complex64(arr: object, copy=...) -> npt.NDArray[np.complex64]: ...
-def ensure_complex128(arr: object, copy=...) -> npt.NDArray[np.complex128]: ...
def ensure_float64(arr: object, copy=...) -> npt.NDArray[np.float64]: ...
-def ensure_float32(arr: object, copy=...) -> npt.NDArray[np.float32]: ...
def ensure_int8(arr: object, copy=...) -> npt.NDArray[np.int8]: ...
def ensure_int16(arr: object, copy=...) -> npt.NDArray[np.int16]: ...
def ensure_int32(arr: object, copy=...) -> npt.NDArray[np.int32]: ...
def ensure_int64(arr: object, copy=...) -> npt.NDArray[np.int64]: ...
-def ensure_uint8(arr: object, copy=...) -> npt.NDArray[np.uint8]: ...
-def ensure_uint16(arr: object, copy=...) -> npt.NDArray[np.uint16]: ...
-def ensure_uint32(arr: object, copy=...) -> npt.NDArray[np.uint32]: ...
-def ensure_uint64(arr: object, copy=...) -> npt.NDArray[np.uint64]: ...
def take_1d_int8_int8(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
| These were removed in #44207 | https://api.github.com/repos/pandas-dev/pandas/pulls/47758 | 2022-07-16T21:00:12Z | 2022-07-17T14:38:21Z | 2022-07-17T14:38:21Z | 2022-08-25T05:21:37Z |
BUG: wide_to_long fails when stubname misses and i contains string type column | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 22a5f2a08362f..841b24bb06b6c 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -1019,6 +1019,7 @@ Reshaping
- Bug in :meth:`DataFrame.join` with a list when using suffixes to join DataFrames with duplicate column names (:issue:`46396`)
- Bug in :meth:`DataFrame.pivot_table` with ``sort=False`` results in sorted index (:issue:`17041`)
- Bug in :meth:`concat` when ``axis=1`` and ``sort=False`` where the resulting Index was a :class:`Int64Index` instead of a :class:`RangeIndex` (:issue:`46675`)
+- Bug in :meth:`wide_to_long` raises when ``stubnames`` is missing in columns and ``i`` contains string dtype column (:issue:`46044`)
Sparse
^^^^^^
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index 06127c8ecb932..5de9c8e2f4108 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -131,7 +131,11 @@ def melt(
for col in id_vars:
id_data = frame.pop(col)
if is_extension_array_dtype(id_data):
- id_data = concat([id_data] * K, ignore_index=True)
+ if K > 0:
+ id_data = concat([id_data] * K, ignore_index=True)
+ else:
+ # We can't concat empty list. (GH 46044)
+ id_data = type(id_data)([], name=id_data.name, dtype=id_data.dtype)
else:
# error: Incompatible types in assignment (expression has type
# "ndarray[Any, dtype[Any]]", variable has type "Series")
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index 4fbfee6f829ba..2013b3484ebff 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -1086,3 +1086,27 @@ def test_warn_of_column_name_value(self):
with tm.assert_produces_warning(FutureWarning):
result = df.melt(id_vars="value")
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("dtype", ["O", "string"])
+ def test_missing_stubname(self, dtype):
+ # GH46044
+ df = DataFrame({"id": ["1", "2"], "a-1": [100, 200], "a-2": [300, 400]})
+ df = df.astype({"id": dtype})
+ result = wide_to_long(
+ df,
+ stubnames=["a", "b"],
+ i="id",
+ j="num",
+ sep="-",
+ )
+ index = pd.Index(
+ [("1", 1), ("2", 1), ("1", 2), ("2", 2)],
+ name=("id", "num"),
+ )
+ expected = DataFrame(
+ {"a": [100, 200, 300, 400], "b": [np.nan] * 4},
+ index=index,
+ )
+ new_level = expected.index.levels[0].astype(dtype)
+ expected.index = expected.index.set_levels(new_level, level=0)
+ tm.assert_frame_equal(result, expected)
| - [x] closes #46044
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47757 | 2022-07-16T20:57:00Z | 2022-07-19T19:22:39Z | 2022-07-19T19:22:39Z | 2022-07-19T19:27:37Z |
TYP: Update timestamps.pyi | diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index f6a62688fc72d..082f26cf6f213 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -85,10 +85,10 @@ class Timestamp(datetime):
def fold(self) -> int: ...
@classmethod
def fromtimestamp(
- cls: type[_DatetimeT], t: float, tz: _tzinfo | None = ...
+ cls: type[_DatetimeT], ts: float, tz: _tzinfo | None = ...
) -> _DatetimeT: ...
@classmethod
- def utcfromtimestamp(cls: type[_DatetimeT], t: float) -> _DatetimeT: ...
+ def utcfromtimestamp(cls: type[_DatetimeT], ts: float) -> _DatetimeT: ...
@classmethod
def today(cls: type[_DatetimeT], tz: _tzinfo | str | None = ...) -> _DatetimeT: ...
@classmethod
@@ -118,19 +118,25 @@ class Timestamp(datetime):
def date(self) -> _date: ...
def time(self) -> _time: ...
def timetz(self) -> _time: ...
- def replace(
+ # LSP violation: nanosecond is not present in datetime.datetime.replace
+ # and has positional args following it
+ def replace( # type: ignore[override]
self: _DatetimeT,
- year: int = ...,
- month: int = ...,
- day: int = ...,
- hour: int = ...,
- minute: int = ...,
- second: int = ...,
- microsecond: int = ...,
- tzinfo: _tzinfo | None = ...,
- fold: int = ...,
+ year: int | None = ...,
+ month: int | None = ...,
+ day: int | None = ...,
+ hour: int | None = ...,
+ minute: int | None = ...,
+ second: int | None = ...,
+ microsecond: int | None = ...,
+ nanosecond: int | None = ...,
+ tzinfo: _tzinfo | type[object] | None = ...,
+ fold: int | None = ...,
+ ) -> _DatetimeT: ...
+ # LSP violation: datetime.datetime.astimezone has a default value for tz
+ def astimezone( # type: ignore[override]
+ self: _DatetimeT, tz: _tzinfo | None
) -> _DatetimeT: ...
- def astimezone(self: _DatetimeT, tz: _tzinfo | None = ...) -> _DatetimeT: ...
def ctime(self) -> str: ...
def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ...
@classmethod
@@ -206,8 +212,6 @@ class Timestamp(datetime):
@property
def dayofweek(self) -> int: ...
@property
- def day_of_month(self) -> int: ...
- @property
def day_of_year(self) -> int: ...
@property
def dayofyear(self) -> int: ...
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/47756 | 2022-07-16T20:42:59Z | 2022-07-17T14:39:24Z | 2022-07-17T14:39:24Z | 2022-07-18T11:54:41Z |
TST: Add additional test for future warning when call Series.str.cat(Series.str) | diff --git a/pandas/tests/strings/test_cat.py b/pandas/tests/strings/test_cat.py
index 8abbc59343e78..4decdff8063a8 100644
--- a/pandas/tests/strings/test_cat.py
+++ b/pandas/tests/strings/test_cat.py
@@ -376,3 +376,22 @@ def test_cat_different_classes(klass):
result = s.str.cat(klass(["x", "y", "z"]))
expected = Series(["ax", "by", "cz"])
tm.assert_series_equal(result, expected)
+
+
+def test_cat_on_series_dot_str():
+ # GH 28277
+ # Test future warning of `Series.str.__iter__`
+ ps = Series(["AbC", "de", "FGHI", "j", "kLLLm"])
+ with tm.assert_produces_warning(FutureWarning):
+ ps.str.cat(others=ps.str)
+ # TODO(2.0): The following code can be uncommented
+ # when `Series.str.__iter__` is removed.
+
+ # message = re.escape(
+ # "others must be Series, Index, DataFrame, np.ndarray "
+ # "or list-like (either containing only strings or "
+ # "containing only objects of type Series/Index/"
+ # "np.ndarray[1-dim])"
+ # )
+ # with pytest.raises(TypeError, match=message):
+ # ps.str.cat(others=ps.str)
| - [x] closes #28277 (Replace xxxx with the Github issue number)
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47755 | 2022-07-16T20:41:33Z | 2022-07-19T19:06:30Z | 2022-07-19T19:06:30Z | 2022-07-19T21:30:34Z |
BUG: Set y-axis label, limits and ticks for a secondary y-axis (#47753) | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index b081f743f9b0b..6283321c7f710 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -977,6 +977,7 @@ Plotting
- The function :meth:`DataFrame.plot.scatter` now accepts ``color`` as an alias for ``c`` and ``size`` as an alias for ``s`` for consistency to other plotting functions (:issue:`44670`)
- Fix showing "None" as ylabel in :meth:`Series.plot` when not setting ylabel (:issue:`46129`)
- Bug in :meth:`DataFrame.plot` that led to xticks and vertical grids being improperly placed when plotting a quarterly series (:issue:`47602`)
+- Bug in :meth:`DataFrame.plot` that prevented setting y-axis label, limits and ticks for a secondary y-axis (:issue:`47753`)
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 3641cd7213fec..301474edc6a8e 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -679,6 +679,7 @@ def _adorn_subplots(self):
)
for ax in self.axes:
+ ax = getattr(ax, "right_ax", ax)
if self.yticks is not None:
ax.set_yticks(self.yticks)
diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py
index 3ec3744e43653..538c9c2fb5059 100644
--- a/pandas/tests/plotting/frame/test_frame.py
+++ b/pandas/tests/plotting/frame/test_frame.py
@@ -2204,6 +2204,17 @@ def test_xlabel_ylabel_dataframe_plane_plot(self, kind, xlabel, ylabel):
assert ax.get_xlabel() == (xcol if xlabel is None else xlabel)
assert ax.get_ylabel() == (ycol if ylabel is None else ylabel)
+ @pytest.mark.parametrize("secondary_y", (False, True))
+ def test_secondary_y(self, secondary_y):
+ ax_df = DataFrame([0]).plot(
+ secondary_y=secondary_y, ylabel="Y", ylim=(0, 100), yticks=[99]
+ )
+ for ax in ax_df.figure.axes:
+ if ax.yaxis.get_visible():
+ assert ax.get_ylabel() == "Y"
+ assert ax.get_ylim() == (0, 100)
+ assert ax.get_yticks()[0] == 99
+
def _generate_4_axes_via_gridspec():
import matplotlib as mpl
| When passing `secondary_y=True` to a plotting function, a second axes with a
y-axis on the right side is created. Passing `ylabel`, `ylim` or `yticks` changed
these properties of the original invisible left y-axis, not the secondary
y-axis.
- [x] closes #47753
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47754 | 2022-07-16T19:03:10Z | 2022-07-18T17:37:41Z | 2022-07-18T17:37:41Z | 2022-07-19T08:22:49Z |
BUG: Fix pc.power_checked min version | diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 07b09d78016fd..8957ea493e9ad 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -18,6 +18,7 @@
from pandas.compat import (
pa_version_under1p01,
pa_version_under2p0,
+ pa_version_under4p0,
pa_version_under5p0,
pa_version_under6p0,
)
@@ -121,9 +122,9 @@ def floordiv_compat(
"rmod": NotImplemented,
"divmod": NotImplemented,
"rdivmod": NotImplemented,
- "pow": NotImplemented if pa_version_under2p0 else pc.power_checked,
+ "pow": NotImplemented if pa_version_under4p0 else pc.power_checked,
"rpow": NotImplemented
- if pa_version_under2p0
+ if pa_version_under4p0
else lambda x, y: pc.power_checked(y, x),
}
| xref https://github.com/pandas-dev/pandas/pull/47645#discussion_r922685768=
First supported in pyarrow 4.0: https://arrow.apache.org/docs/4.0/python/api/compute.html
| https://api.github.com/repos/pandas-dev/pandas/pulls/47752 | 2022-07-16T18:01:28Z | 2022-07-17T09:53:59Z | 2022-07-17T09:53:59Z | 2022-07-17T17:27:16Z |
TYP: def validate_* | diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi
index 12b113f0b73b1..4567dde4e056b 100644
--- a/pandas/_libs/tslibs/offsets.pyi
+++ b/pandas/_libs/tslibs/offsets.pyi
@@ -104,7 +104,9 @@ class SingleConstructorOffset(BaseOffset):
@overload
def to_offset(freq: None) -> None: ...
@overload
-def to_offset(freq: timedelta | BaseOffset | str) -> BaseOffset: ...
+def to_offset(freq: _BaseOffsetT) -> _BaseOffsetT: ...
+@overload
+def to_offset(freq: timedelta | str) -> BaseOffset: ...
class Tick(SingleConstructorOffset):
_reso: int
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py
index e3aa5bb52f2ba..140d41782e6d3 100644
--- a/pandas/compat/numpy/function.py
+++ b/pandas/compat/numpy/function.py
@@ -17,7 +17,11 @@
"""
from __future__ import annotations
-from typing import Any
+from typing import (
+ Any,
+ TypeVar,
+ overload,
+)
from numpy import ndarray
@@ -25,6 +29,7 @@
is_bool,
is_integer,
)
+from pandas._typing import Axis
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
validate_args,
@@ -32,6 +37,8 @@
validate_kwargs,
)
+AxisNoneT = TypeVar("AxisNoneT", Axis, None)
+
class CompatValidator:
def __init__(
@@ -84,7 +91,7 @@ def __call__(
)
-def process_skipna(skipna, args):
+def process_skipna(skipna: bool | ndarray | None, args) -> tuple[bool, Any]:
if isinstance(skipna, ndarray) or skipna is None:
args = (skipna,) + args
skipna = True
@@ -92,7 +99,7 @@ def process_skipna(skipna, args):
return skipna, args
-def validate_argmin_with_skipna(skipna, args, kwargs):
+def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
"""
If 'Series.argmin' is called via the 'numpy' library, the third parameter
in its signature is 'out', which takes either an ndarray or 'None', so
@@ -104,7 +111,7 @@ def validate_argmin_with_skipna(skipna, args, kwargs):
return skipna
-def validate_argmax_with_skipna(skipna, args, kwargs):
+def validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
"""
If 'Series.argmax' is called via the 'numpy' library, the third parameter
in its signature is 'out', which takes either an ndarray or 'None', so
@@ -137,7 +144,7 @@ def validate_argmax_with_skipna(skipna, args, kwargs):
)
-def validate_argsort_with_ascending(ascending, args, kwargs):
+def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool:
"""
If 'Categorical.argsort' is called via the 'numpy' library, the first
parameter in its signature is 'axis', which takes either an integer or
@@ -149,7 +156,8 @@ def validate_argsort_with_ascending(ascending, args, kwargs):
ascending = True
validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
- return ascending
+ # error: Incompatible return value type (got "int", expected "bool")
+ return ascending # type: ignore[return-value]
CLIP_DEFAULTS: dict[str, Any] = {"out": None}
@@ -158,7 +166,19 @@ def validate_argsort_with_ascending(ascending, args, kwargs):
)
-def validate_clip_with_axis(axis, args, kwargs):
+@overload
+def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None:
+ ...
+
+
+@overload
+def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT:
+ ...
+
+
+def validate_clip_with_axis(
+ axis: ndarray | AxisNoneT, args, kwargs
+) -> AxisNoneT | None:
"""
If 'NDFrame.clip' is called via the numpy library, the third parameter in
its signature is 'out', which can takes an ndarray, so check if the 'axis'
@@ -167,10 +187,14 @@ def validate_clip_with_axis(axis, args, kwargs):
"""
if isinstance(axis, ndarray):
args = (axis,) + args
- axis = None
+ # error: Incompatible types in assignment (expression has type "None",
+ # variable has type "Union[ndarray[Any, Any], str, int]")
+ axis = None # type: ignore[assignment]
validate_clip(args, kwargs)
- return axis
+ # error: Incompatible return value type (got "Union[ndarray[Any, Any],
+ # str, int]", expected "Union[str, int, None]")
+ return axis # type: ignore[return-value]
CUM_FUNC_DEFAULTS: dict[str, Any] = {}
@@ -184,7 +208,7 @@ def validate_clip_with_axis(axis, args, kwargs):
)
-def validate_cum_func_with_skipna(skipna, args, kwargs, name):
+def validate_cum_func_with_skipna(skipna, args, kwargs, name) -> bool:
"""
If this function is called via the 'numpy' library, the third parameter in
its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so
@@ -288,7 +312,7 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name):
validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs")
-def validate_take_with_convert(convert, args, kwargs):
+def validate_take_with_convert(convert: ndarray | bool | None, args, kwargs) -> bool:
"""
If this function is called via the 'numpy' library, the third parameter in
its signature is 'axis', which takes either an ndarray or 'None', so check
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 0f88ad9811bf0..325c94d0ea267 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -2164,7 +2164,17 @@ def ensure_arraylike_for_datetimelike(data, copy: bool, cls_name: str):
return data, copy
-def validate_periods(periods):
+@overload
+def validate_periods(periods: None) -> None:
+ ...
+
+
+@overload
+def validate_periods(periods: int | float) -> int:
+ ...
+
+
+def validate_periods(periods: int | float | None) -> int | None:
"""
If a `periods` argument is passed to the Datetime/Timedelta Array/Index
constructor, cast it to an integer.
@@ -2187,7 +2197,9 @@ def validate_periods(periods):
periods = int(periods)
elif not lib.is_integer(periods):
raise TypeError(f"periods must be a number, got {periods}")
- return periods
+ # error: Incompatible return value type (got "Optional[float]",
+ # expected "Optional[int]")
+ return periods # type: ignore[return-value]
def validate_inferred_freq(freq, inferred_freq, freq_infer):
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 106afcc3c12ea..d9f6cecc8d61d 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -251,7 +251,7 @@ def _scalar_type(self) -> type[Timestamp]:
# Constructors
_dtype: np.dtype | DatetimeTZDtype
- _freq = None
+ _freq: BaseOffset | None = None
_default_dtype = DT64NS_DTYPE # used in TimeLikeOps.__init__
@classmethod
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index fa7c4e0d0aa70..6e6de8399cc38 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -8,6 +8,8 @@
Callable,
Literal,
Sequence,
+ TypeVar,
+ overload,
)
import numpy as np
@@ -92,6 +94,8 @@
TimedeltaArray,
)
+BaseOffsetT = TypeVar("BaseOffsetT", bound=BaseOffset)
+
_shared_doc_kwargs = {
"klass": "PeriodArray",
@@ -976,7 +980,19 @@ def period_array(
return PeriodArray._from_sequence(data, dtype=dtype)
-def validate_dtype_freq(dtype, freq):
+@overload
+def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT:
+ ...
+
+
+@overload
+def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset:
+ ...
+
+
+def validate_dtype_freq(
+ dtype, freq: BaseOffsetT | timedelta | str | None
+) -> BaseOffsetT:
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
@@ -996,7 +1012,10 @@ def validate_dtype_freq(dtype, freq):
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
- freq = to_offset(freq)
+ # error: Incompatible types in assignment (expression has type
+ # "BaseOffset", variable has type "Union[BaseOffsetT, timedelta,
+ # str, None]")
+ freq = to_offset(freq) # type: ignore[assignment]
if dtype is not None:
dtype = pandas_dtype(dtype)
@@ -1006,7 +1025,9 @@ def validate_dtype_freq(dtype, freq):
freq = dtype.freq
elif freq != dtype.freq:
raise IncompatibleFrequency("specified freq and dtype are different")
- return freq
+ # error: Incompatible return value type (got "Union[BaseOffset, Any, None]",
+ # expected "BaseOffset")
+ return freq # type: ignore[return-value]
def dt64arr_to_periodarr(
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index 3676e6eb0091e..fc3439a57a002 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -5,6 +5,7 @@
from __future__ import annotations
from typing import (
+ Any,
Iterable,
Sequence,
TypeVar,
@@ -265,7 +266,9 @@ def validate_bool_kwarg(
return value
-def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
+def validate_axis_style_args(
+ data, args, kwargs, arg_name, method_name
+) -> dict[str, Any]:
"""
Argument handler for mixed index, columns / axis functions
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/47750 | 2022-07-16T15:49:09Z | 2022-07-18T17:38:58Z | 2022-07-18T17:38:58Z | 2022-09-10T01:39:04Z |
BUG: Correct numeric_only default for resample var and std | diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 2af9e09d1c713..83626a42134d6 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -937,7 +937,13 @@ def asfreq(self, fill_value=None):
"""
return self._upsample("asfreq", fill_value=fill_value)
- def std(self, ddof=1, numeric_only: bool = False, *args, **kwargs):
+ def std(
+ self,
+ ddof=1,
+ numeric_only: bool | lib.NoDefault = lib.no_default,
+ *args,
+ **kwargs,
+ ):
"""
Compute standard deviation of groups, excluding missing values.
@@ -958,7 +964,13 @@ def std(self, ddof=1, numeric_only: bool = False, *args, **kwargs):
nv.validate_resampler_func("std", args, kwargs)
return self._downsample("std", ddof=ddof, numeric_only=numeric_only)
- def var(self, ddof=1, numeric_only: bool = False, *args, **kwargs):
+ def var(
+ self,
+ ddof=1,
+ numeric_only: bool | lib.NoDefault = lib.no_default,
+ *args,
+ **kwargs,
+ ):
"""
Compute variance of groups, excluding missing values.
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 2d74b703b9bb1..c5cd777962df3 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -859,6 +859,10 @@ def test_frame_downsample_method(method, numeric_only, expected_data):
expected_index = date_range("2018-12-31", periods=1, freq="Y")
df = DataFrame({"cat": ["cat_1", "cat_2"], "num": [5, 20]}, index=index)
resampled = df.resample("Y")
+ if numeric_only is lib.no_default:
+ kwargs = {}
+ else:
+ kwargs = {"numeric_only": numeric_only}
func = getattr(resampled, method)
if numeric_only is lib.no_default and method not in (
@@ -882,9 +886,9 @@ def test_frame_downsample_method(method, numeric_only, expected_data):
if isinstance(expected_data, str):
klass = TypeError if method == "var" else ValueError
with pytest.raises(klass, match=expected_data):
- _ = func(numeric_only=numeric_only)
+ _ = func(**kwargs)
else:
- result = func(numeric_only=numeric_only)
+ result = func(**kwargs)
expected = DataFrame(expected_data, index=expected_index)
tm.assert_frame_equal(result, expected)
| - [x] closes #46560 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
No whatsnew since this was introduced in 1.5.0. The test would have caught it but it was explicitly passing `numeric_only=lib.no_default` instead of just not passing any arg.
As far as I know, this was the last task for #46560. | https://api.github.com/repos/pandas-dev/pandas/pulls/47749 | 2022-07-16T15:42:47Z | 2022-07-16T18:19:56Z | 2022-07-16T18:19:56Z | 2022-07-18T21:09:50Z |
FIX: PeriodIndex json roundtrip | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 82090c93a965e..252bea3ba774a 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -956,6 +956,7 @@ I/O
- Bug in :func:`read_sas` that scrambled column names (:issue:`31243`)
- Bug in :func:`read_sas` with RLE-compressed SAS7BDAT files that contain 0x00 control bytes (:issue:`47099`)
- Bug in :func:`read_parquet` with ``use_nullable_dtypes=True`` where ``float64`` dtype was returned instead of nullable ``Float64`` dtype (:issue:`45694`)
+- Bug in :meth:`DataFrame.to_json` where ``PeriodDtype`` would not make the serialization roundtrip when read back with :meth:`read_json` (:issue:`44720`)
Period
^^^^^^
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py
index 44c5ce0e5ee83..b7a8b5cc82f7a 100644
--- a/pandas/io/json/_table_schema.py
+++ b/pandas/io/json/_table_schema.py
@@ -197,6 +197,9 @@ def convert_json_field_to_pandas_type(field) -> str | CategoricalDtype:
elif typ == "datetime":
if field.get("tz"):
return f"datetime64[ns, {field['tz']}]"
+ elif field.get("freq"):
+ # GH#47747 using datetime over period to minimize the change surface
+ return f"period[{field['freq']}]"
else:
return "datetime64[ns]"
elif typ == "any":
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index c90ac2fb3b813..f4c8b9e764d6d 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -708,6 +708,44 @@ def test_read_json_table_orient_raises(self, index_nm, vals, recwarn):
with pytest.raises(NotImplementedError, match="can not yet read "):
pd.read_json(out, orient="table")
+ @pytest.mark.parametrize(
+ "index_nm",
+ [None, "idx", pytest.param("index", marks=pytest.mark.xfail), "level_0"],
+ )
+ @pytest.mark.parametrize(
+ "vals",
+ [
+ {"ints": [1, 2, 3, 4]},
+ {"objects": ["a", "b", "c", "d"]},
+ {"objects": ["1", "2", "3", "4"]},
+ {"date_ranges": pd.date_range("2016-01-01", freq="d", periods=4)},
+ {"categoricals": pd.Series(pd.Categorical(["a", "b", "c", "c"]))},
+ {
+ "ordered_cats": pd.Series(
+ pd.Categorical(["a", "b", "c", "c"], ordered=True)
+ )
+ },
+ {"floats": [1.0, 2.0, 3.0, 4.0]},
+ {"floats": [1.1, 2.2, 3.3, 4.4]},
+ {"bools": [True, False, False, True]},
+ {
+ "timezones": pd.date_range(
+ "2016-01-01", freq="d", periods=4, tz="US/Central"
+ ) # added in # GH 35973
+ },
+ ],
+ )
+ def test_read_json_table_period_orient(self, index_nm, vals, recwarn):
+ df = DataFrame(
+ vals,
+ index=pd.Index(
+ (pd.Period(f"2022Q{q}") for q in range(1, 5)), name=index_nm
+ ),
+ )
+ out = df.to_json(orient="table")
+ result = pd.read_json(out, orient="table")
+ tm.assert_frame_equal(df, result)
+
@pytest.mark.parametrize(
"idx",
[
| - [X] closes #44720
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47747 | 2022-07-16T11:09:54Z | 2022-07-18T22:41:22Z | 2022-07-18T22:41:21Z | 2022-07-18T22:41:28Z |
ENH/TST: Add quantile & mode tests for ArrowExtensionArray | diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 5db859897b663..147134afd70c3 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -28,6 +28,7 @@
pa_version_under6p0,
pa_version_under7p0,
pa_version_under8p0,
+ pa_version_under9p0,
)
if TYPE_CHECKING:
@@ -160,4 +161,5 @@ def get_lzma_file() -> type[lzma.LZMAFile]:
"pa_version_under6p0",
"pa_version_under7p0",
"pa_version_under8p0",
+ "pa_version_under9p0",
]
diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py
index 833cda20368a2..6965865acb5da 100644
--- a/pandas/compat/pyarrow.py
+++ b/pandas/compat/pyarrow.py
@@ -17,6 +17,7 @@
pa_version_under6p0 = _palv < Version("6.0.0")
pa_version_under7p0 = _palv < Version("7.0.0")
pa_version_under8p0 = _palv < Version("8.0.0")
+ pa_version_under9p0 = _palv < Version("9.0.0")
except ImportError:
pa_version_under1p01 = True
pa_version_under2p0 = True
@@ -26,3 +27,4 @@
pa_version_under6p0 = True
pa_version_under7p0 = True
pa_version_under8p0 = True
+ pa_version_under9p0 = True
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index b0e4d46564ba4..2c4859061998b 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -825,6 +825,57 @@ def _indexing_key_to_indices(
indices = np.arange(n)[key]
return indices
+ # TODO: redefine _rank using pc.rank with pyarrow 9.0
+
+ def _quantile(
+ self: ArrowExtensionArrayT, qs: npt.NDArray[np.float64], interpolation: str
+ ) -> ArrowExtensionArrayT:
+ """
+ Compute the quantiles of self for each quantile in `qs`.
+
+ Parameters
+ ----------
+ qs : np.ndarray[float64]
+ interpolation: str
+
+ Returns
+ -------
+ same type as self
+ """
+ if pa_version_under4p0:
+ raise NotImplementedError(
+ "quantile only supported for pyarrow version >= 4.0"
+ )
+ result = pc.quantile(self._data, q=qs, interpolation=interpolation)
+ return type(self)(result)
+
+ def _mode(self: ArrowExtensionArrayT, dropna: bool = True) -> ArrowExtensionArrayT:
+ """
+ Returns the mode(s) of the ExtensionArray.
+
+ Always returns `ExtensionArray` even if only one value.
+
+ Parameters
+ ----------
+ dropna : bool, default True
+ Don't consider counts of NA values.
+ Not implemented by pyarrow.
+
+ Returns
+ -------
+ same type as self
+ Sorted, if possible.
+ """
+ if pa_version_under6p0:
+ raise NotImplementedError("mode only supported for pyarrow version >= 6.0")
+ modes = pc.mode(self._data, pc.count_distinct(self._data).as_py())
+ values = modes.field(0)
+ counts = modes.field(1)
+ # counts sorted descending i.e counts[0] = max
+ mask = pc.equal(counts, counts[0])
+ most_common = values.filter(mask)
+ return type(self)(most_common)
+
def _maybe_convert_setitem_value(self, value):
"""Maybe convert value to be pyarrow compatible."""
# TODO: Make more robust like ArrowStringArray._maybe_convert_setitem_value
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index a2a96da02b2a6..136c147c07f2e 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -10,7 +10,6 @@
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
-
from datetime import (
date,
datetime,
@@ -24,8 +23,10 @@
from pandas.compat import (
pa_version_under2p0,
pa_version_under3p0,
+ pa_version_under4p0,
pa_version_under6p0,
pa_version_under8p0,
+ pa_version_under9p0,
)
import pandas as pd
@@ -1946,3 +1947,72 @@ def test_compare_array(self, data, comparison_op, na_value, request):
def test_arrowdtype_construct_from_string_type_with_unsupported_parameters():
with pytest.raises(NotImplementedError, match="Passing pyarrow type"):
ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]")
+
+
+@pytest.mark.xfail(
+ pa_version_under4p0,
+ raises=NotImplementedError,
+ reason="quantile only supported for pyarrow version >= 4.0",
+)
+@pytest.mark.parametrize(
+ "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
+)
+@pytest.mark.parametrize("quantile", [0.5, [0.5, 0.5]])
+def test_quantile(data, interpolation, quantile, request):
+ pa_dtype = data.dtype.pyarrow_dtype
+ if not (pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype)):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=pa.ArrowNotImplementedError,
+ reason=f"quantile not supported by pyarrow for {pa_dtype}",
+ )
+ )
+ data = data.take([0, 0, 0])
+ ser = pd.Series(data)
+ result = ser.quantile(q=quantile, interpolation=interpolation)
+ if quantile == 0.5:
+ assert result == data[0]
+ else:
+ # Just check the values
+ result = result.astype("float64[pyarrow]")
+ expected = pd.Series(
+ data.take([0, 0]).astype("float64[pyarrow]"), index=[0.5, 0.5]
+ )
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.xfail(
+ pa_version_under6p0,
+ raises=NotImplementedError,
+ reason="mode only supported for pyarrow version >= 6.0",
+)
+@pytest.mark.parametrize("dropna", [True, False])
+@pytest.mark.parametrize(
+ "take_idx, exp_idx",
+ [[[0, 0, 2, 2, 4, 4], [4, 0]], [[0, 0, 0, 2, 4, 4], [0]]],
+ ids=["multi_mode", "single_mode"],
+)
+def test_mode(data_for_grouping, dropna, take_idx, exp_idx, request):
+ pa_dtype = data_for_grouping.dtype.pyarrow_dtype
+ if pa.types.is_temporal(pa_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=pa.ArrowNotImplementedError,
+ reason=f"mode not supported by pyarrow for {pa_dtype}",
+ )
+ )
+ elif (
+ pa.types.is_boolean(pa_dtype)
+ and "multi_mode" in request.node.nodeid
+ and pa_version_under9p0
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="https://issues.apache.org/jira/browse/ARROW-17096",
+ )
+ )
+ data = data_for_grouping.take(take_idx)
+ ser = pd.Series(data)
+ result = ser.mode(dropna=dropna)
+ expected = pd.Series(data_for_grouping.take(exp_idx))
+ tm.assert_series_equal(result, expected)
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47744 | 2022-07-15T21:27:05Z | 2022-07-28T18:12:36Z | 2022-07-28T18:12:36Z | 2022-07-28T18:23:36Z |
TST: add test for last() on dataframe grouped by on boolean column (#46409) | diff --git a/pandas/tests/frame/methods/test_dtypes.py b/pandas/tests/frame/methods/test_dtypes.py
index 31592f987f04d..87e6ed5b1b135 100644
--- a/pandas/tests/frame/methods/test_dtypes.py
+++ b/pandas/tests/frame/methods/test_dtypes.py
@@ -1,6 +1,7 @@
from datetime import timedelta
import numpy as np
+import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
@@ -79,6 +80,20 @@ def test_dtypes_are_correct_after_column_slice(self):
Series({"a": np.float_, "b": np.float_, "c": np.float_}),
)
+ @pytest.mark.parametrize(
+ "data",
+ [pd.NA, True],
+ )
+ def test_dtypes_are_correct_after_groupby_last(self, data):
+ # GH46409
+ df = DataFrame(
+ {"id": [1, 2, 3, 4], "test": [True, pd.NA, data, False]}
+ ).convert_dtypes()
+ result = df.groupby("id").last().test
+ expected = df.set_index("id").test
+ assert result.dtype == pd.BooleanDtype()
+ tm.assert_series_equal(expected, result)
+
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
| - [x] closes #46409
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47736 | 2022-07-15T14:17:44Z | 2022-07-18T19:06:22Z | 2022-07-18T19:06:21Z | 2022-07-18T19:06:31Z |
DOC: Updating some capitalization in doc/source/user_guide #32550 | diff --git a/doc/source/user_guide/sparse.rst b/doc/source/user_guide/sparse.rst
index ef2cb8909b59d..bc4eec1c23a35 100644
--- a/doc/source/user_guide/sparse.rst
+++ b/doc/source/user_guide/sparse.rst
@@ -266,8 +266,8 @@ have no replacement.
.. _sparse.scipysparse:
-Interaction with scipy.sparse
------------------------------
+Interaction with *scipy.sparse*
+-------------------------------
Use :meth:`DataFrame.sparse.from_spmatrix` to create a :class:`DataFrame` with sparse values from a sparse matrix.
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index c67d028b65b3e..ed7688f229ca8 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -388,7 +388,7 @@ We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by
.. _timeseries.origin:
-Using the ``origin`` Parameter
+Using the ``origin`` parameter
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Using the ``origin`` parameter, one can specify an alternative starting point for creation
@@ -1523,7 +1523,7 @@ or calendars with additional rules.
.. _timeseries.advanced_datetime:
-Time series-related instance methods
+Time Series-related instance methods
------------------------------------
Shifting / lagging
@@ -2601,7 +2601,7 @@ Transform nonexistent times to ``NaT`` or shift the times.
.. _timeseries.timezone_series:
-Time zone series operations
+Time zone Series operations
~~~~~~~~~~~~~~~~~~~~~~~~~~~
A :class:`Series` with time zone **naive** values is
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index 72600289dcf75..d6426fe8bed2d 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -3,7 +3,7 @@
{{ header }}
*******************
-Chart Visualization
+Chart visualization
*******************
This section demonstrates visualization through charting. For information on
@@ -1746,7 +1746,7 @@ Andrews curves charts:
plt.close("all")
-Plotting directly with matplotlib
+Plotting directly with Matplotlib
---------------------------------
In some situations it may still be preferable or necessary to prepare plots
diff --git a/doc/source/user_guide/window.rst b/doc/source/user_guide/window.rst
index 2407fd3113830..e08fa81c5fa09 100644
--- a/doc/source/user_guide/window.rst
+++ b/doc/source/user_guide/window.rst
@@ -3,7 +3,7 @@
{{ header }}
********************
-Windowing Operations
+Windowing operations
********************
pandas contains a compact set of APIs for performing windowing operations - an operation that performs
@@ -490,7 +490,7 @@ For all supported aggregation functions, see :ref:`api.functions_expanding`.
.. _window.exponentially_weighted:
-Exponentially Weighted window
+Exponentially weighted window
-----------------------------
An exponentially weighted window is similar to an expanding window but with each prior point
|
- [x] contributes to #32550
- [x] I left RadViz, SparseArray, SparseDtype, and PandasObject as is
| https://api.github.com/repos/pandas-dev/pandas/pulls/47732 | 2022-07-15T04:00:32Z | 2022-07-22T18:05:06Z | 2022-07-22T18:05:06Z | 2022-07-22T18:05:06Z |
BUG: groupby.corrwith fails with axis=1 and other=df | diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 89e47af4cb614..b44db88723742 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1024,7 +1024,7 @@ def curried(x):
curried, self._obj_with_exclusions, is_transform=is_transform
)
- if self._selected_obj.ndim != 1 and self.axis != 1:
+ if self._selected_obj.ndim != 1 and self.axis != 1 and result.ndim != 1:
missing = self._obj_with_exclusions.columns.difference(result.columns)
if len(missing) > 0:
warn_dropping_nuisance_columns_deprecated(
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 7d6c5310942e2..de2ff20ff3a96 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1505,3 +1505,15 @@ def test_groupby_empty_dataset(dtype, kwargs):
expected = df.groupby("A").B.describe(**kwargs).reset_index(drop=True).iloc[:0]
expected.index = Index([])
tm.assert_frame_equal(result, expected)
+
+
+def test_corrwith_with_1_axis():
+ # GH 47723
+ df = DataFrame({"a": [1, 1, 2], "b": [3, 7, 4]})
+ result = df.groupby("a").corrwith(df, axis=1)
+ index = Index(
+ data=[(1, 0), (1, 1), (1, 2), (2, 2), (2, 0), (2, 1)],
+ name=("a", None),
+ )
+ expected = Series([np.nan] * 6, index=index)
+ tm.assert_series_equal(result, expected)
| - [x] closes #47723
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Since this one is only confirmed on master, whatsnew ommited. | https://api.github.com/repos/pandas-dev/pandas/pulls/47731 | 2022-07-15T03:43:15Z | 2022-07-15T20:50:50Z | 2022-07-15T20:50:50Z | 2022-07-15T21:01:38Z |
ENH/TST: Add Reduction tests for ArrowExtensionArray | diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 8957ea493e9ad..ab0e262caa6a9 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -628,6 +628,69 @@ def _concat_same_type(
arr = pa.chunked_array(chunks)
return cls(arr)
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
+ """
+ Return a scalar result of performing the reduction operation.
+
+ Parameters
+ ----------
+ name : str
+ Name of the function, supported values are:
+ { any, all, min, max, sum, mean, median, prod,
+ std, var, sem, kurt, skew }.
+ skipna : bool, default True
+ If True, skip NaN values.
+ **kwargs
+ Additional keyword arguments passed to the reduction function.
+ Currently, `ddof` is the only supported kwarg.
+
+ Returns
+ -------
+ scalar
+
+ Raises
+ ------
+ TypeError : subclass does not define reductions
+ """
+ if name == "sem":
+
+ def pyarrow_meth(data, skipna, **kwargs):
+ numerator = pc.stddev(data, skip_nulls=skipna, **kwargs)
+ denominator = pc.sqrt_checked(
+ pc.subtract_checked(
+ pc.count(self._data, skip_nulls=skipna), kwargs["ddof"]
+ )
+ )
+ return pc.divide_checked(numerator, denominator)
+
+ else:
+ pyarrow_name = {
+ "median": "approximate_median",
+ "prod": "product",
+ "std": "stddev",
+ "var": "variance",
+ }.get(name, name)
+ # error: Incompatible types in assignment
+ # (expression has type "Optional[Any]", variable has type
+ # "Callable[[Any, Any, KwArg(Any)], Any]")
+ pyarrow_meth = getattr(pc, pyarrow_name, None) # type: ignore[assignment]
+ if pyarrow_meth is None:
+ # Let ExtensionArray._reduce raise the TypeError
+ return super()._reduce(name, skipna=skipna, **kwargs)
+ try:
+ result = pyarrow_meth(self._data, skip_nulls=skipna, **kwargs)
+ except (AttributeError, NotImplementedError, TypeError) as err:
+ msg = (
+ f"'{type(self).__name__}' with dtype {self.dtype} "
+ f"does not support reduction '{name}' with pyarrow "
+ f"version {pa.__version__}. '{name}' may be supported by "
+ f"upgrading pyarrow."
+ )
+ raise TypeError(msg) from err
+ if pc.is_null(result).as_py():
+ return self.dtype.na_value
+ return result.as_py()
+
def __setitem__(self, key: int | slice | np.ndarray, value: Any) -> None:
"""Set one or more values inplace.
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index 4376a0de37a8c..6a17a56a47cbc 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -5,7 +5,10 @@
import numpy as np
import pytest
-from pandas.compat import pa_version_under2p0
+from pandas.compat import (
+ pa_version_under2p0,
+ pa_version_under6p0,
+)
from pandas.errors import PerformanceWarning
import pandas.util._test_decorators as td
@@ -375,7 +378,7 @@ def test_reduce_missing(skipna, dtype):
@pytest.mark.parametrize("method", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max(method, skipna, dtype, request):
- if dtype.storage == "pyarrow":
+ if dtype.storage == "pyarrow" and pa_version_under6p0:
reason = "'ArrowStringArray' object has no attribute 'max'"
mark = pytest.mark.xfail(raises=TypeError, reason=reason)
request.node.add_marker(mark)
@@ -392,7 +395,7 @@ def test_min_max(method, skipna, dtype, request):
@pytest.mark.parametrize("method", ["min", "max"])
@pytest.mark.parametrize("box", [pd.Series, pd.array])
def test_min_max_numpy(method, box, dtype, request):
- if dtype.storage == "pyarrow":
+ if dtype.storage == "pyarrow" and (pa_version_under6p0 or box is pd.array):
if box is pd.array:
reason = "'<=' not supported between instances of 'str' and 'NoneType'"
else:
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index ef576692c83b6..62f8a855ce263 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -24,6 +24,7 @@
from pandas.compat import (
pa_version_under2p0,
pa_version_under3p0,
+ pa_version_under6p0,
pa_version_under8p0,
)
@@ -303,6 +304,95 @@ def test_loc_iloc_frame_single_dtype(self, request, using_array_manager, data):
super().test_loc_iloc_frame_single_dtype(data)
+class TestBaseNumericReduce(base.BaseNumericReduceTests):
+ def check_reduce(self, ser, op_name, skipna):
+ pa_dtype = ser.dtype.pyarrow_dtype
+ result = getattr(ser, op_name)(skipna=skipna)
+ if pa.types.is_boolean(pa_dtype):
+ # Can't convert if ser contains NA
+ pytest.skip(
+ "pandas boolean data with NA does not fully support all reductions"
+ )
+ elif pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype):
+ ser = ser.astype("Float64")
+ expected = getattr(ser, op_name)(skipna=skipna)
+ tm.assert_almost_equal(result, expected)
+
+ @pytest.mark.parametrize("skipna", [True, False])
+ def test_reduce_series(self, data, all_numeric_reductions, skipna, request):
+ pa_dtype = data.dtype.pyarrow_dtype
+ xfail_mark = pytest.mark.xfail(
+ raises=TypeError,
+ reason=(
+ f"{all_numeric_reductions} is not implemented in "
+ f"pyarrow={pa.__version__} for {pa_dtype}"
+ ),
+ )
+ if all_numeric_reductions in {"skew", "kurt"}:
+ request.node.add_marker(xfail_mark)
+ elif (
+ all_numeric_reductions in {"median", "var", "std", "prod", "max", "min"}
+ and pa_version_under6p0
+ ):
+ request.node.add_marker(xfail_mark)
+ elif all_numeric_reductions in {"sum", "mean"} and pa_version_under2p0:
+ request.node.add_marker(xfail_mark)
+ elif (
+ all_numeric_reductions in {"sum", "mean"}
+ and skipna is False
+ and pa_version_under6p0
+ and (pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype))
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=AssertionError,
+ reason=(
+ f"{all_numeric_reductions} with skip_nulls={skipna} did not "
+ f"return NA for {pa_dtype} with pyarrow={pa.__version__}"
+ ),
+ )
+ )
+ elif not (
+ pa.types.is_integer(pa_dtype)
+ or pa.types.is_floating(pa_dtype)
+ or pa.types.is_boolean(pa_dtype)
+ ) and not (
+ all_numeric_reductions in {"min", "max"}
+ and (pa.types.is_temporal(pa_dtype) and not pa.types.is_duration(pa_dtype))
+ ):
+ request.node.add_marker(xfail_mark)
+ elif pa.types.is_boolean(pa_dtype) and all_numeric_reductions in {
+ "std",
+ "var",
+ "median",
+ }:
+ request.node.add_marker(xfail_mark)
+ super().test_reduce_series(data, all_numeric_reductions, skipna)
+
+
+class TestBaseBooleanReduce(base.BaseBooleanReduceTests):
+ @pytest.mark.parametrize("skipna", [True, False])
+ def test_reduce_series(
+ self, data, all_boolean_reductions, skipna, na_value, request
+ ):
+ pa_dtype = data.dtype.pyarrow_dtype
+ xfail_mark = pytest.mark.xfail(
+ raises=TypeError,
+ reason=(
+ f"{all_boolean_reductions} is not implemented in "
+ f"pyarrow={pa.__version__} for {pa_dtype}"
+ ),
+ )
+ if not pa.types.is_boolean(pa_dtype):
+ request.node.add_marker(xfail_mark)
+ elif pa_version_under3p0:
+ request.node.add_marker(xfail_mark)
+ op_name = all_boolean_reductions
+ s = pd.Series(data)
+ result = getattr(s, op_name)(skipna=skipna)
+ assert result is (op_name == "any")
+
+
class TestBaseGroupby(base.BaseGroupbyTests):
def test_groupby_agg_extension(self, data_for_grouping, request):
tz = getattr(data_for_grouping.dtype.pyarrow_dtype, "tz", None)
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47730 | 2022-07-15T03:10:09Z | 2022-07-22T02:30:18Z | 2022-07-22T02:30:18Z | 2022-07-22T02:57:02Z |
TYP: freq and na_value | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 325c94d0ea267..c3fbd716ad09d 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -2202,7 +2202,9 @@ def validate_periods(periods: int | float | None) -> int | None:
return periods # type: ignore[return-value]
-def validate_inferred_freq(freq, inferred_freq, freq_infer):
+def validate_inferred_freq(
+ freq, inferred_freq, freq_infer
+) -> tuple[BaseOffset | None, bool]:
"""
If the user passes a freq and another freq is inferred from passed data,
require that they match.
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index d9f6cecc8d61d..7a56bba0e58b3 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -264,7 +264,10 @@ def _validate_dtype(cls, values, dtype):
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
- cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE
+ cls,
+ values: np.ndarray,
+ freq: BaseOffset | None = None,
+ dtype=DT64NS_DTYPE,
) -> DatetimeArray:
assert isinstance(values, np.ndarray)
assert dtype.kind == "M"
@@ -291,7 +294,7 @@ def _from_sequence_not_strict(
dtype=None,
copy: bool = False,
tz=None,
- freq=lib.no_default,
+ freq: str | BaseOffset | lib.NoDefault | None = lib.no_default,
dayfirst: bool = False,
yearfirst: bool = False,
ambiguous="raise",
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index fe101926a6782..427c744b92a0a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1746,7 +1746,7 @@ def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
copy: bool = False,
- na_value=lib.no_default,
+ na_value: object = lib.no_default,
) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index f776585926024..3a7adb19f1c01 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -25,6 +25,7 @@
lib,
)
from pandas._libs.tslibs import (
+ BaseOffset,
Resolution,
periods_per_day,
timezones,
@@ -312,7 +313,7 @@ def isocalendar(self) -> DataFrame:
def __new__(
cls,
data=None,
- freq=lib.no_default,
+ freq: str | BaseOffset | lib.NoDefault = lib.no_default,
tz=None,
normalize: bool = False,
closed=None,
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index 3a8ed54d6c634..88f81064b826f 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -1132,7 +1132,7 @@ def as_array(
self,
dtype=None,
copy: bool = False,
- na_value=lib.no_default,
+ na_value: object = lib.no_default,
) -> np.ndarray:
"""
Convert the blockmanager data into an numpy array.
| Type annotations are only related by having `lib.no_default` as a default value (pyright then assumes that their type is `lib.NoDefault`). | https://api.github.com/repos/pandas-dev/pandas/pulls/47729 | 2022-07-15T02:19:11Z | 2022-07-18T19:19:56Z | 2022-07-18T19:19:56Z | 2022-09-10T01:39:00Z |
DEPR: deprecate unused errors in NDFrame.where/mask | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 9651269963803..112f5f08a3393 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -771,6 +771,7 @@ Other Deprecations
- Clarified warning from :func:`to_datetime` when delimited dates can't be parsed in accordance to specified ``dayfirst`` argument (:issue:`46210`)
- Deprecated :class:`Series` and :class:`Resampler` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) raising a ``NotImplementedError`` when the dtype is non-numric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`)
- Deprecated :meth:`Series.rank` returning an empty result when the dtype is non-numeric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`)
+- Deprecated argument ``errors`` for :meth:`Series.mask`, :meth:`Series.where`, :meth:`DataFrame.mask`, and :meth:`DataFrame.where` as ``errors`` had no effect on this methods (:issue:`47728`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.performance:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ead4ea744c647..e70312c562907 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -11788,6 +11788,7 @@ def where(
...
# error: Signature of "where" incompatible with supertype "NDFrame"
+ @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["self", "cond", "other"]
)
@@ -11807,7 +11808,6 @@ def where( # type: ignore[override]
inplace=inplace,
axis=axis,
level=level,
- errors=errors,
try_cast=try_cast,
)
@@ -11854,6 +11854,7 @@ def mask(
...
# error: Signature of "mask" incompatible with supertype "NDFrame"
+ @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["self", "cond", "other"]
)
@@ -11873,7 +11874,6 @@ def mask( # type: ignore[override]
inplace=inplace,
axis=axis,
level=level,
- errors=errors,
try_cast=try_cast,
)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 4325f0eb04a9c..6e00f33f486d9 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9381,7 +9381,6 @@ def _where(
inplace=False,
axis=None,
level=None,
- errors: IgnoreRaise | lib.NoDefault = "raise",
):
"""
Equivalent to public method `where`, except that `other` is not
@@ -9548,6 +9547,7 @@ def where(
) -> NDFrameT | None:
...
+ @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["self", "cond", "other"]
)
@@ -9599,6 +9599,9 @@ def where(
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
+ .. deprecated:: 1.5.0
+ This argument had no effect.
+
try_cast : bool, default None
Try to cast the result back to the input type (if possible).
@@ -9721,7 +9724,7 @@ def where(
stacklevel=find_stack_level(),
)
- return self._where(cond, other, inplace, axis, level, errors=errors)
+ return self._where(cond, other, inplace, axis, level)
@overload
def mask(
@@ -9765,6 +9768,7 @@ def mask(
) -> NDFrameT | None:
...
+ @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["self", "cond", "other"]
)
@@ -9808,7 +9812,6 @@ def mask(
inplace=inplace,
axis=axis,
level=level,
- errors=errors,
)
@doc(klass=_shared_doc_kwargs["klass"])
diff --git a/pandas/core/series.py b/pandas/core/series.py
index ef4ea0172c505..60898ee75f7c2 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -61,6 +61,7 @@
from pandas.util._decorators import (
Appender,
Substitution,
+ deprecate_kwarg,
deprecate_nonkeyword_arguments,
doc,
)
@@ -6069,6 +6070,7 @@ def where(
...
# error: Signature of "where" incompatible with supertype "NDFrame"
+ @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["self", "cond", "other"]
)
@@ -6088,7 +6090,6 @@ def where( # type: ignore[override]
inplace=inplace,
axis=axis,
level=level,
- errors=errors,
try_cast=try_cast,
)
@@ -6135,6 +6136,7 @@ def mask(
...
# error: Signature of "mask" incompatible with supertype "NDFrame"
+ @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["self", "cond", "other"]
)
@@ -6154,7 +6156,6 @@ def mask( # type: ignore[override]
inplace=inplace,
axis=axis,
level=level,
- errors=errors,
try_cast=try_cast,
)
diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py
index 9d004613116b8..5b9883f3866e7 100644
--- a/pandas/tests/frame/indexing/test_where.py
+++ b/pandas/tests/frame/indexing/test_where.py
@@ -1035,3 +1035,17 @@ def test_where_dt64_2d():
mask[:] = True
expected = df
_check_where_equivalences(df, mask, other, expected)
+
+
+def test_where_mask_deprecated(frame_or_series):
+ # GH 47728
+ obj = DataFrame(np.random.randn(4, 3))
+ obj = tm.get_obj(obj, frame_or_series)
+
+ mask = obj > 0
+
+ with tm.assert_produces_warning(FutureWarning):
+ obj.where(mask, -1, errors="raise")
+
+ with tm.assert_produces_warning(FutureWarning):
+ obj.mask(mask, -1, errors="raise")
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/47728 | 2022-07-15T02:15:41Z | 2022-07-15T17:37:34Z | 2022-07-15T17:37:34Z | 2022-09-10T01:39:06Z |
DOC: update min package versions in install.rst to align with v.1.5.0 requirements | diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index 39c9db2c883b8..5d9bfd97030b5 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -199,7 +199,7 @@ the code base as of this writing. To run it on your machine to verify that
everything is working (and that you have all of the dependencies, soft and hard,
installed), make sure you have `pytest
<https://docs.pytest.org/en/latest/>`__ >= 6.0 and `Hypothesis
-<https://hypothesis.readthedocs.io/en/latest/>`__ >= 3.58, then run:
+<https://hypothesis.readthedocs.io/en/latest/>`__ >= 6.13.0, then run:
::
@@ -247,11 +247,11 @@ Recommended dependencies
* `numexpr <https://github.com/pydata/numexpr>`__: for accelerating certain numerical operations.
``numexpr`` uses multiple cores as well as smart chunking and caching to achieve large speedups.
- If installed, must be Version 2.7.1 or higher.
+ If installed, must be Version 2.7.3 or higher.
* `bottleneck <https://github.com/pydata/bottleneck>`__: for accelerating certain types of ``nan``
evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups. If installed,
- must be Version 1.3.1 or higher.
+ must be Version 1.3.2 or higher.
.. note::
@@ -277,8 +277,8 @@ Visualization
Dependency Minimum Version Notes
========================= ================== =============================================================
matplotlib 3.3.2 Plotting library
-Jinja2 2.11 Conditional formatting with DataFrame.style
-tabulate 0.8.7 Printing in Markdown-friendly format (see `tabulate`_)
+Jinja2 3.0.0 Conditional formatting with DataFrame.style
+tabulate 0.8.9 Printing in Markdown-friendly format (see `tabulate`_)
========================= ================== =============================================================
Computation
@@ -287,10 +287,10 @@ Computation
========================= ================== =============================================================
Dependency Minimum Version Notes
========================= ================== =============================================================
-SciPy 1.4.1 Miscellaneous statistical functions
-numba 0.50.1 Alternative execution engine for rolling operations
+SciPy 1.7.1 Miscellaneous statistical functions
+numba 0.53.1 Alternative execution engine for rolling operations
(see :ref:`Enhancing Performance <enhancingperf.numba>`)
-xarray 0.15.1 pandas-like API for N-dimensional data
+xarray 0.19.0 pandas-like API for N-dimensional data
========================= ================== =============================================================
Excel files
@@ -301,9 +301,9 @@ Dependency Minimum Version Notes
========================= ================== =============================================================
xlrd 2.0.1 Reading Excel
xlwt 1.3.0 Writing Excel
-xlsxwriter 1.2.2 Writing Excel
-openpyxl 3.0.3 Reading / writing for xlsx files
-pyxlsb 1.0.6 Reading for xlsb files
+xlsxwriter 1.4.3 Writing Excel
+openpyxl 3.0.7 Reading / writing for xlsx files
+pyxlsb 1.0.8 Reading for xlsb files
========================= ================== =============================================================
HTML
@@ -312,9 +312,9 @@ HTML
========================= ================== =============================================================
Dependency Minimum Version Notes
========================= ================== =============================================================
-BeautifulSoup4 4.8.2 HTML parser for read_html
+BeautifulSoup4 4.9.3 HTML parser for read_html
html5lib 1.1 HTML parser for read_html
-lxml 4.5.0 HTML parser for read_html
+lxml 4.6.3 HTML parser for read_html
========================= ================== =============================================================
One of the following combinations of libraries is needed to use the
@@ -356,9 +356,9 @@ SQL databases
========================= ================== =============================================================
Dependency Minimum Version Notes
========================= ================== =============================================================
-SQLAlchemy 1.4.0 SQL support for databases other than sqlite
-psycopg2 2.8.4 PostgreSQL engine for sqlalchemy
-pymysql 0.10.1 MySQL engine for sqlalchemy
+SQLAlchemy 1.4.16 SQL support for databases other than sqlite
+psycopg2 2.8.6 PostgreSQL engine for sqlalchemy
+pymysql 1.0.2 MySQL engine for sqlalchemy
========================= ================== =============================================================
Other data sources
@@ -368,11 +368,11 @@ Other data sources
Dependency Minimum Version Notes
========================= ================== =============================================================
PyTables 3.6.1 HDF5-based reading / writing
-blosc 1.20.1 Compression for HDF5
+blosc 1.21.0 Compression for HDF5
zlib Compression for HDF5
fastparquet 0.4.0 Parquet reading / writing
pyarrow 1.0.1 Parquet, ORC, and feather reading / writing
-pyreadstat 1.1.0 SPSS files (.sav) reading
+pyreadstat 1.1.2 SPSS files (.sav) reading
========================= ================== =============================================================
.. _install.warn_orc:
@@ -396,10 +396,10 @@ Access data in the cloud
========================= ================== =============================================================
Dependency Minimum Version Notes
========================= ================== =============================================================
-fsspec 0.7.4 Handling files aside from simple local and HTTP
-gcsfs 0.6.0 Google Cloud Storage access
-pandas-gbq 0.14.0 Google Big Query access
-s3fs 0.4.0 Amazon S3 access
+fsspec 2021.5.0 Handling files aside from simple local and HTTP
+gcsfs 2021.5.0 Google Cloud Storage access
+pandas-gbq 0.15.0 Google Big Query access
+s3fs 2021.05.0 Amazon S3 access
========================= ================== =============================================================
Clipboard
| Documentation update: should be part of the 1.5 milestone
Closes #47740
Makes the min package versions in `doc/source/install.rst` consistent with those recommended in [`doc/source/whatsnew/v1.5.0.rst`.](https://github.com/pandas-dev/pandas/blob/main/doc/source/whatsnew/v1.5.0.rst#increased-minimum-versions-for-dependencies)
If this is not done, there will be a mistmatch in optional library recommendations between the release notes and the documentation.
<img width="524" alt="image" src="https://user-images.githubusercontent.com/23153616/179132443-da09ff46-b6a4-4baf-83f0-ef5716cb7d54.png">
| https://api.github.com/repos/pandas-dev/pandas/pulls/47727 | 2022-07-15T02:11:45Z | 2022-07-18T19:07:48Z | 2022-07-18T19:07:48Z | 2022-09-28T20:45:45Z |
BUG: numeric_only with axis=1 in DataFrame.corrwith and DataFrameGroupBy.cummin/max | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e70312c562907..629f711d8ec73 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -10550,7 +10550,8 @@ def corrwith(
else:
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
- other = other._get_numeric_data()
+ if numeric_only_bool:
+ other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
@@ -10563,11 +10564,15 @@ def corrwith(
right = right + left * 0
# demeaned data
- ldem = left - left.mean()
- rdem = right - right.mean()
+ ldem = left - left.mean(numeric_only=numeric_only_bool)
+ rdem = right - right.mean(numeric_only=numeric_only_bool)
num = (ldem * rdem).sum()
- dom = (left.count() - 1) * left.std() * right.std()
+ dom = (
+ (left.count() - 1)
+ * left.std(numeric_only=numeric_only_bool)
+ * right.std(numeric_only=numeric_only_bool)
+ )
correl = num / dom
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 89e47af4cb614..09545aa5c3184 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -3630,7 +3630,11 @@ def cummin(self, axis=0, numeric_only=False, **kwargs) -> NDFrameT:
skipna = kwargs.get("skipna", True)
if axis != 0:
f = lambda x: np.minimum.accumulate(x, axis)
- return self._python_apply_general(f, self._selected_obj, is_transform=True)
+ numeric_only_bool = self._resolve_numeric_only("cummax", numeric_only, axis)
+ obj = self._selected_obj
+ if numeric_only_bool:
+ obj = obj._get_numeric_data()
+ return self._python_apply_general(f, obj, is_transform=True)
return self._cython_transform(
"cummin", numeric_only=numeric_only, skipna=skipna
@@ -3650,7 +3654,11 @@ def cummax(self, axis=0, numeric_only=False, **kwargs) -> NDFrameT:
skipna = kwargs.get("skipna", True)
if axis != 0:
f = lambda x: np.maximum.accumulate(x, axis)
- return self._python_apply_general(f, self._selected_obj, is_transform=True)
+ numeric_only_bool = self._resolve_numeric_only("cummax", numeric_only, axis)
+ obj = self._selected_obj
+ if numeric_only_bool:
+ obj = obj._get_numeric_data()
+ return self._python_apply_general(f, obj, is_transform=True)
return self._cython_transform(
"cummax", numeric_only=numeric_only, skipna=skipna
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 7d6c5310942e2..9c622e0bfb69e 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -555,6 +555,81 @@ def test_idxmin_idxmax_axis1():
gb2.idxmax(axis=1)
+@pytest.mark.parametrize("numeric_only", [True, False, None])
+def test_axis1_numeric_only(request, groupby_func, numeric_only):
+ if groupby_func in ("idxmax", "idxmin"):
+ pytest.skip("idxmax and idx_min tested in test_idxmin_idxmax_axis1")
+ if groupby_func in ("mad", "tshift"):
+ pytest.skip("mad and tshift are deprecated")
+ if groupby_func in ("corrwith", "skew"):
+ msg = "GH#47723 groupby.corrwith and skew do not correctly implement axis=1"
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
+
+ df = DataFrame(np.random.randn(10, 4), columns=["A", "B", "C", "D"])
+ df["E"] = "x"
+ groups = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4]
+ gb = df.groupby(groups)
+ method = getattr(gb, groupby_func)
+ args = (0,) if groupby_func == "fillna" else ()
+ kwargs = {"axis": 1}
+ if numeric_only is not None:
+ # when numeric_only is None we don't pass any argument
+ kwargs["numeric_only"] = numeric_only
+
+ # Functions without numeric_only and axis args
+ no_args = ("cumprod", "cumsum", "diff", "fillna", "pct_change", "rank", "shift")
+ # Functions with axis args
+ has_axis = (
+ "cumprod",
+ "cumsum",
+ "diff",
+ "pct_change",
+ "rank",
+ "shift",
+ "cummax",
+ "cummin",
+ "idxmin",
+ "idxmax",
+ "fillna",
+ )
+ if numeric_only is not None and groupby_func in no_args:
+ msg = "got an unexpected keyword argument 'numeric_only'"
+ with pytest.raises(TypeError, match=msg):
+ method(*args, **kwargs)
+ elif groupby_func not in has_axis:
+ msg = "got an unexpected keyword argument 'axis'"
+ warn = FutureWarning if groupby_func == "skew" and not numeric_only else None
+ with tm.assert_produces_warning(warn, match="Dropping of nuisance columns"):
+ with pytest.raises(TypeError, match=msg):
+ method(*args, **kwargs)
+ # fillna and shift are successful even on object dtypes
+ elif (numeric_only is None or not numeric_only) and groupby_func not in (
+ "fillna",
+ "shift",
+ ):
+ msgs = (
+ # cummax, cummin, rank
+ "not supported between instances of",
+ # cumprod
+ "can't multiply sequence by non-int of type 'float'",
+ # cumsum, diff, pct_change
+ "unsupported operand type",
+ )
+ with pytest.raises(TypeError, match=f"({'|'.join(msgs)})"):
+ method(*args, **kwargs)
+ else:
+ result = method(*args, **kwargs)
+
+ df_expected = df.drop(columns="E").T if numeric_only else df.T
+ expected = getattr(df_expected, groupby_func)(*args).T
+ if groupby_func == "shift" and not numeric_only:
+ # shift with axis=1 leaves the leftmost column as numeric
+ # but transposing for expected gives us object dtype
+ expected = expected.astype(float)
+
+ tm.assert_equal(result, expected)
+
+
def test_groupby_cumprod():
# GH 4095
df = DataFrame({"key": ["b"] * 10, "value": 2})
@@ -1321,7 +1396,7 @@ def test_deprecate_numeric_only(
assert "b" not in result.columns
elif (
# kernels that work on any dtype and have numeric_only arg
- kernel in ("first", "last", "corrwith")
+ kernel in ("first", "last")
or (
# kernels that work on any dtype and don't have numeric_only arg
kernel in ("any", "all", "bfill", "ffill", "fillna", "nth", "nunique")
@@ -1339,7 +1414,8 @@ def test_deprecate_numeric_only(
"(not allowed for this dtype"
"|must be a string or a number"
"|cannot be performed against 'object' dtypes"
- "|must be a string or a real number)"
+ "|must be a string or a real number"
+ "|unsupported operand type)"
)
with pytest.raises(TypeError, match=msg):
method(*args, **kwargs)
| Part of #46560
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
No whatsnew note as these methods gained numeric_ony in 1.5.0 | https://api.github.com/repos/pandas-dev/pandas/pulls/47724 | 2022-07-14T22:35:42Z | 2022-07-16T18:14:59Z | 2022-07-16T18:14:59Z | 2022-07-17T14:16:12Z |
ENH: Timestamp.min/max/resolution support non-nano | diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 692b4430fa577..5c47c176b52ef 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -1,4 +1,5 @@
from cpython.datetime cimport (
+ PyDateTime_CheckExact,
PyDateTime_DATE_GET_HOUR,
PyDateTime_DATE_GET_MICROSECOND,
PyDateTime_DATE_GET_MINUTE,
@@ -228,7 +229,13 @@ def py_td64_to_tdstruct(int64_t td64, NPY_DATETIMEUNIT unit):
cdef inline void pydatetime_to_dtstruct(datetime dt, npy_datetimestruct *dts):
- dts.year = PyDateTime_GET_YEAR(dt)
+ if PyDateTime_CheckExact(dt):
+ dts.year = PyDateTime_GET_YEAR(dt)
+ else:
+ # We use dt.year instead of PyDateTime_GET_YEAR because with Timestamp
+ # we override year such that PyDateTime_GET_YEAR is incorrect.
+ dts.year = dt.year
+
dts.month = PyDateTime_GET_MONTH(dt)
dts.day = PyDateTime_GET_DAY(dt)
dts.hour = PyDateTime_DATE_GET_HOUR(dt)
diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd
index 69a1dd436dec0..0ecb26822cf50 100644
--- a/pandas/_libs/tslibs/timestamps.pxd
+++ b/pandas/_libs/tslibs/timestamps.pxd
@@ -22,7 +22,7 @@ cdef _Timestamp create_timestamp_from_ts(int64_t value,
cdef class _Timestamp(ABCTimestamp):
cdef readonly:
- int64_t value, nanosecond
+ int64_t value, nanosecond, year
BaseOffset _freq
NPY_DATETIMEUNIT _reso
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index ae3ce46cbc3c8..5163bfd8b7760 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -143,12 +143,27 @@ cdef inline _Timestamp create_timestamp_from_ts(
""" convenience routine to construct a Timestamp from its parts """
cdef:
_Timestamp ts_base
-
- ts_base = _Timestamp.__new__(Timestamp, dts.year, dts.month,
+ int64_t pass_year = dts.year
+
+ # We pass year=1970/1972 here and set year below because with non-nanosecond
+ # resolution we may have datetimes outside of the stdlib pydatetime
+ # implementation bounds, which would raise.
+ # NB: this means the C-API macro PyDateTime_GET_YEAR is unreliable.
+ if 1 <= pass_year <= 9999:
+ # we are in-bounds for pydatetime
+ pass
+ elif ccalendar.is_leapyear(dts.year):
+ pass_year = 1972
+ else:
+ pass_year = 1970
+
+ ts_base = _Timestamp.__new__(Timestamp, pass_year, dts.month,
dts.day, dts.hour, dts.min,
dts.sec, dts.us, tz, fold=fold)
+
ts_base.value = value
ts_base._freq = freq
+ ts_base.year = dts.year
ts_base.nanosecond = dts.ps // 1000
ts_base._reso = reso
@@ -179,6 +194,40 @@ def integer_op_not_supported(obj):
return TypeError(int_addsub_msg)
+class MinMaxReso:
+ """
+ We need to define min/max/resolution on both the Timestamp _instance_
+ and Timestamp class. On an instance, these depend on the object's _reso.
+ On the class, we default to the values we would get with nanosecond _reso.
+
+ See also: timedeltas.MinMaxReso
+ """
+ def __init__(self, name):
+ self._name = name
+
+ def __get__(self, obj, type=None):
+ cls = Timestamp
+ if self._name == "min":
+ val = np.iinfo(np.int64).min + 1
+ elif self._name == "max":
+ val = np.iinfo(np.int64).max
+ else:
+ assert self._name == "resolution"
+ val = 1
+ cls = Timedelta
+
+ if obj is None:
+ # i.e. this is on the class, default to nanos
+ return cls(val)
+ elif self._name == "resolution":
+ return Timedelta._from_value_and_reso(val, obj._reso)
+ else:
+ return Timestamp._from_value_and_reso(val, obj._reso, tz=None)
+
+ def __set__(self, obj, value):
+ raise AttributeError(f"{self._name} is not settable.")
+
+
# ----------------------------------------------------------------------
cdef class _Timestamp(ABCTimestamp):
@@ -188,6 +237,10 @@ cdef class _Timestamp(ABCTimestamp):
dayofweek = _Timestamp.day_of_week
dayofyear = _Timestamp.day_of_year
+ min = MinMaxReso("min")
+ max = MinMaxReso("max")
+ resolution = MinMaxReso("resolution") # GH#21336, GH#21365
+
cpdef void _set_freq(self, freq):
# set the ._freq attribute without going through the constructor,
# which would issue a warning
@@ -248,10 +301,12 @@ cdef class _Timestamp(ABCTimestamp):
def __hash__(_Timestamp self):
if self.nanosecond:
return hash(self.value)
+ if not (1 <= self.year <= 9999):
+ # out of bounds for pydatetime
+ return hash(self.value)
if self.fold:
return datetime.__hash__(self.replace(fold=0))
return datetime.__hash__(self)
- # TODO(non-nano): what if we are out of bounds for pydatetime?
def __richcmp__(_Timestamp self, object other, int op):
cdef:
@@ -968,6 +1023,9 @@ cdef class _Timestamp(ABCTimestamp):
"""
base_ts = "microseconds" if timespec == "nanoseconds" else timespec
base = super(_Timestamp, self).isoformat(sep=sep, timespec=base_ts)
+ # We need to replace the fake year 1970 with our real year
+ base = f"{self.year}-" + base.split("-", 1)[1]
+
if self.nanosecond == 0 and timespec != "nanoseconds":
return base
@@ -2332,29 +2390,24 @@ default 'raise'
Return the day of the week represented by the date.
Monday == 1 ... Sunday == 7.
"""
- return super().isoweekday()
+ # same as super().isoweekday(), but that breaks because of how
+ # we have overriden year, see note in create_timestamp_from_ts
+ return self.weekday() + 1
def weekday(self):
"""
Return the day of the week represented by the date.
Monday == 0 ... Sunday == 6.
"""
- return super().weekday()
+ # same as super().weekday(), but that breaks because of how
+ # we have overriden year, see note in create_timestamp_from_ts
+ return ccalendar.dayofweek(self.year, self.month, self.day)
# Aliases
Timestamp.weekofyear = Timestamp.week
Timestamp.daysinmonth = Timestamp.days_in_month
-# Add the min and max fields at the class level
-cdef int64_t _NS_UPPER_BOUND = np.iinfo(np.int64).max
-cdef int64_t _NS_LOWER_BOUND = NPY_NAT + 1
-
-# Resolution is in nanoseconds
-Timestamp.min = Timestamp(_NS_LOWER_BOUND)
-Timestamp.max = Timestamp(_NS_UPPER_BOUND)
-Timestamp.resolution = Timedelta(nanoseconds=1) # GH#21336, GH#21365
-
# ----------------------------------------------------------------------
# Scalar analogues to functions in vectorized.pyx
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 353c99688c139..67ad152dcab30 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -1011,6 +1011,35 @@ def test_sub_timedeltalike_mismatched_reso(self, ts_tz):
# With a mismatched td64 as opposed to Timedelta
ts + np.timedelta64(1, "ns")
+ def test_min(self, ts):
+ assert ts.min <= ts
+ assert ts.min._reso == ts._reso
+ assert ts.min.value == NaT.value + 1
+
+ def test_max(self, ts):
+ assert ts.max >= ts
+ assert ts.max._reso == ts._reso
+ assert ts.max.value == np.iinfo(np.int64).max
+
+ def test_resolution(self, ts):
+ expected = Timedelta._from_value_and_reso(1, ts._reso)
+ result = ts.resolution
+ assert result == expected
+ assert result._reso == expected._reso
+
+
+def test_timestamp_class_min_max_resolution():
+ # when accessed on the class (as opposed to an instance), we default
+ # to nanoseconds
+ assert Timestamp.min == Timestamp(NaT.value + 1)
+ assert Timestamp.min._reso == NpyDatetimeUnit.NPY_FR_ns.value
+
+ assert Timestamp.max == Timestamp(np.iinfo(np.int64).max)
+ assert Timestamp.max._reso == NpyDatetimeUnit.NPY_FR_ns.value
+
+ assert Timestamp.resolution == Timedelta(1)
+ assert Timestamp.resolution._reso == NpyDatetimeUnit.NPY_FR_ns.value
+
class TestAsUnit:
def test_as_unit(self):
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47720 | 2022-07-14T15:08:31Z | 2022-07-21T17:41:44Z | 2022-07-21T17:41:44Z | 2022-07-22T17:43:21Z |
TST: add test for groupby with dropna=False on multi-index | diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py
index ca55263146db3..515c96780e731 100644
--- a/pandas/tests/groupby/test_groupby_dropna.py
+++ b/pandas/tests/groupby/test_groupby_dropna.py
@@ -378,3 +378,12 @@ def test_groupby_nan_included():
tm.assert_numpy_array_equal(result_values, expected_values)
assert np.isnan(list(result.keys())[2])
assert list(result.keys())[0:2] == ["g1", "g2"]
+
+
+def test_groupby_drop_nan_with_multi_index():
+ # GH 39895
+ df = pd.DataFrame([[np.nan, 0, 1]], columns=["a", "b", "c"])
+ df = df.set_index(["a", "b"])
+ result = df.groupby(["a", "b"], dropna=False).first()
+ expected = df
+ tm.assert_frame_equal(result, expected)
| - [x] closes #39895
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47717 | 2022-07-14T07:28:01Z | 2022-07-14T16:53:47Z | 2022-07-14T16:53:46Z | 2022-07-14T16:57:51Z |
opt out of bottleneck for nanmean | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index a6408b940119d..a8af7f023d34d 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -844,7 +844,7 @@ Numeric
- Bug in operations with array-likes with ``dtype="boolean"`` and :attr:`NA` incorrectly altering the array in-place (:issue:`45421`)
- Bug in division, ``pow`` and ``mod`` operations on array-likes with ``dtype="boolean"`` not being like their ``np.bool_`` counterparts (:issue:`46063`)
- Bug in multiplying a :class:`Series` with ``IntegerDtype`` or ``FloatingDtype`` by an array-like with ``timedelta64[ns]`` dtype incorrectly raising (:issue:`45622`)
--
+- Bug in :meth:`mean` where the optional dependency ``bottleneck`` causes precision loss linear in the length of the array. ``bottleneck`` has been disabled for :meth:`mean` improving the loss to log-linear but may result in a performance decrease. (:issue:`42878`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 05a9bde700e32..81766dc91f271 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -162,6 +162,10 @@ def f(
def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool:
# Bottleneck chokes on datetime64, PeriodDtype (or and EA)
if not is_object_dtype(dtype) and not needs_i8_conversion(dtype):
+ # GH 42878
+ # Bottleneck uses naive summation leading to O(n) loss of precision
+ # unlike numpy which implements pairwise summation, which has O(log(n)) loss
+ # crossref: https://github.com/pydata/bottleneck/issues/379
# GH 15507
# bottleneck does not properly upcast during the sum
@@ -171,7 +175,7 @@ def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool:
# further we also want to preserve NaN when all elements
# are NaN, unlike bottleneck/numpy which consider this
# to be 0
- return name not in ["nansum", "nanprod"]
+ return name not in ["nansum", "nanprod", "nanmean"]
return False
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 005f7b088271f..f46d5c8e2590e 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -1120,3 +1120,25 @@ def test_check_below_min_count__large_shape(min_count, expected_result):
shape = (2244367, 1253)
result = nanops.check_below_min_count(shape, mask=None, min_count=min_count)
assert result == expected_result
+
+
+@pytest.mark.parametrize("func", ["nanmean", "nansum"])
+@pytest.mark.parametrize(
+ "dtype",
+ [
+ np.uint8,
+ np.uint16,
+ np.uint32,
+ np.uint64,
+ np.int8,
+ np.int16,
+ np.int32,
+ np.int64,
+ np.float16,
+ np.float32,
+ np.float64,
+ ],
+)
+def test_check_bottleneck_disallow(dtype, func):
+ # GH 42878 bottleneck sometimes produces unreliable results for mean and sum
+ assert not nanops._bn_ok_dtype(dtype, func)
| - [x] closes #42878
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47716 | 2022-07-14T07:23:58Z | 2022-07-18T19:14:18Z | 2022-07-18T19:14:17Z | 2022-07-18T19:51:36Z |
TST: Test for the Enum triggering TypeError (#22551 issue) | diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 0864032b741c9..25257a2c102fd 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -1,5 +1,6 @@
from collections import deque
from datetime import datetime
+from enum import Enum
import functools
import operator
import re
@@ -2050,3 +2051,15 @@ def _constructor_sliced(self):
result = sdf + sdf
tm.assert_frame_equal(result, expected)
+
+
+def test_enum_column_equality():
+ Cols = Enum("Cols", "col1 col2")
+
+ q1 = DataFrame({Cols.col1: [1, 2, 3]})
+ q2 = DataFrame({Cols.col1: [1, 2, 3]})
+
+ result = q1[Cols.col1] == q2[Cols.col1]
+ expected = Series([True, True, True], name=Cols.col1)
+
+ tm.assert_series_equal(result, expected)
| - [x] closes #22551
| https://api.github.com/repos/pandas-dev/pandas/pulls/47715 | 2022-07-14T04:20:41Z | 2022-07-16T18:08:28Z | 2022-07-16T18:08:27Z | 2022-07-16T18:08:41Z |
BUG: df.fillna ignores axis when df is single block | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index a6408b940119d..9651269963803 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -914,6 +914,7 @@ Missing
- Bug in :meth:`Series.fillna` and :meth:`DataFrame.fillna` with :class:`IntervalDtype` and incompatible value raising instead of casting to a common (usually object) dtype (:issue:`45796`)
- Bug in :meth:`DataFrame.interpolate` with object-dtype column not returning a copy with ``inplace=False`` (:issue:`45791`)
- Bug in :meth:`DataFrame.dropna` allows to set both ``how`` and ``thresh`` incompatible arguments (:issue:`46575`)
+- Bug in :meth:`DataFrame.fillna` ignored ``axis`` when :class:`DataFrame` is single block (:issue:`47713`)
MultiIndex
^^^^^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e392802bdb5ea..4325f0eb04a9c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6679,7 +6679,7 @@ def fillna(
return result if not inplace else None
elif not is_list_like(value):
- if not self._mgr.is_single_block and axis == 1:
+ if axis == 1:
result = self.T.fillna(value=value, limit=limit).T
diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py
index f5c9dd65e4760..d86c1b2aedcac 100644
--- a/pandas/tests/frame/methods/test_fillna.py
+++ b/pandas/tests/frame/methods/test_fillna.py
@@ -685,6 +685,29 @@ def test_inplace_dict_update_view(self, val):
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(result_view, expected)
+ def test_single_block_df_with_horizontal_axis(self):
+ # GH 47713
+ df = DataFrame(
+ {
+ "col1": [5, 0, np.nan, 10, np.nan],
+ "col2": [7, np.nan, np.nan, 5, 3],
+ "col3": [12, np.nan, 1, 2, 0],
+ "col4": [np.nan, 1, 1, np.nan, 18],
+ }
+ )
+ result = df.fillna(50, limit=1, axis=1)
+ expected = DataFrame(
+ [
+ [5.0, 7.0, 12.0, 50.0],
+ [0.0, 50.0, np.nan, 1.0],
+ [50.0, np.nan, 1.0, 1.0],
+ [10.0, 5.0, 2.0, 50.0],
+ [50.0, 3.0, 0.0, 18.0],
+ ],
+ columns=["col1", "col2", "col3", "col4"],
+ )
+ tm.assert_frame_equal(result, expected)
+
def test_fillna_nonconsolidated_frame():
# https://github.com/pandas-dev/pandas/issues/36495
| - [x] closes #47713
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47714 | 2022-07-14T04:11:29Z | 2022-07-14T20:41:38Z | 2022-07-14T20:41:37Z | 2022-07-14T20:56:04Z |
DOC: fix typos in "See also" documentation section | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e70312c562907..5c3fdf04c4342 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -10355,9 +10355,10 @@ def cov(
See Also
--------
Series.cov : Compute covariance with another Series.
- core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.
- core.window.Expanding.cov : Expanding sample covariance.
- core.window.Rolling.cov : Rolling sample covariance.
+ core.window.ewm.ExponentialMovingWindow.cov : Exponential weighted sample
+ covariance.
+ core.window.expanding.Expanding.cov : Expanding sample covariance.
+ core.window.rolling.Rolling.cov : Rolling sample covariance.
Notes
-----
@@ -11167,7 +11168,7 @@ def quantile(
See Also
--------
- core.window.Rolling.quantile: Rolling quantile.
+ core.window.rolling.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6e00f33f486d9..81af89a7e0bdb 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -12193,7 +12193,7 @@ def _doc_params(cls):
See Also
--------
-core.window.Expanding.{accum_func_name} : Similar functionality
+core.window.expanding.Expanding.{accum_func_name} : Similar functionality
but ignores ``NaN`` values.
{name2}.{accum_func_name} : Return the {desc} over
{name2} axis.
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47712 | 2022-07-14T02:02:42Z | 2022-07-16T19:53:02Z | 2022-07-16T19:53:02Z | 2022-07-17T01:15:09Z |
ENH/TST: Add BaseUnaryOpsTests tests for ArrowExtensionArray | diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 92aedbb836b38..69cd2a44dfed4 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -235,6 +235,20 @@ def __arrow_array__(self, type=None):
"""Convert myself to a pyarrow ChunkedArray."""
return self._data
+ def __invert__(self: ArrowExtensionArrayT) -> ArrowExtensionArrayT:
+ if pa_version_under2p0:
+ raise NotImplementedError("__invert__ not implement for pyarrow < 2.0")
+ return type(self)(pc.invert(self._data))
+
+ def __neg__(self: ArrowExtensionArrayT) -> ArrowExtensionArrayT:
+ return type(self)(pc.negate_checked(self._data))
+
+ def __pos__(self: ArrowExtensionArrayT) -> ArrowExtensionArrayT:
+ return type(self)(self._data)
+
+ def __abs__(self: ArrowExtensionArrayT) -> ArrowExtensionArrayT:
+ return type(self)(pc.abs_checked(self._data))
+
def _cmp_method(self, other, op):
from pandas.arrays import BooleanArray
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 7e0792a6010a7..c6e9bed030567 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -1210,6 +1210,24 @@ def test_EA_types(self, engine, data, request):
super().test_EA_types(engine, data)
+class TestBaseUnaryOps(base.BaseUnaryOpsTests):
+ @pytest.mark.xfail(
+ pa_version_under2p0,
+ raises=NotImplementedError,
+ reason="pyarrow.compute.invert not supported in pyarrow<2.0",
+ )
+ def test_invert(self, data, request):
+ pa_dtype = data.dtype.pyarrow_dtype
+ if not pa.types.is_boolean(pa_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=pa.ArrowNotImplementedError,
+ reason=f"pyarrow.compute.invert does support {pa_dtype}",
+ )
+ )
+ super().test_invert(data)
+
+
class TestBaseMethods(base.BaseMethodsTests):
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna, request):
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47711 | 2022-07-14T00:13:00Z | 2022-07-21T20:35:51Z | 2022-07-21T20:35:51Z | 2022-07-21T21:58:54Z |
GH: Add CITATION.cff | diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 0000000000000..0161dfa92fdef
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,10 @@
+cff-version: 1.2.0
+title: 'pandas-dev/pandas: Pandas'
+message: 'If you use this software, please cite it as below.'
+authors:
+ - name: "The pandas development team"
+license: BSD-3-Clause
+license-url: "https://github.com/pandas-dev/pandas/blob/main/LICENSE"
+repository-code: "https://github.com/pandas-dev/pandas"
+type: software
+url: "https://github.com/pandas-dev/pandas"
diff --git a/web/pandas/about/citing.md b/web/pandas/about/citing.md
index e2821dbc19a4e..b4c7848e5db00 100644
--- a/web/pandas/about/citing.md
+++ b/web/pandas/about/citing.md
@@ -5,7 +5,7 @@
If you use _pandas_ for a scientific publication, we would appreciate citations to the published software and the
following paper:
-- [pandas on Zenodo](https://zenodo.org/record/3715232#.XoqFyC2ZOL8),
+- [pandas on Zenodo](https://zenodo.org/search?page=1&size=20&q=conceptrecid%3A%223509134%22&sort=-version&all_versions=True),
Please find us on Zenodo and replace with the citation for the version you are using. You can replace the full author
list from there with "The pandas development team" like in the example below.
| Enables a citation widget on the side of the repository that provides a copy-pastable ABA & Bibtex citation: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-citation-files
Followed this schema: https://github.com/citation-file-format/citation-file-format/blob/1.2.0/schema-guide.md
| https://api.github.com/repos/pandas-dev/pandas/pulls/47710 | 2022-07-13T22:18:24Z | 2022-07-16T02:12:22Z | 2022-07-16T02:12:22Z | 2022-07-16T17:35:38Z |
Backport PR #47670 on branch 1.4.x (CI: Fix npdev build post Cython annotation change) | diff --git a/pandas/_libs/arrays.pyx b/pandas/_libs/arrays.pyx
index 8895a2bcfca89..f63d16e819c92 100644
--- a/pandas/_libs/arrays.pyx
+++ b/pandas/_libs/arrays.pyx
@@ -157,7 +157,7 @@ cdef class NDArrayBacked:
return self._from_backing_data(res_values)
# TODO: pass NPY_MAXDIMS equiv to axis=None?
- def repeat(self, repeats, axis: int = 0):
+ def repeat(self, repeats, axis: int | np.integer = 0):
if axis is None:
axis = 0
res_values = cnp.PyArray_Repeat(self._ndarray, repeats, <int>axis)
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index b2ea2e746b44c..ef565d3e0e746 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1654,7 +1654,7 @@ cdef class _Period(PeriodMixin):
return freq
@classmethod
- def _from_ordinal(cls, ordinal: int, freq) -> "Period":
+ def _from_ordinal(cls, ordinal: int64_t, freq) -> "Period":
"""
Fast creation from an ordinal and freq that are already validated!
"""
diff --git a/pandas/tests/io/parser/test_quoting.py b/pandas/tests/io/parser/test_quoting.py
index 456dd049d2f4a..a1aba949e74fe 100644
--- a/pandas/tests/io/parser/test_quoting.py
+++ b/pandas/tests/io/parser/test_quoting.py
@@ -38,7 +38,7 @@ def test_bad_quote_char(all_parsers, kwargs, msg):
@pytest.mark.parametrize(
"quoting,msg",
[
- ("foo", '"quoting" must be an integer'),
+ ("foo", '"quoting" must be an integer|Argument'),
(5, 'bad "quoting" value'), # quoting must be in the range [0, 3]
],
)
| Backport PR #47670: CI: Fix npdev build post Cython annotation change | https://api.github.com/repos/pandas-dev/pandas/pulls/47709 | 2022-07-13T20:58:59Z | 2022-07-14T11:42:15Z | 2022-07-14T11:42:15Z | 2022-07-14T11:42:15Z |
GH: Convert feature request template to GH form | diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 0c30b941bc520..0000000000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-
-name: Feature Request
-about: Suggest an idea for pandas
-title: "ENH:"
-labels: "Enhancement, Needs Triage"
-
----
-
-#### Is your feature request related to a problem?
-
-[this should provide a description of what the problem is, e.g. "I wish I could use pandas to do [...]"]
-
-#### Describe the solution you'd like
-
-[this should provide a description of the feature request, e.g. "`DataFrame.foo` should get a new parameter `bar` that [...]", try to write a docstring for the desired feature]
-
-#### API breaking implications
-
-[this should provide a description of how this feature will affect the API]
-
-#### Describe alternatives you've considered
-
-[this should provide a description of any alternative solutions or features you've considered]
-
-#### Additional context
-
-[add any other context, code examples, or references to existing implementations about the feature request here]
-
-```python
-# Your code here, if applicable
-
-```
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml
new file mode 100644
index 0000000000000..f837eb1ca5bb7
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yaml
@@ -0,0 +1,72 @@
+name: Feature Request
+description: Suggest an idea for pandas
+title: "ENH: "
+labels: [Enhancement, Needs Triage]
+
+body:
+ - type: checkboxes
+ id: checks
+ attributes:
+ label: Feature Type
+ description: Please check what type of feature request you would like to propose.
+ options:
+ - label: >
+ Adding new functionality to pandas
+ - label: >
+ Changing existing functionality in pandas
+ - label: >
+ Removing existing functionality in pandas
+ - type: textarea
+ id: description
+ attributes:
+ label: Problem Description
+ description: >
+ Please describe what problem the feature would solve, e.g. "I wish I could use pandas to ..."
+ placeholder: >
+ I wish I could use pandas to return a Series from a DataFrame when possible.
+ validations:
+ required: true
+ - type: textarea
+ id: feature
+ attributes:
+ label: Feature Description
+ description: >
+ Please describe how the new feature would be implemented, using psudocode if relevant.
+ placeholder: >
+ Add a new parameter to DataFrame, to_series, to return a Series if possible.
+
+ def __init__(self, ..., to_series: bool=False):
+ """
+ Parameters
+ ----------
+ ...
+
+ to_series : bool, default False
+ Return a Series if possible
+ """
+ if to_series:
+ return Series(data)
+ validations:
+ required: true
+ - type: textarea
+ id: alternative
+ attributes:
+ label: Alternative Solutions
+ description: >
+ Please describe any alternative solution (existing functionality, 3rd party package, etc.)
+ that would satisfy the feature request.
+ placeholder: >
+ Write a custom function to return a Series when possible.
+
+ def to_series(...)
+ result = pd.DataFrame(...)
+ ...
+ validations:
+ required: true
+ - type: textarea
+ id: context
+ attributes:
+ label: Additional Context
+ description: >
+ Please provide any relevant Github issues, code examples or references that help describe and support
+ the feature request.
| Mostly a 1:1 transfer. I removed the `API breaking implications` section though as the reviewers should have more/just-as-good insight into potential API impacts. | https://api.github.com/repos/pandas-dev/pandas/pulls/47696 | 2022-07-13T06:19:16Z | 2022-07-16T02:13:34Z | 2022-07-16T02:13:34Z | 2022-07-16T17:37:35Z |
DOC: Centeralized testing guidance for contributions | diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst
index 81cd69aa384a4..c74c44fb1d5f0 100644
--- a/doc/source/development/contributing_codebase.rst
+++ b/doc/source/development/contributing_codebase.rst
@@ -324,8 +324,169 @@ Writing tests
All tests should go into the ``tests`` subdirectory of the specific package.
This folder contains many current examples of tests, and we suggest looking to these for
-inspiration. Please reference our :ref:`testing location guide <test_organization>` if you are unsure
-where to place a new unit test.
+inspiration. Ideally, there should be one, and only one, obvious place for a test to reside.
+Until we reach that ideal, these are some rules of thumb for where a test should
+be located.
+
+1. Does your test depend only on code in ``pd._libs.tslibs``?
+ This test likely belongs in one of:
+
+ - tests.tslibs
+
+ .. note::
+
+ No file in ``tests.tslibs`` should import from any pandas modules
+ outside of ``pd._libs.tslibs``
+
+ - tests.scalar
+ - tests.tseries.offsets
+
+2. Does your test depend only on code in pd._libs?
+ This test likely belongs in one of:
+
+ - tests.libs
+ - tests.groupby.test_libgroupby
+
+3. Is your test for an arithmetic or comparison method?
+ This test likely belongs in one of:
+
+ - tests.arithmetic
+
+ .. note::
+
+ These are intended for tests that can be shared to test the behavior
+ of DataFrame/Series/Index/ExtensionArray using the ``box_with_array``
+ fixture.
+
+ - tests.frame.test_arithmetic
+ - tests.series.test_arithmetic
+
+4. Is your test for a reduction method (min, max, sum, prod, ...)?
+ This test likely belongs in one of:
+
+ - tests.reductions
+
+ .. note::
+
+ These are intended for tests that can be shared to test the behavior
+ of DataFrame/Series/Index/ExtensionArray.
+
+ - tests.frame.test_reductions
+ - tests.series.test_reductions
+ - tests.test_nanops
+
+5. Is your test for an indexing method?
+ This is the most difficult case for deciding where a test belongs, because
+ there are many of these tests, and many of them test more than one method
+ (e.g. both ``Series.__getitem__`` and ``Series.loc.__getitem__``)
+
+ A) Is the test specifically testing an Index method (e.g. ``Index.get_loc``,
+ ``Index.get_indexer``)?
+ This test likely belongs in one of:
+
+ - tests.indexes.test_indexing
+ - tests.indexes.fooindex.test_indexing
+
+ Within that files there should be a method-specific test class e.g.
+ ``TestGetLoc``.
+
+ In most cases, neither ``Series`` nor ``DataFrame`` objects should be
+ needed in these tests.
+
+ B) Is the test for a Series or DataFrame indexing method *other* than
+ ``__getitem__`` or ``__setitem__``, e.g. ``xs``, ``where``, ``take``,
+ ``mask``, ``lookup``, or ``insert``?
+ This test likely belongs in one of:
+
+ - tests.frame.indexing.test_methodname
+ - tests.series.indexing.test_methodname
+
+ C) Is the test for any of ``loc``, ``iloc``, ``at``, or ``iat``?
+ This test likely belongs in one of:
+
+ - tests.indexing.test_loc
+ - tests.indexing.test_iloc
+ - tests.indexing.test_at
+ - tests.indexing.test_iat
+
+ Within the appropriate file, test classes correspond to either types of
+ indexers (e.g. ``TestLocBooleanMask``) or major use cases
+ (e.g. ``TestLocSetitemWithExpansion``).
+
+ See the note in section D) about tests that test multiple indexing methods.
+
+ D) Is the test for ``Series.__getitem__``, ``Series.__setitem__``,
+ ``DataFrame.__getitem__``, or ``DataFrame.__setitem__``?
+ This test likely belongs in one of:
+
+ - tests.series.test_getitem
+ - tests.series.test_setitem
+ - tests.frame.test_getitem
+ - tests.frame.test_setitem
+
+ If many cases such a test may test multiple similar methods, e.g.
+
+ .. code-block:: python
+
+ import pandas as pd
+ import pandas._testing as tm
+
+ def test_getitem_listlike_of_ints():
+ ser = pd.Series(range(5))
+
+ result = ser[[3, 4]]
+ expected = pd.Series([2, 3])
+ tm.assert_series_equal(result, expected)
+
+ result = ser.loc[[3, 4]]
+ tm.assert_series_equal(result, expected)
+
+ In cases like this, the test location should be based on the *underlying*
+ method being tested. Or in the case of a test for a bugfix, the location
+ of the actual bug. So in this example, we know that ``Series.__getitem__``
+ calls ``Series.loc.__getitem__``, so this is *really* a test for
+ ``loc.__getitem__``. So this test belongs in ``tests.indexing.test_loc``.
+
+6. Is your test for a DataFrame or Series method?
+
+ A) Is the method a plotting method?
+ This test likely belongs in one of:
+
+ - tests.plotting
+
+ B) Is the method an IO method?
+ This test likely belongs in one of:
+
+ - tests.io
+
+ C) Otherwise
+ This test likely belongs in one of:
+
+ - tests.series.methods.test_mymethod
+ - tests.frame.methods.test_mymethod
+
+ .. note::
+
+ If a test can be shared between DataFrame/Series using the
+ ``frame_or_series`` fixture, by convention it goes in the
+ ``tests.frame`` file.
+
+7. Is your test for an Index method, not depending on Series/DataFrame?
+ This test likely belongs in one of:
+
+ - tests.indexes
+
+8) Is your test for one of the pandas-provided ExtensionArrays (``Categorical``,
+ ``DatetimeArray``, ``TimedeltaArray``, ``PeriodArray``, ``IntervalArray``,
+ ``PandasArray``, ``FloatArray``, ``BoolArray``, ``StringArray``)?
+ This test likely belongs in one of:
+
+ - tests.arrays
+
+9) Is your test for *all* ExtensionArray subclasses (the "EA Interface")?
+ This test likely belongs in one of:
+
+ - tests.extension
Using ``pytest``
~~~~~~~~~~~~~~~~
@@ -388,6 +549,8 @@ xfail is not to be used for tests involving failure due to invalid user argument
For these tests, we need to verify the correct exception type and error message
is being raised, using ``pytest.raises`` instead.
+.. _contributing.warnings:
+
Testing a warning
^^^^^^^^^^^^^^^^^
@@ -405,6 +568,27 @@ If a warning should specifically not happen in a block of code, pass ``False`` i
with tm.assert_produces_warning(False):
pd.no_warning_function()
+If you have a test that would emit a warning, but you aren't actually testing the
+warning itself (say because it's going to be removed in the future, or because we're
+matching a 3rd-party library's behavior), then use ``pytest.mark.filterwarnings`` to
+ignore the error.
+
+.. code-block:: python
+
+ @pytest.mark.filterwarnings("ignore:msg:category")
+ def test_thing(self):
+ pass
+
+If you need finer-grained control, you can use Python's
+`warnings module <https://docs.python.org/3/library/warnings.html>`__
+to control whether a warning is ignored or raised at different places within
+a single test.
+
+.. code-block:: python
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", FutureWarning)
+
Testing an exception
^^^^^^^^^^^^^^^^^^^^
@@ -570,59 +754,6 @@ preferred if the inputs or logic are simple, with Hypothesis tests reserved
for cases with complex logic or where there are too many combinations of
options or subtle interactions to test (or think of!) all of them.
-.. _contributing.warnings:
-
-Testing warnings
-~~~~~~~~~~~~~~~~
-
-By default, the :ref:`Continuous Integration <contributing.ci>` will fail if any unhandled warnings are emitted.
-
-If your change involves checking that a warning is actually emitted, use
-``tm.assert_produces_warning(ExpectedWarning)``.
-
-
-.. code-block:: python
-
- import pandas._testing as tm
-
-
- df = pd.DataFrame()
- with tm.assert_produces_warning(FutureWarning):
- df.some_operation()
-
-We prefer this to the ``pytest.warns`` context manager because ours checks that the warning's
-stacklevel is set correctly. The stacklevel is what ensure the *user's* file name and line number
-is printed in the warning, rather than something internal to pandas. It represents the number of
-function calls from user code (e.g. ``df.some_operation()``) to the function that actually emits
-the warning. Our linter will fail the build if you use ``pytest.warns`` in a test.
-
-If you have a test that would emit a warning, but you aren't actually testing the
-warning itself (say because it's going to be removed in the future, or because we're
-matching a 3rd-party library's behavior), then use ``pytest.mark.filterwarnings`` to
-ignore the error.
-
-.. code-block:: python
-
- @pytest.mark.filterwarnings("ignore:msg:category")
- def test_thing(self):
- ...
-
-If the test generates a warning of class ``category`` whose message starts
-with ``msg``, the warning will be ignored and the test will pass.
-
-If you need finer-grained control, you can use Python's usual
-`warnings module <https://docs.python.org/3/library/warnings.html>`__
-to control whether a warning is ignored / raised at different places within
-a single test.
-
-.. code-block:: python
-
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", FutureWarning)
- # Or use warnings.filterwarnings(...)
-
-Alternatively, consider breaking up the unit test.
-
Running the test suite
----------------------
diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst
index 01509705bb92c..1dbe162cd1a6b 100644
--- a/doc/source/development/index.rst
+++ b/doc/source/development/index.rst
@@ -18,7 +18,6 @@ Development
contributing_codebase
maintaining
internals
- test_writing
debugging_extensions
extending
developer
diff --git a/doc/source/development/test_writing.rst b/doc/source/development/test_writing.rst
deleted file mode 100644
index 76eae505471b7..0000000000000
--- a/doc/source/development/test_writing.rst
+++ /dev/null
@@ -1,167 +0,0 @@
-.. _test_organization:
-
-Test organization
-=================
-Ideally, there should be one, and only one, obvious place for a test to reside.
-Until we reach that ideal, these are some rules of thumb for where a test should
-be located.
-
-1. Does your test depend only on code in ``pd._libs.tslibs``?
- This test likely belongs in one of:
-
- - tests.tslibs
-
- .. note::
-
- No file in ``tests.tslibs`` should import from any pandas modules
- outside of ``pd._libs.tslibs``
-
- - tests.scalar
- - tests.tseries.offsets
-
-2. Does your test depend only on code in pd._libs?
- This test likely belongs in one of:
-
- - tests.libs
- - tests.groupby.test_libgroupby
-
-3. Is your test for an arithmetic or comparison method?
- This test likely belongs in one of:
-
- - tests.arithmetic
-
- .. note::
-
- These are intended for tests that can be shared to test the behavior
- of DataFrame/Series/Index/ExtensionArray using the ``box_with_array``
- fixture.
-
- - tests.frame.test_arithmetic
- - tests.series.test_arithmetic
-
-4. Is your test for a reduction method (min, max, sum, prod, ...)?
- This test likely belongs in one of:
-
- - tests.reductions
-
- .. note::
-
- These are intended for tests that can be shared to test the behavior
- of DataFrame/Series/Index/ExtensionArray.
-
- - tests.frame.test_reductions
- - tests.series.test_reductions
- - tests.test_nanops
-
-5. Is your test for an indexing method?
- This is the most difficult case for deciding where a test belongs, because
- there are many of these tests, and many of them test more than one method
- (e.g. both ``Series.__getitem__`` and ``Series.loc.__getitem__``)
-
- A) Is the test specifically testing an Index method (e.g. ``Index.get_loc``,
- ``Index.get_indexer``)?
- This test likely belongs in one of:
-
- - tests.indexes.test_indexing
- - tests.indexes.fooindex.test_indexing
-
- Within that files there should be a method-specific test class e.g.
- ``TestGetLoc``.
-
- In most cases, neither ``Series`` nor ``DataFrame`` objects should be
- needed in these tests.
-
- B) Is the test for a Series or DataFrame indexing method *other* than
- ``__getitem__`` or ``__setitem__``, e.g. ``xs``, ``where``, ``take``,
- ``mask``, ``lookup``, or ``insert``?
- This test likely belongs in one of:
-
- - tests.frame.indexing.test_methodname
- - tests.series.indexing.test_methodname
-
- C) Is the test for any of ``loc``, ``iloc``, ``at``, or ``iat``?
- This test likely belongs in one of:
-
- - tests.indexing.test_loc
- - tests.indexing.test_iloc
- - tests.indexing.test_at
- - tests.indexing.test_iat
-
- Within the appropriate file, test classes correspond to either types of
- indexers (e.g. ``TestLocBooleanMask``) or major use cases
- (e.g. ``TestLocSetitemWithExpansion``).
-
- See the note in section D) about tests that test multiple indexing methods.
-
- D) Is the test for ``Series.__getitem__``, ``Series.__setitem__``,
- ``DataFrame.__getitem__``, or ``DataFrame.__setitem__``?
- This test likely belongs in one of:
-
- - tests.series.test_getitem
- - tests.series.test_setitem
- - tests.frame.test_getitem
- - tests.frame.test_setitem
-
- If many cases such a test may test multiple similar methods, e.g.
-
- .. code-block:: python
-
- import pandas as pd
- import pandas._testing as tm
-
- def test_getitem_listlike_of_ints():
- ser = pd.Series(range(5))
-
- result = ser[[3, 4]]
- expected = pd.Series([2, 3])
- tm.assert_series_equal(result, expected)
-
- result = ser.loc[[3, 4]]
- tm.assert_series_equal(result, expected)
-
- In cases like this, the test location should be based on the *underlying*
- method being tested. Or in the case of a test for a bugfix, the location
- of the actual bug. So in this example, we know that ``Series.__getitem__``
- calls ``Series.loc.__getitem__``, so this is *really* a test for
- ``loc.__getitem__``. So this test belongs in ``tests.indexing.test_loc``.
-
-6. Is your test for a DataFrame or Series method?
-
- A) Is the method a plotting method?
- This test likely belongs in one of:
-
- - tests.plotting
-
- B) Is the method an IO method?
- This test likely belongs in one of:
-
- - tests.io
-
- C) Otherwise
- This test likely belongs in one of:
-
- - tests.series.methods.test_mymethod
- - tests.frame.methods.test_mymethod
-
- .. note::
-
- If a test can be shared between DataFrame/Series using the
- ``frame_or_series`` fixture, by convention it goes in the
- ``tests.frame`` file.
-
-7. Is your test for an Index method, not depending on Series/DataFrame?
- This test likely belongs in one of:
-
- - tests.indexes
-
-8) Is your test for one of the pandas-provided ExtensionArrays (``Categorical``,
- ``DatetimeArray``, ``TimedeltaArray``, ``PeriodArray``, ``IntervalArray``,
- ``PandasArray``, ``FloatArray``, ``BoolArray``, ``StringArray``)?
- This test likely belongs in one of:
-
- - tests.arrays
-
-9) Is your test for *all* ExtensionArray subclasses (the "EA Interface")?
- This test likely belongs in one of:
-
- - tests.extension
| * Move contents of `doc/source/development/test_writing.rst` to testing section in `doc/source/development/contributing_codebase.rst`
* Consolidate 2 warning testing sections to 1 | https://api.github.com/repos/pandas-dev/pandas/pulls/47692 | 2022-07-13T00:30:01Z | 2022-07-16T02:15:05Z | 2022-07-16T02:15:05Z | 2022-07-22T03:58:34Z |
ENH: dt64/td64 comparison support non-nano | diff --git a/pandas/_libs/tslibs/np_datetime.pyi b/pandas/_libs/tslibs/np_datetime.pyi
index 27871a78f8aaf..757165fbad268 100644
--- a/pandas/_libs/tslibs/np_datetime.pyi
+++ b/pandas/_libs/tslibs/np_datetime.pyi
@@ -1,5 +1,7 @@
import numpy as np
+from pandas._typing import npt
+
class OutOfBoundsDatetime(ValueError): ...
class OutOfBoundsTimedelta(ValueError): ...
@@ -10,3 +12,6 @@ def astype_overflowsafe(
arr: np.ndarray, dtype: np.dtype, copy: bool = ...
) -> np.ndarray: ...
def is_unitless(dtype: np.dtype) -> bool: ...
+def compare_mismatched_resolutions(
+ left: np.ndarray, right: np.ndarray, op
+) -> npt.NDArray[np.bool_]: ...
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 1aab5dcd6f70b..692b4430fa577 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -20,12 +20,14 @@ from cpython.object cimport (
import_datetime()
import numpy as np
+
cimport numpy as cnp
cnp.import_array()
from numpy cimport (
int64_t,
ndarray,
+ uint8_t,
)
from pandas._libs.tslibs.util cimport get_c_string_buf_and_size
@@ -370,3 +372,81 @@ cpdef ndarray astype_overflowsafe(
cnp.PyArray_MultiIter_NEXT(mi)
return iresult.view(dtype)
+
+
+# TODO: try to upstream this fix to numpy
+def compare_mismatched_resolutions(ndarray left, ndarray right, op):
+ """
+ Overflow-safe comparison of timedelta64/datetime64 with mismatched resolutions.
+
+ >>> left = np.array([500], dtype="M8[Y]")
+ >>> right = np.array([0], dtype="M8[ns]")
+ >>> left < right # <- wrong!
+ array([ True])
+ """
+
+ if left.dtype.kind != right.dtype.kind or left.dtype.kind not in ["m", "M"]:
+ raise ValueError("left and right must both be timedelta64 or both datetime64")
+
+ cdef:
+ int op_code = op_to_op_code(op)
+ NPY_DATETIMEUNIT left_unit = get_unit_from_dtype(left.dtype)
+ NPY_DATETIMEUNIT right_unit = get_unit_from_dtype(right.dtype)
+
+ # equiv: result = np.empty((<object>left).shape, dtype="bool")
+ ndarray result = cnp.PyArray_EMPTY(
+ left.ndim, left.shape, cnp.NPY_BOOL, 0
+ )
+
+ ndarray lvalues = left.view("i8")
+ ndarray rvalues = right.view("i8")
+
+ cnp.broadcast mi = cnp.PyArray_MultiIterNew3(result, lvalues, rvalues)
+ int64_t lval, rval
+ bint res_value
+
+ Py_ssize_t i, N = left.size
+ npy_datetimestruct ldts, rdts
+
+
+ for i in range(N):
+ # Analogous to: lval = lvalues[i]
+ lval = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 1))[0]
+
+ # Analogous to: rval = rvalues[i]
+ rval = (<int64_t*>cnp.PyArray_MultiIter_DATA(mi, 2))[0]
+
+ if lval == NPY_DATETIME_NAT or rval == NPY_DATETIME_NAT:
+ res_value = op_code == Py_NE
+
+ else:
+ pandas_datetime_to_datetimestruct(lval, left_unit, &ldts)
+ pandas_datetime_to_datetimestruct(rval, right_unit, &rdts)
+
+ res_value = cmp_dtstructs(&ldts, &rdts, op_code)
+
+ # Analogous to: result[i] = res_value
+ (<uint8_t*>cnp.PyArray_MultiIter_DATA(mi, 0))[0] = res_value
+
+ cnp.PyArray_MultiIter_NEXT(mi)
+
+ return result
+
+
+import operator
+
+
+cdef int op_to_op_code(op):
+ # TODO: should exist somewhere?
+ if op is operator.eq:
+ return Py_EQ
+ if op is operator.ne:
+ return Py_NE
+ if op is operator.le:
+ return Py_LE
+ if op is operator.lt:
+ return Py_LT
+ if op is operator.ge:
+ return Py_GE
+ if op is operator.gt:
+ return Py_GT
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index eadf47b36d7fc..0f88ad9811bf0 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -46,6 +46,7 @@
RoundTo,
round_nsint64,
)
+from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions
from pandas._libs.tslibs.timestamps import integer_op_not_supported
from pandas._typing import (
ArrayLike,
@@ -1065,6 +1066,24 @@ def _cmp_method(self, other, op):
)
return result
+ if other is NaT:
+ if op is operator.ne:
+ result = np.ones(self.shape, dtype=bool)
+ else:
+ result = np.zeros(self.shape, dtype=bool)
+ return result
+
+ if not is_period_dtype(self.dtype):
+ self = cast(TimelikeOps, self)
+ if self._reso != other._reso:
+ if not isinstance(other, type(self)):
+ # i.e. Timedelta/Timestamp, cast to ndarray and let
+ # compare_mismatched_resolutions handle broadcasting
+ other_arr = np.array(other.asm8)
+ else:
+ other_arr = other._ndarray
+ return compare_mismatched_resolutions(self._ndarray, other_arr, op)
+
other_vals = self._unbox(other)
# GH#37462 comparison on i8 values is almost 2x faster than M8/m8
result = op(self._ndarray.view("i8"), other_vals.view("i8"))
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 63601ff963609..af1a292a2975a 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -1,6 +1,8 @@
"""
Tests for DatetimeArray
"""
+import operator
+
import numpy as np
import pytest
@@ -169,6 +171,42 @@ def test_repr(self, dta_dti, unit):
assert repr(dta) == repr(dti._data).replace("[ns", f"[{unit}")
+ # TODO: tests with td64
+ def test_compare_mismatched_resolutions(self, comparison_op):
+ # comparison that numpy gets wrong bc of silent overflows
+ op = comparison_op
+
+ iinfo = np.iinfo(np.int64)
+ vals = np.array([iinfo.min, iinfo.min + 1, iinfo.max], dtype=np.int64)
+
+ # Construct so that arr2[1] < arr[1] < arr[2] < arr2[2]
+ arr = np.array(vals).view("M8[ns]")
+ arr2 = arr.view("M8[s]")
+
+ left = DatetimeArray._simple_new(arr, dtype=arr.dtype)
+ right = DatetimeArray._simple_new(arr2, dtype=arr2.dtype)
+
+ if comparison_op is operator.eq:
+ expected = np.array([False, False, False])
+ elif comparison_op is operator.ne:
+ expected = np.array([True, True, True])
+ elif comparison_op in [operator.lt, operator.le]:
+ expected = np.array([False, False, True])
+ else:
+ expected = np.array([False, True, False])
+
+ result = op(left, right)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = op(left[1], right)
+ tm.assert_numpy_array_equal(result, expected)
+
+ if op not in [operator.eq, operator.ne]:
+ # check that numpy still gets this wrong; if it is fixed we may be
+ # able to remove compare_mismatched_resolutions
+ np_res = op(left._ndarray, right._ndarray)
+ tm.assert_numpy_array_equal(np_res[1:], ~expected[1:])
+
class TestDatetimeArrayComparisons:
# TODO: merge this into tests/arithmetic/test_datetime64 once it is
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47691 | 2022-07-12T23:31:55Z | 2022-07-13T20:00:40Z | 2022-07-13T20:00:40Z | 2022-07-13T20:40:01Z |
TST: avoid sort when concat int-index Dataframes with sort=False | diff --git a/pandas/tests/reshape/concat/test_sort.py b/pandas/tests/reshape/concat/test_sort.py
index a789dc0f8dc83..e83880625f3d6 100644
--- a/pandas/tests/reshape/concat/test_sort.py
+++ b/pandas/tests/reshape/concat/test_sort.py
@@ -93,6 +93,22 @@ def test_concat_frame_with_sort_false(self):
tm.assert_frame_equal(result, expected)
+ # GH 37937
+ df1 = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[1, 2, 3])
+ df2 = DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]}, index=[3, 1, 6])
+ result = pd.concat([df2, df1], axis=1, sort=False)
+ expected = DataFrame(
+ [
+ [7.0, 10.0, 3.0, 6.0],
+ [8.0, 11.0, 1.0, 4.0],
+ [9.0, 12.0, np.nan, np.nan],
+ [np.nan, np.nan, 2.0, 5.0],
+ ],
+ index=[3, 1, 6, 2],
+ columns=["c", "d", "a", "b"],
+ )
+ tm.assert_frame_equal(result, expected)
+
def test_concat_sort_none_warning(self):
# GH#41518
df = DataFrame({1: [1, 2], "a": [3, 4]})
| - [x] closes #37937
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
This issue may have been fixed by #36299. Add the test and close. | https://api.github.com/repos/pandas-dev/pandas/pulls/47685 | 2022-07-12T19:14:38Z | 2022-07-12T21:40:09Z | 2022-07-12T21:40:09Z | 2022-07-13T01:23:39Z |
DOC: Clarify that FrozenList is hashable | diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
index deb6ac2c80a81..90713e846fbd1 100644
--- a/pandas/core/indexes/frozen.py
+++ b/pandas/core/indexes/frozen.py
@@ -18,7 +18,7 @@
class FrozenList(PandasObject, list):
"""
Container that doesn't allow setting item *but*
- because it's technically non-hashable, will be used
+ because it's technically hashable, will be used
for lookups, appropriately, etc.
"""
| - [x] closes #47683
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47684 | 2022-07-12T18:48:11Z | 2022-07-12T19:10:32Z | 2022-07-12T19:10:32Z | 2022-07-12T19:10:40Z |
CLN: non-nano follow-ups | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index ee3964b892e2e..9c7f35d240f96 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -152,7 +152,7 @@ def format_array_from_datetime(
# a format based on precision
basic_format = format is None
if basic_format:
- reso_obj = get_resolution(values, reso=reso)
+ reso_obj = get_resolution(values, tz=tz, reso=reso)
show_ns = reso_obj == Resolution.RESO_NS
show_us = reso_obj == Resolution.RESO_US
show_ms = reso_obj == Resolution.RESO_MS
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 00e2c8b8b6be6..0dfb859a3444f 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -144,9 +144,13 @@ cpdef inline (int64_t, int) precision_from_unit(str unit):
NPY_DATETIMEUNIT reso = abbrev_to_npy_unit(unit)
if reso == NPY_DATETIMEUNIT.NPY_FR_Y:
+ # each 400 years we have 97 leap years, for an average of 97/400=.2425
+ # extra days each year. We get 31556952 by writing
+ # 3600*24*365.2425=31556952
m = 1_000_000_000 * 31556952
p = 9
elif reso == NPY_DATETIMEUNIT.NPY_FR_M:
+ # 2629746 comes from dividing the "Y" case by 12.
m = 1_000_000_000 * 2629746
p = 9
elif reso == NPY_DATETIMEUNIT.NPY_FR_W:
diff --git a/pandas/_libs/tslibs/vectorized.pyi b/pandas/_libs/tslibs/vectorized.pyi
index 7eb4695b9ca2c..d24541aede8d8 100644
--- a/pandas/_libs/tslibs/vectorized.pyi
+++ b/pandas/_libs/tslibs/vectorized.pyi
@@ -42,5 +42,5 @@ def ints_to_pydatetime(
def tz_convert_from_utc(
stamps: npt.NDArray[np.int64],
tz: tzinfo | None,
- reso: int = ...,
+ reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.int64]: ...
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 58b4d82bcbe5f..a212da050e1f1 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -6897,6 +6897,7 @@ def insert(self, loc: int, item) -> Index:
# Use self._constructor instead of Index to retain NumericIndex GH#43921
# TODO(2.0) can use Index instead of self._constructor
+ # Check if doing so fixes GH#47071
return self._constructor._with_infer(new_values, name=self.name)
def drop(
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47682 | 2022-07-12T16:10:23Z | 2022-07-12T19:01:24Z | 2022-07-12T19:01:24Z | 2022-07-12T19:19:11Z |
WEB: Updating links of the governance page | diff --git a/web/pandas/about/governance.md b/web/pandas/about/governance.md
index 56ca0a2aac3db..92923db6e6763 100644
--- a/web/pandas/about/governance.md
+++ b/web/pandas/about/governance.md
@@ -1,23 +1,21 @@
-# Main Governance Document
+# Project governance
The official version of this document, along with a list of
individuals and institutions in the roles defined in the governance
-section below, is contained in The Project Governance Repository at:
+section below, is contained in the
+[Project governance](https://pandas.pydata.org/about/governance.html)
+page of the pandas website.
-[https://github.com/pydata/pandas-governance](https://github.com/pydata/pandas-governance)
-
-The Project
-===========
+## The Project
The pandas Project (The Project) is an open source software project affiliated
with the 501(c)3 NumFOCUS Foundation. The goal of The Project is to develop open
source software for data ingest, data preparation, data analysis, and data
visualization for the Python programming language. The Software developed by
The Project is released under the BSD (or similar) open source license,
-developed openly and hosted in public GitHub repositories under the [PyData
-GitHub organization](https://github.com/pydata). Examples of Project Software
-include the main pandas code repository, pandas-website, and the
-pandas-datareader add-on library.
+developed openly and hosted in public GitHub repositories under the [pandas
+GitHub organization](https://github.com/pandas-dev). Examples of Project Software
+include the main pandas code repository and the pandas-stubs library.
Through its affiliation with NumFOCUS, The Project has the right to receive
tax-deductible donations in the United States of America.
@@ -34,7 +32,7 @@ transparency.
Here is a list of the current Contributors to the main pandas repository:
-[https://github.com/pydata/pandas/graphs/contributors](https://github.com/pydata/pandas/graphs/contributors)
+[https://github.com/pandas-dev/pandas/graphs/contributors](https://github.com/pandas-dev/pandas/graphs/contributors)
There are also many other Contributors listed in the logs of other repositories of
the pandas project.
@@ -45,14 +43,13 @@ Community and we strive to keep the barrier between Contributors and Users as
low as possible.
The Project is formally affiliated with the 501(c)3 NumFOCUS Foundation
-([http://numfocus.org](http://numfocus.org)), which serves as its fiscal
+([https://numfocus.org](https://numfocus.org)), which serves as its fiscal
sponsor, may hold project trademarks and other intellectual property, helps
manage project donations and acts as a parent legal entity. NumFOCUS is the
only legal entity that has a formal relationship with the project (see
Institutional Partners section below).
-Governance
-==========
+## Governance
This section describes the governance and leadership model of The Project.
@@ -76,8 +73,7 @@ need for a more formal governance model. Moving forward The Project leadership
will consist of a BDFL and Core Team. We view this governance model as the
formalization of what we are already doing, rather than a change in direction.
-BDFL
-----
+### BDFL
The Project will have a BDFL (Benevolent Dictator for Life), who is currently
Wes McKinney. As Dictator, the BDFL has the authority to make all final
@@ -103,8 +99,7 @@ vote. If no BDFL candidate receives 2/3 of the votes of the Core Team, the Core
Team members shall propose the BDFL candidates to the Main NumFOCUS board, who
will then make the final decision.
-Core Team
----------
+### Core Team
The Project's Core Team will consist of Project Contributors who have produced
contributions that are substantial in quality and quantity, and sustained over
@@ -238,8 +233,7 @@ interactions with NumFOCUS.
employment or contracting work (including the reportee, i.e. the reportee + 1
is the max). This avoids effective majorities resting on one person.
-Institutional Partners and Funding
-==================================
+## Institutional Partners and Funding
The BDFL and Core Team are the primary leadership for the project. No outside
institution, individual or legal entity has the ability to own, control, usurp
@@ -300,23 +294,20 @@ Institutional Partners, with associated benefits:
**Tier 2** = an institution with at least one Institutional Contributor
-Breach
-======
+## Breach
Non-compliance with the terms of the governance documents shall be reported to
the Core Team either through public or private channels as deemed appropriate.
-Changing the Governance Documents
-=================================
+## Changing the Governance
-Changes to the governance documents are submitted via a GitHub pull request to
-The Project's governance documents GitHub repository at
-[https://github.com/pydata/pandas-governance](https://github.com/pydata/pandas-governance).
+Changes to the governance are submitted via a GitHub pull request to The Project's
+[governance page](https://github.com/pandas-dev/pandas/blob/main/web/pandas/about/governance.md).
The pull request is then refined in response to public comment and review, with
the goal being consensus in the community. After this open period, a Core Team
Member proposes to the Core Team that the changes be ratified and the pull
request merged (accepting the proposed changes) or proposes that the pull
-request be closed without merging (rejecting the proposed changes). The Member
+request be closed without merging (rejecting the proposed changes). The Member
should state the final commit hash in the pull request being proposed for
acceptance or rejection and briefly summarize the pull request. A minimum of
80% of the Core Team must vote and at least 2/3 of the votes must be positive
| Changing links and header formatting of the governance page. There are no material changes in this PR, I'll follow up with actual proposed updates to the governance. | https://api.github.com/repos/pandas-dev/pandas/pulls/47679 | 2022-07-12T10:57:09Z | 2022-07-12T17:15:00Z | 2022-07-12T17:15:00Z | 2022-07-12T17:15:10Z |
WEB: Update sponsors in website | diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index aeef826157b90..1330addf9a229 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -118,17 +118,27 @@ sponsors:
url: https://www.twosigma.com/
logo: /static/img/partners/two_sigma.svg
kind: partner
- description: "Phillip Cloud, Jeff Reback"
- - name: "Ursa Labs"
- url: https://ursalabs.org/
- logo: /static/img/partners/ursa_labs.svg
+ description: "Jeff Reback"
+ - name: "Voltron Data"
+ url: https://voltrondata.com/
+ logo: /static/img/partners/voltron_data.svg
kind: partner
- description: "Wes McKinney, Joris Van den Bossche"
+ description: "Joris Van den Bossche"
- name: "d-fine GmbH"
url: https://www.d-fine.com/en/
logo: /static/img/partners/dfine.svg
kind: partner
description: "Patrick Hoefler"
+ - name: "Quansight"
+ url: https://quansight.com/
+ logo: /static/img/partners/quansight_labs.svg
+ kind: partner
+ description: "Marco Gorelli"
+ - name: "Nvidia"
+ url: https://www.nvidia.com
+ logo: /static/img/partners/nvidia.svg
+ kind: partner
+ description: "Matthew Roeschke"
- name: "Tidelift"
url: https://tidelift.com
logo: /static/img/partners/tidelift.svg
@@ -139,6 +149,11 @@ sponsors:
logo: /static/img/partners/czi.svg
kind: regular
description: "<i>pandas</i> is funded by the Essential Open Source Software for Science program of the Chan Zuckerberg Initiative. The funding is used for general maintenance, improve extension types, and a efficient string type."
+ - name: "Bodo"
+ url: https://www.bodo.ai/
+ logo: /static/img/partners/bodo.svg
+ kind: regular
+ description: "Bodo's parallel computing platform uses pandas API, and Bodo financially supports pandas development to help improve pandas, in particular the pandas API"
inkind: # not included in active so they don't appear in the home page
- name: "OVH"
url: https://us.ovhcloud.com/
@@ -152,10 +167,13 @@ sponsors:
kind: partner
- name: "Anaconda"
url: https://www.anaconda.com/
- logo: /static/img/partners/anaconda.svg
kind: partner
- name: "RStudio"
url: https://www.rstudio.com/
- logo: /static/img/partners/r_studio.svg
kind: partner
- description: "Wes McKinney"
+ - name: "Ursa Labs"
+ url: https://ursalabs.org/
+ kind: partner
+ - name: "Gousto"
+ url: https://www.gousto.co.uk/
+ kind: partner
diff --git a/web/pandas/static/img/partners/anaconda.svg b/web/pandas/static/img/partners/anaconda.svg
deleted file mode 100644
index fcddf72ebaa28..0000000000000
--- a/web/pandas/static/img/partners/anaconda.svg
+++ /dev/null
@@ -1,99 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- viewBox="0 0 530.44 90.053329"
- height="90.053329"
- width="530.44"
- xml:space="preserve"
- id="svg2"
- version="1.1"><metadata
- id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><defs
- id="defs6" /><g
- transform="matrix(1.3333333,0,0,-1.3333333,0,90.053333)"
- id="g10"><g
- transform="scale(0.1)"
- id="g12"><path
- id="path14"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 958.313,274.5 53.637,120.406 h 1.64 L 1068.32,274.5 Z m 67.867,251.754 c -1.65,3.285 -3.83,6.027 -9.31,6.027 h -5.47 c -4.93,0 -7.66,-2.742 -9.31,-6.027 L 831.887,157.93 c -3.282,-7.117 1.097,-14.231 9.304,-14.231 h 47.618 c 8.754,0 13.679,5.473 15.867,10.942 l 26.82,59.113 h 163.644 l 26.81,-59.113 c 3.83,-7.657 7.66,-10.942 15.88,-10.942 h 47.61 c 8.21,0 12.59,7.114 9.3,14.231 l -168.56,368.324" /><path
- id="path16"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 1547.94,526.801 h -50.35 c -6.03,0 -10.4,-4.922 -10.4,-10.395 V 290.371 h -0.55 l -227.67,241.91 h -13.68 c -5.48,0 -10.4,-4.383 -10.4,-9.855 V 154.102 c 0,-5.481 4.92,-10.403 10.4,-10.403 h 49.8 c 6.02,0 10.4,4.922 10.4,10.403 v 235.332 h 0.54 L 1534.8,138.227 h 13.14 c 5.47,0 10.4,4.378 10.4,9.847 v 368.332 c 0,5.473 -4.93,10.395 -10.4,10.395" /><path
- id="path18"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 1725.97,274.5 53.64,120.406 h 1.64 L 1835.98,274.5 Z m 67.87,251.754 c -1.64,3.285 -3.83,6.027 -9.31,6.027 h -5.47 c -4.93,0 -7.66,-2.742 -9.31,-6.027 L 1599.55,157.93 c -3.29,-7.117 1.09,-14.231 9.3,-14.231 h 47.62 c 8.75,0 13.68,5.473 15.87,10.942 l 26.82,59.113 h 163.64 l 26.81,-59.113 c 3.83,-7.657 7.67,-10.942 15.88,-10.942 h 47.61 c 8.21,0 12.59,7.114 9.3,14.231 l -168.56,368.324" /><path
- id="path20"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 2261.6,241.117 c -3.29,3.285 -9.31,3.836 -13.69,0 -22.98,-18.605 -50.9,-31.191 -83.73,-31.191 -70.06,0 -122.6,58.008 -122.6,126.418 0,68.965 51.99,127.519 122.05,127.519 30.64,0 61.3,-12.039 84.28,-32.285 4.38,-4.379 9.85,-4.379 13.69,0 l 33.38,34.477 c 4.38,4.375 4.38,10.941 -0.55,15.328 -37.21,33.383 -77.17,50.898 -132.45,50.898 -109.45,0 -197.57,-88.117 -197.57,-197.574 0,-109.465 88.12,-196.48 197.57,-196.48 48.72,0 95.78,16.964 133,53.086 3.83,3.835 4.92,10.949 0.55,14.777 l -33.93,35.027" /><path
- id="path22"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 2520.21,209.379 c -68.95,0 -125.33,56.371 -125.33,125.328 0,68.957 56.38,126.426 125.33,126.426 68.96,0 125.88,-57.469 125.88,-126.426 0,-68.957 -56.92,-125.328 -125.88,-125.328 z m 0,322.902 c -109.46,0 -196.48,-88.117 -196.48,-197.574 0,-109.465 87.02,-196.48 196.48,-196.48 109.46,0 197.03,87.015 197.03,196.48 0,109.457 -87.57,197.574 -197.03,197.574" /><path
- id="path24"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 3090.17,526.801 h -50.35 c -6.02,0 -10.4,-4.922 -10.4,-10.395 V 290.371 h -0.54 l -227.68,241.91 h -13.68 c -5.47,0 -10.4,-4.383 -10.4,-9.855 V 154.102 c 0,-5.481 4.93,-10.403 10.4,-10.403 h 49.8 c 6.02,0 10.4,4.922 10.4,10.403 v 235.332 h 0.55 l 228.77,-251.207 h 13.13 c 5.47,0 10.4,4.378 10.4,9.847 v 368.332 c 0,5.473 -4.93,10.395 -10.4,10.395" /><path
- id="path26"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 3303.16,210.465 h -62.39 v 250.121 h 62.39 c 71.15,0 123.14,-53.641 123.14,-124.785 0,-71.696 -51.99,-125.336 -123.14,-125.336 z m 6.57,316.336 h -129.71 c -5.47,0 -9.85,-4.922 -9.85,-10.395 V 154.102 c 0,-5.481 4.38,-10.403 9.85,-10.403 h 129.71 c 105.63,0 192.1,85.926 192.1,192.102 0,105.082 -86.47,191 -192.1,191" /><path
- id="path28"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 3631.32,274.5 53.64,120.406 h 1.64 L 3741.33,274.5 Z m 236.43,-116.57 -168.57,368.324 c -1.64,3.285 -3.82,6.027 -9.29,6.027 h -5.48 c -4.93,0 -7.67,-2.742 -9.3,-6.027 L 3504.9,157.93 c -3.29,-7.117 1.09,-14.231 9.3,-14.231 h 47.62 c 8.76,0 13.68,5.473 15.87,10.942 l 26.82,59.113 h 163.63 l 26.83,-59.113 c 3.82,-7.657 7.66,-10.942 15.86,-10.942 h 47.62 c 8.21,0 12.59,7.114 9.3,14.231" /><path
- id="path30"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 3940.9,176.27 h 7.99 c 2.7,0 4.5,-1.793 4.5,-4.403 0,-2.422 -1.8,-4.394 -4.5,-4.394 h -7.99 z m -4.85,-26.582 h 3.33 c 0.99,0 1.7,0.808 1.7,1.707 v 10.148 h 5.57 l 4.49,-10.598 c 0.27,-0.629 0.9,-1.257 1.62,-1.257 h 4.04 c 1.26,0 2.16,1.257 1.53,2.425 -1.53,3.235 -3.15,6.645 -4.76,9.969 2.69,0.984 6.82,3.5 6.82,9.879 0,6.824 -5.48,10.594 -11.04,10.594 h -13.3 c -0.98,0 -1.7,-0.809 -1.7,-1.703 v -29.457 c 0,-0.899 0.72,-1.707 1.7,-1.707" /><path
- id="path32"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 3945.93,192.078 c 14.46,0 26.05,-11.586 26.05,-26.043 0,-14.371 -11.59,-26.047 -26.05,-26.047 -14.37,0 -26.04,11.676 -26.04,26.047 0,14.457 11.67,26.043 26.04,26.043 z m 0,-58.285 c 17.79,0 32.33,14.461 32.33,32.242 0,17.781 -14.54,32.328 -32.33,32.328 -17.78,0 -32.24,-14.547 -32.24,-32.328 0,-17.781 14.46,-32.242 32.24,-32.242" /><path
- id="path34"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 125.527,158.422 0.051,2.484 c 0.414,19.649 1.977,39.149 4.684,57.961 l 0.254,1.77 -1.668,0.679 c -17.871,7.305 -35.4574,15.782 -52.2699,25.219 l -2.1172,1.184 -1.0742,-2.16 C 62.3164,223.238 52.9844,199.707 45.6836,175.602 l -0.7031,-2.254 2.2812,-0.629 C 72.0234,165.91 97.5195,161.184 123.051,158.66 l 2.476,-0.238" /><path
- id="path36"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 177.781,500.941 c 0.032,0.196 0.063,0.395 0.094,0.59 -14.668,-0.258 -29.324,-1.265 -43.926,-2.965 1.891,-14.777 4.481,-29.437 7.828,-43.925 10.02,16.949 22.121,32.511 36.004,46.3" /><path
- id="path38"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 125.527,140.855 -0.039,2.051 -2.043,0.199 c -21.406,2.02 -43.2223,5.661 -64.8278,10.821 l -5.668,1.355 3.211,-4.855 C 75.5742,121.098 99.3125,95.0195 126.73,72.9258 l 4.43,-3.5899 -0.719,5.668 c -2.906,22.6719 -4.554,44.8321 -4.914,65.8511" /><path
- id="path40"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 230.566,657.227 c -26.32,-9.008 -51.164,-21.161 -74.101,-36.036 17.359,-3.07 34.469,-7.097 51.273,-12.027 6.696,16.375 14.297,32.426 22.828,48.063" /><path
- id="path42"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 339.918,675.43 c -13.023,0 -25.848,-0.813 -38.488,-2.25 17.925,-12.489 35.066,-26.145 51.238,-41.051 l 13.43,-12.391 -13.168,-12.672 c -10.899,-10.488 -21.559,-21.898 -31.688,-33.918 l -0.512,-0.585 c -0.117,-0.125 -2.003,-2.219 -5.152,-6.055 8,0.84 16.117,1.293 24.34,1.293 127.07,0 230.086,-103.016 230.086,-230.086 0,-127.074 -103.016,-230.086 -230.086,-230.086 -44.094,0 -85.277,12.426 -120.277,33.934 -17.27,-1.918 -34.629,-2.922 -52.012,-2.922 -8.074,0 -16.152,0.211 -24.227,0.629 0.524,-26.172 3.016,-53.3052 7.477,-81.438 C 204.82,21.3242 269.879,0 339.918,0 c 186.516,0 337.715,151.199 337.715,337.715 0,186.512 -151.199,337.715 -337.715,337.715" /><path
- id="path44"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 295.145,595.602 c 6.726,7.968 13.671,15.695 20.765,23.101 -15.824,13.469 -32.531,25.758 -50.004,36.856 -10.742,-18.161 -20.09,-36.977 -28.093,-56.282 15.195,-5.574 30.066,-11.953 44.589,-19.031 6.711,8.617 11.399,13.883 12.743,15.356" /><path
- id="path46"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 65.9219,402.934 1.289,-2.09 2.0118,1.433 c 15.6289,11.235 32.0823,21.594 48.9103,30.789 l 1.582,0.864 -0.449,1.738 c -5.028,19.227 -8.868,39.055 -11.414,58.941 l -0.305,2.399 -2.387,-0.434 C 80.168,492.027 55.4609,485.344 31.7383,476.703 l -2.2227,-0.816 0.8789,-2.188 c 9.7422,-24.562 21.6914,-48.363 35.5274,-70.765" /><path
- id="path48"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="M 62.0469,370.18 60.125,368.629 C 41.9492,353.844 24.7266,337.414 8.93359,319.797 L 7.375,318.066 9.13281,316.531 C 26.6641,301.188 45.5547,287.094 65.2734,274.645 l 2.0274,-1.293 1.2031,2.097 c 8.8828,15.781 18.8945,31.356 29.7695,46.278 l 1.0938,1.503 -1.2383,1.383 c -12.3281,13.746 -23.9883,28.395 -34.668,43.547 l -1.414,2.02" /><path
- id="path50"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 194.48,157.273 5.868,0.348 -4.559,3.723 c -17.976,14.715 -33.625,32.09 -46.453,51.656 l -0.106,0.621 -3.75,1.649 -0.433,-3.184 c -2.262,-16.856 -3.586,-34.566 -3.945,-52.625 l -0.039,-2.215 2.207,-0.129 c 8.003,-0.429 16.078,-0.644 24.171,-0.644 9.004,0 18.032,0.269 27.039,0.8" /><path
- id="path52"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 183.219,530.238 c 3.633,16.649 8.109,33.121 13.511,49.317 -21.125,6.078 -42.769,10.617 -64.789,13.523 -1.867,-22.047 -2.082,-44.082 -0.707,-65.941 17.278,1.988 34.629,3.011 51.985,3.101" /><path
- id="path54"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 215.813,531.414 c 14.707,9.441 30.539,17.266 47.281,23.195 -11.875,5.59 -24,10.661 -36.348,15.184 -4.219,-12.633 -7.863,-25.441 -10.933,-38.379" /><path
- id="path56"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 58.6914,257.121 -1.7773,1.113 C 39.4922,269.16 22.6055,281.363 6.74609,294.496 l -4.51953,3.742 0.76953,-5.812 C 7.30078,260.039 16.2734,228.496 29.6406,198.684 l 2.3672,-5.278 1.9024,5.465 c 6.6406,19.125 14.6601,38.102 23.8281,56.387 l 0.9531,1.863" /><path
- id="path58"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="M 102.133,577.48 C 81.9766,557.492 64.3555,534.969 49.7266,510.445 c 17.4804,5.215 35.1836,9.371 53.0194,12.528 -1.23,18.082 -1.465,36.273 -0.613,54.507" /><path
- id="path60"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 112.121,340.762 0.234,5.824 c 0.79,20.598 4.309,40.855 10.461,60.195 l 1.793,5.653 -5.129,-2.961 c -13.152,-7.59 -26.1792,-16.012 -38.7222,-25.047 l -1.8281,-1.328 1.293,-1.86 c 8.6992,-12.406 18.1562,-24.535 28.0973,-36.062 l 3.801,-4.414" /><path
- id="path62"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 114.383,305.906 -0.805,5.707 -3.34,-4.691 C 100.836,293.727 92.082,279.945 84.2227,265.961 l -1.1133,-1.992 1.9922,-1.133 c 14.1562,-7.984 29.0114,-15.305 44.1564,-21.762 l 5.402,-2.316 -2.406,5.363 c -8.863,19.668 -14.875,40.453 -17.871,61.785" /><path
- id="path64"
- style="fill:#43b53b;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 48.6602,386.676 1.5976,1.273 -1.0781,1.735 c -10.5859,16.918 -20.1836,34.707 -28.5469,52.867 l -2.457,5.355 -1.8125,-5.605 C 6.51172,411.789 1.05859,379.887 0.160156,347.473 L 0,341.523 4.10938,345.82 c 14.01172,14.598 28.99612,28.34 44.55082,40.856" /></g></g></svg>
\ No newline at end of file
diff --git a/web/pandas/static/img/partners/bodo.svg b/web/pandas/static/img/partners/bodo.svg
new file mode 100644
index 0000000000000..9dc6cb47505a3
--- /dev/null
+++ b/web/pandas/static/img/partners/bodo.svg
@@ -0,0 +1 @@
+<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" width="930.84" height="267.43" viewBox="0 0 930.84 267.43"><defs><style>.cls-1{fill:#5e5e5e;}.cls-2{fill:#1db100;}</style></defs><title>bodo-grey-green</title><path class="cls-1" d="M85.73,93a83.24,83.24,0,0,1,33.14,6.64,85.67,85.67,0,0,1,0,158.11,83.45,83.45,0,0,1-33.14,6.67,83.06,83.06,0,0,1-33.34-6.67A87.46,87.46,0,0,1,6.67,212,83.23,83.23,0,0,1,0,178.67V11.44A10.82,10.82,0,0,1,3.43,3.25a11.3,11.3,0,0,1,8-3.24,10.66,10.66,0,0,1,7.81,3.24,11,11,0,0,1,3.24,8.19V121.17a85.33,85.33,0,0,1,23.24-17.91,86.89,86.89,0,0,1,19.47-7.64A81.71,81.71,0,0,1,85.77,93m0,149a62.21,62.21,0,0,0,24.57-5,63.2,63.2,0,0,0,33.72-33.53,64,64,0,0,0,0-49.34,62.65,62.65,0,0,0-33.72-33.72,63.48,63.48,0,0,0-49.15,0,62.72,62.72,0,0,0-33.72,33.72,64,64,0,0,0,0,49.34A63.25,63.25,0,0,0,61.19,237,62.19,62.19,0,0,0,85.77,242"/><path class="cls-1" d="M264.79,264.43a83.15,83.15,0,0,1-33.33-6.67,88.56,88.56,0,0,1-27.27-18.29,84.93,84.93,0,0,1-18.48-27.05,85.76,85.76,0,1,1,158.12,0,84.61,84.61,0,0,1-18.48,27.05,87.75,87.75,0,0,1-27.43,18.29,83.53,83.53,0,0,1-33.15,6.67m0-148.59a61.81,61.81,0,0,0-24.58,5,62.65,62.65,0,0,0-33.72,33.72,63.48,63.48,0,0,0,0,49.15,62.72,62.72,0,0,0,33.72,33.72,63.48,63.48,0,0,0,49.15,0,62.74,62.74,0,0,0,33.75-33.76,63.48,63.48,0,0,0,0-49.15,62.65,62.65,0,0,0-33.72-33.72,62,62,0,0,0-24.58-5"/><path class="cls-1" d="M443.48,93a85.82,85.82,0,0,1,63.24,28.19V11.06A10.66,10.66,0,0,1,510,3.25,11.31,11.31,0,0,1,529.19,11V178.7A83.06,83.06,0,0,1,522.52,212,85.76,85.76,0,1,1,410.19,99.62,82.89,82.89,0,0,1,443.53,93m63.24,85.72a61.86,61.86,0,0,0-4.95-24.57,62.65,62.65,0,0,0-33.72-33.72,63.48,63.48,0,0,0-49.15,0,62.74,62.74,0,0,0-33.76,33.75,64,64,0,0,0,0,49.34,63.39,63.39,0,0,0,116.59,0,61.48,61.48,0,0,0,5-24.77"/><path class="cls-1" 
d="M622.94,264.43a83.06,83.06,0,0,1-33.34-6.67,88.6,88.6,0,0,1-27.24-18.29,84.77,84.77,0,0,1-18.48-27.05,85.76,85.76,0,1,1,158.11,0,84.42,84.42,0,0,1-18.47,27.05,87.91,87.91,0,0,1-27.44,18.29,83.45,83.45,0,0,1-33.14,6.67m0-148.59a62,62,0,0,0-24.58,5,62.72,62.72,0,0,0-33.72,33.72,63.48,63.48,0,0,0,0,49.15,62.78,62.78,0,0,0,33.72,33.72,63.48,63.48,0,0,0,49.15,0,62.72,62.72,0,0,0,33.68-33.76,63.48,63.48,0,0,0,0-49.15,62.65,62.65,0,0,0-33.72-33.72,61.86,61.86,0,0,0-24.57-5"/><path class="cls-2" d="M850.77,254.14a86.3,86.3,0,0,1-19.24,7.62,81.2,81.2,0,0,1-20.76,2.67,83.23,83.23,0,0,1-33.34-6.67A87.46,87.46,0,0,1,731.71,212a85.77,85.77,0,1,1,158.12-66.51,83.44,83.44,0,0,1,6.66,33.14V253a11.31,11.31,0,0,1-3.23,8,10.84,10.84,0,0,1-8.2,3.43,10.38,10.38,0,0,1-7.81-3.43,11.34,11.34,0,0,1-3.24-8V236.23a81.85,81.85,0,0,1-23.24,17.91M747.52,178.7A63.44,63.44,0,0,0,786.19,237a63.45,63.45,0,0,0,49.14,0,62.65,62.65,0,0,0,33.72-33.72,63.48,63.48,0,0,0,0-49.15,62.59,62.59,0,0,0-33.72-33.72,63.45,63.45,0,0,0-49.14,0,63.4,63.4,0,0,0-38.72,58.29"/><path class="cls-2" d="M919.35,78.12a11.07,11.07,0,0,1-11-11V62.93a11.33,11.33,0,0,1,3.24-8,10.31,10.31,0,0,1,7.81-3.43,11.71,11.71,0,0,1,11.43,11.43v4.19a10.31,10.31,0,0,1-3.43,7.81,11.31,11.31,0,0,1-8,3.24m0,186.31a10.68,10.68,0,0,1-7.81-3.24,11.14,11.14,0,0,1-3.24-8.19V104.41a11.12,11.12,0,0,1,3.24-8.19A10.6,10.6,0,0,1,919.4,93a11.22,11.22,0,0,1,8,3.24,10.75,10.75,0,0,1,3.43,8.19V253a10.76,10.76,0,0,1-3.43,8.19,11.31,11.31,0,0,1-8,3.24"/><path class="cls-2" d="M731.81,234.54a19.27,19.27,0,1,1-27.25,0h0a19.26,19.26,0,0,1,27.25,0"/></svg>
\ No newline at end of file
diff --git a/web/pandas/static/img/partners/dfine.svg b/web/pandas/static/img/partners/dfine.svg
old mode 100755
new mode 100644
diff --git a/web/pandas/static/img/partners/nvidia.svg b/web/pandas/static/img/partners/nvidia.svg
new file mode 100644
index 0000000000000..59f9e19cf00ad
--- /dev/null
+++ b/web/pandas/static/img/partners/nvidia.svg
@@ -0,0 +1,56 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ version="1.1"
+ id="svg2"
+ x="0px"
+ y="0px"
+ width="450"
+ height="340"
+ viewBox="35.188 31.512 450.00001 340"
+ enable-background="new 35.188 31.512 351.46 258.785"
+ xml:space="preserve"
+ sodipodi:docname="nvidia.svg"
+ inkscape:version="1.0.1 (3bc2e813f5, 2020-09-07)"><metadata
+ id="metadata11"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title>generated by pstoedit version:3.44 from NVBadge_2D.eps</dc:title></cc:Work></rdf:RDF></metadata><defs
+ id="defs9" /><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="1051"
+ id="namedview7"
+ showgrid="false"
+ inkscape:zoom="2.2870273"
+ inkscape:cx="163.94912"
+ inkscape:cy="154.68953"
+ inkscape:window-x="0"
+ inkscape:window-y="0"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="svg2" />
+<title
+ id="title4">generated by pstoedit version:3.44 from NVBadge_2D.eps</title>
+
+
+<path
+ id="path17"
+ d="m 433.465,322.7165 c 0,3.771 -2.769,6.302 -6.047,6.302 v -0.023 c -3.371,0.023 -6.089,-2.508 -6.089,-6.278 0,-3.769 2.718,-6.293 6.089,-6.293 3.279,-0.001 6.047,2.523 6.047,6.292 z m 2.453,0 c 0,-5.175 -4.02,-8.179 -8.5,-8.179 -4.511,0 -8.531,3.004 -8.531,8.179 0,5.172 4.021,8.188 8.531,8.188 4.481,0 8.5,-3.016 8.5,-8.188 m -9.91,0.692 h 0.91 l 2.109,3.703 h 2.316 l -2.336,-3.859 c 1.207,-0.086 2.2,-0.661 2.2,-2.286 0,-2.019 -1.392,-2.668 -3.75,-2.668 h -3.411 v 8.813 h 1.961 v -3.703 m 10e-4,-1.492 v -2.122 h 1.364 c 0.742,0 1.753,0.06 1.753,0.965 0,0.985 -0.523,1.157 -1.398,1.157 h -1.719" /><path
+ id="path19"
+ d="m 378.676,277.6345 10.598,28.993 H 367.75 Z m -11.35,-11.289 -24.423,61.88 h 17.246 l 3.863,-10.934 h 28.903 l 3.656,10.934 h 18.722 l -24.605,-61.888 z m -49.033,61.903 h 17.497 v -61.922 l -17.5,-0.004 z m -121.467,-61.926 -14.598,49.078 -13.984,-49.074 -18.879,-0.004 19.972,61.926 h 25.207 l 20.133,-61.926 z m 70.725,13.484 h 7.52 c 10.91,0 17.966,4.898 17.966,17.609 0,12.714 -7.056,17.613 -17.966,17.613 h -7.52 z m -17.35,-13.484 v 61.926 h 28.366 c 15.113,0 20.048,-2.512 25.384,-8.148 3.769,-3.957 6.207,-12.641 6.207,-22.134 0,-8.707 -2.063,-16.468 -5.66,-21.304 -6.481,-8.649 -15.817,-10.34 -29.75,-10.34 z m -165.743,-0.086 v 62.012 h 17.645 v -47.086 l 13.672,0.004 c 4.527,0 7.754,1.128 9.934,3.457 2.765,2.945 3.894,7.699 3.894,16.395 v 27.23 h 17.098 v -34.262 c 0,-24.453 -15.586,-27.75 -30.836,-27.75 z m 137.583,0.086 0.007,61.926 h 17.489 v -61.926 z" /><path
+ id="path21"
+ fill="#77b900"
+ d="m 131.481,143.0215 c 0,0 22.504,-33.203 67.437,-36.638 v -12.046 c -49.769,3.997 -92.867,46.149 -92.867,46.149 0,0 24.41,70.565 92.867,77.026 v -12.804 c -50.237,-6.32 -67.437,-61.687 -67.437,-61.687 z m 67.437,36.223 v 11.726 c -37.968,-6.769 -48.507,-46.237 -48.507,-46.237 0,0 18.23,-20.195 48.507,-23.47 v 12.867 c -0.023,0 -0.039,-0.007 -0.058,-0.007 -15.891,-1.907 -28.305,12.938 -28.305,12.938 0,0 6.958,24.991 28.363,32.183 m 0,-107.125 v 22.218 c 1.461,-0.112 2.922,-0.207 4.391,-0.257 56.582,-1.907 93.449,46.406 93.449,46.406 0,0 -42.343,51.488 -86.457,51.488 -4.043,0 -7.828,-0.375 -11.383,-1.005 v 13.739 c 3.04,0.386 6.192,0.613 9.481,0.613 41.051,0 70.738,-20.965 99.484,-45.778 4.766,3.817 24.278,13.103 28.289,17.168 -27.332,22.883 -91.031,41.329 -127.144,41.329 -3.481,0 -6.824,-0.211 -10.11,-0.528 v 19.306 H 354.95 V 72.1195 Z m 0,49.144 v -14.879 c 1.446,-0.101 2.903,-0.179 4.391,-0.226 40.688,-1.278 67.382,34.965 67.382,34.965 0,0 -28.832,40.043 -59.746,40.043 -4.449,0 -8.438,-0.715 -12.028,-1.922 v -45.114 c 15.84,1.914 19.028,8.911 28.551,24.786 l 21.18,-17.859 c 0,0 -15.461,-20.277 -41.524,-20.277 -2.833,-0.001 -5.544,0.198 -8.206,0.483" />
+</svg>
diff --git a/web/pandas/static/img/partners/quansight_labs.svg b/web/pandas/static/img/partners/quansight_labs.svg
new file mode 100644
index 0000000000000..d49ab1b7d39ec
--- /dev/null
+++ b/web/pandas/static/img/partners/quansight_labs.svg
@@ -0,0 +1 @@
+<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 511.28 230.68"><defs><style>.cls-1{fill:#452391;}.cls-2{fill:#99c941;}.cls-3{fill:#973896;}</style></defs><path class="cls-1" d="M41.94,188.52c-2.37.51-7.48,2.71-12.51,5.54h3.46c5-2.71,10-5,14.08-6a15,15,0,0,0,1.62-.46Z"/><path class="cls-1" d="M21.51,200.85s9.82-6.64,19.13-6.73C52.76,194,64,212.43,78.34,212.43c13.42,0,19.3-9.22,22.1-16.83l-.58-.26c-5.22,7.72-10.57,10.32-19.14,10.32-15,0-20-15-40.06-14.73C30.05,191.06,21.41,199.35,21.51,200.85Z"/><path class="cls-1" d="M62.94,178.35c.13-.15.25-.31.37-.46a30.46,30.46,0,0,0,6.35-18.57,27.52,27.52,0,0,0-.42-4.5,30.27,30.27,0,0,0-3.75-10.91l.21.25C61,135.79,52.5,130.1,42.82,129.88h-1.3c-9.69.22-18.18,5.91-22.89,14.28l.21-.25A30.48,30.48,0,0,0,15,155.57a29.28,29.28,0,0,0-.31,3.85c.08,16.05,12.4,29.22,27.41,29.22A26.8,26.8,0,0,0,62.9,178.41ZM39.15,133.72l.38-.05.41,0,.69-.07.35,0,.44,0h1.5l.44,0,.34,0,.69.07.41,0,.38.05.43.08c11.63,2.15,16.16,15,16.16,25.45,0,6.22-1.59,13.22-5.3,18.35h0l-.33.45a16.86,16.86,0,0,1-13.87,6.76,18.31,18.31,0,0,1-2.76-.2c-.63-.05-1.26-.13-1.88-.22A16.13,16.13,0,0,1,32.19,182l.14,0c-6.93-4.77-9.77-14.46-9.77-22.76,0-10.48,4.53-23.3,16.16-25.45Z"/><path class="cls-1" d="M113.62,185.14c0,.43-.44.65-1.32.65H104a1.38,1.38,0,0,1-1.32-.9L101.38,182c-.28-.54-.55-.57-.82-.08q-3.54,4.68-10.27,4.68a10.93,10.93,0,0,1-8.42-3.4c-2.11-2.28-3.16-5.49-3.16-9.66v-16.5a3.43,3.43,0,0,0-2.71-3.7l-1.73-.49c-.87-.28-1.31-.58-1.31-.91s.44-.65,1.31-.65H85.61c.71,0,1.07.19,1.07.57a7.93,7.93,0,0,1-1,1.11c-.69.69-1,2.31-1,4.89v17q0,7.4,7.64,7.39A11.09,11.09,0,0,0,98,180.54c2.14-1.32,3.2-2.88,3.2-4.69v-18.8a3.43,3.43,0,0,0-2.71-3.7l-1.72-.49c-.88-.28-1.32-.58-1.32-.91s.44-.65,1.32-.65h9.61c.76,0,1.12.32,1.07,1l-.33,5.42v22.43a3.27,3.27,0,0,0,2.79,3.61l2.3.49C113.15,184.45,113.62,184.75,113.62,185.14Z"/><path class="cls-1" 
d="M149.59,185.14c0,.43-.44.65-1.31.65h-8.14a1.52,1.52,0,0,1-1.39-.9l-.66-1.15a1,1,0,0,0-.82-.58,1.9,1.9,0,0,0-1.07.41,17.43,17.43,0,0,1-10.26,3,11,11,0,0,1-7.15-2.21,8.23,8.23,0,0,1-3-6.66q0-5.91,5.67-8.37a31.87,31.87,0,0,1,5.42-1.56c3.07-.72,5.06-1.21,6-1.48,2.9-.88,4.35-2.27,4.35-4.19v-3.21a4.59,4.59,0,0,0-2.63-4.18,10,10,0,0,0-4.84-1.15c-3,0-5.25.65-6.7,2s-2.5,3.53-3.16,6.65a.59.59,0,0,1-.58.41.43.43,0,0,1-.49-.49l-.08-9.86c0-.49.27-.76.82-.82.88-.05,2.6-.25,5.18-.57a41.59,41.59,0,0,1,5.17-.42q6.57,0,9.69,2.55,3.54,3,3.53,9.69V180q0,3.21,2.63,3.7l2.47.49C149.15,184.4,149.59,184.7,149.59,185.14Zm-12.32-7.89v-8.13q0-1.06-.9-.66l-10.76,3.12q-4,1.15-4,4.77,0,7.14,6.41,7.14a11.85,11.85,0,0,0,6.57-1.76A5.21,5.21,0,0,0,137.27,177.25Z"/><path class="cls-1" d="M189.92,185.14c0,.43-.44.65-1.32.65H177.27c-.71,0-1.07-.19-1.07-.57a7.21,7.21,0,0,1,1-1.11c.68-.69,1-2.31,1-4.89v-17q0-7.4-7.63-7.39a11.26,11.26,0,0,0-5.67,1.72q-3.28,2.05-3.29,4.68V180a3.45,3.45,0,0,0,2.71,3.7l1.73.49c.87.28,1.31.58,1.31.91s-.44.65-1.31.65H151.4q-1.32,0-1.32-.57t1.32-1l1.72-.49a3.43,3.43,0,0,0,2.63-3.7V157c0-2.13-.93-3.34-2.79-3.61l-3.37-.49c-.88-.11-1.32-.41-1.32-.91s.44-.65,1.32-.65h9.28a1.36,1.36,0,0,1,1.31.9l1.32,2.87c.27.55.55.58.82.09q3.52-4.68,10.27-4.69a11,11,0,0,1,8.42,3.41q3.15,3.42,3.16,9.65V180a3.44,3.44,0,0,0,2.71,3.7l1.72.49C189.48,184.51,189.92,184.81,189.92,185.14Z"/><path class="cls-1" d="M218.58,176.18q0,10.44-12.73,10.43A43.59,43.59,0,0,1,194.27,185c-.71-.22-1.15-1-1.31-2.38,0-1,0-2-.09-3.08s-.19-2.51-.41-4.31c-.11-.66.08-1,.58-1s.79.3.9.91c1.15,3.28,2.77,5.53,4.85,6.73,1.8,1.1,4.49,1.64,8.05,1.64a8.34,8.34,0,0,0,4.6-1.31,4.68,4.68,0,0,0,2.38-4q0-3.53-5.34-5.83-4.84-2.06-9.78-4.19-5.34-3-5.33-8,0-9.69,11.41-9.7a86.77,86.77,0,0,1,11.17,1c.55.06.82.33.82.82l.17,8.13c0,.66-.17,1-.49,1s-.5-.25-.66-.74q-1.48-4.44-4.11-5.91c-1.53-.88-4-1.32-7.39-1.32q-6.4,0-6.41,4.93,0,2.79,5.34,5.09,9.45,4,10,4.36Q218.58,171,218.58,176.18Z"/><path class="cls-1" 
d="M242.07,185.22c0,.38-.44.57-1.31.57H223.51q-1.32,0-1.32-.57c0-.5.44-.82,1.32-1l2.54-.49q2.72-.5,2.71-3.7V157q0-3.12-2.71-3.61l-2.79-.49c-.87-.17-1.31-.47-1.31-.91s.44-.65,1.31-.65h10.6c.82,0,1.2.32,1.15,1l-.33,5.42V180c0,2.19.9,3.43,2.71,3.7l3.37.49C241.63,184.34,242.07,184.67,242.07,185.22ZM235,137.74a4.15,4.15,0,0,1-1.43,3.08,4.49,4.49,0,0,1-3.21,1.36,3.42,3.42,0,0,1-2.4-1,3.06,3.06,0,0,1-1-2.34,4.18,4.18,0,0,1,1.38-3.12,4.49,4.49,0,0,1,3.17-1.31Q235,134.46,235,137.74Z"/><path class="cls-1" d="M280.67,154c0,.54-.3.82-.9.82l-8.95.25a11.67,11.67,0,0,1-1.4,16.53A16,16,0,0,1,259,175h-2.38c-3.29,0-4.93,1.06-4.93,3.16q0,2.76,3.45,2.76c.33,0,1.67-.06,4-.17s4.25-.16,5.67-.16c4,0,6.95.63,8.87,1.89q3.38,2.3,3.37,7.8,0,6.74-6.74,10.18a27.43,27.43,0,0,1-12.73,2.72,17.13,17.13,0,0,1-9.11-2.22,8.74,8.74,0,0,1-4.36-7.89q0-5.74,5.42-8.7a4.85,4.85,0,0,1-2-4,5.83,5.83,0,0,1,3.78-5.91c.38-.16.58-.36.58-.57s-.17-.42-.51-.58c-4.86-2.14-7.3-5.69-7.3-10.68a10.82,10.82,0,0,1,4.25-8.83,16.31,16.31,0,0,1,10.52-3.41,18.24,18.24,0,0,1,7.18,1.4l13.7-.57c.6,0,.9.22.9.65Zm-8.21,37.61c0-2.13-.71-3.64-2.14-4.51s-3.86-1.32-7.31-1.32c-1.58,0-3.87,0-6.85.13l-5,.12q-2.55.83-2.55,5,0,3.85,4.35,5.91a18.64,18.64,0,0,0,7.89,1.56Q272.46,198.52,272.46,191.62Zm-4.76-28q0-9.94-9.33-9.94-8.09,0-8.09,8.13a11.06,11.06,0,0,0,2.4,7.47q2.38,2.81,7,2.8a7.56,7.56,0,0,0,5.94-2.3A8.88,8.88,0,0,0,267.7,163.62Z"/><path class="cls-1" 
d="M319.28,185.14c0,.43-.44.65-1.32.65H306.63c-.72,0-1.07-.19-1.07-.57a7.21,7.21,0,0,1,1-1.11q1-1,1-4.23V162.22q0-7.4-7.64-7.39a10.82,10.82,0,0,0-5.75,1.72q-3.2,2-3.2,4.68V180a3.44,3.44,0,0,0,2.71,3.7l1.73.49c.87.28,1.31.58,1.31.91s-.44.65-1.31.65H280.75c-.87,0-1.31-.19-1.31-.57s.44-.71,1.31-1l1.73-.49a3.43,3.43,0,0,0,2.63-3.7v-42c0-2.13-.91-3.34-2.71-3.61L279,134c-.88-.11-1.31-.41-1.31-.91s.43-.65,1.31-.65H290.2c.82,0,1.2.32,1.15,1l-.33,5.42v16a.39.39,0,0,0,.66.33q3.52-4.68,10.27-4.69a10.92,10.92,0,0,1,8.41,3.41q3.17,3.42,3.17,9.65V180a3.44,3.44,0,0,0,2.71,3.7l1.8.49C318.86,184.56,319.28,184.86,319.28,185.14Z"/><path class="cls-1" d="M343.34,183.71a.51.51,0,0,1-.16.65,11.25,11.25,0,0,1-6.74,1.84q-10.1,0-10.1-7.41V155.4h-6.49c-.55,0-.82-.25-.82-.77v-2.48c0-.57.27-.85.82-.85h4.76q1.39,0,2.22-1.89l3.86-9.2c.22-.49.49-.74.82-.74.5,0,.74.33.74,1V151.3h9.36c.55,0,.83.22.83.65v2.8c0,.43-.25.65-.74.65h-9.45v21.82q0,6.53,5.09,6.52a9.31,9.31,0,0,0,4.77-1.15c.33-.33.6-.3.82.08Z"/><path class="cls-1" d="M396,172.82l-2,12.07a1,1,0,0,1-1.15.9H360.92q-1.32,0-1.32-.57t1.32-1q2.54-.57,2.54-3.69V142.18q0-3.21-2.71-3.61l-3.37-.5c-.87-.11-1.31-.41-1.31-.9s.44-.66,1.31-.66h18.48q1.32,0,1.32.57c0,.5-.44.82-1.32,1l-2.71.49c-1.86.33-2.79,1.55-2.79,3.68v38.63c0,1,.58,1.47,1.73,1.47h10.26c3.95-.05,6.76-1,8.46-2.87q1.89-2,3.37-7.48c.22-.71.6-1.06,1.15-1.06S396.21,171.56,396,172.82Z"/><path class="cls-1" 
d="M431.47,185.14c0,.43-.44.65-1.32.65H422a1.52,1.52,0,0,1-1.39-.9l-.66-1.15a1,1,0,0,0-.82-.58,1.9,1.9,0,0,0-1.07.41,17.44,17.44,0,0,1-10.27,3,11,11,0,0,1-7.14-2.21,8.23,8.23,0,0,1-3-6.66q0-5.91,5.67-8.37a31.58,31.58,0,0,1,5.42-1.56q4.59-1.08,6-1.48c2.91-.88,4.36-2.27,4.36-4.19v-3.21a4.59,4.59,0,0,0-2.63-4.18,10,10,0,0,0-4.85-1.15c-3,0-5.24.65-6.69,2s-2.51,3.53-3.16,6.65a.59.59,0,0,1-.58.41.43.43,0,0,1-.49-.49l-.08-9.86c0-.49.27-.76.82-.82.87-.05,2.6-.25,5.17-.57a41.72,41.72,0,0,1,5.18-.42q6.57,0,9.69,2.55,3.53,3,3.53,9.69V180q0,3.21,2.63,3.7l2.46.49C431,184.4,431.47,184.7,431.47,185.14Zm-12.32-7.89v-8.13c0-.71-.3-.93-.91-.66l-10.75,3.12q-4,1.15-4,4.77,0,7.14,6.41,7.14a11.85,11.85,0,0,0,6.57-1.76A5.23,5.23,0,0,0,419.15,177.25Z"/><path class="cls-1" d="M465.88,168.79q0,8-4.35,12.9a13.71,13.71,0,0,1-10.68,4.92,26.07,26.07,0,0,1-6.82-.82,23.07,23.07,0,0,0-3.69-.82,5.45,5.45,0,0,0-2.47.44,14.15,14.15,0,0,1-1.56.79c-.65,0-1-.3-1-.9a2,2,0,0,1,.45-.86,2.8,2.8,0,0,0,.45-1.69V138.07c0-2.13-.9-3.34-2.71-3.61l-3.37-.49c-.87-.11-1.31-.41-1.31-.91s.44-.65,1.31-.65h11.17c.83,0,1.21.32,1.15,1l-.32,5.42v15.44c0,.28.1.44.32.5a.58.58,0,0,0,.58-.17,11.68,11.68,0,0,1,8.7-4.11,12.5,12.5,0,0,1,9.86,4.19Q465.89,159.52,465.88,168.79Zm-6.24,0q0-14.08-9.28-14.09-4.61,0-7.64,4.67a3.25,3.25,0,0,0-.57,1.72v14.17q0,4.42,1.8,6.31,1.89,2.13,6.57,2.13a7.52,7.52,0,0,0,6.66-4Q459.64,175.64,459.64,168.75Z"/><path class="cls-1" d="M496.6,176.18q0,10.44-12.73,10.43A43.48,43.48,0,0,1,472.29,185c-.71-.22-1.15-1-1.32-2.38,0-1,0-2-.08-3.08s-.19-2.51-.41-4.31c-.11-.66.08-1,.58-1s.79.3.9.91c1.15,3.28,2.76,5.53,4.85,6.73,1.8,1.1,4.48,1.64,8,1.64a8.31,8.31,0,0,0,4.6-1.31,4.67,4.67,0,0,0,2.39-4q0-3.53-5.34-5.83-4.85-2.06-9.78-4.19-5.34-3-5.34-8,0-9.69,11.42-9.7a86.77,86.77,0,0,1,11.17,1c.55.06.82.33.82.82l.17,8.13c0,.66-.17,1-.5,1s-.49-.25-.65-.74q-1.49-4.44-4.11-5.91c-1.53-.88-4-1.32-7.39-1.32q-6.4,0-6.41,4.93,0,2.79,5.34,5.09,9.45,4,10,4.36Q496.6,171,496.6,176.18Z"/><rect class="cls-1" x="179.9" y="34" 
width="154.24" height="4.36"/><path class="cls-1" d="M270.14,85.37,270.06,81c1.24,0,3.05,0,5.14,0,4.63,0,11,0,13.71-.28a14.89,14.89,0,0,0,8.51-4c3.41-3.08,5.1-7.06,5.16-12.18,0-1.64,0-3.28,0-4.92,0-2.58,0-5.25.09-7.9a17.67,17.67,0,0,1,17.56-17.56c3.14-.09,6.32-.08,9.4-.07h4v4.36c-1.33,0-2.66,0-4,0-3,0-6.19,0-9.27.07a13.26,13.26,0,0,0-13.32,13.33c-.08,2.58-.08,5.23-.09,7.78,0,1.66,0,3.31,0,5A20.12,20.12,0,0,1,300.34,80a19.34,19.34,0,0,1-10.94,5.06c-3,.34-9.21.33-14.21.31C273.12,85.36,271.33,85.35,270.14,85.37Z"/><path class="cls-1" d="M242.35,85.37c-1.22,0-2.51,0-3.89,0a82.68,82.68,0,0,1-9.55-.31A19.37,19.37,0,0,1,218,80c-4.3-3.88-6.52-9-6.6-15.36,0-1.65,0-3.31,0-5,0-2.56,0-5.2-.08-7.79a13.27,13.27,0,0,0-13.33-13.33c-3.13-.08-8.74-.08-13.24-.07-1.85,0-3.52,0-4.8,0l0-4.36h4.77c4.54,0,10.18,0,13.38.07a17.67,17.67,0,0,1,17.56,17.56c.07,2.65.08,5.32.09,7.91,0,1.64,0,3.28,0,4.91.07,5.12,1.76,9.1,5.17,12.18a14.86,14.86,0,0,0,8.51,4,84.4,84.4,0,0,0,9,.28c1.4,0,2.72,0,4,0Z"/><path class="cls-2" d="M256.86,97.52A14.47,14.47,0,0,1,242.39,82.9a14.26,14.26,0,0,1,14.67-14.32A14.46,14.46,0,0,1,271.33,83.1,14.69,14.69,0,0,1,256.86,97.52Z"/><path class="cls-1" d="M348.8,21.45A14.45,14.45,0,0,1,363.08,36a14.66,14.66,0,0,1-14.45,14.43,14.45,14.45,0,0,1-14.49-14.6A14.27,14.27,0,0,1,348.8,21.45Z"/><path class="cls-3" d="M164.39,20.16A15.75,15.75,0,0,1,179.9,36a16,16,0,0,1-15.78,15.67,15.75,15.75,0,0,1-15.74-16A15.55,15.55,0,0,1,164.39,20.16Z"/><path class="cls-1" d="M256.72,99.43a16.38,16.38,0,0,1,.08-32.76H257a16.37,16.37,0,0,1,16.16,16.45,16.54,16.54,0,0,1-16.38,16.31Zm.08-28.94a12.41,12.41,0,0,0-12.59,12.42,12.59,12.59,0,0,0,3.67,9,12.4,12.4,0,0,0,8.84,3.68v1.91l0-1.91a12.71,12.71,0,0,0,12.57-12.53A12.55,12.55,0,0,0,257,70.49Z"/><path class="cls-1" 
d="M348.46,52.3a16.13,16.13,0,0,1-11.53-4.8,16.38,16.38,0,0,1-4.79-11.72,16.19,16.19,0,0,1,16.39-16.24h.2A16.34,16.34,0,0,1,364.9,36,16.54,16.54,0,0,1,348.55,52.3Zm.07-28.94A12.41,12.41,0,0,0,336,35.8a12.57,12.57,0,0,0,3.68,9,12.34,12.34,0,0,0,8.82,3.67h.07a12.7,12.7,0,0,0,12.55-12.54,12.51,12.51,0,0,0-12.4-12.58Z"/><path class="cls-1" d="M164,53.59a17.44,17.44,0,0,1-12.46-5.18,17.71,17.71,0,0,1-5.16-12.69,17.49,17.49,0,0,1,17.7-17.47h.24A17.67,17.67,0,0,1,181.72,36,17.85,17.85,0,0,1,164,53.59Zm.08-31.52A13.7,13.7,0,0,0,150.2,35.75a13.89,13.89,0,0,0,4.05,10A13.63,13.63,0,0,0,164,49.77v1.91l0-1.91A14,14,0,0,0,177.9,36,13.59,13.59,0,0,0,174,26.27a13.74,13.74,0,0,0-9.72-4.2Z"/></svg>
\ No newline at end of file
diff --git a/web/pandas/static/img/partners/r_studio.svg b/web/pandas/static/img/partners/r_studio.svg
deleted file mode 100644
index 15a1d2a30ff30..0000000000000
--- a/web/pandas/static/img/partners/r_studio.svg
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Generator: Adobe Illustrator 22.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
-<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 1784.1 625.9" style="enable-background:new 0 0 1784.1 625.9;" xml:space="preserve">
-<style type="text/css">
- .st0{fill:#75AADB;}
- .st1{fill:#4D4D4D;}
- .st2{fill:#FFFFFF;}
- .st3{fill:url(#SVGID_1_);}
- .st4{fill:url(#SVGID_2_);}
- .st5{fill:url(#SVGID_3_);}
- .st6{fill:url(#SVGID_4_);}
- .st7{fill:url(#SVGID_5_);}
- .st8{fill:url(#SVGID_6_);}
- .st9{fill:url(#SVGID_7_);}
- .st10{fill:url(#SVGID_8_);}
- .st11{fill:url(#SVGID_9_);}
- .st12{fill:url(#SVGID_10_);}
- .st13{opacity:0.18;fill:url(#SVGID_11_);}
- .st14{opacity:0.3;}
-</style>
-<g id="Gray_Logo">
-</g>
-<g id="Black_Letters">
-</g>
-<g id="Blue_Gradient_Letters">
- <g>
-
- <ellipse transform="matrix(0.7071 -0.7071 0.7071 0.7071 -127.9265 317.0317)" class="st0" cx="318.7" cy="312.9" rx="309.8" ry="309.8"/>
- <g>
- <path class="st1" d="M694.4,404.8c16.1,10.3,39.1,18.1,63.9,18.1c36.7,0,58.1-19.4,58.1-47.4c0-25.5-14.8-40.8-52.3-54.8 c-45.3-16.5-73.3-40.4-73.3-79.1c0-43.3,35.8-75.4,89.8-75.4c28,0,49,6.6,61,13.6l-9.9,29.3c-8.7-5.4-27.2-13.2-52.3-13.2 c-37.9,0-52.3,22.7-52.3,41.6c0,26,16.9,38.7,55.2,53.6c47,18.1,70.5,40.8,70.5,81.6c0,42.8-31.3,80.3-96.8,80.3 c-26.8,0-56-8.2-70.9-18.1L694.4,404.8z"/>
- <path class="st1" d="M943.3,201.3v47.8h51.9v27.6h-51.9v107.5c0,24.7,7,38.7,27.2,38.7c9.9,0,15.7-0.8,21-2.5l1.6,27.6 c-7,2.5-18.1,4.9-32.1,4.9c-16.9,0-30.5-5.8-39.1-15.2c-9.9-11.1-14-28.8-14-52.3V276.7h-30.9v-27.6h30.9V212L943.3,201.3z"/>
- <path class="st1" d="M1202.8,393.7c0,21,0.4,39.1,1.6,54.8h-32.1l-2.1-32.5h-0.8c-9.1,16.1-30.5,37.1-65.9,37.1 c-31.3,0-68.8-17.7-68.8-87.3V249.1h36.3v110c0,37.9,11.9,63.9,44.5,63.9c24.3,0,41.2-16.9,47.8-33.4c2.1-4.9,3.3-11.5,3.3-18.5 v-122h36.3V393.7z"/>
- <path class="st1" d="M1434.8,156v241c0,17.7,0.8,37.9,1.6,51.5h-32.1l-1.6-34.6h-1.2c-10.7,22.2-34.6,39.1-67.2,39.1 c-48.2,0-85.7-40.8-85.7-101.4c-0.4-66.3,41.2-106.7,89.4-106.7c30.9,0,51.1,14.4,60.2,30.1h0.8V156H1434.8z M1398.9,330.2 c0-4.5-0.4-10.7-1.6-15.2c-5.4-22.7-25.1-41.6-52.3-41.6c-37.5,0-59.7,33-59.7,76.6c0,40.4,20.2,73.8,58.9,73.8 c24.3,0,46.6-16.5,53.1-43.3c1.2-4.9,1.6-9.9,1.6-15.7V330.2z"/>
- <path class="st1" d="M1535.7,193c0,12.4-8.7,22.2-23.1,22.2c-13.2,0-21.8-9.9-21.8-22.2c0-12.4,9.1-22.7,22.7-22.7 C1526.6,170.4,1535.7,180.3,1535.7,193z M1495.3,448.5V249.1h36.3v199.4H1495.3z"/>
- <path class="st1" d="M1772.2,347.1c0,73.7-51.5,105.9-99.3,105.9c-53.6,0-95.6-39.6-95.6-102.6c0-66.3,44.1-105.5,98.9-105.5 C1733.5,245,1772.2,286.6,1772.2,347.1z M1614.4,349.2c0,43.7,24.7,76.6,60.2,76.6c34.6,0,60.6-32.5,60.6-77.5 c0-33.8-16.9-76.2-59.7-76.2C1632.9,272.1,1614.4,311.7,1614.4,349.2z"/>
- </g>
- <g>
- <path class="st2" d="M424.7,411.8h33.6v26.1h-51.3L322,310.5h-45.3v101.3h44.3v26.1H209.5v-26.1h38.3V187.3l-38.3-4.7v-24.7 c14.5,3.3,27.1,5.6,42.9,5.6c23.8,0,48.1-5.6,71.9-5.6c46.2,0,89.1,21,89.1,72.3c0,39.7-23.8,64.9-60.7,75.6L424.7,411.8z M276.7,285.3l24.3,0.5c59.3,0.9,82.1-21.9,82.1-52.3c0-35.5-25.7-49.5-58.3-49.5c-15.4,0-31.3,1.4-48.1,3.3V285.3z"/>
- </g>
- <g>
- <path class="st1" d="M1751.8,170.4c-12.9,0-23.4,10.5-23.4,23.4c0,12.9,10.5,23.4,23.4,23.4c12.9,0,23.4-10.5,23.4-23.4 C1775.2,180.9,1764.7,170.4,1751.8,170.4z M1771.4,193.8c0,10.8-8.8,19.5-19.5,19.5c-10.8,0-19.5-8.8-19.5-19.5 c0-10.8,8.8-19.5,19.5-19.5C1762.6,174.2,1771.4,183,1771.4,193.8z"/>
- <path class="st1" d="M1760.1,203.3l-5.8-8.5c3.3-1.2,5-3.6,5-7c0-5.1-4.3-6.9-8.4-6.9c-1.1,0-2.2,0.1-3.2,0.3 c-1,0.1-2.1,0.2-3.1,0.2c-1.4,0-2.5-0.2-3.7-0.5l-0.6-0.1v3.3l3.4,0.4v18.8h-3.4v3.4h10.9v-3.4h-3.9v-7.9h3.2l7.3,11l0.2,0.2h5.3 v-3.4H1760.1z M1755.6,188.1c0,1.2-0.5,2.2-1.4,2.9c-1.1,0.8-2.8,1.2-5,1.2l-1.9,0v-7.7c1.4-0.1,2.6-0.2,3.7-0.2 C1753.1,184.3,1755.6,185,1755.6,188.1z"/>
- </g>
- </g>
-</g>
-<g id="White_Letters">
-</g>
-<g id="R_Ball">
-</g>
-</svg>
\ No newline at end of file
diff --git a/web/pandas/static/img/partners/ursa_labs.svg b/web/pandas/static/img/partners/ursa_labs.svg
deleted file mode 100644
index cacc80e337d25..0000000000000
--- a/web/pandas/static/img/partners/ursa_labs.svg
+++ /dev/null
@@ -1,106 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!-- Generator: Adobe Illustrator 23.0.3, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
-<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
- viewBox="0 0 359 270" style="enable-background:new 0 0 359 270;" xml:space="preserve">
-<style type="text/css">
- .st0{fill-rule:evenodd;clip-rule:evenodd;fill:#404040;}
- .st1{filter:url(#Adobe_OpacityMaskFilter);}
- .st2{fill-rule:evenodd;clip-rule:evenodd;fill:#FFFFFF;}
- .st3{mask:url(#mask-2_1_);}
-</style>
-<title>HOME 1 Copy 8</title>
-<desc>Created with Sketch.</desc>
-<g id="HOME-1-Copy-8">
- <g id="Group" transform="translate(20.000000, 20.000000)">
- <path id="URSA-LABS-Copy" class="st0" d="M0,158.4h9.1V214c0,0.3,0,0.7,0.1,1.1c0,0.3,0,0.9,0.1,1.6s0.2,1.5,0.6,2.3
- c0.3,0.8,0.9,1.5,1.6,2.1c0.7,0.6,1.8,0.9,3.3,0.9c0.3,0,0.9,0,1.6-0.1c0.7-0.1,1.4-0.4,2.1-0.9c1-0.9,1.6-2,1.8-3.3
- s0.3-3.2,0.4-5.5v-53.8h9.2v54.4c0,0.6,0,1.3-0.1,2.1c-0.1,0.8-0.2,1.7-0.3,2.6s-0.3,1.8-0.5,2.6c-0.7,2.3-1.7,4.1-3,5.4
- c-1.3,1.3-2.7,2.3-4.2,2.9c-1.5,0.7-2.9,1.1-4.2,1.2c-1.3,0.1-2.3,0.2-3,0.2c-0.6,0-1.5-0.1-2.7-0.2c-1.2-0.1-2.5-0.5-3.8-1
- s-2.6-1.4-3.8-2.5c-1.2-1.1-2.2-2.7-3-4.6c-0.4-1-0.7-2.1-0.9-3.3c-0.2-1.2-0.3-2.9-0.4-5V158.4z M44,158.4h17
- c0.6,0,1.2,0,1.7,0.1c0.6,0.1,1.3,0.2,2.2,0.3c0.9,0.1,1.7,0.4,2.6,0.8c0.8,0.4,1.6,1.1,2.3,2c0.7,0.9,1.2,2.1,1.6,3.7
- c0.4,1.8,0.6,5.1,0.6,10.1c0,1.3,0,2.7-0.1,4.1c0,1.4-0.1,2.8-0.2,4.2c-0.1,0.9-0.3,1.9-0.4,2.9s-0.4,1.9-0.7,2.7
- c-0.4,0.9-0.9,1.6-1.6,2.1s-1.3,0.8-2,1c-0.7,0.2-1.3,0.3-1.9,0.3H64v0.5c1.3,0.1,2.4,0.3,3.3,0.6c0.9,0.3,1.8,1,2.5,2.1
- c0.8,1.3,1.3,2.7,1.5,4.3c0.2,1.6,0.3,3.9,0.3,6.8v7.7c0,2,0,3.6,0.1,4.9c0.1,1.3,0.2,2.4,0.3,3.3c0.1,0.9,0.3,1.8,0.5,2.7
- c0.2,0.9,0.6,1.8,1,2.9h-9.7c-0.3-1.7-0.6-3-0.8-4.1s-0.3-2.2-0.4-3.2c-0.1-1-0.2-2.1-0.2-3.2c0-1.1-0.1-2.5-0.1-4.2v-5
- c-0.1-1.2-0.1-2.4-0.2-3.6c0-1.2-0.1-2.4-0.3-3.6c-0.1-0.9-0.3-1.7-0.5-2.5c-0.2-0.8-0.6-1.5-1.2-2c-0.5-0.3-1-0.5-1.5-0.6
- s-1-0.2-1.6-0.2h-3.8v32.4H44V158.4z M53.4,166.9v21.7h4.4c1.2,0,2.2-0.2,2.9-0.6c0.7-0.4,1.2-1.2,1.6-2.5
- c0.2-0.9,0.3-2.3,0.4-4.2s0.1-4.1,0.1-6.6c0-0.7,0-1.5-0.1-2.2c0-0.8-0.1-1.5-0.2-2.2c-0.1-1.4-0.4-2.3-1-2.8
- c-0.3-0.3-0.8-0.5-1.3-0.5c-0.5,0-1.2,0-2.2,0H53.4z M110.6,169.1v12.4h-8.5v-12.4c0-0.2,0-0.6-0.1-1.1c0-0.5-0.2-1.1-0.4-1.6
- c-0.2-0.5-0.6-1-1.1-1.4s-1.3-0.6-2.3-0.6c-1.1,0-2,0.2-2.6,0.6c-0.6,0.4-1.1,1-1.4,1.7c-0.3,0.7-0.5,1.5-0.6,2.3
- c-0.1,0.9-0.1,1.7-0.1,2.5c0,1.5,0.1,2.8,0.3,4c0.2,1.2,0.5,2.3,0.9,3.4s0.9,2.2,1.5,3.2s1.3,2.2,2.1,3.4c0.7,1.1,1.3,2.1,2,3.1
- c0.7,1,1.4,2,2.1,3.1c1,1.4,2,2.9,3.1,4.6c1.2,1.9,2.2,3.7,2.9,5.3c0.7,1.6,1.3,3.1,1.7,4.5c0.4,1.4,0.7,2.7,0.8,3.9
- c0.1,1.2,0.2,2.3,0.2,3.3c0,0.4,0,1.3-0.1,2.6c-0.1,1.3-0.4,2.8-0.9,4.4c-0.5,1.6-1.3,3.3-2.3,4.9c-1,1.6-2.6,2.9-4.6,3.7
- c-0.6,0.3-1.4,0.5-2.4,0.7c-1,0.2-2.3,0.3-3.8,0.3c-2.9,0-5.1-0.5-6.8-1.4s-2.8-1.9-3.6-2.8c-1.5-1.7-2.3-3.4-2.6-5.3
- s-0.4-3.8-0.5-5.9V203h8.6v12.8c0,1.7,0.2,3,0.5,3.8c0.3,0.8,0.8,1.5,1.6,2c0.2,0.1,0.5,0.3,1,0.5c0.5,0.2,1.1,0.3,1.8,0.3
- c1.1,0,2-0.3,2.7-0.8c0.6-0.6,1.1-1.3,1.4-2.1c0.3-0.8,0.4-1.7,0.5-2.7c0-1,0.1-1.8,0.1-2.6c0-2.5-0.3-4.6-0.8-6.4
- c-0.5-1.7-1.4-3.7-2.7-5.9c-1.3-2.3-2.8-4.5-4.3-6.6s-2.9-4.3-4.3-6.5c-0.4-0.6-0.9-1.4-1.5-2.4c-0.6-1-1.2-2.2-1.8-3.6
- c-0.6-1.4-1.1-3-1.5-4.7s-0.7-3.7-0.7-5.7c0-3.9,0.7-6.9,2.1-9s2.8-3.7,4.3-4.5c0.7-0.5,1.8-0.9,3.1-1.3c1.3-0.4,3-0.6,5-0.6
- c0.5,0,1.2,0,2.3,0.1c1,0.1,2.1,0.3,3.3,0.7c1.1,0.4,2.2,1.1,3.3,2c1.1,0.9,1.9,2.3,2.4,4c0.2,0.7,0.4,1.4,0.5,2.1
- C110.5,166.6,110.5,167.7,110.6,169.1z M140.1,158.4l10.9,70.3h-9.1l-1.8-12.9h-10.6l-1.6,12.9h-9.1l10-70.3H140.1z M133.5,183
- l-3,24.2h8.4l-3.5-24.4c-0.1-0.6-0.2-1.2-0.3-1.8c0-0.6-0.1-1.2-0.2-1.8c-0.1-1.3-0.1-2.6-0.1-3.8c0-1.3,0-2.5-0.1-3.8H134
- c-0.1,1.9-0.1,3.8-0.2,5.7C133.7,179.2,133.6,181.1,133.5,183z M190.2,158.4V220h15.4v8.7h-24.7v-70.3H190.2z M232,158.4
- l10.9,70.3h-9.1l-1.8-12.9h-10.6l-1.6,12.9h-9.1l10-70.3H232z M225.4,183l-3,24.2h8.4l-3.5-24.4c-0.1-0.6-0.2-1.2-0.3-1.8
- c0-0.6-0.1-1.2-0.2-1.8c-0.1-1.3-0.1-2.6-0.1-3.8c0-1.3,0-2.5-0.1-3.8h-0.8c-0.1,1.9-0.1,3.8-0.2,5.7
- C225.6,179.2,225.5,181.1,225.4,183z M251.9,158.4h16.5c1.5,0,2.9,0.1,4.4,0.2s2.8,0.8,3.9,1.8c1.3,1.2,2,2.7,2.2,4.5
- c0.2,1.8,0.3,4.3,0.4,7.4c0,0.6,0,1.2,0.1,1.8c0,0.6,0.1,1.2,0.1,1.8c0,1.1,0,2.2-0.1,3.3c0,1.1-0.1,2.2-0.2,3.3
- c0,0.2,0,0.9-0.1,2.1c-0.1,1.2-0.3,2.3-0.8,3.3c-0.4,0.7-1,1.3-1.7,1.8c-0.7,0.5-1.4,0.8-2.2,1c-0.4,0.1-0.8,0.2-1.3,0.2
- c-0.5,0-0.8,0-0.9,0.1v0.5c1.3,0.1,2.4,0.4,3.5,0.7c1,0.4,1.9,1.1,2.6,2.2c0.5,1,0.8,2.2,0.9,3.7c0.1,1.5,0.1,3.4,0.1,5.9
- c0.1,0.9,0.1,1.9,0.1,2.8v7c0,1.4-0.1,2.8-0.2,4.3c0,0.2,0,0.6-0.1,1.2c0,0.6-0.2,1.3-0.4,2.1c-0.2,0.8-0.5,1.6-0.9,2.5
- s-1,1.6-1.7,2.3c-1.4,1.1-3,1.8-4.9,1.9s-3.6,0.2-5.3,0.2h-14.2V158.4z M260.9,166.8v21.1h3.6c1.5-0.1,2.7-0.2,3.7-0.5
- c1-0.3,1.6-1.3,1.8-3c0.2-1.4,0.3-3.8,0.3-7.1c0-2.2-0.1-4.4-0.3-6.6c-0.1-1.7-0.4-2.8-1-3.3c-0.3-0.3-0.8-0.5-1.3-0.5
- c-0.5,0-1.2,0-2.1,0H260.9z M260.9,195.5V220h4.8c0.5,0,1,0,1.5,0c0.5,0,0.9-0.1,1.3-0.2c0.4-0.1,0.7-0.3,1-0.6
- c0.3-0.3,0.5-0.8,0.6-1.4c0-0.3,0-0.7,0.1-1.4c0-0.7,0.1-1.5,0.1-2.4c0-0.9,0.1-1.9,0.1-2.9c0-1.1,0.1-2.1,0.1-3.1
- c0-1.2,0-2.4-0.1-3.5c0-1.2-0.1-2.3-0.2-3.5c-0.1-0.7-0.2-1.4-0.3-2.3c-0.1-0.9-0.4-1.6-1-2.1c-0.4-0.3-0.9-0.5-1.4-0.6
- s-1-0.1-1.5-0.1H260.9z M318.4,169.1v12.4h-8.5v-12.4c0-0.2,0-0.6-0.1-1.1c0-0.5-0.2-1.1-0.4-1.6c-0.2-0.5-0.6-1-1.1-1.4
- c-0.5-0.4-1.3-0.6-2.3-0.6c-1.1,0-2,0.2-2.6,0.6s-1.1,1-1.4,1.7s-0.5,1.5-0.6,2.3c-0.1,0.9-0.1,1.7-0.1,2.5c0,1.5,0.1,2.8,0.3,4
- s0.5,2.3,0.9,3.4s0.9,2.2,1.5,3.2c0.6,1.1,1.3,2.2,2.1,3.4c0.7,1.1,1.3,2.1,2,3.1c0.7,1,1.4,2,2.1,3.1c1,1.4,2,2.9,3.1,4.6
- c1.2,1.9,2.2,3.7,2.9,5.3c0.7,1.6,1.3,3.1,1.7,4.5c0.4,1.4,0.7,2.7,0.8,3.9c0.1,1.2,0.2,2.3,0.2,3.3c0,0.4,0,1.3-0.1,2.6
- c-0.1,1.3-0.4,2.8-0.9,4.4c-0.5,1.6-1.3,3.3-2.3,4.9c-1,1.6-2.6,2.9-4.6,3.7c-0.6,0.3-1.4,0.5-2.4,0.7c-1,0.2-2.3,0.3-3.8,0.3
- c-2.9,0-5.1-0.5-6.8-1.4c-1.6-0.9-2.8-1.9-3.6-2.8c-1.5-1.7-2.3-3.4-2.6-5.3c-0.3-1.9-0.4-3.8-0.5-5.9V203h8.6v12.8
- c0,1.7,0.2,3,0.5,3.8c0.3,0.8,0.8,1.5,1.6,2c0.2,0.1,0.5,0.3,1,0.5c0.5,0.2,1.1,0.3,1.8,0.3c1.1,0,2-0.3,2.7-0.8
- c0.6-0.6,1.1-1.3,1.4-2.1c0.3-0.8,0.4-1.7,0.5-2.7c0-1,0.1-1.8,0.1-2.6c0-2.5-0.3-4.6-0.8-6.4c-0.5-1.7-1.4-3.7-2.7-5.9
- c-1.3-2.3-2.8-4.5-4.3-6.6c-1.5-2.1-2.9-4.3-4.3-6.5c-0.4-0.6-0.9-1.4-1.5-2.4c-0.6-1-1.2-2.2-1.8-3.6c-0.6-1.4-1.1-3-1.5-4.7
- c-0.4-1.8-0.7-3.7-0.7-5.7c0-3.9,0.7-6.9,2.1-9s2.8-3.7,4.3-4.5c0.7-0.5,1.8-0.9,3.1-1.3c1.3-0.4,3-0.6,5-0.6c0.5,0,1.2,0,2.3,0.1
- c1,0.1,2.1,0.3,3.3,0.7s2.2,1.1,3.3,2s1.9,2.3,2.4,4c0.2,0.7,0.4,1.4,0.5,2.1C318.2,166.6,318.3,167.7,318.4,169.1z"/>
- <g id="Group-3-Copy" transform="translate(47.000000, 0.000000)">
- <g id="Clip-2">
- </g>
- <defs>
- <filter id="Adobe_OpacityMaskFilter" filterUnits="userSpaceOnUse" x="0" y="0" width="225.8" height="123.9">
- <feColorMatrix type="matrix" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0"/>
- </filter>
- </defs>
- <mask maskUnits="userSpaceOnUse" x="0" y="0" width="225.8" height="123.9" id="mask-2_1_">
- <g class="st1">
- <polygon id="path-1_1_" class="st2" points="0,0 225.9,0 225.9,123.9 0,123.9 "/>
- </g>
- </mask>
- <g id="Page-1" class="st3">
- <g id="Mask">
- <path class="st0" d="M177.2,54.3c6.1,21.2,19.4,48.5,24,54.7c5.3-1.2,9.1,1.2,12.4,5.1c-1.2,0.9-2.7,1.5-3.4,2.6
- c-2.7,4.4-6.9,3-10.7,3.2c-2.8,0.2-5.6,0.3-8.4,0.3c-0.9,0-1.8-0.3-2.7-0.5c-1-0.3-1.9-1-2.8-1c-2.5,0.1-4.7,0-7.1-1.1
- c-1-0.5-2.6,0.9-3.6-0.8c-1.1-1.8-2.2-3.6-3.4-5.5c-1.2,0.2-2.2,0.4-3.4,0.6c-2.4-3-3.4-14.8-6.1-17.7
- c-0.6-0.7-2.1-2.2-3.8-2.7c-0.3-0.9-5.4-7.2-5.9-8.7c-0.2-0.5-0.3-1.2-0.7-1.4c-3.1-2-4.2-4.9-4-8.5c0-0.4-0.2-0.7-0.4-1.7
- c-1.2,2.7-2.2,4.8-3.2,7.1c-0.6,1.4-1,2.9-1.8,4.3c-0.5,0.9-1.3,1.6-2,2.3c-2.4,2.2-1.8,0.9-3.2,3.6c-1.1,2-2,4-3,6.1
- c-0.5,1.1-0.9,2.2-1.1,3.3c-0.7,4.1-3.2,7.6-1.5,11.2c3,0.6,6.3,0.5,8.6,2c2.2,1.5,3.5,4.5,5,6.7c-3.1,0.5-5.9,1.2-8.7,1.4
- c-3.8,0.3-7.6,0.2-11.3,0.2c-5,0-10.1-0.1-15.1-0.1c-2.6,0-3.9-1.5-5.4-3.7c-2.1-3.1-1.1-6-0.8-9.1c0.1-0.8,0-3.3-0.1-4.2
- c-0.1-0.9-0.1-1.9,0-2.9c0.2-1.3,0.8-2.6,0.9-3.9c0.1-1.5-0.4-3-0.4-4.5c0-1.5,0.1-3.1,0.5-4.6c0.7-2.7-0.1,0,0.7-2.7
- c0.1-0.2,0-0.7,0-0.8c-0.9-3.6,1.8-6,2.8-8.8c0-0.1,0-0.1-0.1-0.5c-1.8,1.8-4.1,0.8-6.1,1.2c-2.9,0.6-5.7,2.1-8,3
- c-1.4-0.1-2.5-0.4-3.5-0.2c-2,0.5-3.9,1.1-6.2,0.9c-2.5-0.2-5.1,0.6-7.7,0.8c-2.2,0.2-4.8,0.9-6.5,0c-1.5-0.7-2.8-0.9-4.4-1
- c-1.6-0.1-2.4,0.7-2.6,2.1c-1.1,6.3-2.3,12.7-3.1,19.1c-0.4,3.3-0.2,6.6-0.2,9.9c0,1.5,0.6,2.5,1.9,3.5
- c1.5,1.1,2.6,2.7,3.6,4.3c0.8,1.3,0.6,2.6-1.5,2.7c-7.3,0.2-14.6,0.5-21.9,0.4c-2.1,0-4.2-1.5-6.2-2.5
- c-0.3-0.2-0.4-1.1-0.4-1.7c0-4.4,0-13.5,0-18.4c-1,0.6-1.3,0.8-1.6,1c-2.5,2.3-4.9,4.1-7.3,6.4c-1.9,1.8-1.6,3.3,0.2,5.4
- c2.4,2.7,4.4,5.7,4.4,9.5c0,2.5-2.2,3.2-3.8,3.3c-5.7,0.4-11.5,0.4-17.2,0.4c-2.8,0-3.8-1.5-4.4-4.2
- c-1.2-5.4-2.2-10.8-4.3-16.1c-1.6-4.1-2-8.9,1.5-13c5.1-5.9,9.5-12.3,12.8-19.5c1-2.2,1.4-3.8,0.4-6.1c-4.9-1-7.1-3.7-8.2-8.7
- c-1-4.6-0.2-8.9,1-13.2c2.3-7.8,4.1-11,8.4-18c5.6-9,13.4-15.5,22.8-20.2c11.3-5.6,23.3-5.5,35.3-4.2
- c16.2,1.6,32.4,3.6,48.6,5.3c1.3,0.1,2.9-0.2,4.1-0.8c7.7-3.9,15.5-4.2,23.6-1.4c5.6,1.9,11.4,3.6,17.1,5.2
- c2,0.6,4.1,0.8,6.2,1.1c5.7,0.9,11.5,1.8,17.3,2.4c2.9,0.3,5.9,0.1,8.8,0.3c0.7,0,1.5,0.3,2.1,0.7c2.6,1.8,5.1,3.7,7.5,5.6
- c1.6,1.2,3.2,2.3,4.5,3.8c0.6,0.7,0.7,1.9,0.9,2.9c0.3,1.1,0.3,2.6,0.9,3.4c2.6,3.1,5.3,6,8.1,8.9c0.9,1,1.1,1.7,0.3,2.9
- c-1.2,1.6-1.8,3.7-3.3,4.8c-3.1,2.2-6.3,4.3-10.7,3.2c-2.5-0.6-5.5,0.5-8.2,0.8c-2.1,0.3-4.3,0.2-6.2,0.9
- c-4.1,1.6-8.5,1.1-12.5,2.3c-1.5,0.4-2.8,1.2-4.3,1.6C179.2,54.8,178.3,54.5,177.2,54.3"/>
- </g>
- </g>
- </g>
- </g>
-</g>
-</svg>
diff --git a/web/pandas/static/img/partners/voltron_data.svg b/web/pandas/static/img/partners/voltron_data.svg
new file mode 100644
index 0000000000000..0fb7dfd850166
--- /dev/null
+++ b/web/pandas/static/img/partners/voltron_data.svg
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Generator: Adobe Illustrator 26.3.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
+<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
+ viewBox="0 0 1887.7 876.4" style="enable-background:new 0 0 1887.7 876.4;" xml:space="preserve">
+<style type="text/css">
+ .st0{fill:#005050;}
+</style>
+<g>
+ <g>
+ <g>
+ <path class="st0" d="M943.8,0L943.8,0H791.1H638.5l76.3,132.2l76.3,132.2l76.3,132.2l76.4,132.2l76.3-132.2l76.3-132.2
+ l-76.3-132.2L943.8,0z M1000,385l-56.2,97.4L887.6,385L818,264.4l69.6-120.6l56.2-97.4l56.2,97.4l69.6,120.6L1000,385z"/>
+ <polygon class="st0" points="1082.5,0 1096,23.5 1208.4,23.5 1152.1,121.1 1165.6,144.6 1249.1,0 "/>
+ </g>
+ </g>
+ <g>
+ <g>
+ <path class="st0" d="M3.2,758.3c-1-2.1-0.3-3.6,1.7-3.6h14.7c1.2-0.2,2.5,0.1,3.5,0.7c1,0.7,1.8,1.7,2.2,2.8l38.2,78.4h0.3
+ l38.3-78.4c0.4-1.2,1.2-2.2,2.2-2.8c1-0.7,2.3-0.9,3.5-0.7h14.7c2.1,0,2.9,1.5,1.7,3.6L66.8,873.9c-0.7,1.5-1.4,2-2.6,2h-1
+ c-1.2,0-1.9-0.5-2.5-2L3.2,758.3z"/>
+ <path class="st0" d="M245.4,752.4c16.2,0.3,31.6,7,43,18.6c11.3,11.6,17.7,27.1,17.7,43.4c0,16.2-6.3,31.8-17.7,43.4
+ c-11.3,11.6-26.8,18.3-43,18.6c-34.5,0-61.1-27.1-61.1-62C184.2,779.6,210.9,752.4,245.4,752.4z M245.4,858.4
+ c24.9,0,41.7-20.6,41.7-44s-16.7-44.1-41.7-44.1c-24.9,0-41.5,20.5-41.5,44.1S220.6,858.5,245.4,858.4L245.4,858.4z"/>
+ <path class="st0" d="M454.2,856.8c2.9,0,3.4,0.7,3.4,3.4v10.4c0,2.9-0.7,3.6-3.4,3.6h-70c-2.6,0-3.4-0.9-3.4-3.6V758.3
+ c0-2.7,0.8-3.6,3.4-3.6h12.6c2.5,0,3.4,0.9,3.4,3.6v98.5H454.2z"/>
+ <path class="st0" d="M593.9,754.7c2.9,0,3.6,0.9,3.6,3.6v10.9c0,2.9-0.9,3.6-3.6,3.6h-34.5v97.8c0,2.9-0.7,3.6-3.4,3.6h-12.6
+ c-2.6,0-3.4-0.9-3.4-3.6v-97.8h-34.7c-2.5,0-3.4-0.9-3.4-3.6v-10.9c0-2.7,0.9-3.6,3.4-3.6H593.9z"/>
+ <path class="st0" d="M686,870.7c0,2.9-0.9,3.6-3.4,3.6H670c-2.6,0-3.4-0.9-3.4-3.6V758.3c0-2.7,0.8-3.6,3.4-3.6h44.6
+ c20,0,42,9.9,42,37.5c0,21.9-13.8,32.6-29.6,36l28.5,41.5c1.9,2.9,0.7,4.5-2.6,4.5h-13.6c-1.2,0-2.4-0.4-3.5-1
+ c-1-0.6-1.9-1.5-2.5-2.6l-28.4-41h-19.1L686,870.7z M712.7,811.8c18.4,0,24.2-8.6,24.2-19.8s-5.7-19.4-24.2-19.4h-26.6v39.3
+ L712.7,811.8L712.7,811.8z"/>
+ <path class="st0" d="M885.7,752.4c16.2,0.3,31.6,7,43,18.6c11.3,11.6,17.7,27.1,17.7,43.4c0,16.2-6.3,31.8-17.7,43.4
+ c-11.3,11.6-26.8,18.3-43,18.6c-34.5,0-61.1-27.1-61.1-62C824.5,779.6,851.2,752.4,885.7,752.4z M885.7,858.4
+ c24.9,0,41.6-20.6,41.6-44s-16.7-44.1-41.6-44.1c-24.9,0-41.4,20.6-41.4,44.1C844.2,838.1,860.9,858.5,885.7,858.4L885.7,858.4z"
+ />
+ <path class="st0" d="M1040.9,796.4h-0.3v74.2c0,2.9-0.9,3.6-3.4,3.6h-12.5c-2.9,0-3.6-0.9-3.6-3.6V754.7c0-1.2,0.5-1.7,1.9-1.7
+ h0.9c0.6,0,1.1,0.1,1.6,0.4c0.5,0.3,0.9,0.7,1.2,1.1l76.1,78h0.3v-74.2c0-2.7,0.7-3.6,3.4-3.6h12.5c2.9,0,3.6,0.9,3.6,3.6v115.8
+ c0,1.3-0.5,1.9-1.9,1.9h-0.9c-0.6,0-1.1-0.1-1.6-0.4c-0.5-0.3-0.9-0.6-1.2-1.1L1040.9,796.4z"/>
+ <path class="st0" d="M1335.7,754.7c34.5,0,60.3,24.9,60.3,59.8c0,34.8-25.7,59.8-60.3,59.8h-42.8c-2.6,0-3.4-0.9-3.4-3.6V758.3
+ c0-2.7,0.8-3.6,3.4-3.6H1335.7z M1309.1,772.5v84h26.6c24.8,0,40.6-18.6,40.6-42s-15.9-42.1-40.6-42.1L1309.1,772.5z"/>
+ <path class="st0" d="M1577,870.7c1,2.1,0.3,3.6-1.7,3.6h-14.7c-1.2,0.1-2.4-0.1-3.5-0.8c-1-0.7-1.8-1.6-2.2-2.8l-10.9-22.5h-55
+ l-10.9,22.5c-0.4,1.2-1.1,2.2-2.2,2.9c-1,0.7-2.3,0.9-3.5,0.7h-14.7c-2.1,0-2.9-1.5-1.7-3.6l57.4-115.6c0.7-1.5,1.4-2.1,2.6-2.1
+ h1c1.2,0,1.9,0.5,2.6,2.1L1577,870.7z M1516.6,790.1l-19.8,40.1h39.6l-19.6-40.1H1516.6z"/>
+ <path class="st0" d="M1714.4,754.7c2.9,0,3.6,0.9,3.6,3.6v10.9c0,2.9-0.9,3.6-3.6,3.6h-34.5v97.8c0,2.9-0.7,3.6-3.4,3.6h-12.7
+ c-2.5,0-3.4-0.9-3.4-3.6v-97.8h-34.7c-2.6,0-3.4-0.9-3.4-3.6v-10.9c0-2.7,0.9-3.6,3.4-3.6H1714.4z"/>
+ <path class="st0" d="M1884.4,870.7c1,2.1,0.3,3.6-1.7,3.6H1868c-1.2,0.1-2.4-0.1-3.5-0.8c-1-0.7-1.8-1.6-2.2-2.8l-10.9-22.5
+ h-54.9l-10.9,22.5c-0.3,1.2-1.1,2.2-2.2,2.9s-2.3,0.9-3.5,0.7h-14.7c-2.1,0-2.9-1.5-1.7-3.6l57.4-115.6c0.7-1.5,1.4-2.1,2.6-2.1
+ h1c1.2,0,1.9,0.5,2.6,2.1L1884.4,870.7z M1823.9,790.1l-19.8,40.1h39.6l-19.6-40.1H1823.9z"/>
+ </g>
+ </g>
+</g>
+</svg>
| Still waiting for the logos of the new sponsors, and final confirmation that we want to replace Ursa Labs by Voltron, but opening early in case there is feedback. | https://api.github.com/repos/pandas-dev/pandas/pulls/47678 | 2022-07-12T09:42:04Z | 2022-07-24T09:24:51Z | 2022-07-24T09:24:51Z | 2022-07-24T09:24:52Z |
TYP: make na_value consistently a property | diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 083acf16ec758..c9abef226770c 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -90,8 +90,11 @@ class StringDtype(StorageExtensionDtype):
name = "string"
- #: StringDtype.na_value uses pandas.NA
- na_value = libmissing.NA
+ #: StringDtype().na_value uses pandas.NA
+ @property
+ def na_value(self) -> libmissing.NAType:
+ return libmissing.NA
+
_metadata = ("storage",)
def __init__(self, storage=None) -> None:
@@ -335,13 +338,11 @@ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False):
na_values = scalars._mask
result = scalars._data
result = lib.ensure_string_array(result, copy=copy, convert_na_value=False)
- result[na_values] = StringDtype.na_value
+ result[na_values] = libmissing.NA
else:
- # convert non-na-likes to str, and nan-likes to StringDtype.na_value
- result = lib.ensure_string_array(
- scalars, na_value=StringDtype.na_value, copy=copy
- )
+ # convert non-na-likes to str, and nan-likes to StringDtype().na_value
+ result = lib.ensure_string_array(scalars, na_value=libmissing.NA, copy=copy)
# Manually creating new array avoids the validation step in the __init__, so is
# faster. Refactor need for validation?
@@ -396,7 +397,7 @@ def __setitem__(self, key, value):
# validate new items
if scalar_value:
if isna(value):
- value = StringDtype.na_value
+ value = libmissing.NA
elif not isinstance(value, str):
raise ValueError(
f"Cannot set non-string value '{value}' into a StringArray."
@@ -497,7 +498,7 @@ def _cmp_method(self, other, op):
if op.__name__ in ops.ARITHMETIC_BINOPS:
result = np.empty_like(self._ndarray, dtype="object")
- result[mask] = StringDtype.na_value
+ result[mask] = libmissing.NA
result[valid] = op(self._ndarray[valid], other)
return StringArray(result)
else:
@@ -512,7 +513,7 @@ def _cmp_method(self, other, op):
# String methods interface
# error: Incompatible types in assignment (expression has type "NAType",
# base class "PandasArray" defined the type as "float")
- _str_na_value = StringDtype.na_value # type: ignore[assignment]
+ _str_na_value = libmissing.NA # type: ignore[assignment]
def _str_map(
self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 3e3df5a3200c1..bb2fefabd6ae5 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -242,8 +242,9 @@ def astype(self, dtype, copy: bool = True):
# ------------------------------------------------------------------------
# String methods interface
- # error: Cannot determine type of 'na_value'
- _str_na_value = StringDtype.na_value # type: ignore[has-type]
+ # error: Incompatible types in assignment (expression has type "NAType",
+ # base class "ObjectStringArrayMixin" defined the type as "float")
+ _str_na_value = libmissing.NA # type: ignore[assignment]
def _str_map(
self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 9683c1dd93645..99b2082d409a9 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -676,11 +676,14 @@ class DatetimeTZDtype(PandasExtensionDtype):
kind: str_type = "M"
num = 101
base = np.dtype("M8[ns]") # TODO: depend on reso?
- na_value = NaT
_metadata = ("unit", "tz")
_match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
_cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
+ @property
+ def na_value(self) -> NaTType:
+ return NaT
+
@cache_readonly
def str(self):
return f"|M8[{self._unit}]"
@@ -1450,7 +1453,9 @@ class BaseMaskedDtype(ExtensionDtype):
base = None
type: type
- na_value = libmissing.NA
+ @property
+ def na_value(self) -> libmissing.NAType:
+ return libmissing.NA
@cache_readonly
def numpy_dtype(self) -> np.dtype:
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 56fcec751749b..5731d476cef10 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -409,7 +409,10 @@ class Float64Index(NumericIndex):
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "float64index"
- _engine_type = libindex.Float64Engine
_default_dtype = np.dtype(np.float64)
_dtype_validation_metadata = (is_float_dtype, "float")
_is_backward_compat_public_numeric_index: bool = False
+
+ @property
+ def _engine_type(self) -> type[libindex.Float64Engine]:
+ return libindex.Float64Engine
| Similar to `_engine_type`, `na_value` is sometimes a property and sometimes a class variable.
There were multiple places that access `StringDtype.na_value` (works with class variables but not properties). I replaced these cases with the value of `StringDtype().na_value`.
(and one `_engine_type` class variable I forgot in another PR) | https://api.github.com/repos/pandas-dev/pandas/pulls/47676 | 2022-07-12T01:21:53Z | 2022-07-12T17:21:45Z | 2022-07-12T17:21:45Z | 2022-09-10T01:39:07Z |
ENH: TDA+datetime_scalar support non-nano | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index a2251c49a2cc5..eadf47b36d7fc 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -73,7 +73,6 @@
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
- DT64NS_DTYPE,
is_all_strings,
is_categorical_dtype,
is_datetime64_any_dtype,
@@ -1103,6 +1102,7 @@ def _add_datetimelike_scalar(self, other) -> DatetimeArray:
self = cast("TimedeltaArray", self)
from pandas.core.arrays import DatetimeArray
+ from pandas.core.arrays.datetimes import tz_to_dtype
assert other is not NaT
other = Timestamp(other)
@@ -1113,10 +1113,17 @@ def _add_datetimelike_scalar(self, other) -> DatetimeArray:
# Preserve our resolution
return DatetimeArray._simple_new(result, dtype=result.dtype)
+ if self._reso != other._reso:
+ raise NotImplementedError(
+ "Addition between TimedeltaArray and Timestamp with mis-matched "
+ "resolutions is not yet supported."
+ )
+
i8 = self.asi8
result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan)
- dtype = DatetimeTZDtype(tz=other.tz) if other.tz else DT64NS_DTYPE
- return DatetimeArray(result, dtype=dtype, freq=self.freq)
+ dtype = tz_to_dtype(tz=other.tz, unit=self._unit)
+ res_values = result.view(f"M8[{self._unit}]")
+ return DatetimeArray._simple_new(res_values, dtype=dtype, freq=self.freq)
@final
def _add_datetime_arraylike(self, other) -> DatetimeArray:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index c9f5946c30c8c..106afcc3c12ea 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -91,22 +91,23 @@
_midnight = time(0, 0)
-def tz_to_dtype(tz):
+def tz_to_dtype(tz: tzinfo | None, unit: str = "ns"):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
+ unit : str, default "ns"
Returns
-------
np.dtype or Datetime64TZDType
"""
if tz is None:
- return DT64NS_DTYPE
+ return np.dtype(f"M8[{unit}]")
else:
- return DatetimeTZDtype(tz=tz)
+ return DatetimeTZDtype(tz=tz, unit=unit)
def _field_accessor(name: str, field: str, docstring=None):
@@ -800,7 +801,7 @@ def tz_convert(self, tz) -> DatetimeArray:
)
# No conversion since timestamps are all UTC to begin with
- dtype = tz_to_dtype(tz)
+ dtype = tz_to_dtype(tz, unit=self._unit)
return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq)
@dtl.ravel_compat
@@ -965,10 +966,14 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArr
# Convert to UTC
new_dates = tzconversion.tz_localize_to_utc(
- self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
+ self.asi8,
+ tz,
+ ambiguous=ambiguous,
+ nonexistent=nonexistent,
+ reso=self._reso,
)
- new_dates = new_dates.view(DT64NS_DTYPE)
- dtype = tz_to_dtype(tz)
+ new_dates = new_dates.view(f"M8[{self._unit}]")
+ dtype = tz_to_dtype(tz, unit=self._unit)
freq = None
if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index abc27469a5428..b3b79bd988ad8 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -92,6 +92,34 @@ def test_add_pdnat(self, tda):
assert result._reso == tda._reso
assert result.isna().all()
+ # TODO: 2022-07-11 this is the only test that gets to DTA.tz_convert
+ # or tz_localize with non-nano; implement tests specific to that.
+ def test_add_datetimelike_scalar(self, tda, tz_naive_fixture):
+ ts = pd.Timestamp("2016-01-01", tz=tz_naive_fixture)
+
+ msg = "with mis-matched resolutions"
+ with pytest.raises(NotImplementedError, match=msg):
+ # mismatched reso -> check that we don't give an incorrect result
+ tda + ts
+ with pytest.raises(NotImplementedError, match=msg):
+ # mismatched reso -> check that we don't give an incorrect result
+ ts + tda
+
+ ts = ts._as_unit(tda._unit)
+
+ exp_values = tda._ndarray + ts.asm8
+ expected = (
+ DatetimeArray._simple_new(exp_values, dtype=exp_values.dtype)
+ .tz_localize("UTC")
+ .tz_convert(ts.tz)
+ )
+
+ result = tda + ts
+ tm.assert_extension_array_equal(result, expected)
+
+ result = ts + tda
+ tm.assert_extension_array_equal(result, expected)
+
def test_mul_scalar(self, tda):
other = 2
result = tda * other
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47675 | 2022-07-11T23:31:51Z | 2022-07-12T01:49:14Z | 2022-07-12T01:49:14Z | 2022-07-12T16:05:08Z |
ENH: Move database error to error/__init__.py per GH27656 | diff --git a/doc/source/reference/testing.rst b/doc/source/reference/testing.rst
index e617712aa8f5e..338dd87aa8c62 100644
--- a/doc/source/reference/testing.rst
+++ b/doc/source/reference/testing.rst
@@ -29,6 +29,7 @@ Exceptions and warnings
errors.AttributeConflictWarning
errors.ClosedFileError
errors.CSSWarning
+ errors.DatabaseError
errors.DataError
errors.DtypeWarning
errors.DuplicateLabelError
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 0e0409ccb0932..08ee5650e97a6 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -456,12 +456,26 @@ class AttributeConflictWarning(Warning):
"""
+class DatabaseError(OSError):
+ """
+ Error is raised when executing sql with bad syntax or sql that throws an error.
+
+ Examples
+ --------
+ >>> from sqlite3 import connect
+ >>> conn = connect(':memory:')
+ >>> pd.read_sql('select * test', conn) # doctest: +SKIP
+ ... # DatabaseError: Execution failed on sql 'test': near "test": syntax error
+ """
+
+
__all__ = [
"AbstractMethodError",
"AccessorRegistrationWarning",
"AttributeConflictWarning",
"ClosedFileError",
"CSSWarning",
+ "DatabaseError",
"DataError",
"DtypeWarning",
"DuplicateLabelError",
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index e4111f24ed295..f591e7b8676f6 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -31,7 +31,10 @@
DtypeArg,
)
from pandas.compat._optional import import_optional_dependency
-from pandas.errors import AbstractMethodError
+from pandas.errors import (
+ AbstractMethodError,
+ DatabaseError,
+)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
@@ -56,10 +59,6 @@
from sqlalchemy import Table
-class DatabaseError(OSError):
- pass
-
-
# -----------------------------------------------------------------------------
# -- Helper functions
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index f003e1d07bca6..187d5399f5985 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -34,6 +34,7 @@
"PossibleDataLossError",
"IncompatibilityWarning",
"AttributeConflictWarning",
+ "DatabaseError",
],
)
def test_exception_importable(exc):
| - [x] xref #27656. this GitHub issue is being done in multiple parts
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). | https://api.github.com/repos/pandas-dev/pandas/pulls/47674 | 2022-07-11T20:35:47Z | 2022-07-14T16:35:35Z | 2022-07-14T16:35:35Z | 2022-08-03T04:21:56Z |
Bug fix using GroupBy.resample produces inconsistent behavior when calling it over empty df #47705 | diff --git a/doc/source/whatsnew/v1.6.0.rst b/doc/source/whatsnew/v1.6.0.rst
index 098750aa3a2b2..6e435c8eaaf2f 100644
--- a/doc/source/whatsnew/v1.6.0.rst
+++ b/doc/source/whatsnew/v1.6.0.rst
@@ -244,6 +244,7 @@ Groupby/resample/rolling
- Bug in :class:`.ExponentialMovingWindow` with ``online`` not raising a ``NotImplementedError`` for unsupported operations (:issue:`48834`)
- Bug in :meth:`DataFrameGroupBy.sample` raises ``ValueError`` when the object is empty (:issue:`48459`)
- Bug in :meth:`Series.groupby` raises ``ValueError`` when an entry of the index is equal to the name of the index (:issue:`48567`)
+- Bug in :meth:`DataFrameGroupBy.resample` produces inconsistent results when passing empty DataFrame (:issue:`47705`)
-
Reshaping
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 0cf1aafed0f56..574c2e5e0f552 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -522,11 +522,21 @@ def _wrap_result(self, result):
"""
Potentially wrap any results.
"""
+ # GH 47705
+ obj = self.obj
+ if (
+ isinstance(result, ABCDataFrame)
+ and result.empty
+ and not isinstance(result.index, PeriodIndex)
+ ):
+ result = result.set_index(
+ _asfreq_compat(obj.index[:0], freq=self.freq), append=True
+ )
+
if isinstance(result, ABCSeries) and self._selection is not None:
result.name = self._selection
if isinstance(result, ABCSeries) and result.empty:
- obj = self.obj
# When index is all NaT, result is empty but index is not
result.index = _asfreq_compat(obj.index[:0], freq=self.freq)
result.name = getattr(obj, "name", None)
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index ceb9d6e2fda4d..7fe1e645aa141 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -435,7 +435,11 @@ def test_empty(keys):
# GH 26411
df = DataFrame([], columns=["a", "b"], index=TimedeltaIndex([]))
result = df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean()
- expected = DataFrame(columns=["a", "b"]).set_index(keys, drop=False)
+ expected = (
+ DataFrame(columns=["a", "b"])
+ .set_index(keys, drop=False)
+ .set_index(TimedeltaIndex([]), append=True)
+ )
if len(keys) == 1:
expected.index.name = keys[0]
@@ -497,3 +501,19 @@ def test_groupby_resample_with_list_of_keys():
),
)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("keys", [["a"], ["a", "b"]])
+def test_resample_empty_Dataframe(keys):
+ # GH 47705
+ df = DataFrame([], columns=["a", "b", "date"])
+ df["date"] = pd.to_datetime(df["date"])
+ df = df.set_index("date")
+ result = df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean()
+ expected = DataFrame(columns=["a", "b", "date"]).set_index(keys, drop=False)
+ expected["date"] = pd.to_datetime(expected["date"])
+ expected = expected.set_index("date", append=True, drop=True)
+ if len(keys) == 1:
+ expected.index.name = keys[0]
+
+ tm.assert_frame_equal(result, expected)
| - [X] closes #47705
- [X] Tests added and passed(https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47672 | 2022-07-11T19:16:15Z | 2022-10-03T20:27:31Z | 2022-10-03T20:27:31Z | 2022-10-13T16:59:54Z |
CI: Fix npdev build post Cython annotation change | diff --git a/pandas/_libs/arrays.pyx b/pandas/_libs/arrays.pyx
index 8895a2bcfca89..f63d16e819c92 100644
--- a/pandas/_libs/arrays.pyx
+++ b/pandas/_libs/arrays.pyx
@@ -157,7 +157,7 @@ cdef class NDArrayBacked:
return self._from_backing_data(res_values)
# TODO: pass NPY_MAXDIMS equiv to axis=None?
- def repeat(self, repeats, axis: int = 0):
+ def repeat(self, repeats, axis: int | np.integer = 0):
if axis is None:
axis = 0
res_values = cnp.PyArray_Repeat(self._ndarray, repeats, <int>axis)
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index e187df6d6f627..3332628627739 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1643,7 +1643,7 @@ cdef class _Period(PeriodMixin):
return freq
@classmethod
- def _from_ordinal(cls, ordinal: int, freq) -> "Period":
+ def _from_ordinal(cls, ordinal: int64_t, freq) -> "Period":
"""
Fast creation from an ordinal and freq that are already validated!
"""
diff --git a/pandas/tests/io/parser/test_quoting.py b/pandas/tests/io/parser/test_quoting.py
index 456dd049d2f4a..a1aba949e74fe 100644
--- a/pandas/tests/io/parser/test_quoting.py
+++ b/pandas/tests/io/parser/test_quoting.py
@@ -38,7 +38,7 @@ def test_bad_quote_char(all_parsers, kwargs, msg):
@pytest.mark.parametrize(
"quoting,msg",
[
- ("foo", '"quoting" must be an integer'),
+ ("foo", '"quoting" must be an integer|Argument'),
(5, 'bad "quoting" value'), # quoting must be in the range [0, 3]
],
)
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Appears the new build failure was due to https://github.com/cython/cython/issues/4885
Major changes:
1. `Period._from_ordinal` is annotated `def _from_ordinal(oridinal: int, freq)` which now strictly only accepts Python ints. Need to change to `def _from_ordinal(oridinal: int64_t, freq)`
| https://api.github.com/repos/pandas-dev/pandas/pulls/47670 | 2022-07-11T18:33:03Z | 2022-07-13T20:58:30Z | 2022-07-13T20:58:30Z | 2022-07-13T20:58:45Z |
ENH: TDA.__mul__ support non-nano | diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index fbd27aa026a37..5f227cb45a65b 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -398,7 +398,7 @@ def __mul__(self, other) -> TimedeltaArray:
freq = None
if self.freq is not None and not isna(other):
freq = self.freq * other
- return type(self)(result, freq=freq)
+ return type(self)._simple_new(result, dtype=result.dtype, freq=freq)
if not hasattr(other, "dtype"):
# list, tuple
@@ -412,13 +412,14 @@ def __mul__(self, other) -> TimedeltaArray:
# this multiplication will succeed only if all elements of other
# are int or float scalars, so we will end up with
# timedelta64[ns]-dtyped result
- result = [self[n] * other[n] for n in range(len(self))]
+ arr = self._ndarray
+ result = [arr[n] * other[n] for n in range(len(self))]
result = np.array(result)
- return type(self)(result)
+ return type(self)._simple_new(result, dtype=result.dtype)
# numpy will accept float or int dtype, raise TypeError for others
result = self._ndarray * other
- return type(self)(result)
+ return type(self)._simple_new(result, dtype=result.dtype)
__rmul__ = __mul__
@@ -446,7 +447,8 @@ def __truediv__(self, other):
if self.freq is not None:
# Tick division is not implemented, so operate on Timedelta
freq = self.freq.delta / other
- return type(self)(result, freq=freq)
+ freq = to_offset(freq)
+ return type(self)._simple_new(result, dtype=result.dtype, freq=freq)
if not hasattr(other, "dtype"):
# e.g. list, tuple
@@ -462,6 +464,7 @@ def __truediv__(self, other):
elif is_object_dtype(other.dtype):
# We operate on raveled arrays to avoid problems in inference
# on NaT
+ # TODO: tests with non-nano
srav = self.ravel()
orav = other.ravel()
result_list = [srav[n] / orav[n] for n in range(len(srav))]
@@ -488,7 +491,7 @@ def __truediv__(self, other):
else:
result = self._ndarray / other
- return type(self)(result)
+ return type(self)._simple_new(result, dtype=result.dtype)
@unpack_zerodim_and_defer("__rtruediv__")
def __rtruediv__(self, other):
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index 36acb8f0fe389..abc27469a5428 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -1,3 +1,5 @@
+from datetime import timedelta
+
import numpy as np
import pytest
@@ -90,6 +92,53 @@ def test_add_pdnat(self, tda):
assert result._reso == tda._reso
assert result.isna().all()
+ def test_mul_scalar(self, tda):
+ other = 2
+ result = tda * other
+ expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)
+ tm.assert_extension_array_equal(result, expected)
+ assert result._reso == tda._reso
+
+ def test_mul_listlike(self, tda):
+ other = np.arange(len(tda))
+ result = tda * other
+ expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)
+ tm.assert_extension_array_equal(result, expected)
+ assert result._reso == tda._reso
+
+ def test_mul_listlike_object(self, tda):
+ other = np.arange(len(tda))
+ result = tda * other.astype(object)
+ expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)
+ tm.assert_extension_array_equal(result, expected)
+ assert result._reso == tda._reso
+
+ def test_div_numeric_scalar(self, tda):
+ other = 2
+ result = tda / other
+ expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype)
+ tm.assert_extension_array_equal(result, expected)
+ assert result._reso == tda._reso
+
+ def test_div_td_scalar(self, tda):
+ other = timedelta(seconds=1)
+ result = tda / other
+ expected = tda._ndarray / np.timedelta64(1, "s")
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_div_numeric_array(self, tda):
+ other = np.arange(len(tda))
+ result = tda / other
+ expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype)
+ tm.assert_extension_array_equal(result, expected)
+ assert result._reso == tda._reso
+
+ def test_div_td_array(self, tda):
+ other = tda._ndarray + tda._ndarray[-1]
+ result = tda / other
+ expected = tda._ndarray / other
+ tm.assert_numpy_array_equal(result, expected)
+
class TestTimedeltaArray:
@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47668 | 2022-07-11T14:27:37Z | 2022-07-11T22:36:14Z | 2022-07-11T22:36:14Z | 2022-07-11T23:29:01Z |
Code review from #46759 : moved strftime benchmark file outside of tslibs dir | diff --git a/asv_bench/benchmarks/tslibs/strftime.py b/asv_bench/benchmarks/strftime.py
similarity index 100%
rename from asv_bench/benchmarks/tslibs/strftime.py
rename to asv_bench/benchmarks/strftime.py
| Minor : suggested by https://github.com/pandas-dev/pandas/pull/46759#discussion_r915263401
| https://api.github.com/repos/pandas-dev/pandas/pulls/47665 | 2022-07-11T07:53:59Z | 2022-07-11T17:02:04Z | 2022-07-11T17:02:04Z | 2022-07-11T17:02:11Z |
TYP: make _engine_type consistently a property | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c8123f90ab3a3..06025c730700f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -93,7 +93,7 @@ repos:
types: [python]
stages: [manual]
additional_dependencies: &pyright_dependencies
- - pyright@1.1.253
+ - pyright@1.1.258
- repo: local
hooks:
- id: pyright_reportGeneralTypeIssues
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index fc5fcaeab7d2a..58b4d82bcbe5f 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -404,9 +404,12 @@ def _outer_indexer(
# associated code in pandas 2.0.
_is_backward_compat_public_numeric_index: bool = False
- _engine_type: type[libindex.IndexEngine] | type[
- libindex.ExtensionEngine
- ] = libindex.ObjectEngine
+ @property
+ def _engine_type(
+ self,
+ ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]:
+ return libindex.ObjectEngine
+
# whether we support partial string indexing. Overridden
# in DatetimeIndex and PeriodIndex
_supports_partial_string_indexing = False
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 9a70a4a1aa615..c1ae3cb1b16ea 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -192,7 +192,7 @@ def _should_fallback_to_positional(self) -> bool:
_values: Categorical
@property
- def _engine_type(self):
+ def _engine_type(self) -> type[libindex.IndexEngine]:
# self.codes can have dtype int8, int16, int32 or int64, so we need
# to return the corresponding engine type (libindex.Int8Engine, etc.).
return {
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 6aa2ff91ba933..f776585926024 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -252,9 +252,12 @@ class DatetimeIndex(DatetimeTimedeltaMixin):
_typ = "datetimeindex"
_data_cls = DatetimeArray
- _engine_type = libindex.DatetimeEngine
_supports_partial_string_indexing = True
+ @property
+ def _engine_type(self) -> type[libindex.DatetimeEngine]:
+ return libindex.DatetimeEngine
+
_data: DatetimeArray
inferred_freq: str | None
tz: tzinfo | None
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index f270a6e8b555f..56fcec751749b 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -106,7 +106,7 @@ class NumericIndex(Index):
}
@property
- def _engine_type(self):
+ def _engine_type(self) -> type[libindex.IndexEngine]:
# error: Invalid index type "Union[dtype[Any], ExtensionDtype]" for
# "Dict[dtype[Any], Type[IndexEngine]]"; expected type "dtype[Any]"
return self._engine_types[self.dtype] # type: ignore[index]
@@ -373,10 +373,13 @@ class Int64Index(IntegerIndex):
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "int64index"
- _engine_type = libindex.Int64Engine
_default_dtype = np.dtype(np.int64)
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
+ @property
+ def _engine_type(self) -> type[libindex.Int64Engine]:
+ return libindex.Int64Engine
+
class UInt64Index(IntegerIndex):
_index_descr_args = {
@@ -388,10 +391,13 @@ class UInt64Index(IntegerIndex):
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "uint64index"
- _engine_type = libindex.UInt64Engine
_default_dtype = np.dtype(np.uint64)
_dtype_validation_metadata = (is_unsigned_integer_dtype, "unsigned integer")
+ @property
+ def _engine_type(self) -> type[libindex.UInt64Engine]:
+ return libindex.UInt64Engine
+
class Float64Index(NumericIndex):
_index_descr_args = {
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index e3ab5e8624585..c034d9416eae7 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -159,9 +159,12 @@ class PeriodIndex(DatetimeIndexOpsMixin):
dtype: PeriodDtype
_data_cls = PeriodArray
- _engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
+ @property
+ def _engine_type(self) -> type[libindex.PeriodEngine]:
+ return libindex.PeriodEngine
+
@cache_readonly
# Signature of "_resolution_obj" incompatible with supertype "DatetimeIndexOpsMixin"
def _resolution_obj(self) -> Resolution: # type: ignore[override]
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 12a995c7de99a..376c98b6e176f 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -104,11 +104,14 @@ class RangeIndex(NumericIndex):
"""
_typ = "rangeindex"
- _engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_range: range
_is_backward_compat_public_numeric_index: bool = False
+ @property
+ def _engine_type(self) -> type[libindex.Int64Engine]:
+ return libindex.Int64Engine
+
# --------------------------------------------------------------------
# Constructors
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index cdf09bbc3b78c..095c5d1b1ba03 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -101,7 +101,10 @@ class TimedeltaIndex(DatetimeTimedeltaMixin):
_typ = "timedeltaindex"
_data_cls = TimedeltaArray
- _engine_type = libindex.TimedeltaEngine
+
+ @property
+ def _engine_type(self) -> type[libindex.TimedeltaEngine]:
+ return libindex.TimedeltaEngine
_data: TimedeltaArray
diff --git a/pyright_reportGeneralTypeIssues.json b/pyright_reportGeneralTypeIssues.json
index 98da481a6d80f..c482aa32600fb 100644
--- a/pyright_reportGeneralTypeIssues.json
+++ b/pyright_reportGeneralTypeIssues.json
@@ -15,7 +15,6 @@
"pandas/io/clipboard",
"pandas/util/version",
# and all files that currently don't pass
- "pandas/_config/config.py",
"pandas/_testing/__init__.py",
"pandas/core/algorithms.py",
"pandas/core/apply.py",
@@ -58,7 +57,6 @@
"pandas/core/indexes/multi.py",
"pandas/core/indexes/numeric.py",
"pandas/core/indexes/period.py",
- "pandas/core/indexes/range.py",
"pandas/core/indexing.py",
"pandas/core/internals/api.py",
"pandas/core/internals/array_manager.py",
@@ -80,7 +78,6 @@
"pandas/core/tools/datetimes.py",
"pandas/core/tools/timedeltas.py",
"pandas/core/util/hashing.py",
- "pandas/core/util/numba_.py",
"pandas/core/window/ewm.py",
"pandas/core/window/rolling.py",
"pandas/io/common.py",
| `_engine_type` was sometimes a property and sometimes a class variable. | https://api.github.com/repos/pandas-dev/pandas/pulls/47664 | 2022-07-11T01:34:19Z | 2022-07-11T16:49:49Z | 2022-07-11T16:49:49Z | 2022-09-21T15:28:31Z |
Enh move pytable errors and warnings | diff --git a/doc/source/reference/testing.rst b/doc/source/reference/testing.rst
index 249c2c56cfe57..e617712aa8f5e 100644
--- a/doc/source/reference/testing.rst
+++ b/doc/source/reference/testing.rst
@@ -26,11 +26,14 @@ Exceptions and warnings
errors.AbstractMethodError
errors.AccessorRegistrationWarning
+ errors.AttributeConflictWarning
+ errors.ClosedFileError
errors.CSSWarning
errors.DataError
errors.DtypeWarning
errors.DuplicateLabelError
errors.EmptyDataError
+ errors.IncompatibilityWarning
errors.IndexingError
errors.InvalidIndexError
errors.IntCastingNaNError
@@ -44,6 +47,7 @@ Exceptions and warnings
errors.ParserError
errors.ParserWarning
errors.PerformanceWarning
+ errors.PossibleDataLossError
errors.PyperclipException
errors.PyperclipWindowsException
errors.SettingWithCopyError
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index 47819ae5fad23..0e0409ccb0932 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -415,14 +415,58 @@ class CSSWarning(UserWarning):
"""
+class PossibleDataLossError(Exception):
+ """
+ Exception is raised when trying to open a HDFStore file when the file is already
+ opened.
+
+ Examples
+ --------
+ >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP
+ >>> store.open("w") # doctest: +SKIP
+ ... # PossibleDataLossError: Re-opening the file [my-store] with mode [a]...
+ """
+
+
+class ClosedFileError(Exception):
+ """
+ Exception is raised when trying to perform an operation on a closed HDFStore file.
+
+ Examples
+ --------
+ >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP
+ >>> store.close() # doctest: +SKIP
+ >>> store.keys() # doctest: +SKIP
+ ... # ClosedFileError: my-store file is not open!
+ """
+
+
+class IncompatibilityWarning(Warning):
+ """
+ Warning is raised when trying to use where criteria on an incompatible
+ HDF5 file.
+ """
+
+
+class AttributeConflictWarning(Warning):
+ """
+ Warning is raised when attempting to append an index with a different
+ name than the existing index on an HDFStore or attempting to append an index with a
+ different frequency than the existing index on an HDFStore.
+ """
+
+
__all__ = [
"AbstractMethodError",
"AccessorRegistrationWarning",
+ "AttributeConflictWarning",
+ "ClosedFileError",
"CSSWarning",
"DataError",
"DtypeWarning",
"DuplicateLabelError",
"EmptyDataError",
+ "IncompatibilityWarning",
"IntCastingNaNError",
"InvalidIndexError",
"IndexingError",
@@ -436,6 +480,7 @@ class CSSWarning(UserWarning):
"ParserError",
"ParserWarning",
"PerformanceWarning",
+ "PossibleDataLossError",
"PyperclipException",
"PyperclipWindowsException",
"SettingWithCopyError",
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index b96fa4a57f188..52a2883e70f93 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -48,7 +48,13 @@
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.pickle_compat import patch_pickle
-from pandas.errors import PerformanceWarning
+from pandas.errors import (
+ AttributeConflictWarning,
+ ClosedFileError,
+ IncompatibilityWarning,
+ PerformanceWarning,
+ PossibleDataLossError,
+)
from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level
@@ -169,43 +175,17 @@ def _ensure_term(where, scope_level: int):
return where if where is None or len(where) else None
-class PossibleDataLossError(Exception):
- pass
-
-
-class ClosedFileError(Exception):
- pass
-
-
-class IncompatibilityWarning(Warning):
- pass
-
-
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""
-
-class AttributeConflictWarning(Warning):
- pass
-
-
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""
-
-class DuplicateWarning(Warning):
- pass
-
-
-duplicate_doc = """
-duplicate entries in table, taking most recently appended
-"""
-
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
@@ -3550,7 +3530,7 @@ def get_attrs(self) -> None:
def validate_version(self, where=None) -> None:
"""are we trying to operate on an old version?"""
if where is not None:
- if self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1:
+ if self.is_old_version:
ws = incompatibility_doc % ".".join([str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning)
diff --git a/pandas/tests/io/pytables/test_file_handling.py b/pandas/tests/io/pytables/test_file_handling.py
index 9fde65e3a1a43..13b6b94dda8d4 100644
--- a/pandas/tests/io/pytables/test_file_handling.py
+++ b/pandas/tests/io/pytables/test_file_handling.py
@@ -4,6 +4,10 @@
import pytest
from pandas.compat import is_platform_little_endian
+from pandas.errors import (
+ ClosedFileError,
+ PossibleDataLossError,
+)
from pandas import (
DataFrame,
@@ -20,11 +24,7 @@
)
from pandas.io import pytables as pytables
-from pandas.io.pytables import (
- ClosedFileError,
- PossibleDataLossError,
- Term,
-)
+from pandas.io.pytables import Term
pytestmark = pytest.mark.single_cpu
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 3a6f699cce94e..e8f4e7ee92fc3 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -589,7 +589,6 @@ def test_store_series_name(setup_path):
tm.assert_series_equal(recons, series)
-@pytest.mark.filterwarnings("ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning")
def test_overwrite_node(setup_path):
with ensure_clean_store(setup_path) as store:
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index 177ff566e347a..f003e1d07bca6 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -30,6 +30,10 @@
"IndexingError",
"PyperclipException",
"CSSWarning",
+ "ClosedFileError",
+ "PossibleDataLossError",
+ "IncompatibilityWarning",
+ "AttributeConflictWarning",
],
)
def test_exception_importable(exc):
| - [x] xref #27656. this GitHub issue is being done in multiple parts
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
It looked like `DuplicateWarning` wasn't used anywhere, so I removed it. Also, the if condition for `validate_version` seemed duplicated. Since it inherits from `Fixed`, I replaced it with the already existing property | https://api.github.com/repos/pandas-dev/pandas/pulls/47662 | 2022-07-10T20:42:40Z | 2022-07-10T23:06:44Z | 2022-07-10T23:06:44Z | 2022-07-10T23:06:53Z |
DOC: Fixed CoC broken link | diff --git a/README.md b/README.md
index fc3f988dc6809..aaf63ead9c416 100644
--- a/README.md
+++ b/README.md
@@ -169,4 +169,4 @@ Or maybe through using pandas you have an idea of your own or are looking for so
Feel free to ask questions on the [mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata) or on [Gitter](https://gitter.im/pydata/pandas).
-As contributors and maintainers to this project, you are expected to abide by pandas' code of conduct. More information can be found at: [Contributor Code of Conduct](https://github.com/pandas-dev/pandas/blob/main/.github/CODE_OF_CONDUCT.md)
+As contributors and maintainers to this project, you are expected to abide by pandas' code of conduct. More information can be found at: [Contributor Code of Conduct](https://github.com/pandas-dev/.github/blob/master/CODE_OF_CONDUCT.md)
| Link to Code of Conduct in README.md broke after moving markdown files to `.github` organization level repository (#47412). Redirected link to new location | https://api.github.com/repos/pandas-dev/pandas/pulls/47661 | 2022-07-10T19:28:28Z | 2022-07-10T21:43:07Z | 2022-07-10T21:43:07Z | 2022-07-10T21:43:12Z |
WEB: Add governance page to the website | diff --git a/web/pandas/about/governance.md b/web/pandas/about/governance.md
new file mode 100644
index 0000000000000..56ca0a2aac3db
--- /dev/null
+++ b/web/pandas/about/governance.md
@@ -0,0 +1,326 @@
+# Main Governance Document
+
+The official version of this document, along with a list of
+individuals and institutions in the roles defined in the governance
+section below, is contained in The Project Governance Repository at:
+
+[https://github.com/pydata/pandas-governance](https://github.com/pydata/pandas-governance)
+
+The Project
+===========
+
+The pandas Project (The Project) is an open source software project affiliated
+with the 501(c)3 NumFOCUS Foundation. The goal of The Project is to develop open
+source software for data ingest, data preparation, data analysis, and data
+visualization for the Python programming language. The Software developed by
+The Project is released under the BSD (or similar) open source license,
+developed openly and hosted in public GitHub repositories under the [PyData
+GitHub organization](https://github.com/pydata). Examples of Project Software
+include the main pandas code repository, pandas-website, and the
+pandas-datareader add-on library.
+
+Through its affiliation with NumFOCUS, The Project has the right to receive
+tax-deductible donations in the United States of America.
+
+The Project is developed by a team of distributed developers, called
+Contributors. Contributors are individuals who have contributed code,
+documentation, designs or other work to one or more Project repositories.
+Anyone can be a Contributor. Contributors can be affiliated with any legal
+entity or none. Contributors participate in the project by submitting,
+reviewing and discussing GitHub Pull Requests and Issues and participating in
+open and public Project discussions on GitHub, mailing lists, and
+elsewhere. The foundation of Project participation is openness and
+transparency.
+
+Here is a list of the current Contributors to the main pandas repository:
+
+[https://github.com/pydata/pandas/graphs/contributors](https://github.com/pydata/pandas/graphs/contributors)
+
+There are also many other Contributors listed in the logs of other repositories of
+the pandas project.
+
+The Project Community consists of all Contributors and Users of the Project.
+Contributors work on behalf of and are responsible to the larger Project
+Community and we strive to keep the barrier between Contributors and Users as
+low as possible.
+
+The Project is formally affiliated with the 501(c)3 NumFOCUS Foundation
+([http://numfocus.org](http://numfocus.org)), which serves as its fiscal
+sponsor, may hold project trademarks and other intellectual property, helps
+manage project donations and acts as a parent legal entity. NumFOCUS is the
+only legal entity that has a formal relationship with the project (see
+Institutional Partners section below).
+
+Governance
+==========
+
+This section describes the governance and leadership model of The Project.
+
+The foundations of Project governance are:
+
+- Openness & Transparency
+- Active Contribution
+- Institutional Neutrality
+
+Traditionally, Project leadership was provided by a BDFL (Wes McKinney) and
+subset of Contributors, called the Core Team, whose active and consistent
+contributions have been recognized by their receiving “commit rights” to the
+Project GitHub repositories. In general all Project decisions are made through
+consensus among the Core Team with input from the Community. The BDFL can, but
+rarely chooses to, override the Core Team and make a final decision on a
+matter.
+
+While this approach has served us well, as the Project grows and faces more
+legal and financial decisions and interacts with other institutions, we see a
+need for a more formal governance model. Moving forward The Project leadership
+will consist of a BDFL and Core Team. We view this governance model as the
+formalization of what we are already doing, rather than a change in direction.
+
+BDFL
+----
+
+The Project will have a BDFL (Benevolent Dictator for Life), who is currently
+Wes McKinney. As Dictator, the BDFL has the authority to make all final
+decisions for The Project. As Benevolent, the BDFL, in practice chooses to
+defer that authority to the consensus of the community discussion channels and
+the Core Team. It is expected, and in the past has been the case, that the BDFL
+will only rarely assert his/her final authority. Because it is rarely used, we
+refer to BDFL’s final authority as a “special” or “overriding” vote. When it
+does occur, the BDFL override typically happens in situations where there is a
+deadlock in the Core Team or if the Core Team ask the BDFL to make a decision
+on a specific matter. To ensure the benevolence of the BDFL, The Project
+encourages others to fork the project if they disagree with the overall
+direction the BDFL is taking. The BDFL is chair of the Core Team (see below)
+and may delegate his/her authority on a particular decision or set of decisions
+to any other Core Team Member at his/her discretion.
+
+The BDFL can appoint his/her successor, but it is expected that the Core Team
+would be consulted on this decision. If the BDFL is unable to appoint a
+successor (e.g. due to death or illness), the Core Team will choose a successor
+by voting with at least 2/3 of the Core Team members voting in favor of the
+chosen successor. At least 80% of the Core Team must participate in the
+vote. If no BDFL candidate receives 2/3 of the votes of the Core Team, the Core
+Team members shall propose the BDFL candidates to the Main NumFOCUS board, who
+will then make the final decision.
+
+Core Team
+---------
+
+The Project's Core Team will consist of Project Contributors who have produced
+contributions that are substantial in quality and quantity, and sustained over
+at least one year. The overall role of the Core Team is to ensure, through
+working with the BDFL and taking input from the Community, the long-term
+well-being of the project, both technically and as a community.
+
+During the everyday project activities, Core Team participate in all
+discussions, code review and other project activities as peers with all other
+Contributors and the Community. In these everyday activities, Core Team do not
+have any special power or privilege through their membership on the Core
+Team. However, it is expected that because of the quality and quantity of their
+contributions and their expert knowledge of the Project Software that the Core
+Team will provide useful guidance, both technical and in terms of project
+direction, to potentially less experienced contributors.
+
+The Core Team and its Members play a special role in certain situations.
+In particular, the Core Team may:
+
+- Make decisions about the overall scope, vision and direction of the
+ project.
+- Make decisions about strategic collaborations with other organizations or
+ individuals.
+- Make decisions about specific technical issues, features, bugs and pull
+ requests. They are the primary mechanism of guiding the code review process
+ and merging pull requests.
+- Make decisions about the Services that are run by The Project and manage
+ those Services for the benefit of the Project and Community.
+- Make decisions when regular community discussion doesn’t produce consensus
+ on an issue in a reasonable time frame.
+
+### Core Team membership
+
+To become eligible for being a Core Team Member an individual must be a Project
+Contributor who has produced contributions that are substantial in quality and
+quantity, and sustained over at least one year. Potential Core Team Members are
+nominated by existing Core members and voted upon by the existing Core Team
+after asking if the potential Member is interested and willing to serve in that
+capacity. The Core Team will be initially formed from the set of existing
+Contributors who have been granted commit rights as of late 2015.
+
+When considering potential Members, the Core Team will look at candidates with
+a comprehensive view of their contributions. This will include but is not
+limited to code, code review, infrastructure work, mailing list and chat
+participation, community help/building, education and outreach, design work,
+etc. We are deliberately not setting arbitrary quantitative metrics (like “100
+commits in this repo”) to avoid encouraging behavior that plays to the metrics
+rather than the project’s overall well-being. We want to encourage a diverse
+array of backgrounds, viewpoints and talents in our team, which is why we
+explicitly do not define code as the sole metric on which Core Team membership
+will be evaluated.
+
+If a Core Team member becomes inactive in the project for a period of one year,
+they will be considered for removal from the Core Team. Before removal,
+inactive Member will be approached by the BDFL to see if they plan on returning
+to active participation. If not they will be removed immediately upon a Core
+Team vote. If they plan on returning to active participation soon, they will be
+given a grace period of one year. If they don’t return to active participation
+within that time period they will be removed by vote of the Core Team without
+further grace period. All former Core Team members can be considered for
+membership again at any time in the future, like any other Project Contributor.
+Retired Core Team members will be listed on the project website, acknowledging
+the period during which they were active in the Core Team.
+
+The Core Team reserves the right to eject current Members, other than the BDFL,
+if they are deemed to be actively harmful to the project’s well-being, and
+attempts at communication and conflict resolution have failed.
+
+### Conflict of interest
+
+It is expected that the BDFL and Core Team Members will be employed at a wide
+range of companies, universities and non-profit organizations. Because of this,
+it is possible that Members will have conflict of interests. Such conflict of
+interests include, but are not limited to:
+
+- Financial interests, such as investments, employment or contracting work,
+ outside of The Project that may influence their work on The Project.
+- Access to proprietary information of their employer that could potentially
+ leak into their work with the Project.
+
+All members of the Core Team, BDFL included, shall disclose to the rest of the
+Core Team any conflict of interest they may have. Members with a conflict of
+interest in a particular issue may participate in Core Team discussions on that
+issue, but must recuse themselves from voting on the issue. If the BDFL has
+recused his/herself for a particular decision, they will appoint a substitute
+BDFL for that decision.
+
+### Private communications of the Core Team
+
+Unless specifically required, all Core Team discussions and activities will be
+public and done in collaboration and discussion with the Project Contributors
+and Community. The Core Team will have a private mailing list that will be used
+sparingly and only when a specific matter requires privacy. When private
+communications and decisions are needed, the Core Team will do its best to
+summarize those to the Community after eliding personal/private/sensitive
+information that should not be posted to the public internet.
+
+### Subcommittees
+
+The Core Team can create subcommittees that provide leadership and guidance for
+specific aspects of the project. Like the Core Team as a whole, subcommittees
+should conduct their business in an open and public manner unless privacy is
+specifically called for. Private subcommittee communications should happen on
+the main private mailing list of the Core Team unless specifically called for.
+
+Question: if the BDFL is not on a subcommittee, do they still have override
+authority?
+
+Suggestion: they do, but they should appoint a delegate who plays that role
+most of the time, and explicit BDFL intervention is sought only if the
+committee disagrees with that delegate’s decision and no resolution is possible
+within the team. This is different from a BDFL delegate for a specific decision
+(or a recusal situation), where the BDFL is literally giving up his/her
+authority to someone else in full. It’s more like what Linus Torvalds uses with his
+“lieutenants” model.
+
+### NumFOCUS Subcommittee
+
+The Core Team will maintain one narrowly focused subcommittee to manage its
+interactions with NumFOCUS.
+
+- The NumFOCUS Subcommittee is comprised of at least 5 persons who manage
+ project funding that comes through NumFOCUS. It is expected that these funds
+ will be spent in a manner that is consistent with the non-profit mission of
+ NumFOCUS and the direction of the Project as determined by the full Core
+ Team.
+- This Subcommittee shall NOT make decisions about the direction, scope or
+ technical direction of the Project.
+- This Subcommittee will have at least 5 members. No more than 2 Subcommitee
+ Members can report to one person (either directly or indirectly) through
+ employment or contracting work (including the reportee, i.e. the reportee + 1
+ is the max). This avoids effective majorities resting on one person.
+
+Institutional Partners and Funding
+==================================
+
+The BDFL and Core Team are the primary leadership for the project. No outside
+institution, individual or legal entity has the ability to own, control, usurp
+or influence the project other than by participating in the Project as
+Contributors and Core Team. However, because institutions are the primary
+funding mechanism for the project, it is important to formally acknowledge
+institutional participation in the project. These are Institutional Partners.
+
+An Institutional Contributor is any individual Project Contributor who
+contributes to the project as part of their official duties at an Institutional
+Partner. Likewise, an Institutional Core Team Member is any Core Team Member
+who contributes to the project as part of their official duties at an
+Institutional Partner.
+
+With these definitions, an Institutional Partner is any recognized legal entity
+in the United States or elsewhere that employs at least one Institutional
+Contributor or Institutional Core Team Member. Institutional Partners can be
+for-profit or non-profit entities.
+
+Institutions become eligible to become an Institutional Partner by employing
+individuals who actively contribute to The Project as part of their official
+duties. To state this another way, the only way for an Institutional Partner to
+influence the project is by actively contributing to the open development of
+the project, on equal terms with any other member of the community of
+Contributors and Core Team Members. Merely using pandas Software or Services in
+an institutional context does not allow an entity to become an Institutional
+Partner. Financial gifts do not enable an entity to become an Institutional
+Partner. Once an institution becomes eligible for Institutional Partnership,
+the Core Team must nominate and approve the Partnership.
+
+If an existing Institutional Partner no longer has a contributing employee,
+they will be given a one-year grace period for other employees to begin
+contributing.
+
+An Institutional Partner is free to pursue funding for their work on The
+Project through any legal means. This could involve a non-profit organization
+raising money from private foundations and donors or a for-profit company
+building proprietary products and services that leverage Project Software and
+Services. Funding acquired by Institutional Partners to work on The Project is
+called Institutional Funding. However, no funding obtained by an Institutional
+Partner can override The Project BDFL and Core Team. If a Partner has funding
+to do pandas work and the Core Team decides to not pursue that work as a
+project, the Partner is free to pursue it on their own. However in this
+situation, that part of the Partner’s work will not be under the pandas
+umbrella and cannot use the Project trademarks in a way that suggests a formal
+relationship.
+
+To acknowledge institutional contributions, there are two levels of
+Institutional Partners, with associated benefits:
+
+**Tier 1** = an institution with at least one Institutional Core Team Member
+
+- Acknowledged on the pandas website, in talks and T-shirts.
+- Ability to acknowledge their own funding sources on the pandas website, in
+ talks and T-shirts.
+- Ability to influence the project through the participation of their Core Team
+ Member.
+
+**Tier 2** = an institution with at least one Institutional Contributor
+
+Breach
+======
+
+Non-compliance with the terms of the governance documents shall be reported to
+the Core Team either through public or private channels as deemed appropriate.
+
+Changing the Governance Documents
+=================================
+
+Changes to the governance documents are submitted via a GitHub pull request to
+The Project's governance documents GitHub repository at
+[https://github.com/pydata/pandas-governance](https://github.com/pydata/pandas-governance).
+The pull request is then refined in response to public comment and review, with
+the goal being consensus in the community. After this open period, a Core Team
+Member proposes to the Core Team that the changes be ratified and the pull
+request merged (accepting the proposed changes) or proposes that the pull
+request be closed without merging (rejecting the proposed changes). The Member
+should state the final commit hash in the pull request being proposed for
+acceptance or rejection and briefly summarize the pull request. A minimum of
+80% of the Core Team must vote and at least 2/3 of the votes must be positive
+to carry out the proposed action (fractions of a vote rounded up to the nearest
+integer). Since the BDFL holds ultimate authority in The Project, the BDFL has
+authority to act alone in accepting or rejecting changes or overriding Core
+Team decisions.
diff --git a/web/pandas/about/team.md b/web/pandas/about/team.md
index c8318dd8758ed..2982105616f47 100644
--- a/web/pandas/about/team.md
+++ b/web/pandas/about/team.md
@@ -42,7 +42,7 @@ If you want to support pandas development, you can find information in the [dona
Wes McKinney is the Benevolent Dictator for Life (BDFL).
-The project governance is available in the [project governance documents](https://github.com/pandas-dev/pandas-governance).
+The project governance is available in the [project governance page](governance.html).
## Code of conduct committee
diff --git a/web/pandas/community/blog/extension-arrays.md b/web/pandas/community/blog/extension-arrays.md
index 61a77738a259c..80a187bb3fc3c 100644
--- a/web/pandas/community/blog/extension-arrays.md
+++ b/web/pandas/community/blog/extension-arrays.md
@@ -212,7 +212,7 @@ partners][partners] involved in the pandas community.
[ml]: https://mail.python.org/mailman/listinfo/pandas-dev
[twitter]: https://twitter.com/pandas_dev
[tracker]: https://github.com/pandas-dev/pandas/issues
-[partners]: https://github.com/pandas-dev/pandas-governance/blob/master/people.md
+[partners]: https://pandas.pydata.org/about/sponsors.html
[eco]: http://pandas.pydata.org/pandas-docs/stable/ecosystem.html#extension-data-types
[whatsnew]: http://pandas.pydata.org/pandas-docs/version/0.24/whatsnew/v0.24.0.html
[geopandas]: https://github.com/geopandas/geopandas
diff --git a/web/pandas/community/blog/pandas-1.0.md b/web/pandas/community/blog/pandas-1.0.md
index b07c34a4ab6b5..d190ed6e897b3 100644
--- a/web/pandas/community/blog/pandas-1.0.md
+++ b/web/pandas/community/blog/pandas-1.0.md
@@ -19,7 +19,7 @@ We're [working with those projects](https://datapythonista.me/blog/dataframe-sum
## Community and Project Health
-This release cycle is the first to involve any kind of grant funding for pandas. [Pandas received funding](https://chanzuckerberg.com/eoss/proposals/) as part of the CZI’s [*Essential Open Source Software for Science*](https://medium.com/@cziscience/the-invisible-foundations-of-biomedicine-4ab7f8d4f5dd) [program](https://medium.com/@cziscience/the-invisible-foundations-of-biomedicine-4ab7f8d4f5dd). The pandas project relies overwhelmingly on volunteer contributors. These volunteer contributions are shepherded and augmented by some maintainers who are given time from their employers — our [institutional partners](https://github.com/pandas-dev/pandas-governance/blob/master/people.md#institutional-partners). The largest work item in our grant award was library maintenance, which specifically includes working with community members to address our large backlog of open issues and pull requests.
+This release cycle is the first to involve any kind of grant funding for pandas. [Pandas received funding](https://chanzuckerberg.com/eoss/proposals/) as part of the CZI’s [*Essential Open Source Software for Science*](https://medium.com/@cziscience/the-invisible-foundations-of-biomedicine-4ab7f8d4f5dd) [program](https://medium.com/@cziscience/the-invisible-foundations-of-biomedicine-4ab7f8d4f5dd). The pandas project relies overwhelmingly on volunteer contributors. These volunteer contributions are shepherded and augmented by some maintainers who are given time from their employers — our [institutional partners](../about/sponsors.html). The largest work item in our grant award was library maintenance, which specifically includes working with community members to address our large backlog of open issues and pull requests.
While a “1.0.0” version might seem arbitrary or anti-climactic (given that pandas as a codebase is nearly 12 years old), we see it as a symbolic milestone celebrating the growth of our core developer team and depth of our contributor base. Few open source projects are ever truly “done” and pandas is no different. We recognize the essential role that pandas now occupies, and we intend to continue to evolve the project and adapt to the needs of the world’s data wranglers.
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index 5bb0cbc7557f8..aeef826157b90 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -27,6 +27,8 @@ navbar:
target: /about/
- name: "Project roadmap"
target: /about/roadmap.html
+ - name: "Governance"
+ target: /about/governance.html
- name: "Team"
target: /about/team.html
- name: "Sponsors"
| I was having a look at the [pandas governance repo](https://github.com/pandas-dev/pandas-governance) and seems most things there are duplicated from the website. The list of core developers, the code of conduct, the sponsors, are all in the website, and in my opinion presented in a clearer and more accessible way.
The only thing that is missing is the governance document itself, moving it we can remote that repo. I think it makes more sense to move it here, for few reasons:
- To avoid duplication and inconsistencies, and having to maintain things in two different places
- To have more visibility and easier to access by having it in our public web
- We may want to update the governance document, I think it makes things easier to have the PRs with ammendments to the doc here with the rest of the PRs of the project, instead of in a separate repo
For now I move the `governance.md` file without changes. I think the file can benefit from few changes (updating the title, links...), but I prefer to do it in follow up PRs, so there is visibility of what's being changed.
Somehow related to this PR: @MarcoGorelli, I see in the governance repo we've got Gousto as a pandas sponsor, but it's not in the website (and the logo is not in the home page). Should it be added?
| https://api.github.com/repos/pandas-dev/pandas/pulls/47660 | 2022-07-10T17:20:44Z | 2022-07-11T16:54:28Z | 2022-07-11T16:54:28Z | 2022-07-11T16:54:36Z |
SAS7BDAT parser: Improve subheader lookup performance | diff --git a/pandas/io/sas/_sas.pyi b/pandas/io/sas/_sas.pyi
index 527193dd71e57..5d65e2b56b591 100644
--- a/pandas/io/sas/_sas.pyi
+++ b/pandas/io/sas/_sas.pyi
@@ -3,3 +3,5 @@ from pandas.io.sas.sas7bdat import SAS7BDATReader
class Parser:
def __init__(self, parser: SAS7BDATReader) -> None: ...
def read(self, nrows: int) -> None: ...
+
+def get_subheader_index(signature: bytes) -> int: ...
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index 8065859844b30..9406900b69998 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -6,6 +6,8 @@ from libc.stdint cimport (
int64_t,
uint8_t,
uint16_t,
+ uint32_t,
+ uint64_t,
)
from libc.stdlib cimport (
calloc,
@@ -17,6 +19,9 @@ import numpy as np
import pandas.io.sas.sas_constants as const
+cdef object np_nan = np.nan
+
+
cdef struct Buffer:
# Convenience wrapper for uint8_t data to allow fast and safe reads and writes.
# We use this as a replacement for np.array(..., dtype=np.uint8) because it's
@@ -53,9 +58,6 @@ cdef inline buf_free(Buffer buf):
if buf.data != NULL:
free(buf.data)
-
-cdef object np_nan = np.nan
-
# rle_decompress decompresses data using a Run Length Encoding
# algorithm. It is partially documented here:
#
@@ -231,7 +233,7 @@ cdef enum ColumnTypes:
column_type_string = 2
-# type the page_data types
+# Const aliases
assert len(const.page_meta_types) == 2
cdef:
int page_meta_types_0 = const.page_meta_types[0]
@@ -240,6 +242,53 @@ cdef:
int page_data_type = const.page_data_type
int subheader_pointers_offset = const.subheader_pointers_offset
+ # Copy of subheader_signature_to_index that allows for much faster lookups.
+ # Lookups are done in get_subheader_index. The C structures are initialized
+ # in _init_subheader_signatures().
+ uint32_t subheader_signatures_32bit[13]
+ int subheader_indices_32bit[13]
+ uint64_t subheader_signatures_64bit[17]
+ int subheader_indices_64bit[17]
+ int data_subheader_index = const.SASIndex.data_subheader_index
+
+
+def _init_subheader_signatures():
+ subheaders_32bit = [(sig, idx) for sig, idx in const.subheader_signature_to_index.items() if len(sig) == 4]
+ subheaders_64bit = [(sig, idx) for sig, idx in const.subheader_signature_to_index.items() if len(sig) == 8]
+ assert len(subheaders_32bit) == 13
+ assert len(subheaders_64bit) == 17
+ assert len(const.subheader_signature_to_index) == 13 + 17
+ for i, (signature, idx) in enumerate(subheaders_32bit):
+ subheader_signatures_32bit[i] = (<uint32_t *><char *>signature)[0]
+ subheader_indices_32bit[i] = idx
+ for i, (signature, idx) in enumerate(subheaders_64bit):
+ subheader_signatures_64bit[i] = (<uint64_t *><char *>signature)[0]
+ subheader_indices_64bit[i] = idx
+
+
+_init_subheader_signatures()
+
+
+def get_subheader_index(bytes signature):
+ """Fast version of 'subheader_signature_to_index.get(signature)'."""
+ cdef:
+ uint32_t sig32
+ uint64_t sig64
+ Py_ssize_t i
+ assert len(signature) in (4, 8)
+ if len(signature) == 4:
+ sig32 = (<uint32_t *><char *>signature)[0]
+ for i in range(len(subheader_signatures_32bit)):
+ if subheader_signatures_32bit[i] == sig32:
+ return subheader_indices_32bit[i]
+ else:
+ sig64 = (<uint64_t *><char *>signature)[0]
+ for i in range(len(subheader_signatures_64bit)):
+ if subheader_signatures_64bit[i] == sig64:
+ return subheader_indices_64bit[i]
+
+ return data_subheader_index
+
cdef class Parser:
@@ -355,7 +404,7 @@ cdef class Parser:
cdef bint readline(self) except? True:
cdef:
- int offset, bit_offset, align_correction
+ int offset, length, bit_offset, align_correction
int subheader_pointer_length, mn
bint done, flag
@@ -379,12 +428,10 @@ cdef class Parser:
if done:
return True
continue
- current_subheader_pointer = (
- self.parser._current_page_data_subheader_pointers[
- self.current_row_on_page_index])
- self.process_byte_array_with_data(
- current_subheader_pointer.offset,
- current_subheader_pointer.length)
+ offset, length = self.parser._current_page_data_subheader_pointers[
+ self.current_row_on_page_index
+ ]
+ self.process_byte_array_with_data(offset, length)
return False
elif self.current_page_type == page_mix_type:
align_correction = (
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 91c5e6b227c35..27bd0378e374a 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -42,7 +42,10 @@
)
from pandas.io.common import get_handle
-from pandas.io.sas._sas import Parser
+from pandas.io.sas._sas import (
+ Parser,
+ get_subheader_index,
+)
import pandas.io.sas.sas_constants as const
from pandas.io.sas.sasreader import ReaderBase
@@ -87,19 +90,6 @@ def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series:
return s_series
-class _SubheaderPointer:
- offset: int
- length: int
- compression: int
- ptype: int
-
- def __init__(self, offset: int, length: int, compression: int, ptype: int) -> None:
- self.offset = offset
- self.length = length
- self.compression = compression
- self.ptype = ptype
-
-
class _Column:
col_id: int
name: str | bytes
@@ -189,7 +179,7 @@ def __init__(
self.column_formats: list[str | bytes] = []
self.columns: list[_Column] = []
- self._current_page_data_subheader_pointers: list[_SubheaderPointer] = []
+ self._current_page_data_subheader_pointers: list[tuple[int, int]] = []
self._cached_page = None
self._column_data_lengths: list[int] = []
self._column_data_offsets: list[int] = []
@@ -205,6 +195,19 @@ def __init__(
self._path_or_buf = self.handles.handle
+ # Same order as const.SASIndex
+ self._subheader_processors = [
+ self._process_rowsize_subheader,
+ self._process_columnsize_subheader,
+ self._process_subheader_counts,
+ self._process_columntext_subheader,
+ self._process_columnname_subheader,
+ self._process_columnattributes_subheader,
+ self._process_format_subheader,
+ self._process_columnlist_subheader,
+ None, # Data
+ ]
+
try:
self._get_properties()
self._parse_metadata()
@@ -426,89 +429,47 @@ def _process_page_metadata(self) -> None:
bit_offset = self._page_bit_offset
for i in range(self._current_page_subheaders_count):
- pointer = self._process_subheader_pointers(
- const.subheader_pointers_offset + bit_offset, i
- )
- if pointer.length == 0:
- continue
- if pointer.compression == const.truncated_subheader_id:
- continue
- subheader_signature = self._read_subheader_signature(pointer.offset)
- subheader_index = self._get_subheader_index(
- subheader_signature, pointer.compression, pointer.ptype
- )
- self._process_subheader(subheader_index, pointer)
-
- def _get_subheader_index(self, signature: bytes, compression, ptype) -> int:
- # TODO: return here could be made an enum
- index = const.subheader_signature_to_index.get(signature)
- if index is None:
- f1 = (compression == const.compressed_subheader_id) or (compression == 0)
- f2 = ptype == const.compressed_subheader_type
- if (self.compression != b"") and f1 and f2:
- index = const.SASIndex.data_subheader_index
- else:
- self.close()
- raise ValueError("Unknown subheader signature")
- return index
-
- def _process_subheader_pointers(
- self, offset: int, subheader_pointer_index: int
- ) -> _SubheaderPointer:
-
- subheader_pointer_length = self._subheader_pointer_length
- total_offset = offset + subheader_pointer_length * subheader_pointer_index
+ offset = const.subheader_pointers_offset + bit_offset
+ total_offset = offset + self._subheader_pointer_length * i
- subheader_offset = self._read_int(total_offset, self._int_length)
- total_offset += self._int_length
+ subheader_offset = self._read_int(total_offset, self._int_length)
+ total_offset += self._int_length
- subheader_length = self._read_int(total_offset, self._int_length)
- total_offset += self._int_length
+ subheader_length = self._read_int(total_offset, self._int_length)
+ total_offset += self._int_length
- subheader_compression = self._read_int(total_offset, 1)
- total_offset += 1
-
- subheader_type = self._read_int(total_offset, 1)
-
- x = _SubheaderPointer(
- subheader_offset, subheader_length, subheader_compression, subheader_type
- )
+ subheader_compression = self._read_int(total_offset, 1)
+ total_offset += 1
- return x
+ subheader_type = self._read_int(total_offset, 1)
- def _read_subheader_signature(self, offset: int) -> bytes:
- subheader_signature = self._read_bytes(offset, self._int_length)
- return subheader_signature
-
- def _process_subheader(
- self, subheader_index: int, pointer: _SubheaderPointer
- ) -> None:
- offset = pointer.offset
- length = pointer.length
-
- if subheader_index == const.SASIndex.row_size_index:
- processor = self._process_rowsize_subheader
- elif subheader_index == const.SASIndex.column_size_index:
- processor = self._process_columnsize_subheader
- elif subheader_index == const.SASIndex.column_text_index:
- processor = self._process_columntext_subheader
- elif subheader_index == const.SASIndex.column_name_index:
- processor = self._process_columnname_subheader
- elif subheader_index == const.SASIndex.column_attributes_index:
- processor = self._process_columnattributes_subheader
- elif subheader_index == const.SASIndex.format_and_label_index:
- processor = self._process_format_subheader
- elif subheader_index == const.SASIndex.column_list_index:
- processor = self._process_columnlist_subheader
- elif subheader_index == const.SASIndex.subheader_counts_index:
- processor = self._process_subheader_counts
- elif subheader_index == const.SASIndex.data_subheader_index:
- self._current_page_data_subheader_pointers.append(pointer)
- return
- else:
- raise ValueError("unknown subheader index")
+ if (
+ subheader_length == 0
+ or subheader_compression == const.truncated_subheader_id
+ ):
+ continue
- processor(offset, length)
+ subheader_signature = self._read_bytes(subheader_offset, self._int_length)
+ subheader_index = get_subheader_index(subheader_signature)
+ subheader_processor = self._subheader_processors[subheader_index]
+
+ if subheader_processor is None:
+ f1 = (
+ subheader_compression == const.compressed_subheader_id
+ or subheader_compression == 0
+ )
+ f2 = subheader_type == const.compressed_subheader_type
+ if self.compression and f1 and f2:
+ self._current_page_data_subheader_pointers.append(
+ (subheader_offset, subheader_length)
+ )
+ else:
+ self.close()
+ raise ValueError(
+ f"Unknown subheader signature {subheader_signature}"
+ )
+ else:
+ subheader_processor(subheader_offset, subheader_length)
def _process_rowsize_subheader(self, offset: int, length: int) -> None:
@@ -523,10 +484,12 @@ def _process_rowsize_subheader(self, offset: int, length: int) -> None:
lcp_offset += 378
self.row_length = self._read_int(
- offset + const.row_length_offset_multiplier * int_len, int_len
+ offset + const.row_length_offset_multiplier * int_len,
+ int_len,
)
self.row_count = self._read_int(
- offset + const.row_count_offset_multiplier * int_len, int_len
+ offset + const.row_count_offset_multiplier * int_len,
+ int_len,
)
self.col_count_p1 = self._read_int(
offset + const.col_count_p1_multiplier * int_len, int_len
| Avoid constructing `_SubheaderPointer` objects and make dictionary lookups in C rather than in Python.
Speedup relative to current `main`:
```
<main> <sas/shlookup~1>
- 8.32±0.07ms 7.51±0.06ms 0.90 io.sas.SAS.time_test_meta2_pagesas7bdat
- 82.8±0.5ms 73.6±0.5ms 0.89 io.sas.SAS.time_read_sas7bdat_2_chunked
before after ratio
```
Will extend what's new from https://github.com/pandas-dev/pandas/pull/47404 once that's merged.
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47656 | 2022-07-09T21:40:23Z | 2022-10-04T17:56:17Z | 2022-10-04T17:56:17Z | 2022-10-13T16:59:53Z |
CLN: Rename private variables to inclusive | diff --git a/pandas/_libs/interval.pyi b/pandas/_libs/interval.pyi
index ba0a339fa93dd..bad0f2bab93d8 100644
--- a/pandas/_libs/interval.pyi
+++ b/pandas/_libs/interval.pyi
@@ -17,7 +17,7 @@ from pandas._typing import (
Timestamp,
)
-VALID_CLOSED: frozenset[str]
+VALID_INCLUSIVE: frozenset[str]
_OrderableScalarT = TypeVar("_OrderableScalarT", int, float)
_OrderableTimesT = TypeVar("_OrderableTimesT", Timestamp, Timedelta)
@@ -52,7 +52,9 @@ class IntervalMixin:
def open_right(self) -> bool: ...
@property
def is_empty(self) -> bool: ...
- def _check_closed_matches(self, other: IntervalMixin, name: str = ...) -> None: ...
+ def _check_inclusive_matches(
+ self, other: IntervalMixin, name: str = ...
+ ) -> None: ...
def _warning_interval(
inclusive, closed
@@ -150,7 +152,7 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
def overlaps(self: Interval[_OrderableT], other: Interval[_OrderableT]) -> bool: ...
def intervals_to_interval_bounds(
- intervals: np.ndarray, validate_closed: bool = ...
+ intervals: np.ndarray, validate_inclusive: bool = ...
) -> tuple[np.ndarray, np.ndarray, IntervalInclusiveType]: ...
class IntervalTree(IntervalMixin):
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 79b3c0d056735..bc0a63c5c5a33 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -56,7 +56,7 @@ from pandas._libs.tslibs.util cimport (
is_timedelta64_object,
)
-VALID_CLOSED = frozenset(['both', 'neither', 'left', 'right'])
+VALID_INCLUSIVE = frozenset(['both', 'neither', 'left', 'right'])
cdef class IntervalMixin:
@@ -85,7 +85,7 @@ cdef class IntervalMixin:
Returns
-------
bool
- True if the Interval is closed on the left-side.
+ True if the Interval is closed on the right-side.
"""
return self.inclusive in ('right', 'both')
@@ -99,7 +99,7 @@ cdef class IntervalMixin:
Returns
-------
bool
- True if the Interval is closed on the left-side.
+ True if the Interval is not closed on the left-side.
"""
return not self.closed_left
@@ -113,7 +113,7 @@ cdef class IntervalMixin:
Returns
-------
bool
- True if the Interval is closed on the left-side.
+ True if the Interval is not closed on the right-side.
"""
return not self.closed_right
@@ -188,7 +188,7 @@ cdef class IntervalMixin:
"""
return (self.right == self.left) & (self.inclusive != 'both')
- def _check_closed_matches(self, other, name='other'):
+ def _check_inclusive_matches(self, other, name='other'):
"""
Check if the inclusive attribute of `other` matches.
@@ -203,7 +203,7 @@ cdef class IntervalMixin:
Raises
------
ValueError
- When `other` is not closed exactly the same as self.
+ When `other` is not inclusive exactly the same as self.
"""
if self.inclusive != other.inclusive:
raise ValueError(f"'{name}.inclusive' is {repr(other.inclusive)}, "
@@ -259,14 +259,14 @@ cdef class Interval(IntervalMixin):
.. deprecated:: 1.5.0
inclusive : {'both', 'neither', 'left', 'right'}, default 'both'
- Whether the interval is closed on the left-side, right-side, both or
+ Whether the interval is inclusive on the left-side, right-side, both or
neither. See the Notes for more detailed explanation.
.. versionadded:: 1.5.0
See Also
--------
- IntervalIndex : An Index of Interval objects that are all closed on the
+ IntervalIndex : An Index of Interval objects that are all inclusive on the
same side.
cut : Convert continuous data into discrete bins (Categorical
of Interval objects).
@@ -279,13 +279,13 @@ cdef class Interval(IntervalMixin):
The parameters `left` and `right` must be from the same type, you must be
able to compare them and they must satisfy ``left <= right``.
- A closed interval (in mathematics denoted by square brackets) contains
- its endpoints, i.e. the closed interval ``[0, 5]`` is characterized by the
+ A inclusive interval (in mathematics denoted by square brackets) contains
+ its endpoints, i.e. the inclusive interval ``[0, 5]`` is characterized by the
conditions ``0 <= x <= 5``. This is what ``inclusive='both'`` stands for.
An open interval (in mathematics denoted by parentheses) does not contain
its endpoints, i.e. the open interval ``(0, 5)`` is characterized by the
conditions ``0 < x < 5``. This is what ``inclusive='neither'`` stands for.
- Intervals can also be half-open or half-closed, i.e. ``[0, 5)`` is
+ Intervals can also be half-open or half-inclusive, i.e. ``[0, 5)`` is
described by ``0 <= x < 5`` (``inclusive='left'``) and ``(0, 5]`` is
described by ``0 < x <= 5`` (``inclusive='right'``).
@@ -352,7 +352,7 @@ cdef class Interval(IntervalMixin):
cdef readonly str inclusive
"""
- Whether the interval is closed on the left-side, right-side, both or
+ Whether the interval is inclusive on the left-side, right-side, both or
neither.
"""
@@ -368,7 +368,7 @@ cdef class Interval(IntervalMixin):
if inclusive is None:
inclusive = "right"
- if inclusive not in VALID_CLOSED:
+ if inclusive not in VALID_INCLUSIVE:
raise ValueError(f"invalid option for 'inclusive': {inclusive}")
if not left <= right:
raise ValueError("left side of interval must be <= right side")
@@ -522,7 +522,7 @@ cdef class Interval(IntervalMixin):
"""
Check whether two Interval objects overlap.
- Two intervals overlap if they share a common point, including closed
+ Two intervals overlap if they share a common point, including inclusive
endpoints. Intervals that only have an open endpoint in common do not
overlap.
@@ -551,7 +551,7 @@ cdef class Interval(IntervalMixin):
>>> i1.overlaps(i3)
False
- Intervals that share closed endpoints overlap:
+ Intervals that share inclusive endpoints overlap:
>>> i4 = pd.Interval(0, 1, inclusive='both')
>>> i5 = pd.Interval(1, 2, inclusive='both')
@@ -568,7 +568,7 @@ cdef class Interval(IntervalMixin):
raise TypeError("`other` must be an Interval, "
f"got {type(other).__name__}")
- # equality is okay if both endpoints are closed (overlap at a point)
+ # equality is okay if both endpoints are inclusive (overlap at a point)
op1 = le if (self.closed_left and other.closed_right) else lt
op2 = le if (other.closed_left and self.closed_right) else lt
@@ -580,16 +580,16 @@ cdef class Interval(IntervalMixin):
@cython.wraparound(False)
@cython.boundscheck(False)
-def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True):
+def intervals_to_interval_bounds(ndarray intervals, bint validate_inclusive=True):
"""
Parameters
----------
intervals : ndarray
Object array of Intervals / nulls.
- validate_closed: bool, default True
- Boolean indicating if all intervals must be closed on the same side.
- Mismatching closed will raise if True, else return None for closed.
+ validate_inclusive: bool, default True
+ Boolean indicating if all intervals must be inclusive on the same side.
+ Mismatching inclusive will raise if True, else return None for inclusive.
Returns
-------
@@ -602,7 +602,7 @@ def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True):
object inclusive = None, interval
Py_ssize_t i, n = len(intervals)
ndarray left, right
- bint seen_closed = False
+ bint seen_inclusive = False
left = np.empty(n, dtype=intervals.dtype)
right = np.empty(n, dtype=intervals.dtype)
@@ -620,13 +620,13 @@ def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True):
left[i] = interval.left
right[i] = interval.right
- if not seen_closed:
- seen_closed = True
+ if not seen_inclusive:
+ seen_inclusive = True
inclusive = interval.inclusive
elif inclusive != interval.inclusive:
inclusive = None
- if validate_closed:
- raise ValueError("intervals must all be closed on the same side")
+ if validate_inclusive:
+ raise ValueError("intervals must all be inclusive on the same side")
return left, right, inclusive
diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py
index 5ed10661e8983..79b79a8ae8ff1 100644
--- a/pandas/core/arrays/arrow/_arrow_utils.py
+++ b/pandas/core/arrays/arrow/_arrow_utils.py
@@ -11,7 +11,7 @@
from pandas.util._decorators import deprecate_kwarg
from pandas.util._exceptions import find_stack_level
-from pandas.core.arrays.interval import VALID_CLOSED
+from pandas.core.arrays.interval import VALID_INCLUSIVE
def fallback_performancewarning(version: str | None = None) -> None:
@@ -111,8 +111,8 @@ class ArrowIntervalType(pyarrow.ExtensionType):
def __init__(self, subtype, inclusive: IntervalInclusiveType) -> None:
# attributes need to be set first before calling
# super init (as that calls serialize)
- assert inclusive in VALID_CLOSED
- self._closed: IntervalInclusiveType = inclusive
+ assert inclusive in VALID_INCLUSIVE
+ self._inclusive: IntervalInclusiveType = inclusive
if not isinstance(subtype, pyarrow.DataType):
subtype = pyarrow.type_for_alias(str(subtype))
self._subtype = subtype
@@ -126,7 +126,7 @@ def subtype(self):
@property
def inclusive(self) -> IntervalInclusiveType:
- return self._closed
+ return self._inclusive
@property
def closed(self) -> IntervalInclusiveType:
@@ -135,7 +135,7 @@ def closed(self) -> IntervalInclusiveType:
FutureWarning,
stacklevel=find_stack_level(),
)
- return self._closed
+ return self._inclusive
def __arrow_ext_serialize__(self) -> bytes:
metadata = {"subtype": str(self.subtype), "inclusive": self.inclusive}
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index ea0e7a769c25e..6469dccf6e2d5 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -23,7 +23,7 @@
from pandas._libs import lib
from pandas._libs.interval import (
- VALID_CLOSED,
+ VALID_INCLUSIVE,
Interval,
IntervalMixin,
intervals_to_interval_bounds,
@@ -130,7 +130,7 @@
Array-like containing Interval objects from which to build the
%(klass)s.
inclusive : {'left', 'right', 'both', 'neither'}, default 'right'
- Whether the intervals are closed on the left-side, right-side, both or
+ Whether the intervals are inclusive on the left-side, right-side, both or
neither.
dtype : dtype or None, default None
If None, dtype will be inferred.
@@ -185,7 +185,8 @@
_interval_shared_docs["class"]
% {
"klass": "IntervalArray",
- "summary": "Pandas array for interval data that are closed on the same side.",
+ "summary": "Pandas array for interval data that are inclusive on the same "
+ "side.",
"versionadded": "0.24.0",
"name": "",
"extra_attributes": "",
@@ -254,13 +255,13 @@ def __new__(
# might need to convert empty or purely na data
data = _maybe_convert_platform_interval(data)
- left, right, infer_closed = intervals_to_interval_bounds(
- data, validate_closed=inclusive is None
+ left, right, infer_inclusive = intervals_to_interval_bounds(
+ data, validate_inclusive=inclusive is None
)
if left.dtype == object:
left = lib.maybe_convert_objects(left)
right = lib.maybe_convert_objects(right)
- inclusive = inclusive or infer_closed
+ inclusive = inclusive or infer_inclusive
return cls._simple_new(
left,
@@ -389,7 +390,7 @@ def _from_factorized(
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
inclusive : {'left', 'right', 'both', 'neither'}, default 'right'
- Whether the intervals are closed on the left-side, right-side, both
+ Whether the intervals are inclusive on the left-side, right-side, both
or neither.
copy : bool, default False
Copy the data.
@@ -455,7 +456,7 @@ def from_breaks(
right : array-like (1-dimensional)
Right bounds for each interval.
inclusive : {'left', 'right', 'both', 'neither'}, default 'right'
- Whether the intervals are closed on the left-side, right-side, both
+ Whether the intervals are inclusive on the left-side, right-side, both
or neither.
copy : bool, default False
Copy the data.
@@ -542,7 +543,7 @@ def from_arrays(
data : array-like (1-dimensional)
Array of tuples.
inclusive : {'left', 'right', 'both', 'neither'}, default 'right'
- Whether the intervals are closed on the left-side, right-side, both
+ Whether the intervals are inclusive on the left-side, right-side, both
or neither.
copy : bool, default False
By-default copy the data, this is compat only and ignored.
@@ -629,7 +630,7 @@ def _validate(self):
* left and right have the same missing values
* left is always below right
"""
- if self.inclusive not in VALID_CLOSED:
+ if self.inclusive not in VALID_INCLUSIVE:
msg = f"invalid option for 'inclusive': {self.inclusive}"
raise ValueError(msg)
if len(self._left) != len(self._right):
@@ -745,7 +746,7 @@ def _cmp_method(self, other, op):
# for categorical defer to categories for dtype
other_dtype = other.categories.dtype
- # extract intervals if we have interval categories with matching closed
+ # extract intervals if we have interval categories with matching inclusive
if is_interval_dtype(other_dtype):
if self.inclusive != other.categories.inclusive:
return invalid_comparison(self, other, op)
@@ -754,7 +755,7 @@ def _cmp_method(self, other, op):
other.codes, allow_fill=True, fill_value=other.categories._na_value
)
- # interval-like -> need same closed and matching endpoints
+ # interval-like -> need same inclusive and matching endpoints
if is_interval_dtype(other_dtype):
if self.inclusive != other.inclusive:
return invalid_comparison(self, other, op)
@@ -994,7 +995,7 @@ def _concat_same_type(
"""
inclusive_set = {interval.inclusive for interval in to_concat}
if len(inclusive_set) != 1:
- raise ValueError("Intervals must all be closed on the same side.")
+ raise ValueError("Intervals must all be inclusive on the same side.")
inclusive = inclusive_set.pop()
left = np.concatenate([interval.left for interval in to_concat])
@@ -1120,7 +1121,7 @@ def _validate_listlike(self, value):
# list-like of intervals
try:
array = IntervalArray(value)
- self._check_closed_matches(array, name="value")
+ self._check_inclusive_matches(array, name="value")
value_left, value_right = array.left, array.right
except TypeError as err:
# wrong type: not interval or NA
@@ -1140,7 +1141,7 @@ def _validate_listlike(self, value):
def _validate_scalar(self, value):
if isinstance(value, Interval):
- self._check_closed_matches(value, name="value")
+ self._check_inclusive_matches(value, name="value")
left, right = value.left, value.right
# TODO: check subdtype match like _validate_setitem_value?
elif is_valid_na_for_dtype(value, self.left.dtype):
@@ -1166,7 +1167,7 @@ def _validate_setitem_value(self, value):
elif isinstance(value, Interval):
# scalar
- self._check_closed_matches(value, name="value")
+ self._check_inclusive_matches(value, name="value")
value_left, value_right = value.left, value.right
self.left._validate_fill_value(value_left)
self.left._validate_fill_value(value_right)
@@ -1352,7 +1353,7 @@ def overlaps(self, other):
msg = f"`other` must be Interval-like, got {type(other).__name__}"
raise TypeError(msg)
- # equality is okay if both endpoints are closed (overlap at a point)
+ # equality is okay if both endpoints are inclusive (overlap at a point)
op1 = le if (self.closed_left and other.closed_right) else lt
op2 = le if (other.closed_left and self.closed_right) else lt
@@ -1366,7 +1367,7 @@ def overlaps(self, other):
@property
def inclusive(self) -> IntervalInclusiveType:
"""
- Whether the intervals are closed on the left-side, right-side, both or
+ Whether the intervals are inclusive on the left-side, right-side, both or
neither.
"""
return self.dtype.inclusive
@@ -1482,7 +1483,7 @@ def set_closed(
def set_inclusive(
self: IntervalArrayT, inclusive: IntervalInclusiveType
) -> IntervalArrayT:
- if inclusive not in VALID_CLOSED:
+ if inclusive not in VALID_INCLUSIVE:
msg = f"invalid option for 'inclusive': {inclusive}"
raise ValueError(msg)
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 78096d836f5b0..9683c1dd93645 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -1124,7 +1124,7 @@ def __new__(
# generally for pickle compat
u = object.__new__(cls)
u._subtype = None
- u._closed = inclusive
+ u._inclusive = inclusive
return u
elif isinstance(subtype, str) and subtype.lower() == "interval":
subtype = None
@@ -1166,7 +1166,7 @@ def __new__(
except KeyError:
u = object.__new__(cls)
u._subtype = subtype
- u._closed = inclusive
+ u._inclusive = inclusive
cls._cache_dtypes[key] = u
return u
@@ -1184,7 +1184,7 @@ def _can_hold_na(self) -> bool:
@property
def inclusive(self):
- return self._closed
+ return self._inclusive
@property
def closed(self):
@@ -1193,7 +1193,7 @@ def closed(self):
FutureWarning,
stacklevel=find_stack_level(),
)
- return self._closed
+ return self._inclusive
@property
def subtype(self):
@@ -1274,7 +1274,7 @@ def __setstate__(self, state) -> None:
# pickle -> need to set the settable private ones here (see GH26067)
self._subtype = state["subtype"]
# backward-compat older pickles won't have "inclusive" key
- self._closed = state.pop("inclusive", None)
+ self._inclusive = state.pop("inclusive", None)
@classmethod
def is_dtype(cls, dtype: object) -> bool:
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index ced675fe9a3cf..23f2e724e208c 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -153,7 +153,7 @@ def _new_IntervalIndex(cls, d):
_interval_shared_docs["class"]
% {
"klass": "IntervalIndex",
- "summary": "Immutable index of intervals that are closed on the same side.",
+ "summary": "Immutable index of intervals that are inclusive on the same side.",
"name": _index_doc_kwargs["name"],
"versionadded": "0.20.0",
"extra_attributes": "is_overlapping\nvalues\n",
@@ -473,7 +473,7 @@ def is_overlapping(self) -> bool:
>>> index.is_overlapping
True
- Intervals that share closed endpoints overlap:
+ Intervals that share inclusive endpoints overlap:
>>> index = pd.interval_range(0, 3, inclusive='both')
>>> index
@@ -1009,7 +1009,7 @@ def interval_range(
name : str, default None
Name of the resulting IntervalIndex.
inclusive : {"both", "neither", "left", "right"}, default "both"
- Include boundaries; Whether to set each bound as closed or open.
+ Include boundaries; Whether to set each bound as inclusive or not.
.. versionadded:: 1.5.0
closed : {'left', 'right', 'both', 'neither'}, default 'right'
@@ -1026,7 +1026,7 @@ def interval_range(
See Also
--------
- IntervalIndex : An Index of intervals that are all closed on the same side.
+ IntervalIndex : An Index of intervals that are all inclusive on the same side.
Notes
-----
@@ -1079,7 +1079,7 @@ def interval_range(
dtype='interval[float64, right]')
The ``inclusive`` parameter specifies which endpoints of the individual
- intervals within the ``IntervalIndex`` are closed.
+ intervals within the ``IntervalIndex`` are inclusive.
>>> pd.interval_range(end=5, periods=4, inclusive='both')
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index e127fe27b6209..695b06690b358 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -826,7 +826,7 @@ def test_unpickling_without_closed(self):
# GH#38394
dtype = IntervalDtype("interval")
- assert dtype._closed is None
+ assert dtype._inclusive is None
tm.round_trip_pickle(dtype)
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py
index 1966f344356a3..8c8998a8e4be9 100644
--- a/pandas/tests/indexes/interval/test_constructors.py
+++ b/pandas/tests/indexes/interval/test_constructors.py
@@ -401,7 +401,7 @@ def test_constructor_string(self):
def test_constructor_errors(self, constructor):
# mismatched closed within intervals with no constructor override
ivs = [Interval(0, 1, inclusive="right"), Interval(2, 3, inclusive="left")]
- msg = "intervals must all be closed on the same side"
+ msg = "intervals must all be inclusive on the same side"
with pytest.raises(ValueError, match=msg):
constructor(ivs)
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/47655 | 2022-07-09T21:00:51Z | 2022-07-10T00:30:31Z | 2022-07-10T00:30:31Z | 2022-07-18T00:24:22Z |
TYP: fix some of the __hash__ methods | diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py
index 5ed10661e8983..3b8333fdb410a 100644
--- a/pandas/core/arrays/arrow/_arrow_utils.py
+++ b/pandas/core/arrays/arrow/_arrow_utils.py
@@ -92,7 +92,7 @@ def __eq__(self, other):
else:
return NotImplemented
- def __hash__(self):
+ def __hash__(self) -> int:
return hash((str(self), self.freq))
def to_pandas_dtype(self):
@@ -158,7 +158,7 @@ def __eq__(self, other):
else:
return NotImplemented
- def __hash__(self):
+ def __hash__(self) -> int:
return hash((str(self), str(self.subtype), self.inclusive))
def to_pandas_dtype(self):
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 882cc76cf2d77..6c9b7adadb7b0 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -14,6 +14,7 @@
TYPE_CHECKING,
Any,
Callable,
+ ClassVar,
Iterator,
Literal,
Sequence,
@@ -1442,7 +1443,7 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
# https://github.com/python/typeshed/issues/2148#issuecomment-520783318
# Incompatible types in assignment (expression has type "None", base class
# "object" defined the type as "Callable[[object], int]")
- __hash__: None # type: ignore[assignment]
+ __hash__: ClassVar[None] # type: ignore[assignment]
# ------------------------------------------------------------------------
# Non-Optimized Default Methods; in the case of the private methods here,
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
index 859995cb3c230..eaed6257736ba 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -99,7 +99,7 @@ def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
self._fill_value = fill_value
self._check_fill_value()
- def __hash__(self):
+ def __hash__(self) -> int:
# Python3 doesn't inherit __hash__ when a base class overrides
# __eq__, so we explicitly do it here.
return super().__hash__()
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bd8e04df7594f..e392802bdb5ea 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -13,6 +13,7 @@
TYPE_CHECKING,
Any,
Callable,
+ ClassVar,
Hashable,
Literal,
Mapping,
@@ -1882,7 +1883,7 @@ def _drop_labels_or_levels(self, keys, axis: int = 0):
# https://github.com/python/typeshed/issues/2148#issuecomment-520783318
# Incompatible types in assignment (expression has type "None", base class
# "object" defined the type as "Callable[[object], int]")
- __hash__: None # type: ignore[assignment]
+ __hash__: ClassVar[None] # type: ignore[assignment]
def __iter__(self):
"""
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 667ce4664c359..fc5fcaeab7d2a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -8,6 +8,7 @@
TYPE_CHECKING,
Any,
Callable,
+ ClassVar,
Hashable,
Iterable,
Literal,
@@ -5296,7 +5297,7 @@ def __contains__(self, key: Any) -> bool:
# https://github.com/python/typeshed/issues/2148#issuecomment-520783318
# Incompatible types in assignment (expression has type "None", base class
# "object" defined the type as "Callable[[object], int]")
- __hash__: None # type: ignore[assignment]
+ __hash__: ClassVar[None] # type: ignore[assignment]
@final
def __setitem__(self, key, value):
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
index ed5cf047ab59f..deb6ac2c80a81 100644
--- a/pandas/core/indexes/frozen.py
+++ b/pandas/core/indexes/frozen.py
@@ -89,7 +89,8 @@ def __mul__(self, other):
def __reduce__(self):
return type(self), (list(self),)
- def __hash__(self):
+ # error: Signature of "__hash__" incompatible with supertype "list"
+ def __hash__(self) -> int: # type: ignore[override]
return hash(tuple(self))
def _disabled(self, *args, **kwargs):
| `__hash__` is `None` for DataFrame/Series/Index but I cannot find the line that sets `__hash__ = None` | https://api.github.com/repos/pandas-dev/pandas/pulls/47654 | 2022-07-09T18:27:01Z | 2022-07-10T00:32:30Z | 2022-07-10T00:32:30Z | 2022-09-21T15:28:31Z |
ci: add GitHub token permissions for workflows | diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml
index be894e6a5a63e..e091160c952f8 100644
--- a/.github/workflows/32-bit-linux.yml
+++ b/.github/workflows/32-bit-linux.yml
@@ -12,6 +12,9 @@ on:
paths-ignore:
- "doc/**"
+permissions:
+ contents: read
+
jobs:
pytest:
runs-on: ubuntu-latest
diff --git a/.github/workflows/assign.yml b/.github/workflows/assign.yml
index a1812843b1a8f..b7bb8db549f86 100644
--- a/.github/workflows/assign.yml
+++ b/.github/workflows/assign.yml
@@ -3,8 +3,14 @@ on:
issue_comment:
types: created
+permissions:
+ contents: read
+
jobs:
issue_assign:
+ permissions:
+ issues: write
+ pull-requests: write
runs-on: ubuntu-latest
steps:
- if: github.event.comment.body == 'take'
diff --git a/.github/workflows/asv-bot.yml b/.github/workflows/asv-bot.yml
index dbf0ab0acb9ec..abb19a95315b6 100644
--- a/.github/workflows/asv-bot.yml
+++ b/.github/workflows/asv-bot.yml
@@ -9,8 +9,15 @@ env:
ENV_FILE: environment.yml
COMMENT: ${{github.event.comment.body}}
+permissions:
+ contents: read
+
jobs:
autotune:
+ permissions:
+ contents: read
+ issues: write
+ pull-requests: write
name: "Run benchmarks"
# TODO: Support more benchmarking options later, against different branches, against self, etc
if: startsWith(github.event.comment.body, '@github-actions benchmark')
diff --git a/.github/workflows/autoupdate-pre-commit-config.yml b/.github/workflows/autoupdate-pre-commit-config.yml
index d2eac234ca361..9a41871c26062 100644
--- a/.github/workflows/autoupdate-pre-commit-config.yml
+++ b/.github/workflows/autoupdate-pre-commit-config.yml
@@ -5,8 +5,14 @@ on:
- cron: "0 7 1 * *" # At 07:00 on 1st of every month.
workflow_dispatch:
+permissions:
+ contents: read
+
jobs:
update-pre-commit:
+ permissions:
+ contents: write # for technote-space/create-pr-action to push code
+ pull-requests: write # for technote-space/create-pr-action to create a PR
if: github.repository_owner == 'pandas-dev'
name: Autoupdate pre-commit config
runs-on: ubuntu-latest
diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index 8031aaf22981f..09c603f347d4c 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -14,6 +14,9 @@ env:
ENV_FILE: environment.yml
PANDAS_CI: 1
+permissions:
+ contents: read
+
jobs:
pre_commit:
name: pre-commit
diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml
index f9a941b87387c..626bf7828e032 100644
--- a/.github/workflows/docbuild-and-upload.yml
+++ b/.github/workflows/docbuild-and-upload.yml
@@ -14,6 +14,9 @@ env:
ENV_FILE: environment.yml
PANDAS_CI: 1
+permissions:
+ contents: read
+
jobs:
web_and_docs:
name: Doc Build and Upload
diff --git a/.github/workflows/macos-windows.yml b/.github/workflows/macos-windows.yml
index cf9a59400bc92..e9503a2486560 100644
--- a/.github/workflows/macos-windows.yml
+++ b/.github/workflows/macos-windows.yml
@@ -18,6 +18,9 @@ env:
PATTERN: "not slow and not db and not network and not single_cpu"
+permissions:
+ contents: read
+
jobs:
pytest:
defaults:
diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml
index 09639acafbba1..d93b92a9662ec 100644
--- a/.github/workflows/python-dev.yml
+++ b/.github/workflows/python-dev.yml
@@ -27,6 +27,9 @@ env:
COVERAGE: true
PYTEST_TARGET: pandas
+permissions:
+ contents: read
+
jobs:
build:
if: false # Comment this line out to "unfreeze"
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml
index 89312cdaaa80a..2e1ffe6d0d17e 100644
--- a/.github/workflows/sdist.yml
+++ b/.github/workflows/sdist.yml
@@ -13,6 +13,9 @@ on:
paths-ignore:
- "doc/**"
+permissions:
+ contents: read
+
jobs:
build:
if: ${{ github.event.label.name == 'Build' || contains(github.event.pull_request.labels.*.name, 'Build') || github.event_name == 'push'}}
diff --git a/.github/workflows/stale-pr.yml b/.github/workflows/stale-pr.yml
index b97b60717a2b8..69656be18a8b1 100644
--- a/.github/workflows/stale-pr.yml
+++ b/.github/workflows/stale-pr.yml
@@ -4,8 +4,13 @@ on:
# * is a special character in YAML so you have to quote this string
- cron: "0 0 * * *"
+permissions:
+ contents: read
+
jobs:
stale:
+ permissions:
+ pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 8d6cae6278dcf..a759280c74521 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -15,6 +15,9 @@ on:
env:
PANDAS_CI: 1
+permissions:
+ contents: read
+
jobs:
pytest:
runs-on: ubuntu-latest
| This PR adds minimum token permissions for the GITHUB_TOKEN using https://github.com/step-security/secure-workflows.
GitHub recommends defining minimum GITHUB_TOKEN permissions for securing GitHub Actions workflows
- https://github.blog/changelog/2021-04-20-github-actions-control-permissions-for-github_token/
- https://docs.github.com/en/actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
- The Open Source Security Foundation (OpenSSF) [Scorecards](https://github.com/ossf/scorecard) treats not setting token permissions as a high-risk issue
This project is part of the top 100 critical projects as per OpenSSF (https://github.com/ossf/wg-securing-critical-projects), so fixing the token permissions to improve security.
Signed-off-by: Varun Sharma <varunsh@stepsecurity.io>
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47652 | 2022-07-09T14:59:00Z | 2022-07-11T16:57:29Z | 2022-07-11T16:57:29Z | 2022-07-11T16:57:37Z |
WEB: Update NumFOCUS committee members | diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index 9165456d55897..5bb0cbc7557f8 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -101,11 +101,11 @@ maintainers:
- Camille Scott
- Nathaniel Smith
numfocus:
- - Phillip Cloud
- - Stephan Hoyer
- Wes McKinney
- Jeff Reback
- Joris Van den Bossche
+ - Tom Augspurger
+ - Matthew Roeschke
sponsors:
active:
- name: "NumFOCUS"
| After initial discussion in the mailing list about updating the NumFOCUS committee, I did some research, and based on the [governance docs](https://github.com/pandas-dev/pandas-governance/blob/master/governance.md#numfocus-subcommittee) the main goal of the committee is to manage the funds coming from NumFOCUS.
So, I guess it makes sense that the list matches the approvers of NumFOCUS funds previously discussed.
The governance also mentions that the committee must have at least 5 members. Leaving Wes in the list even if he's not one of the 4 approvers. Being the BDFL of the project I guess it makes sense to keep.
We can always continue the discussion, but for now I think the new list makes more sense than the previous.
CC: @wesm @cpcloud @shoyer @TomAugspurger @mroeschke | https://api.github.com/repos/pandas-dev/pandas/pulls/47650 | 2022-07-09T07:46:15Z | 2022-07-10T07:21:08Z | 2022-07-10T07:21:08Z | 2022-07-10T07:21:08Z |
TYP: Improve typing interval inclusive | diff --git a/pandas/_libs/interval.pyi b/pandas/_libs/interval.pyi
index 3bd5dd2042e69..ba0a339fa93dd 100644
--- a/pandas/_libs/interval.pyi
+++ b/pandas/_libs/interval.pyi
@@ -12,7 +12,7 @@ import numpy.typing as npt
from pandas._libs import lib
from pandas._typing import (
- IntervalClosedType,
+ IntervalInclusiveType,
Timedelta,
Timestamp,
)
@@ -56,7 +56,7 @@ class IntervalMixin:
def _warning_interval(
inclusive, closed
-) -> tuple[IntervalClosedType, lib.NoDefault]: ...
+) -> tuple[IntervalInclusiveType, lib.NoDefault]: ...
class Interval(IntervalMixin, Generic[_OrderableT]):
@property
@@ -64,17 +64,17 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
@property
def right(self: Interval[_OrderableT]) -> _OrderableT: ...
@property
- def inclusive(self) -> IntervalClosedType: ...
+ def inclusive(self) -> IntervalInclusiveType: ...
@property
- def closed(self) -> IntervalClosedType: ...
+ def closed(self) -> IntervalInclusiveType: ...
mid: _MidDescriptor
length: _LengthDescriptor
def __init__(
self,
left: _OrderableT,
right: _OrderableT,
- inclusive: IntervalClosedType = ...,
- closed: IntervalClosedType = ...,
+ inclusive: IntervalInclusiveType = ...,
+ closed: IntervalInclusiveType = ...,
) -> None: ...
def __hash__(self) -> int: ...
@overload
@@ -151,14 +151,14 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
def intervals_to_interval_bounds(
intervals: np.ndarray, validate_closed: bool = ...
-) -> tuple[np.ndarray, np.ndarray, str]: ...
+) -> tuple[np.ndarray, np.ndarray, IntervalInclusiveType]: ...
class IntervalTree(IntervalMixin):
def __init__(
self,
left: np.ndarray,
right: np.ndarray,
- inclusive: IntervalClosedType = ...,
+ inclusive: IntervalInclusiveType = ...,
leaf_size: int = ...,
) -> None: ...
@property
diff --git a/pandas/_typing.py b/pandas/_typing.py
index ac1237f8841be..4bc5f75400455 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -314,7 +314,7 @@ def closed(self) -> bool:
# Interval closed type
IntervalLeftRight = Literal["left", "right"]
-IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]]
+IntervalInclusiveType = Union[IntervalLeftRight, Literal["both", "neither"]]
# datetime and NaTType
DatetimeNaTType = Union[datetime, "NaTType"]
diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py
index 5893ca77193c4..5ed10661e8983 100644
--- a/pandas/core/arrays/arrow/_arrow_utils.py
+++ b/pandas/core/arrays/arrow/_arrow_utils.py
@@ -6,6 +6,7 @@
import numpy as np
import pyarrow
+from pandas._typing import IntervalInclusiveType
from pandas.errors import PerformanceWarning
from pandas.util._decorators import deprecate_kwarg
from pandas.util._exceptions import find_stack_level
@@ -107,11 +108,11 @@ def to_pandas_dtype(self):
class ArrowIntervalType(pyarrow.ExtensionType):
@deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
- def __init__(self, subtype, inclusive: str) -> None:
+ def __init__(self, subtype, inclusive: IntervalInclusiveType) -> None:
# attributes need to be set first before calling
# super init (as that calls serialize)
assert inclusive in VALID_CLOSED
- self._closed = inclusive
+ self._closed: IntervalInclusiveType = inclusive
if not isinstance(subtype, pyarrow.DataType):
subtype = pyarrow.type_for_alias(str(subtype))
self._subtype = subtype
@@ -124,11 +125,11 @@ def subtype(self):
return self._subtype
@property
- def inclusive(self) -> str:
+ def inclusive(self) -> IntervalInclusiveType:
return self._closed
@property
- def closed(self):
+ def closed(self) -> IntervalInclusiveType:
warnings.warn(
"Attribute `closed` is deprecated in favor of `inclusive`.",
FutureWarning,
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 4320c862fbc41..ea0e7a769c25e 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -32,7 +32,7 @@
from pandas._typing import (
ArrayLike,
Dtype,
- IntervalClosedType,
+ IntervalInclusiveType,
NpDtype,
PositionalIndexer,
ScalarIndexer,
@@ -230,7 +230,7 @@ def ndim(self) -> Literal[1]:
def __new__(
cls: type[IntervalArrayT],
data,
- inclusive: str | None = None,
+ inclusive: IntervalInclusiveType | None = None,
dtype: Dtype | None = None,
copy: bool = False,
verify_integrity: bool = True,
@@ -277,7 +277,7 @@ def _simple_new(
cls: type[IntervalArrayT],
left,
right,
- inclusive=None,
+ inclusive: IntervalInclusiveType | None = None,
copy: bool = False,
dtype: Dtype | None = None,
verify_integrity: bool = True,
@@ -431,7 +431,7 @@ def _from_factorized(
def from_breaks(
cls: type[IntervalArrayT],
breaks,
- inclusive: IntervalClosedType | None = None,
+ inclusive: IntervalInclusiveType | None = None,
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalArrayT:
@@ -513,7 +513,7 @@ def from_arrays(
cls: type[IntervalArrayT],
left,
right,
- inclusive: IntervalClosedType | None = None,
+ inclusive: IntervalInclusiveType | None = None,
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalArrayT:
@@ -586,7 +586,7 @@ def from_arrays(
def from_tuples(
cls: type[IntervalArrayT],
data,
- inclusive=None,
+ inclusive: IntervalInclusiveType | None = None,
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalArrayT:
@@ -1364,7 +1364,7 @@ def overlaps(self, other):
# ---------------------------------------------------------------------
@property
- def inclusive(self) -> IntervalClosedType:
+ def inclusive(self) -> IntervalInclusiveType:
"""
Whether the intervals are closed on the left-side, right-side, both or
neither.
@@ -1372,7 +1372,7 @@ def inclusive(self) -> IntervalClosedType:
return self.dtype.inclusive
@property
- def closed(self) -> IntervalClosedType:
+ def closed(self) -> IntervalInclusiveType:
"""
Whether the intervals are closed on the left-side, right-side, both or
neither.
@@ -1426,7 +1426,9 @@ def closed(self) -> IntervalClosedType:
),
}
)
- def set_closed(self: IntervalArrayT, closed: IntervalClosedType) -> IntervalArrayT:
+ def set_closed(
+ self: IntervalArrayT, closed: IntervalInclusiveType
+ ) -> IntervalArrayT:
warnings.warn(
"set_closed is deprecated and will be removed in a future version. "
"Use set_inclusive instead.",
@@ -1478,7 +1480,7 @@ def set_closed(self: IntervalArrayT, closed: IntervalClosedType) -> IntervalArra
}
)
def set_inclusive(
- self: IntervalArrayT, inclusive: IntervalClosedType
+ self: IntervalArrayT, inclusive: IntervalInclusiveType
) -> IntervalArrayT:
if inclusive not in VALID_CLOSED:
msg = f"invalid option for 'inclusive': {inclusive}"
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 16e7559e4d153..78096d836f5b0 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -38,6 +38,7 @@
from pandas._typing import (
Dtype,
DtypeObj,
+ IntervalInclusiveType,
Ordered,
npt,
type_t,
@@ -1091,7 +1092,7 @@ class IntervalDtype(PandasExtensionDtype):
def __new__(
cls,
subtype=None,
- inclusive: str_type | None = None,
+ inclusive: IntervalInclusiveType | None = None,
closed: None | lib.NoDefault = lib.no_default,
):
from pandas.core.dtypes.common import (
@@ -1140,7 +1141,11 @@ def __new__(
"'inclusive' keyword does not match value "
"specified in dtype string"
)
- inclusive = gd["inclusive"]
+ # Incompatible types in assignment (expression has type
+ # "Union[str, Any]", variable has type
+ # "Optional[Union[Literal['left', 'right'],
+ # Literal['both', 'neither']]]")
+ inclusive = gd["inclusive"] # type: ignore[assignment]
try:
subtype = pandas_dtype(subtype)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 8089fc58db07d..e2f4574abe5a0 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -48,7 +48,7 @@
IgnoreRaise,
IndexKeyFunc,
IndexLabel,
- IntervalClosedType,
+ IntervalInclusiveType,
JSONSerializable,
Level,
Manager,
@@ -8066,7 +8066,7 @@ def between_time(
end_time,
include_start: bool_t | lib.NoDefault = lib.no_default,
include_end: bool_t | lib.NoDefault = lib.no_default,
- inclusive: IntervalClosedType | None = None,
+ inclusive: IntervalInclusiveType | None = None,
axis=None,
) -> NDFrameT:
"""
@@ -8172,7 +8172,7 @@ def between_time(
left = True if include_start is lib.no_default else include_start
right = True if include_end is lib.no_default else include_end
- inc_dict: dict[tuple[bool_t, bool_t], IntervalClosedType] = {
+ inc_dict: dict[tuple[bool_t, bool_t], IntervalInclusiveType] = {
(True, True): "both",
(True, False): "left",
(False, True): "right",
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 18c0d56abbeb4..6aa2ff91ba933 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -35,7 +35,7 @@
from pandas._typing import (
Dtype,
DtypeObj,
- IntervalClosedType,
+ IntervalInclusiveType,
IntervalLeftRight,
npt,
)
@@ -920,7 +920,7 @@ def date_range(
normalize: bool = False,
name: Hashable = None,
closed: Literal["left", "right"] | None | lib.NoDefault = lib.no_default,
- inclusive: IntervalClosedType | None = None,
+ inclusive: IntervalInclusiveType | None = None,
**kwargs,
) -> DatetimeIndex:
"""
@@ -1126,7 +1126,7 @@ def bdate_range(
weekmask=None,
holidays=None,
closed: IntervalLeftRight | lib.NoDefault | None = lib.no_default,
- inclusive: IntervalClosedType | None = None,
+ inclusive: IntervalInclusiveType | None = None,
**kwargs,
) -> DatetimeIndex:
"""
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index b1f839daa694d..ced675fe9a3cf 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -30,7 +30,7 @@
from pandas._typing import (
Dtype,
DtypeObj,
- IntervalClosedType,
+ IntervalInclusiveType,
npt,
)
from pandas.errors import InvalidIndexError
@@ -198,7 +198,7 @@ class IntervalIndex(ExtensionIndex):
_typ = "intervalindex"
# annotate properties pinned via inherit_names
- inclusive: IntervalClosedType
+ inclusive: IntervalInclusiveType
is_non_overlapping_monotonic: bool
closed_left: bool
closed_right: bool
@@ -217,7 +217,7 @@ class IntervalIndex(ExtensionIndex):
def __new__(
cls,
data,
- inclusive=None,
+ inclusive: IntervalInclusiveType | None = None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
@@ -266,7 +266,7 @@ def closed(self):
def from_breaks(
cls,
breaks,
- inclusive=None,
+ inclusive: IntervalInclusiveType | None = None,
name: Hashable = None,
copy: bool = False,
dtype: Dtype | None = None,
@@ -302,7 +302,7 @@ def from_arrays(
cls,
left,
right,
- inclusive=None,
+ inclusive: IntervalInclusiveType | None = None,
name: Hashable = None,
copy: bool = False,
dtype: Dtype | None = None,
@@ -337,7 +337,7 @@ def from_arrays(
def from_tuples(
cls,
data,
- inclusive=None,
+ inclusive: IntervalInclusiveType | None = None,
name: Hashable = None,
copy: bool = False,
dtype: Dtype | None = None,
@@ -989,7 +989,7 @@ def interval_range(
periods=None,
freq=None,
name: Hashable = None,
- inclusive: IntervalClosedType | None = None,
+ inclusive: IntervalInclusiveType | None = None,
) -> IntervalIndex:
"""
Return a fixed frequency IntervalIndex.
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 10b607da45ca8..0461fbfc6faa8 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -25,6 +25,7 @@
Axis,
FilePath,
IndexLabel,
+ IntervalInclusiveType,
Level,
QuantileInterpolation,
Scalar,
@@ -3479,7 +3480,7 @@ def highlight_between(
axis: Axis | None = 0,
left: Scalar | Sequence | None = None,
right: Scalar | Sequence | None = None,
- inclusive: str = "both",
+ inclusive: IntervalInclusiveType = "both",
props: str | None = None,
) -> Styler:
"""
@@ -3584,7 +3585,7 @@ def highlight_quantile(
q_left: float = 0.0,
q_right: float = 1.0,
interpolation: QuantileInterpolation = "linear",
- inclusive: str = "both",
+ inclusive: IntervalInclusiveType = "both",
props: str | None = None,
) -> Styler:
"""
@@ -3969,7 +3970,7 @@ def _highlight_between(
props: str,
left: Scalar | Sequence | np.ndarray | NDFrame | None = None,
right: Scalar | Sequence | np.ndarray | NDFrame | None = None,
- inclusive: bool | str = True,
+ inclusive: bool | IntervalInclusiveType = True,
) -> np.ndarray:
"""
Return an array of css props based on condition of data values within given range.
diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py
index caa191dc78493..3676e6eb0091e 100644
--- a/pandas/util/_validators.py
+++ b/pandas/util/_validators.py
@@ -14,6 +14,7 @@
import numpy as np
+from pandas._typing import IntervalInclusiveType
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
@@ -487,7 +488,7 @@ def validate_endpoints(closed: str | None) -> tuple[bool, bool]:
return left_closed, right_closed
-def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]:
+def validate_inclusive(inclusive: IntervalInclusiveType | None) -> tuple[bool, bool]:
"""
Check that the `inclusive` argument is among {"both", "neither", "left", "right"}.
| This makes the usage of IntervalInclusiveType consistent. Also renamed for consistency with new argument
| https://api.github.com/repos/pandas-dev/pandas/pulls/47646 | 2022-07-08T20:03:59Z | 2022-07-09T14:36:07Z | 2022-07-09T14:36:07Z | 2022-07-09T20:47:33Z |
ENH/TST: Add TestBaseArithmeticOps tests for ArrowExtensionArray #47601 | diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 2ab710a5762d3..5db859897b663 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -27,6 +27,7 @@
pa_version_under5p0,
pa_version_under6p0,
pa_version_under7p0,
+ pa_version_under8p0,
)
if TYPE_CHECKING:
@@ -158,4 +159,5 @@ def get_lzma_file() -> type[lzma.LZMAFile]:
"pa_version_under5p0",
"pa_version_under6p0",
"pa_version_under7p0",
+ "pa_version_under8p0",
]
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 92aedbb836b38..07b09d78016fd 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -57,6 +57,76 @@
"ge": pc.greater_equal,
}
+ ARROW_LOGICAL_FUNCS = {
+ "and": NotImplemented if pa_version_under2p0 else pc.and_kleene,
+ "rand": NotImplemented
+ if pa_version_under2p0
+ else lambda x, y: pc.and_kleene(y, x),
+ "or": NotImplemented if pa_version_under2p0 else pc.or_kleene,
+ "ror": NotImplemented
+ if pa_version_under2p0
+ else lambda x, y: pc.or_kleene(y, x),
+ "xor": NotImplemented if pa_version_under2p0 else pc.xor,
+ "rxor": NotImplemented if pa_version_under2p0 else lambda x, y: pc.xor(y, x),
+ }
+
+ def cast_for_truediv(
+ arrow_array: pa.ChunkedArray, pa_object: pa.Array | pa.Scalar
+ ) -> pa.ChunkedArray:
+ # Ensure int / int -> float mirroring Python/Numpy behavior
+ # as pc.divide_checked(int, int) -> int
+ if pa.types.is_integer(arrow_array.type) and pa.types.is_integer(
+ pa_object.type
+ ):
+ return arrow_array.cast(pa.float64())
+ return arrow_array
+
+ def floordiv_compat(
+ left: pa.ChunkedArray | pa.Array | pa.Scalar,
+ right: pa.ChunkedArray | pa.Array | pa.Scalar,
+ ) -> pa.ChunkedArray:
+ # Ensure int // int -> int mirroring Python/Numpy behavior
+ # as pc.floor(pc.divide_checked(int, int)) -> float
+ result = pc.floor(pc.divide_checked(left, right))
+ if pa.types.is_integer(left.type) and pa.types.is_integer(right.type):
+ result = result.cast(left.type)
+ return result
+
+ ARROW_ARITHMETIC_FUNCS = {
+ "add": NotImplemented if pa_version_under2p0 else pc.add_checked,
+ "radd": NotImplemented
+ if pa_version_under2p0
+ else lambda x, y: pc.add_checked(y, x),
+ "sub": NotImplemented if pa_version_under2p0 else pc.subtract_checked,
+ "rsub": NotImplemented
+ if pa_version_under2p0
+ else lambda x, y: pc.subtract_checked(y, x),
+ "mul": NotImplemented if pa_version_under2p0 else pc.multiply_checked,
+ "rmul": NotImplemented
+ if pa_version_under2p0
+ else lambda x, y: pc.multiply_checked(y, x),
+ "truediv": NotImplemented
+ if pa_version_under2p0
+ else lambda x, y: pc.divide_checked(cast_for_truediv(x, y), y),
+ "rtruediv": NotImplemented
+ if pa_version_under2p0
+ else lambda x, y: pc.divide_checked(y, cast_for_truediv(x, y)),
+ "floordiv": NotImplemented
+ if pa_version_under2p0
+ else lambda x, y: floordiv_compat(x, y),
+ "rfloordiv": NotImplemented
+ if pa_version_under2p0
+ else lambda x, y: floordiv_compat(y, x),
+ "mod": NotImplemented,
+ "rmod": NotImplemented,
+ "divmod": NotImplemented,
+ "rdivmod": NotImplemented,
+ "pow": NotImplemented if pa_version_under2p0 else pc.power_checked,
+ "rpow": NotImplemented
+ if pa_version_under2p0
+ else lambda x, y: pc.power_checked(y, x),
+ }
+
if TYPE_CHECKING:
from pandas import Series
@@ -74,6 +144,7 @@ def to_pyarrow_type(
elif isinstance(dtype, pa.DataType):
pa_dtype = dtype
elif dtype:
+ # Accepts python types too
pa_dtype = pa.from_numpy_dtype(dtype)
else:
pa_dtype = None
@@ -263,6 +334,28 @@ def _cmp_method(self, other, op):
result = result.to_numpy()
return BooleanArray._from_sequence(result)
+ def _evaluate_op_method(self, other, op, arrow_funcs):
+ pc_func = arrow_funcs[op.__name__]
+ if pc_func is NotImplemented:
+ raise NotImplementedError(f"{op.__name__} not implemented.")
+ if isinstance(other, ArrowExtensionArray):
+ result = pc_func(self._data, other._data)
+ elif isinstance(other, (np.ndarray, list)):
+ result = pc_func(self._data, pa.array(other, from_pandas=True))
+ elif is_scalar(other):
+ result = pc_func(self._data, pa.scalar(other))
+ else:
+ raise NotImplementedError(
+ f"{op.__name__} not implemented for {type(other)}"
+ )
+ return type(self)(result)
+
+ def _logical_method(self, other, op):
+ return self._evaluate_op_method(other, op, ARROW_LOGICAL_FUNCS)
+
+ def _arith_method(self, other, op):
+ return self._evaluate_op_method(other, op, ARROW_ARITHMETIC_FUNCS)
+
def equals(self, other) -> bool:
if not isinstance(other, ArrowExtensionArray):
return False
diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index 7421645baa463..f884264e9ab75 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -360,7 +360,7 @@ def _str_get_dummies(self, sep="|"):
arr = Series(self).fillna("")
try:
arr = sep + arr + sep
- except TypeError:
+ except (TypeError, NotImplementedError):
arr = sep + arr.astype(str) + sep
tags: set[str] = set()
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index b563f84207b22..a5eb6189db6f1 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -101,7 +101,7 @@ def test_add(dtype, request):
"unsupported operand type(s) for +: 'ArrowStringArray' and "
"'ArrowStringArray'"
)
- mark = pytest.mark.xfail(raises=TypeError, reason=reason)
+ mark = pytest.mark.xfail(raises=NotImplementedError, reason=reason)
request.node.add_marker(mark)
a = pd.Series(["a", "b", "c", None, None], dtype=dtype)
@@ -142,7 +142,7 @@ def test_add_2d(dtype, request):
def test_add_sequence(dtype, request):
if dtype.storage == "pyarrow":
reason = "unsupported operand type(s) for +: 'ArrowStringArray' and 'list'"
- mark = pytest.mark.xfail(raises=TypeError, reason=reason)
+ mark = pytest.mark.xfail(raises=NotImplementedError, reason=reason)
request.node.add_marker(mark)
a = pd.array(["a", "b", None, None], dtype=dtype)
@@ -160,7 +160,7 @@ def test_add_sequence(dtype, request):
def test_mul(dtype, request):
if dtype.storage == "pyarrow":
reason = "unsupported operand type(s) for *: 'ArrowStringArray' and 'int'"
- mark = pytest.mark.xfail(raises=TypeError, reason=reason)
+ mark = pytest.mark.xfail(raises=NotImplementedError, reason=reason)
request.node.add_marker(mark)
a = pd.array(["a", "b", None], dtype=dtype)
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
index a1d232b737da7..569782e55fd72 100644
--- a/pandas/tests/extension/base/ops.py
+++ b/pandas/tests/extension/base/ops.py
@@ -67,10 +67,10 @@ class BaseArithmeticOpsTests(BaseOpsUtil):
* divmod_exc = TypeError
"""
- series_scalar_exc: type[TypeError] | None = TypeError
- frame_scalar_exc: type[TypeError] | None = TypeError
- series_array_exc: type[TypeError] | None = TypeError
- divmod_exc: type[TypeError] | None = TypeError
+ series_scalar_exc: type[Exception] | None = TypeError
+ frame_scalar_exc: type[Exception] | None = TypeError
+ series_array_exc: type[Exception] | None = TypeError
+ divmod_exc: type[Exception] | None = TypeError
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# series & scalar
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 7e0792a6010a7..ef576692c83b6 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -24,6 +24,7 @@
from pandas.compat import (
pa_version_under2p0,
pa_version_under3p0,
+ pa_version_under8p0,
)
import pandas as pd
@@ -179,6 +180,16 @@ def data_missing_for_sorting(data_for_grouping):
)
+@pytest.fixture
+def data_for_twos(data):
+ """Length-100 array in which all the elements are two."""
+ pa_dtype = data.dtype.pyarrow_dtype
+ if pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype):
+ return pd.array([2] * 100, dtype=data.dtype)
+ # tests will be xfailed where 2 is not a valid scalar for pa_dtype
+ return data
+
+
@pytest.fixture
def na_value():
"""The scalar missing value for this type. Default 'None'"""
@@ -1211,6 +1222,20 @@ def test_EA_types(self, engine, data, request):
class TestBaseMethods(base.BaseMethodsTests):
+ @pytest.mark.parametrize("periods", [1, -2])
+ def test_diff(self, data, periods, request):
+ pa_dtype = data.dtype.pyarrow_dtype
+ if pa.types.is_unsigned_integer(pa_dtype) and periods == 1:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=pa.ArrowInvalid,
+ reason=(
+ f"diff with {pa_dtype} and periods={periods} will overflow"
+ ),
+ )
+ )
+ super().test_diff(data, periods)
+
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna, request):
pa_dtype = all_data.dtype.pyarrow_dtype
@@ -1491,6 +1516,325 @@ def test_where_series(self, data, na_value, as_frame, request, using_array_manag
super().test_where_series(data, na_value, as_frame)
+class TestBaseArithmeticOps(base.BaseArithmeticOpsTests):
+
+ divmod_exc = NotImplementedError
+
+ def _patch_combine(self, obj, other, op):
+ # BaseOpsUtil._combine can upcast expected dtype
+ # (because it generates expected on python scalars)
+ # while ArrowExtensionArray maintains original type
+ expected = base.BaseArithmeticOpsTests._combine(self, obj, other, op)
+ was_frame = False
+ if isinstance(expected, pd.DataFrame):
+ was_frame = True
+ expected_data = expected.iloc[:, 0]
+ original_dtype = obj.iloc[:, 0].dtype
+ else:
+ expected_data = expected
+ original_dtype = obj.dtype
+ pa_array = pa.array(expected_data._values).cast(original_dtype.pyarrow_dtype)
+ pd_array = type(expected_data._values)(pa_array)
+ if was_frame:
+ expected = pd.DataFrame(
+ pd_array, index=expected.index, columns=expected.columns
+ )
+ else:
+ expected = pd.Series(pd_array)
+ return expected
+
+ def test_arith_series_with_scalar(
+ self, data, all_arithmetic_operators, request, monkeypatch
+ ):
+ pa_dtype = data.dtype.pyarrow_dtype
+
+ arrow_temporal_supported = not pa_version_under8p0 and (
+ all_arithmetic_operators in ("__add__", "__radd__")
+ and pa.types.is_duration(pa_dtype)
+ or all_arithmetic_operators in ("__sub__", "__rsub__")
+ and pa.types.is_temporal(pa_dtype)
+ )
+ if (
+ all_arithmetic_operators
+ in {
+ "__mod__",
+ "__rmod__",
+ }
+ or pa_version_under2p0
+ ):
+ self.series_scalar_exc = NotImplementedError
+ elif arrow_temporal_supported:
+ self.series_scalar_exc = None
+ elif not (
+ pa.types.is_floating(pa_dtype)
+ or pa.types.is_integer(pa_dtype)
+ or arrow_temporal_supported
+ ):
+ self.series_scalar_exc = pa.ArrowNotImplementedError
+ else:
+ self.series_scalar_exc = None
+ if (
+ all_arithmetic_operators == "__rpow__"
+ and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))
+ and not pa_version_under2p0
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(
+ f"GH 29997: 1**pandas.NA == 1 while 1**pyarrow.NA == NULL "
+ f"for {pa_dtype}"
+ )
+ )
+ )
+ elif arrow_temporal_supported:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=TypeError,
+ reason=(
+ f"{all_arithmetic_operators} not supported between"
+ f"pd.NA and {pa_dtype} Python scalar"
+ ),
+ )
+ )
+ elif (
+ all_arithmetic_operators in {"__rtruediv__", "__rfloordiv__"}
+ and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))
+ and not pa_version_under2p0
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=pa.ArrowInvalid,
+ reason="divide by 0",
+ )
+ )
+ if all_arithmetic_operators == "__floordiv__" and pa.types.is_integer(pa_dtype):
+ # BaseOpsUtil._combine always returns int64, while ArrowExtensionArray does
+ # not upcast
+ monkeypatch.setattr(TestBaseArithmeticOps, "_combine", self._patch_combine)
+ super().test_arith_series_with_scalar(data, all_arithmetic_operators)
+
+ def test_arith_frame_with_scalar(
+ self, data, all_arithmetic_operators, request, monkeypatch
+ ):
+ pa_dtype = data.dtype.pyarrow_dtype
+
+ arrow_temporal_supported = not pa_version_under8p0 and (
+ all_arithmetic_operators in ("__add__", "__radd__")
+ and pa.types.is_duration(pa_dtype)
+ or all_arithmetic_operators in ("__sub__", "__rsub__")
+ and pa.types.is_temporal(pa_dtype)
+ )
+ if (
+ all_arithmetic_operators
+ in {
+ "__mod__",
+ "__rmod__",
+ }
+ or pa_version_under2p0
+ ):
+ self.frame_scalar_exc = NotImplementedError
+ elif arrow_temporal_supported:
+ self.frame_scalar_exc = None
+ elif not (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)):
+ self.frame_scalar_exc = pa.ArrowNotImplementedError
+ else:
+ self.frame_scalar_exc = None
+ if (
+ all_arithmetic_operators == "__rpow__"
+ and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))
+ and not pa_version_under2p0
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(
+ f"GH 29997: 1**pandas.NA == 1 while 1**pyarrow.NA == NULL "
+ f"for {pa_dtype}"
+ )
+ )
+ )
+ elif arrow_temporal_supported:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=TypeError,
+ reason=(
+ f"{all_arithmetic_operators} not supported between"
+ f"pd.NA and {pa_dtype} Python scalar"
+ ),
+ )
+ )
+ elif (
+ all_arithmetic_operators in {"__rtruediv__", "__rfloordiv__"}
+ and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))
+ and not pa_version_under2p0
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=pa.ArrowInvalid,
+ reason="divide by 0",
+ )
+ )
+ if all_arithmetic_operators == "__floordiv__" and pa.types.is_integer(pa_dtype):
+ # BaseOpsUtil._combine always returns int64, while ArrowExtensionArray does
+ # not upcast
+ monkeypatch.setattr(TestBaseArithmeticOps, "_combine", self._patch_combine)
+ super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
+
+ def test_arith_series_with_array(
+ self, data, all_arithmetic_operators, request, monkeypatch
+ ):
+ pa_dtype = data.dtype.pyarrow_dtype
+
+ arrow_temporal_supported = not pa_version_under8p0 and (
+ all_arithmetic_operators in ("__add__", "__radd__")
+ and pa.types.is_duration(pa_dtype)
+ or all_arithmetic_operators in ("__sub__", "__rsub__")
+ and pa.types.is_temporal(pa_dtype)
+ )
+ if (
+ all_arithmetic_operators
+ in {
+ "__mod__",
+ "__rmod__",
+ }
+ or pa_version_under2p0
+ ):
+ self.series_array_exc = NotImplementedError
+ elif arrow_temporal_supported:
+ self.series_array_exc = None
+ elif not (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype)):
+ self.series_array_exc = pa.ArrowNotImplementedError
+ else:
+ self.series_array_exc = None
+ if (
+ all_arithmetic_operators == "__rpow__"
+ and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))
+ and not pa_version_under2p0
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(
+ f"GH 29997: 1**pandas.NA == 1 while 1**pyarrow.NA == NULL "
+ f"for {pa_dtype}"
+ )
+ )
+ )
+ elif (
+ all_arithmetic_operators
+ in (
+ "__sub__",
+ "__rsub__",
+ )
+ and pa.types.is_unsigned_integer(pa_dtype)
+ and not pa_version_under2p0
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=pa.ArrowInvalid,
+ reason=(
+ f"Implemented pyarrow.compute.subtract_checked "
+ f"which raises on overflow for {pa_dtype}"
+ ),
+ )
+ )
+ elif arrow_temporal_supported:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=TypeError,
+ reason=(
+ f"{all_arithmetic_operators} not supported between"
+ f"pd.NA and {pa_dtype} Python scalar"
+ ),
+ )
+ )
+ elif (
+ all_arithmetic_operators in {"__rtruediv__", "__rfloordiv__"}
+ and (pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype))
+ and not pa_version_under2p0
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=pa.ArrowInvalid,
+ reason="divide by 0",
+ )
+ )
+ op_name = all_arithmetic_operators
+ ser = pd.Series(data)
+ # pd.Series([ser.iloc[0]] * len(ser)) may not return ArrowExtensionArray
+ # since ser.iloc[0] is a python scalar
+ other = pd.Series(pd.array([ser.iloc[0]] * len(ser), dtype=data.dtype))
+ if pa.types.is_floating(pa_dtype) or (
+ pa.types.is_integer(pa_dtype) and all_arithmetic_operators != "__truediv__"
+ ):
+ monkeypatch.setattr(TestBaseArithmeticOps, "_combine", self._patch_combine)
+ self.check_opname(ser, op_name, other, exc=self.series_array_exc)
+
+ def test_add_series_with_extension_array(self, data, request):
+ pa_dtype = data.dtype.pyarrow_dtype
+ if (
+ not (
+ pa.types.is_integer(pa_dtype)
+ or pa.types.is_floating(pa_dtype)
+ or (not pa_version_under8p0 and pa.types.is_duration(pa_dtype))
+ )
+ or pa_version_under2p0
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=NotImplementedError,
+ reason=f"add_checked not implemented for {pa_dtype}",
+ )
+ )
+ super().test_add_series_with_extension_array(data)
+
+
+class TestBaseComparisonOps(base.BaseComparisonOpsTests):
+ def assert_series_equal(self, left, right, *args, **kwargs):
+ # Series.combine for "expected" retains bool[pyarrow] dtype
+ # While "result" return "boolean" dtype
+ right = pd.Series(right._values.to_numpy(), dtype="boolean")
+ super().assert_series_equal(left, right, *args, **kwargs)
+
+ def test_compare_array(self, data, comparison_op, na_value, request):
+ pa_dtype = data.dtype.pyarrow_dtype
+ ser = pd.Series(data)
+ # pd.Series([ser.iloc[0]] * len(ser)) may not return ArrowExtensionArray
+ # since ser.iloc[0] is a python scalar
+ other = pd.Series(pd.array([ser.iloc[0]] * len(ser), dtype=data.dtype))
+ if comparison_op.__name__ in ["eq", "ne"]:
+ # comparison should match point-wise comparisons
+ result = comparison_op(ser, other)
+ # Series.combine does not calculate the NA mask correctly
+ # when comparing over an array
+ assert result[8] is na_value
+ assert result[97] is na_value
+ expected = ser.combine(other, comparison_op)
+ expected[8] = na_value
+ expected[97] = na_value
+ self.assert_series_equal(result, expected)
+
+ else:
+ exc = None
+ try:
+ result = comparison_op(ser, other)
+ except Exception as err:
+ exc = err
+
+ if exc is None:
+ # Didn't error, then should match point-wise behavior
+ if pa.types.is_temporal(pa_dtype):
+ # point-wise comparison with pd.NA raises TypeError
+ assert result[8] is na_value
+ assert result[97] is na_value
+ result = result.drop([8, 97]).reset_index(drop=True)
+ ser = ser.drop([8, 97])
+ other = other.drop([8, 97])
+ expected = ser.combine(other, comparison_op)
+ self.assert_series_equal(result, expected)
+ else:
+ with pytest.raises(type(exc)):
+ ser.combine(other, comparison_op)
+
+
def test_arrowdtype_construct_from_string_type_with_unsupported_parameters():
with pytest.raises(NotImplementedError, match="Passing pyarrow type"):
ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]")
diff --git a/pandas/tests/strings/test_api.py b/pandas/tests/strings/test_api.py
index 974ecc152f17b..d76ed65be9e1b 100644
--- a/pandas/tests/strings/test_api.py
+++ b/pandas/tests/strings/test_api.py
@@ -132,7 +132,7 @@ def test_api_for_categorical(any_string_method, any_string_dtype, request):
any_string_dtype == "string" and get_option("string_storage") == "pyarrow"
):
# unsupported operand type(s) for +: 'ArrowStringArray' and 'str'
- mark = pytest.mark.xfail(raises=TypeError, reason="Not Implemented")
+ mark = pytest.mark.xfail(raises=NotImplementedError, reason="Not Implemented")
request.node.add_marker(mark)
s = Series(list("aabb"), dtype=any_string_dtype)
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
Generally the xfails correspond to:
* pyarrow < 2 not having the *_checked ops
* pyarrow < 8 not supporting arithmetic with some temporal types
* pyarrow not having mod/rmod compute functions
* `1**pandas.NA == 1` while `1**pyarrow.NA == NULL` | https://api.github.com/repos/pandas-dev/pandas/pulls/47645 | 2022-07-08T19:59:06Z | 2022-07-16T02:18:52Z | 2022-07-16T02:18:52Z | 2022-07-17T17:30:59Z |
Fixed mypy errors in frequencies.py | diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 7ad68a812c2e2..a4fe2161983b6 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -394,11 +394,11 @@ def _get_annual_rule(self) -> str | None:
return None
pos_check = self.month_position_check()
- # error: Argument 1 to "get" of "dict" has incompatible type
- # "Optional[str]"; expected "str"
- return {"cs": "AS", "bs": "BAS", "ce": "A", "be": "BA"}.get(
- pos_check # type: ignore[arg-type]
- )
+
+ if pos_check is None:
+ return None
+ else:
+ return {"cs": "AS", "bs": "BAS", "ce": "A", "be": "BA"}.get(pos_check)
def _get_quarterly_rule(self) -> str | None:
if len(self.mdiffs) > 1:
@@ -408,21 +408,21 @@ def _get_quarterly_rule(self) -> str | None:
return None
pos_check = self.month_position_check()
- # error: Argument 1 to "get" of "dict" has incompatible type
- # "Optional[str]"; expected "str"
- return {"cs": "QS", "bs": "BQS", "ce": "Q", "be": "BQ"}.get(
- pos_check # type: ignore[arg-type]
- )
+
+ if pos_check is None:
+ return None
+ else:
+ return {"cs": "QS", "bs": "BQS", "ce": "Q", "be": "BQ"}.get(pos_check)
def _get_monthly_rule(self) -> str | None:
if len(self.mdiffs) > 1:
return None
pos_check = self.month_position_check()
- # error: Argument 1 to "get" of "dict" has incompatible type
- # "Optional[str]"; expected "str"
- return {"cs": "MS", "bs": "BMS", "ce": "M", "be": "BM"}.get(
- pos_check # type: ignore[arg-type]
- )
+
+ if pos_check is None:
+ return None
+ else:
+ return {"cs": "MS", "bs": "BMS", "ce": "M", "be": "BM"}.get(pos_check)
def _is_business_daily(self) -> bool:
# quick check: cannot be business daily
| - [ ] xref #37715 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47644 | 2022-07-08T18:57:52Z | 2022-07-08T22:13:05Z | 2022-07-08T22:13:05Z | 2022-07-08T22:14:41Z |
ENH: add result_names argument to DataFrame.compare #44354 | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index b081f743f9b0b..22a5f2a08362f 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -278,6 +278,7 @@ Other enhancements
- :meth:`DatetimeIndex.astype` now supports casting timezone-naive indexes to ``datetime64[s]``, ``datetime64[ms]``, and ``datetime64[us]``, and timezone-aware indexes to the corresponding ``datetime64[unit, tzname]`` dtypes (:issue:`47579`)
- :class:`Series` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) will now successfully operate when the dtype is numeric and ``numeric_only=True`` is provided; previously this would raise a ``NotImplementedError`` (:issue:`47500`)
- :meth:`RangeIndex.union` now can return a :class:`RangeIndex` instead of a :class:`Int64Index` if the resulting values are equally spaced (:issue:`47557`, :issue:`43885`)
+- :meth:`DataFrame.compare` now accepts an argument ``result_names`` to allow the user to specify the result's names of both left and right DataFrame which are being compared. This is by default ``'self'`` and ``'other'`` (:issue:`44354`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.notable_bug_fixes:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e70312c562907..9c90dffbf4df6 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7776,6 +7776,14 @@ def __rdivmod__(self, other) -> tuple[DataFrame, DataFrame]:
0 a c NaN NaN
2 NaN NaN 3.0 4.0
+Assign result_names
+
+>>> df.compare(df2, result_names=("left", "right"))
+ col1 col3
+ left right left right
+0 a c NaN NaN
+2 NaN NaN 3.0 4.0
+
Stack the differences on rows
>>> df.compare(df2, align_axis=0)
@@ -7823,12 +7831,14 @@ def compare(
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
+ result_names: Suffixes = ("self", "other"),
) -> DataFrame:
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
+ result_names=result_names,
)
def combine(
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6e00f33f486d9..6e1df8fa3e270 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -59,6 +59,7 @@
Renamer,
SortKind,
StorageOptions,
+ Suffixes,
T,
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
@@ -8970,6 +8971,7 @@ def compare(
align_axis: Axis = 1,
keep_shape: bool_t = False,
keep_equal: bool_t = False,
+ result_names: Suffixes = ("self", "other"),
):
from pandas.core.reshape.concat import concat
@@ -8980,7 +8982,6 @@ def compare(
)
mask = ~((self == other) | (self.isna() & other.isna()))
- keys = ["self", "other"]
if not keep_equal:
self = self.where(mask)
@@ -8995,13 +8996,18 @@ def compare(
else:
self = self[mask]
other = other[mask]
+ if not isinstance(result_names, tuple):
+ raise TypeError(
+ f"Passing 'result_names' as a {type(result_names)} is not "
+ "supported. Provide 'result_names' as a tuple instead."
+ )
if align_axis in (1, "columns"): # This is needed for Series
axis = 1
else:
axis = self._get_axis_number(align_axis)
- diff = concat([self, other], axis=axis, keys=keys)
+ diff = concat([self, other], axis=axis, keys=result_names)
if axis >= self.ndim:
# No need to reorganize data if stacking on new axis
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 60898ee75f7c2..67cdb5d8d72ab 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -165,6 +165,7 @@
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
+ Suffixes,
)
from pandas.core.frame import DataFrame
@@ -3237,12 +3238,14 @@ def compare(
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
+ result_names: Suffixes = ("self", "other"),
) -> DataFrame | Series:
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
+ result_names=result_names,
)
def combine(self, other, func, fill_value=None) -> Series:
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 4b7a487e9472d..b7b75d6464da3 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -75,6 +75,11 @@
keep_equal : bool, default False
If true, the result keeps values that are equal.
Otherwise, equal values are shown as NaNs.
+
+result_names : tuple, default ('self', 'other')
+ Set the dataframes names in the comparison.
+
+ .. versionadded:: 1.5.0
"""
_shared_docs[
diff --git a/pandas/tests/frame/methods/test_compare.py b/pandas/tests/frame/methods/test_compare.py
index 468811eba0d39..609242db453ba 100644
--- a/pandas/tests/frame/methods/test_compare.py
+++ b/pandas/tests/frame/methods/test_compare.py
@@ -180,3 +180,59 @@ def test_compare_unaligned_objects():
df1 = pd.DataFrame(np.ones((3, 3)))
df2 = pd.DataFrame(np.zeros((2, 1)))
df1.compare(df2)
+
+
+def test_compare_result_names():
+ # GH 44354
+ df1 = pd.DataFrame(
+ {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
+ )
+ df2 = pd.DataFrame(
+ {
+ "col1": ["c", "b", "c"],
+ "col2": [1.0, 2.0, np.nan],
+ "col3": [1.0, 2.0, np.nan],
+ },
+ )
+ result = df1.compare(df2, result_names=("left", "right"))
+ expected = pd.DataFrame(
+ {
+ ("col1", "left"): {0: "a", 2: np.nan},
+ ("col1", "right"): {0: "c", 2: np.nan},
+ ("col3", "left"): {0: np.nan, 2: 3.0},
+ ("col3", "right"): {0: np.nan, 2: np.nan},
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "result_names",
+ [
+ [1, 2],
+ "HK",
+ {"2": 2, "3": 3},
+ 3,
+ 3.0,
+ ],
+)
+def test_invalid_input_result_names(result_names):
+ # GH 44354
+ df1 = pd.DataFrame(
+ {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
+ )
+ df2 = pd.DataFrame(
+ {
+ "col1": ["c", "b", "c"],
+ "col2": [1.0, 2.0, np.nan],
+ "col3": [1.0, 2.0, np.nan],
+ },
+ )
+ with pytest.raises(
+ TypeError,
+ match=(
+ f"Passing 'result_names' as a {type(result_names)} is not "
+ "supported. Provide 'result_names' as a tuple instead."
+ ),
+ ):
+ df1.compare(df2, result_names=result_names)
| This is following up on the pull request #44365
- [X] closes #44354 (Replace xxxx with the Github issue number)
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47643 | 2022-07-08T17:48:16Z | 2022-07-16T14:29:38Z | 2022-07-16T14:29:38Z | 2022-07-16T14:29:50Z |
POC/ENH: Timedelta min/max/resolution support non-nano | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 5fd3e33808800..fef2a317a4f26 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -950,14 +950,18 @@ cdef _timedelta_from_value_and_reso(int64_t value, NPY_DATETIMEUNIT reso):
cdef:
_Timedelta td_base
+ # For millisecond and second resos, we cannot actually pass int(value) because
+ # many cases would fall outside of the pytimedelta implementation bounds.
+ # We pass 0 instead, and override seconds, microseconds, days.
+ # In principle we could pass 0 for ns and us too.
if reso == NPY_FR_ns:
td_base = _Timedelta.__new__(Timedelta, microseconds=int(value) // 1000)
elif reso == NPY_DATETIMEUNIT.NPY_FR_us:
td_base = _Timedelta.__new__(Timedelta, microseconds=int(value))
elif reso == NPY_DATETIMEUNIT.NPY_FR_ms:
- td_base = _Timedelta.__new__(Timedelta, milliseconds=int(value))
+ td_base = _Timedelta.__new__(Timedelta, milliseconds=0)
elif reso == NPY_DATETIMEUNIT.NPY_FR_s:
- td_base = _Timedelta.__new__(Timedelta, seconds=int(value))
+ td_base = _Timedelta.__new__(Timedelta, seconds=0)
# Other resolutions are disabled but could potentially be implemented here:
# elif reso == NPY_DATETIMEUNIT.NPY_FR_m:
# td_base = _Timedelta.__new__(Timedelta, minutes=int(value))
@@ -977,6 +981,34 @@ cdef _timedelta_from_value_and_reso(int64_t value, NPY_DATETIMEUNIT reso):
return td_base
+class MinMaxReso:
+ """
+ We need to define min/max/resolution on both the Timedelta _instance_
+ and Timedelta class. On an instance, these depend on the object's _reso.
+ On the class, we default to the values we would get with nanosecond _reso.
+ """
+ def __init__(self, name):
+ self._name = name
+
+ def __get__(self, obj, type=None):
+ if self._name == "min":
+ val = np.iinfo(np.int64).min + 1
+ elif self._name == "max":
+ val = np.iinfo(np.int64).max
+ else:
+ assert self._name == "resolution"
+ val = 1
+
+ if obj is None:
+ # i.e. this is on the class, default to nanos
+ return Timedelta(val)
+ else:
+ return Timedelta._from_value_and_reso(val, obj._reso)
+
+ def __set__(self, obj, value):
+ raise AttributeError(f"{self._name} is not settable.")
+
+
# Similar to Timestamp/datetime, this is a construction requirement for
# timedeltas that we need to do object instantiation in python. This will
# serve as a C extension type that shadows the Python class, where we do any
@@ -990,6 +1022,36 @@ cdef class _Timedelta(timedelta):
# higher than np.ndarray and np.matrix
__array_priority__ = 100
+ min = MinMaxReso("min")
+ max = MinMaxReso("max")
+ resolution = MinMaxReso("resolution")
+
+ @property
+ def days(self) -> int: # TODO(cython3): make cdef property
+ # NB: using the python C-API PyDateTime_DELTA_GET_DAYS will fail
+ # (or be incorrect)
+ self._ensure_components()
+ return self._d
+
+ @property
+ def seconds(self) -> int: # TODO(cython3): make cdef property
+ # NB: using the python C-API PyDateTime_DELTA_GET_SECONDS will fail
+ # (or be incorrect)
+ self._ensure_components()
+ return self._h * 3600 + self._m * 60 + self._s
+
+ @property
+ def microseconds(self) -> int: # TODO(cython3): make cdef property
+ # NB: using the python C-API PyDateTime_DELTA_GET_MICROSECONDS will fail
+ # (or be incorrect)
+ self._ensure_components()
+ return self._ms * 1000 + self._us
+
+ def total_seconds(self) -> float:
+ """Total seconds in the duration."""
+ # We need to override bc we overrided days/seconds/microseconds
+ # TODO: add nanos/1e9?
+ return self.days * 24 * 3600 + self.seconds + self.microseconds / 1_000_000
@property
def freq(self) -> None:
@@ -1979,9 +2041,3 @@ cdef _broadcast_floordiv_td64(
res = res.astype('f8')
res[mask] = np.nan
return res
-
-
-# resolution in ns
-Timedelta.min = Timedelta(np.iinfo(np.int64).min + 1)
-Timedelta.max = Timedelta(np.iinfo(np.int64).max)
-Timedelta.resolution = Timedelta(nanoseconds=1)
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index f9cc1c6878068..b6559385e1597 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -101,18 +101,23 @@ def test_as_unit_non_nano(self):
class TestNonNano:
- @pytest.fixture(params=[7, 8, 9])
- def unit(self, request):
- # 7, 8, 9 correspond to second, millisecond, and microsecond, respectively
+ @pytest.fixture(params=["s", "ms", "us"])
+ def unit_str(self, request):
return request.param
+ @pytest.fixture
+ def unit(self, unit_str):
+ # 7, 8, 9 correspond to second, millisecond, and microsecond, respectively
+ attr = f"NPY_FR_{unit_str}"
+ return getattr(NpyDatetimeUnit, attr).value
+
@pytest.fixture
def val(self, unit):
# microsecond that would be just out of bounds for nano
us = 9223372800000000
- if unit == 9:
+ if unit == NpyDatetimeUnit.NPY_FR_us.value:
value = us
- elif unit == 8:
+ elif unit == NpyDatetimeUnit.NPY_FR_ms.value:
value = us // 1000
else:
value = us // 1_000_000
@@ -166,11 +171,11 @@ def test_to_timedelta64(self, td, unit):
assert isinstance(res, np.timedelta64)
assert res.view("i8") == td.value
- if unit == 7:
+ if unit == NpyDatetimeUnit.NPY_FR_s.value:
assert res.dtype == "m8[s]"
- elif unit == 8:
+ elif unit == NpyDatetimeUnit.NPY_FR_ms.value:
assert res.dtype == "m8[ms]"
- elif unit == 9:
+ elif unit == NpyDatetimeUnit.NPY_FR_us.value:
assert res.dtype == "m8[us]"
def test_truediv_timedeltalike(self, td):
@@ -266,6 +271,35 @@ def test_addsub_mismatched_reso(self, td):
with pytest.raises(ValueError, match=msg):
other2 - td
+ def test_min(self, td):
+ assert td.min <= td
+ assert td.min._reso == td._reso
+ assert td.min.value == NaT.value + 1
+
+ def test_max(self, td):
+ assert td.max >= td
+ assert td.max._reso == td._reso
+ assert td.max.value == np.iinfo(np.int64).max
+
+ def test_resolution(self, td):
+ expected = Timedelta._from_value_and_reso(1, td._reso)
+ result = td.resolution
+ assert result == expected
+ assert result._reso == expected._reso
+
+
+def test_timedelta_class_min_max_resolution():
+ # when accessed on the class (as opposed to an instance), we default
+ # to nanoseconds
+ assert Timedelta.min == Timedelta(NaT.value + 1)
+ assert Timedelta.min._reso == NpyDatetimeUnit.NPY_FR_ns.value
+
+ assert Timedelta.max == Timedelta(np.iinfo(np.int64).max)
+ assert Timedelta.max._reso == NpyDatetimeUnit.NPY_FR_ns.value
+
+ assert Timedelta.resolution == Timedelta(1)
+ assert Timedelta.resolution._reso == NpyDatetimeUnit.NPY_FR_ns.value
+
class TestTimedeltaUnaryOps:
def test_invert(self):
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/47641 | 2022-07-08T15:56:23Z | 2022-07-08T17:49:27Z | 2022-07-08T17:49:27Z | 2022-07-11T17:06:46Z |
TST: adding test for multiindex nunique raising not implemnted error | diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py
index 193f3eb1a590f..08e15545cb998 100644
--- a/pandas/tests/indexing/multiindex/test_multiindex.py
+++ b/pandas/tests/indexing/multiindex/test_multiindex.py
@@ -214,6 +214,11 @@ def test_subtracting_two_series_with_unordered_index_and_all_nan_index(
tm.assert_series_equal(result[0], a_series_expected)
tm.assert_series_equal(result[1], b_series_expected)
+ def test_nunique_smoke(self):
+ # GH 34019
+ n = DataFrame([[1, 2], [1, 2]]).set_index([0, 1]).index.nunique()
+ assert n == 1
+
def test_multiindex_repeated_keys(self):
# GH19414
tm.assert_series_equal(
| - [x] closes #34019
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47638 | 2022-07-08T10:17:59Z | 2022-07-08T17:04:24Z | 2022-07-08T17:04:24Z | 2022-10-20T08:44:45Z |
DEPR: Remove deprecation from private class IntervalTree | diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in
index 1a6106173e58e..8bf1a53d56dfb 100644
--- a/pandas/_libs/intervaltree.pxi.in
+++ b/pandas/_libs/intervaltree.pxi.in
@@ -8,8 +8,6 @@ import warnings
from pandas._libs import lib
from pandas._libs.algos import is_monotonic
-from pandas._libs.interval import _warning_interval
-
ctypedef fused int_scalar_t:
int64_t
float64_t
@@ -42,18 +40,13 @@ cdef class IntervalTree(IntervalMixin):
object _is_overlapping, _left_sorter, _right_sorter
Py_ssize_t _na_count
- def __init__(self, left, right, inclusive: str | None = None, closed: None | lib.NoDefault = lib.no_default, leaf_size=100):
+ def __init__(self, left, right, inclusive: str | None = None, leaf_size=100):
"""
Parameters
----------
left, right : np.ndarray[ndim=1]
Left and right bounds for each interval. Assumed to contain no
NaNs.
- closed : {'left', 'right', 'both', 'neither'}, optional
- Whether the intervals are closed on the left-side, right-side, both
- or neither. Defaults to 'right'.
-
- .. deprecated:: 1.5.0
inclusive : {"both", "neither", "left", "right"}, optional
Whether the intervals are closed on the left-side, right-side, both
@@ -66,8 +59,6 @@ cdef class IntervalTree(IntervalMixin):
to brute-force search. Tune this parameter to optimize query
performance.
"""
- inclusive, closed = _warning_interval(inclusive, closed)
-
if inclusive is None:
inclusive = "right"
@@ -119,7 +110,7 @@ cdef class IntervalTree(IntervalMixin):
if self._is_overlapping is not None:
return self._is_overlapping
- # <= when both sides closed since endpoints can overlap
+ # <= when inclusive on both sides since endpoints can overlap
op = le if self.inclusive == 'both' else lt
# overlap if start of current interval < end of previous interval
@@ -263,7 +254,7 @@ cdef class IntervalNode:
# we need specialized nodes and leaves to optimize for different dtype and
-# closed values
+# inclusive values
{{py:
diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py
index 06c499b9e33f4..6c30d16e61582 100644
--- a/pandas/tests/indexes/interval/test_interval_tree.py
+++ b/pandas/tests/indexes/interval/test_interval_tree.py
@@ -190,24 +190,6 @@ def test_construction_overflow(self):
expected = (50 + np.iinfo(np.int64).max) / 2
assert result == expected
- def test_interval_tree_error_and_warning(self):
- # GH 40245
-
- msg = (
- "Deprecated argument `closed` cannot "
- "be passed if argument `inclusive` is not None"
- )
- with pytest.raises(ValueError, match=msg):
- left, right = np.arange(10), [np.iinfo(np.int64).max] * 10
- IntervalTree(left, right, closed="both", inclusive="both")
-
- msg = "Argument `closed` is deprecated in favor of `inclusive`"
- with tm.assert_produces_warning(
- FutureWarning, match=msg, check_stacklevel=False
- ):
- left, right = np.arange(10), [np.iinfo(np.int64).max] * 10
- IntervalTree(left, right, closed="both")
-
@pytest.mark.xfail(not IS64, reason="GH 23440")
@pytest.mark.parametrize(
"left, right, expected",
| this is private, so no need to deprecate | https://api.github.com/repos/pandas-dev/pandas/pulls/47637 | 2022-07-08T10:17:25Z | 2022-07-08T17:06:05Z | 2022-07-08T17:06:05Z | 2022-07-08T19:53:25Z |
DEPR: Deprecate set_closed and add set_incluive | diff --git a/doc/redirects.csv b/doc/redirects.csv
index 173e670e30f0e..90ddf6c4dc582 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -761,6 +761,7 @@ generated/pandas.IntervalIndex.mid,../reference/api/pandas.IntervalIndex.mid
generated/pandas.IntervalIndex.overlaps,../reference/api/pandas.IntervalIndex.overlaps
generated/pandas.IntervalIndex.right,../reference/api/pandas.IntervalIndex.right
generated/pandas.IntervalIndex.set_closed,../reference/api/pandas.IntervalIndex.set_closed
+generated/pandas.IntervalIndex.set_inclusive,../reference/api/pandas.IntervalIndex.set_inclusive
generated/pandas.IntervalIndex.to_tuples,../reference/api/pandas.IntervalIndex.to_tuples
generated/pandas.IntervalIndex.values,../reference/api/pandas.IntervalIndex.values
generated/pandas.Interval.left,../reference/api/pandas.Interval.left
diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst
index 0d8444841fcae..cd0ce581519a8 100644
--- a/doc/source/reference/arrays.rst
+++ b/doc/source/reference/arrays.rst
@@ -352,6 +352,7 @@ A collection of intervals may be stored in an :class:`arrays.IntervalArray`.
arrays.IntervalArray.contains
arrays.IntervalArray.overlaps
arrays.IntervalArray.set_closed
+ arrays.IntervalArray.set_inclusive
arrays.IntervalArray.to_tuples
diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst
index 89a9a0a92ef08..589a339a1ca60 100644
--- a/doc/source/reference/indexing.rst
+++ b/doc/source/reference/indexing.rst
@@ -251,6 +251,7 @@ IntervalIndex components
IntervalIndex.get_loc
IntervalIndex.get_indexer
IntervalIndex.set_closed
+ IntervalIndex.set_inclusive
IntervalIndex.contains
IntervalIndex.overlaps
IntervalIndex.to_tuples
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 0b450fab53137..c9c523e7a2415 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -762,7 +762,7 @@ Other Deprecations
- Deprecated the ``closed`` argument in :class:`IntervalIndex` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`)
- Deprecated the ``closed`` argument in :class:`IntervalDtype` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`)
- Deprecated the ``closed`` argument in :class:`.IntervalArray` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`)
-- Deprecated the ``closed`` argument in :class:`IntervalTree` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`)
+- Deprecated :meth:`.IntervalArray.set_closed` and :meth:`.IntervalIndex.set_closed` in favor of ``set_inclusive``; In a future version ``set_closed`` will get removed (:issue:`40245`)
- Deprecated the ``closed`` argument in :class:`ArrowInterval` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`)
- Deprecated allowing ``unit="M"`` or ``unit="Y"`` in :class:`Timestamp` constructor with a non-round float value (:issue:`47267`)
- Deprecated the ``display.column_space`` global configuration option (:issue:`7576`)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 56aae3039f7d6..9685c4db51b67 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -156,6 +156,7 @@
contains
overlaps
set_closed
+set_inclusive
to_tuples
%(extra_methods)s\
@@ -1385,9 +1386,11 @@ def closed(self) -> IntervalClosedType:
Return an %(klass)s identical to the current one, but closed on the
specified side.
+ .. deprecated:: 1.5.0
+
Parameters
----------
- inclusive : {'left', 'right', 'both', 'neither'}
+ closed : {'left', 'right', 'both', 'neither'}
Whether the intervals are closed on the left-side, right-side, both
or neither.
@@ -1420,8 +1423,58 @@ def closed(self) -> IntervalClosedType:
),
}
)
- @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
- def set_closed(
+ def set_closed(self: IntervalArrayT, closed: IntervalClosedType) -> IntervalArrayT:
+ warnings.warn(
+ "set_closed is deprecated and will be removed in a future version. "
+ "Use set_inclusive instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self.set_inclusive(closed)
+
+ _interval_shared_docs["set_inclusive"] = textwrap.dedent(
+ """
+ Return an %(klass)s identical to the current one, but closed on the
+ specified side.
+
+ .. versionadded:: 1.5
+
+ Parameters
+ ----------
+ inclusive : {'left', 'right', 'both', 'neither'}
+ Whether the intervals are closed on the left-side, right-side, both
+ or neither.
+
+ Returns
+ -------
+ new_index : %(klass)s
+
+ %(examples)s\
+ """
+ )
+
+ @Appender(
+ _interval_shared_docs["set_inclusive"]
+ % {
+ "klass": "IntervalArray",
+ "examples": textwrap.dedent(
+ """\
+ Examples
+ --------
+ >>> index = pd.arrays.IntervalArray.from_breaks(range(4), "right")
+ >>> index
+ <IntervalArray>
+ [(0, 1], (1, 2], (2, 3]]
+ Length: 3, dtype: interval[int64, right]
+ >>> index.set_inclusive('both')
+ <IntervalArray>
+ [[0, 1], [1, 2], [2, 3]]
+ Length: 3, dtype: interval[int64, both]
+ """
+ ),
+ }
+ )
+ def set_inclusive(
self: IntervalArrayT, inclusive: IntervalClosedType
) -> IntervalArrayT:
if inclusive not in VALID_CLOSED:
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 5f48be921f7c6..b1f839daa694d 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -179,7 +179,7 @@ def _new_IntervalIndex(cls, d):
),
}
)
-@inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)
+@inherit_names(["set_closed", "set_inclusive", "to_tuples"], IntervalArray, wrap=True)
@inherit_names(
[
"__array__",
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index 7ca86408a7f59..073e6b6119b14 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -60,12 +60,12 @@ def test_is_empty(self, constructor, left, right, closed):
class TestMethods:
- @pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"])
- def test_set_closed(self, closed, new_closed):
+ @pytest.mark.parametrize("new_inclusive", ["left", "right", "both", "neither"])
+ def test_set_inclusive(self, closed, new_inclusive):
# GH 21670
array = IntervalArray.from_breaks(range(10), inclusive=closed)
- result = array.set_closed(new_closed)
- expected = IntervalArray.from_breaks(range(10), inclusive=new_closed)
+ result = array.set_inclusive(new_inclusive)
+ expected = IntervalArray.from_breaks(range(10), inclusive=new_inclusive)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
@@ -134,10 +134,10 @@ def test_set_na(self, left_right_dtypes):
tm.assert_extension_array_equal(result, expected)
- def test_setitem_mismatched_closed(self):
+ def test_setitem_mismatched_inclusive(self):
arr = IntervalArray.from_breaks(range(4), "right")
orig = arr.copy()
- other = arr.set_closed("both")
+ other = arr.set_inclusive("both")
msg = "'value.inclusive' is 'both', expected 'right'"
with pytest.raises(ValueError, match=msg):
@@ -488,17 +488,8 @@ def test_from_arrays_deprecation():
IntervalArray.from_arrays([0, 1, 2], [1, 2, 3], closed="right")
-def test_set_closed_deprecated_closed():
+def test_set_closed_deprecated():
# GH#40245
array = IntervalArray.from_breaks(range(10))
with tm.assert_produces_warning(FutureWarning):
array.set_closed(closed="both")
-
-
-def test_set_closed_both_provided_deprecation():
- # GH#40245
- array = IntervalArray.from_breaks(range(10))
- msg = "Can only specify 'closed' or 'inclusive', not both."
- with pytest.raises(TypeError, match=msg):
- with tm.assert_produces_warning(FutureWarning):
- array.set_closed(inclusive="both", closed="both")
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 90497780311de..5bf29093152d8 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -871,21 +871,21 @@ def test_nbytes(self):
expected = 64 # 4 * 8 * 2
assert result == expected
- @pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"])
- def test_set_closed(self, name, closed, new_closed):
+ @pytest.mark.parametrize("new_inclusive", ["left", "right", "both", "neither"])
+ def test_set_inclusive(self, name, closed, new_inclusive):
# GH 21670
index = interval_range(0, 5, inclusive=closed, name=name)
- result = index.set_closed(new_closed)
- expected = interval_range(0, 5, inclusive=new_closed, name=name)
+ result = index.set_inclusive(new_inclusive)
+ expected = interval_range(0, 5, inclusive=new_inclusive, name=name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("bad_inclusive", ["foo", 10, "LEFT", True, False])
- def test_set_closed_errors(self, bad_inclusive):
+ def test_set_inclusive_errors(self, bad_inclusive):
# GH 21670
index = interval_range(0, 5)
msg = f"invalid option for 'inclusive': {bad_inclusive}"
with pytest.raises(ValueError, match=msg):
- index.set_closed(bad_inclusive)
+ index.set_inclusive(bad_inclusive)
def test_is_all_dates(self):
# GH 23576
| Deprecatets set closed for ser_inclusive to keep consistency
| https://api.github.com/repos/pandas-dev/pandas/pulls/47636 | 2022-07-08T10:16:21Z | 2022-07-08T22:16:03Z | 2022-07-08T22:16:03Z | 2022-07-09T02:11:43Z |
BUG: Series.str.zfill() behaves differently than str.zfill() from standard library | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index c85a087835b80..cbb1d306f9927 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -530,6 +530,7 @@ Conversion
Strings
^^^^^^^
- Bug in :meth:`str.startswith` and :meth:`str.endswith` when using other series as parameter _pat_. Now raises ``TypeError`` (:issue:`3485`)
+- Bug in :meth:`Series.str.zfill` when strings contain leading signs, padding '0' before the sign character rather than after as ``str.zfill`` from standard library (:issue:`20868`)
-
Interval
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index abd380299ba02..d2458c75972d5 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -1683,19 +1683,23 @@ def zfill(self, width):
Note that ``10`` and ``NaN`` are not strings, therefore they are
converted to ``NaN``. The minus sign in ``'-1'`` is treated as a
- regular character and the zero is added to the left of it
+ special character and the zero is added to the right of it
(:meth:`str.zfill` would have moved it to the left). ``1000``
remains unchanged as it is longer than `width`.
>>> s.str.zfill(3)
- 0 0-1
+ 0 -01
1 001
2 1000
3 NaN
4 NaN
dtype: object
"""
- result = self.pad(width, side="left", fillchar="0")
+ if not is_integer(width):
+ msg = f"width must be of integer type, not {type(width).__name__}"
+ raise TypeError(msg)
+ f = lambda x: x.zfill(width)
+ result = self._data.array._str_map(f)
return self._wrap_result(result)
def slice(self, start=None, stop=None, step=None):
diff --git a/pandas/tests/strings/test_strings.py b/pandas/tests/strings/test_strings.py
index db99ba8368a8a..b55dab8170382 100644
--- a/pandas/tests/strings/test_strings.py
+++ b/pandas/tests/strings/test_strings.py
@@ -799,3 +799,28 @@ def test_str_accessor_in_apply_func():
expected = Series(["A/D", "B/E", "C/F"])
result = df.apply(lambda f: "/".join(f.str.upper()), axis=1)
tm.assert_series_equal(result, expected)
+
+
+def test_zfill():
+ # https://github.com/pandas-dev/pandas/issues/20868
+ value = Series(["-1", "1", "1000", 10, np.nan])
+ expected = Series(["-01", "001", "1000", np.nan, np.nan])
+ tm.assert_series_equal(value.str.zfill(3), expected)
+
+ value = Series(["-2", "+5"])
+ expected = Series(["-0002", "+0005"])
+ tm.assert_series_equal(value.str.zfill(5), expected)
+
+
+def test_zfill_with_non_integer_argument():
+ value = Series(["-2", "+5"])
+ wid = "a"
+ msg = f"width must be of integer type, not {type(wid).__name__}"
+ with pytest.raises(TypeError, match=msg):
+ value.str.zfill(wid)
+
+
+def test_zfill_with_leading_sign():
+ value = Series(["-cat", "-1", "+dog"])
+ expected = Series(["-0cat", "-0001", "+0dog"])
+ tm.assert_series_equal(value.str.zfill(5), expected)
| - [ ] closes #20868 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47633 | 2022-07-08T07:35:44Z | 2022-07-10T00:15:25Z | 2022-07-10T00:15:25Z | 2022-07-10T01:24:04Z |
DOC: avoid overriding Python built-in functions | diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index ba3fb17cc8764..5d8ef7ce02097 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -839,10 +839,10 @@ Alternatively, the built-in methods could be used to produce the same outputs.
.. ipython:: python
- max = ts.groupby(lambda x: x.year).transform("max")
- min = ts.groupby(lambda x: x.year).transform("min")
+ max_ts = ts.groupby(lambda x: x.year).transform("max")
+ min_ts = ts.groupby(lambda x: x.year).transform("min")
- max - min
+ max_ts - min_ts
Another common data transform is to replace missing data with the group mean.
| - [x] closes #47606
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47631 | 2022-07-08T02:49:59Z | 2022-07-08T16:23:11Z | 2022-07-08T16:23:11Z | 2022-07-12T01:33:54Z |
BUG: using read_xml with iterparse and names will ignore duplicate values | diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index 95fac0c739895..9b6eb31dafc07 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -342,7 +342,7 @@ def _iterparse_nodes(self, iterparse: Callable) -> list[dict[str, str | None]]:
for col, nm in zip(self.iterparse[row_node], self.names):
if curr_elem == col:
elem_val = elem.text.strip() if elem.text else None
- if elem_val not in row.values() and nm not in row:
+ if row.get(nm) != elem_val and nm not in row:
row[nm] = elem_val
if col in elem.attrib:
if elem.attrib[col] not in row.values() and nm not in row:
diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
index b89adf85d8e26..410c5f6703dcd 100644
--- a/pandas/tests/io/xml/test_xml.py
+++ b/pandas/tests/io/xml/test_xml.py
@@ -824,6 +824,46 @@ def test_repeat_names(parser):
tm.assert_frame_equal(df_iter, df_expected)
+def test_repeat_values_new_names(parser):
+ xml = """\
+<shapes>
+ <shape>
+ <name>rectangle</name>
+ <family>rectangle</family>
+ </shape>
+ <shape>
+ <name>square</name>
+ <family>rectangle</family>
+ </shape>
+ <shape>
+ <name>ellipse</name>
+ <family>ellipse</family>
+ </shape>
+ <shape>
+ <name>circle</name>
+ <family>ellipse</family>
+ </shape>
+</shapes>"""
+ df_xpath = read_xml(xml, xpath=".//shape", parser=parser, names=["name", "group"])
+
+ df_iter = read_xml_iterparse(
+ xml,
+ parser=parser,
+ iterparse={"shape": ["name", "family"]},
+ names=["name", "group"],
+ )
+
+ df_expected = DataFrame(
+ {
+ "name": ["rectangle", "square", "ellipse", "circle"],
+ "group": ["rectangle", "rectangle", "ellipse", "ellipse"],
+ }
+ )
+
+ tm.assert_frame_equal(df_xpath, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
+
+
def test_names_option_wrong_length(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
| - [X] closes #47483
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. _(not necessary)_
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. _(not necessary)_
| https://api.github.com/repos/pandas-dev/pandas/pulls/47630 | 2022-07-08T01:28:53Z | 2022-07-08T16:19:58Z | 2022-07-08T16:19:57Z | 2022-07-09T00:28:08Z |
Test for nested series equality #22400 | diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py
index 2209bed67325c..963af81bcb6a5 100644
--- a/pandas/tests/util/test_assert_series_equal.py
+++ b/pandas/tests/util/test_assert_series_equal.py
@@ -1,3 +1,4 @@
+import numpy as np
import pytest
from pandas.core.dtypes.common import is_extension_array_dtype
@@ -382,3 +383,29 @@ def test_assert_series_equal_identical_na(nulls_fixture):
# while we're here do Index too
idx = pd.Index(ser)
tm.assert_index_equal(idx, idx.copy(deep=True))
+
+
+def test_identical_nested_series_is_equal():
+ # GH#22400
+ x = Series(
+ [
+ 0,
+ 0.0131142231938,
+ 1.77774652865e-05,
+ np.array([0.4722720840328748, 0.4216929783681722]),
+ ]
+ )
+ y = Series(
+ [
+ 0,
+ 0.0131142231938,
+ 1.77774652865e-05,
+ np.array([0.4722720840328748, 0.4216929783681722]),
+ ]
+ )
+ # These two arrays should be equal, nesting could cause issue
+
+ tm.assert_series_equal(x, x)
+ tm.assert_series_equal(x, x, check_exact=True)
+ tm.assert_series_equal(x, y)
+ tm.assert_series_equal(x, y, check_exact=True)
| - [ ] closes #22400 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47627 | 2022-07-07T20:25:49Z | 2022-07-07T22:27:46Z | 2022-07-07T22:27:45Z | 2022-07-07T22:27:55Z |
"DOC #45443 edited the documentation of where/mask functions" | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ba3474a2513fb..8089fc58db07d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9614,7 +9614,9 @@ def where(
The {name} method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``{cond}`` the
element is used; otherwise the corresponding element from the DataFrame
- ``other`` is used.
+ ``other`` is used. If the axis of ``other`` does not align with axis of
+ ``cond`` {klass}, the misaligned index positions will be filled with
+ {cond_rev}.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
@@ -9641,6 +9643,23 @@ def where(
4 NaN
dtype: float64
+ >>> s = pd.Series(range(5))
+ >>> t = pd.Series([True, False])
+ >>> s.where(t, 99)
+ 0 0
+ 1 99
+ 2 99
+ 3 99
+ 4 99
+ dtype: int64
+ >>> s.mask(t, 99)
+ 0 99
+ 1 1
+ 2 99
+ 3 99
+ 4 99
+ dtype: int64
+
>>> s.where(s > 1, 10)
0 10
1 10
| - [X] closes #45443
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47626 | 2022-07-07T17:58:14Z | 2022-07-08T22:17:31Z | 2022-07-08T22:17:31Z | 2022-07-08T22:17:45Z |
BUG: boolean indexer with NA raising when reindex is necessary | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 0b450fab53137..62a99ce3cae44 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -885,6 +885,7 @@ Indexing
- Bug when setting a value too large for a :class:`Series` dtype failing to coerce to a common type (:issue:`26049`, :issue:`32878`)
- Bug in :meth:`loc.__setitem__` treating ``range`` keys as positional instead of label-based (:issue:`45479`)
- Bug in :meth:`Series.__setitem__` when setting ``boolean`` dtype values containing ``NA`` incorrectly raising instead of casting to ``boolean`` dtype (:issue:`45462`)
+- Bug in :meth:`Series.loc` raising with boolean indexer containing ``NA`` when :class:`Index` did not match (:issue:`46551`)
- Bug in :meth:`Series.__setitem__` where setting :attr:`NA` into a numeric-dtype :class:`Series` would incorrectly upcast to object-dtype rather than treating the value as ``np.nan`` (:issue:`44199`)
- Bug in :meth:`DataFrame.loc` when setting values to a column and right hand side is a dictionary (:issue:`47216`)
- Bug in :meth:`DataFrame.loc` when setting a :class:`DataFrame` not aligning index in some cases (:issue:`47578`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 665333d0d7b4f..30d6a8a9f019b 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -30,6 +30,7 @@
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
+ is_extension_array_dtype,
is_hashable,
is_integer,
is_iterator,
@@ -2531,15 +2532,20 @@ def check_bool_indexer(index: Index, key) -> np.ndarray:
"""
result = key
if isinstance(key, ABCSeries) and not key.index.equals(index):
- result = result.reindex(index)
- mask = isna(result._values)
- if mask.any():
+ indexer = result.index.get_indexer_for(index)
+ if -1 in indexer:
raise IndexingError(
"Unalignable boolean Series provided as "
"indexer (index of the boolean Series and of "
"the indexed object do not match)."
)
- return result.astype(bool)._values
+
+ result = result.take(indexer)
+
+ # fall through for boolean
+ if not is_extension_array_dtype(result.dtype):
+ return result.astype(bool)._values
+
if is_object_dtype(key):
# key might be object-dtype bool, check_array_indexer needs bool array
result = np.asarray(result, dtype=bool)
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 3a8e14576a55d..2f4fffe57593f 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -5,7 +5,10 @@
import numpy as np
import pytest
+from pandas.errors import IndexingError
+
from pandas import (
+ NA,
DataFrame,
IndexSlice,
MultiIndex,
@@ -330,6 +333,22 @@ def test_loc_setitem_all_false_indexer():
tm.assert_series_equal(ser, expected)
+def test_loc_boolean_indexer_non_matching_index():
+ # GH#46551
+ ser = Series([1])
+ result = ser.loc[Series([NA, False], dtype="boolean")]
+ expected = Series([], dtype="int64")
+ tm.assert_series_equal(result, expected)
+
+
+def test_loc_boolean_indexer_miss_matching_index():
+ # GH#46551
+ ser = Series([1])
+ indexer = Series([NA, False], dtype="boolean", index=[1, 2])
+ with pytest.raises(IndexingError, match="Unalignable"):
+ ser.loc[indexer]
+
+
class TestDeprecatedIndexers:
@pytest.mark.parametrize("key", [{1}, {1: 1}])
def test_getitem_dict_and_set_deprecated(self, key):
| - [x] closes #46551 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
There are multiple options how to solve this. Alternatively, we could get the indexeer for the reindexing operation and check for -1 instead of filling the NAs | https://api.github.com/repos/pandas-dev/pandas/pulls/47623 | 2022-07-07T11:47:26Z | 2022-07-08T22:53:53Z | 2022-07-08T22:53:53Z | 2022-07-09T02:12:23Z |
WARN: Don't show FutureWarning when enlarging df with iloc | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 665333d0d7b4f..adc4b8c003515 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2004,11 +2004,16 @@ def _setitem_single_column(self, loc: int, value, plane_indexer):
if (
isinstance(new_values, np.ndarray)
and isinstance(orig_values, np.ndarray)
- and np.shares_memory(new_values, orig_values)
+ and (
+ np.shares_memory(new_values, orig_values)
+ or new_values.shape != orig_values.shape
+ )
):
# TODO: get something like tm.shares_memory working?
# The values were set inplace after all, no need to warn,
# e.g. test_rename_nocopy
+ # In case of enlarging we can not set inplace, so need to
+ # warn either
pass
else:
warnings.warn(
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 994181eac8199..edcd577dd948d 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -1307,6 +1307,16 @@ def test_loc_setitem_rhs_frame(self, idxr, val):
expected = DataFrame({"a": [np.nan, val]})
tm.assert_frame_equal(df, expected)
+ @td.skip_array_manager_invalid_test
+ def test_iloc_setitem_enlarge_no_warning(self):
+ # GH#47381
+ df = DataFrame(columns=["a", "b"])
+ expected = df.copy()
+ view = df[:]
+ with tm.assert_produces_warning(None):
+ df.iloc[:, 0] = np.array([1, 2], dtype=np.float64)
+ tm.assert_frame_equal(view, expected)
+
class TestDataFrameIndexingUInt64:
def test_setitem(self, uint64_frame):
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 9e9310d735f6a..45f36834510ed 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -806,9 +806,7 @@ def test_setitem_string_column_numpy_dtype_raising(self):
def test_setitem_empty_df_duplicate_columns(self):
# GH#38521
df = DataFrame(columns=["a", "b", "b"], dtype="float64")
- msg = "will attempt to set the values inplace instead"
- with tm.assert_produces_warning(FutureWarning, match=msg):
- df.loc[:, "a"] = list(range(2))
+ df.loc[:, "a"] = list(range(2))
expected = DataFrame(
[[0, np.nan, np.nan], [1, np.nan, np.nan]], columns=["a", "b", "b"]
)
| - [x] xref #47381 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Can't be done inplace, so no need to warn | https://api.github.com/repos/pandas-dev/pandas/pulls/47621 | 2022-07-07T06:34:31Z | 2022-07-07T16:37:28Z | 2022-07-07T16:37:28Z | 2022-07-08T20:07:47Z |
BUG: Return Float64 for read_parquet(use_nullable_dtypes=True) | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 0b450fab53137..c70acc0a0b18c 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -948,7 +948,7 @@ I/O
- Bug in :func:`read_sas` with RLE-compressed SAS7BDAT files that contain 0x40 control bytes (:issue:`31243`)
- Bug in :func:`read_sas` that scrambled column names (:issue:`31243`)
- Bug in :func:`read_sas` with RLE-compressed SAS7BDAT files that contain 0x00 control bytes (:issue:`47099`)
--
+- Bug in :func:`read_parquet` with ``use_nullable_dtypes=True`` where ``float64`` dtype was returned instead of nullable ``Float64`` dtype (:issue:`45694`)
Period
^^^^^^
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index cbf3bcc9278d5..d28309cda6788 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -231,6 +231,8 @@ def read(
self.api.uint64(): pd.UInt64Dtype(),
self.api.bool_(): pd.BooleanDtype(),
self.api.string(): pd.StringDtype(),
+ self.api.float32(): pd.Float32Dtype(),
+ self.api.float64(): pd.Float64Dtype(),
}
to_pandas_kwargs["types_mapper"] = mapping.get
manager = get_option("mode.data_manager")
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 5b899079dfffd..64e4a15a42061 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -626,6 +626,9 @@ def test_use_nullable_dtypes(self, engine, request):
"d": pyarrow.array([True, False, True, None]),
# Test that nullable dtypes used even in absence of nulls
"e": pyarrow.array([1, 2, 3, 4], "int64"),
+ # GH 45694
+ "f": pyarrow.array([1.0, 2.0, 3.0, None], "float32"),
+ "g": pyarrow.array([1.0, 2.0, 3.0, None], "float64"),
}
)
with tm.ensure_clean() as path:
@@ -642,6 +645,8 @@ def test_use_nullable_dtypes(self, engine, request):
"c": pd.array(["a", "b", "c", None], dtype="string"),
"d": pd.array([True, False, True, None], dtype="boolean"),
"e": pd.array([1, 2, 3, 4], dtype="Int64"),
+ "f": pd.array([1.0, 2.0, 3.0, None], dtype="Float32"),
+ "g": pd.array([1.0, 2.0, 3.0, None], dtype="Float64"),
}
)
if engine == "fastparquet":
@@ -672,7 +677,17 @@ def test_read_empty_array(self, pa, dtype):
"value": pd.array([], dtype=dtype),
}
)
- check_round_trip(df, pa, read_kwargs={"use_nullable_dtypes": True})
+ # GH 45694
+ expected = None
+ if dtype == "float":
+ expected = pd.DataFrame(
+ {
+ "value": pd.array([], dtype="Float64"),
+ }
+ )
+ check_round_trip(
+ df, pa, read_kwargs={"use_nullable_dtypes": True}, expected=expected
+ )
@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:DeprecationWarning")
| - [x] closes #45694 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47619 | 2022-07-06T22:23:23Z | 2022-07-07T16:10:11Z | 2022-07-07T16:10:10Z | 2022-07-07T16:10:14Z |
TYP: more return annotations in core/ | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 0fcea111716ec..5e90eae27f981 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -529,7 +529,7 @@ def getMixedTypeDict():
return index, data
-def makeMixedDataFrame():
+def makeMixedDataFrame() -> DataFrame:
return DataFrame(getMixedTypeDict()[1])
diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py
index 19a44dbfe6f6d..466eeb768f5f9 100644
--- a/pandas/core/array_algos/replace.py
+++ b/pandas/core/array_algos/replace.py
@@ -119,7 +119,7 @@ def _check_comparison_types(
def replace_regex(
values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None
-):
+) -> None:
"""
Parameters
----------
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index b15e0624963ea..f17d343024915 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -70,6 +70,8 @@
NumpyValueArrayLike,
)
+ from pandas import Series
+
def ravel_compat(meth: F) -> F:
"""
@@ -259,7 +261,7 @@ def _validate_shift_value(self, fill_value):
# we can remove this and use validate_fill_value directly
return self._validate_scalar(fill_value)
- def __setitem__(self, key, value):
+ def __setitem__(self, key, value) -> None:
key = check_array_indexer(self, key)
value = self._validate_setitem_value(value)
self._ndarray[key] = value
@@ -433,7 +435,7 @@ def insert(
# These are not part of the EA API, but we implement them because
# pandas assumes they're there.
- def value_counts(self, dropna: bool = True):
+ def value_counts(self, dropna: bool = True) -> Series:
"""
Return a Series containing counts of unique values.
diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py
index e4bb7dc94cb8d..5893ca77193c4 100644
--- a/pandas/core/arrays/arrow/_arrow_utils.py
+++ b/pandas/core/arrays/arrow/_arrow_utils.py
@@ -13,7 +13,7 @@
from pandas.core.arrays.interval import VALID_CLOSED
-def fallback_performancewarning(version: str | None = None):
+def fallback_performancewarning(version: str | None = None) -> None:
"""
Raise a PerformanceWarning for falling back to ExtensionArray's
non-pyarrow method
@@ -24,7 +24,9 @@ def fallback_performancewarning(version: str | None = None):
warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
-def pyarrow_array_to_numpy_and_mask(arr, dtype: np.dtype):
+def pyarrow_array_to_numpy_and_mask(
+ arr, dtype: np.dtype
+) -> tuple[np.ndarray, np.ndarray]:
"""
Convert a primitive pyarrow.Array to a numpy array and boolean mask based
on the buffers of the Array.
@@ -74,12 +76,12 @@ def __init__(self, freq) -> None:
def freq(self):
return self._freq
- def __arrow_ext_serialize__(self):
+ def __arrow_ext_serialize__(self) -> bytes:
metadata = {"freq": self.freq}
return json.dumps(metadata).encode()
@classmethod
- def __arrow_ext_deserialize__(cls, storage_type, serialized):
+ def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowPeriodType:
metadata = json.loads(serialized.decode())
return ArrowPeriodType(metadata["freq"])
@@ -122,7 +124,7 @@ def subtype(self):
return self._subtype
@property
- def inclusive(self):
+ def inclusive(self) -> str:
return self._closed
@property
@@ -134,12 +136,12 @@ def closed(self):
)
return self._closed
- def __arrow_ext_serialize__(self):
+ def __arrow_ext_serialize__(self) -> bytes:
metadata = {"subtype": str(self.subtype), "inclusive": self.inclusive}
return json.dumps(metadata).encode()
@classmethod
- def __arrow_ext_deserialize__(cls, storage_type, serialized):
+ def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowIntervalType:
metadata = json.loads(serialized.decode())
subtype = pyarrow.type_for_alias(metadata["subtype"])
inclusive = metadata["inclusive"]
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index dfb58f0edd127..92aedbb836b38 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -368,7 +368,7 @@ def take(
indices: TakeIndexer,
allow_fill: bool = False,
fill_value: Any = None,
- ):
+ ) -> ArrowExtensionArray:
"""
Take elements from an array.
diff --git a/pandas/core/arrays/arrow/dtype.py b/pandas/core/arrays/arrow/dtype.py
index 346c4e8d19379..4a32663a68ed2 100644
--- a/pandas/core/arrays/arrow/dtype.py
+++ b/pandas/core/arrays/arrow/dtype.py
@@ -77,7 +77,7 @@ def construct_array_type(cls):
return ArrowExtensionArray
@classmethod
- def construct_from_string(cls, string: str):
+ def construct_from_string(cls, string: str) -> ArrowDtype:
"""
Construct this type from a string.
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 4274e6e5a911c..882cc76cf2d77 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -460,7 +460,7 @@ def __ne__(self, other: Any) -> ArrayLike: # type: ignore[override]
"""
return ~(self == other)
- def __init_subclass__(cls, **kwargs):
+ def __init_subclass__(cls, **kwargs) -> None:
factorize = getattr(cls, "factorize")
if (
"use_na_sentinel" not in inspect.signature(factorize).parameters
@@ -770,11 +770,11 @@ def argmax(self, skipna: bool = True) -> int:
return nargminmax(self, "argmax")
def fillna(
- self,
+ self: ExtensionArrayT,
value: object | ArrayLike | None = None,
method: FillnaOptions | None = None,
limit: int | None = None,
- ):
+ ) -> ExtensionArrayT:
"""
Fill NA/NaN values using the specified method.
@@ -1139,7 +1139,9 @@ def factorize(
@Substitution(klass="ExtensionArray")
@Appender(_extension_array_shared_docs["repeat"])
- def repeat(self, repeats: int | Sequence[int], axis: int | None = None):
+ def repeat(
+ self: ExtensionArrayT, repeats: int | Sequence[int], axis: int | None = None
+ ) -> ExtensionArrayT:
nv.validate_repeat((), {"axis": axis})
ind = np.arange(len(self)).repeat(repeats)
return self.take(ind)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 70699c45e0c36..2c3b7c2f2589d 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -7,6 +7,7 @@
from typing import (
TYPE_CHECKING,
Hashable,
+ Literal,
Sequence,
TypeVar,
Union,
@@ -29,7 +30,10 @@
lib,
)
from pandas._libs.arrays import NDArrayBacked
-from pandas._libs.lib import no_default
+from pandas._libs.lib import (
+ NoDefault,
+ no_default,
+)
from pandas._typing import (
ArrayLike,
AstypeArg,
@@ -114,7 +118,11 @@
from pandas.io.formats import console
if TYPE_CHECKING:
- from pandas import Index
+ from pandas import (
+ DataFrame,
+ Index,
+ Series,
+ )
CategoricalT = TypeVar("CategoricalT", bound="Categorical")
@@ -193,7 +201,7 @@ def func(self, other):
return func
-def contains(cat, key, container):
+def contains(cat, key, container) -> bool:
"""
Helper for membership check for ``key`` in ``cat``.
@@ -462,9 +470,7 @@ def __init__(
dtype = CategoricalDtype(ordered=False).update_dtype(dtype)
arr = coerce_indexer_dtype(codes, dtype.categories)
- # error: Argument 1 to "__init__" of "NDArrayBacked" has incompatible
- # type "Union[ExtensionArray, ndarray]"; expected "ndarray"
- super().__init__(arr, dtype) # type: ignore[arg-type]
+ super().__init__(arr, dtype)
@property
def dtype(self) -> CategoricalDtype:
@@ -639,7 +645,7 @@ def _from_inferred_categories(
@classmethod
def from_codes(
cls, codes, categories=None, ordered=None, dtype: Dtype | None = None
- ):
+ ) -> Categorical:
"""
Make a Categorical type from codes and categories or dtype.
@@ -707,7 +713,7 @@ def from_codes(
# Categories/Codes/Ordered
@property
- def categories(self):
+ def categories(self) -> Index:
"""
The categories of this categorical.
@@ -738,7 +744,7 @@ def categories(self):
return self.dtype.categories
@categories.setter
- def categories(self, categories):
+ def categories(self, categories) -> None:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
@@ -829,7 +835,20 @@ def _set_dtype(self, dtype: CategoricalDtype) -> Categorical:
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
- def set_ordered(self, value, inplace=False):
+ @overload
+ def set_ordered(self, value, *, inplace: Literal[False] = ...) -> Categorical:
+ ...
+
+ @overload
+ def set_ordered(self, value, *, inplace: Literal[True]) -> None:
+ ...
+
+ @overload
+ def set_ordered(self, value, *, inplace: bool) -> Categorical | None:
+ ...
+
+ @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "value"])
+ def set_ordered(self, value, inplace: bool = False) -> Categorical | None:
"""
Set the ordered attribute to the boolean value.
@@ -847,8 +866,18 @@ def set_ordered(self, value, inplace=False):
NDArrayBacked.__init__(cat, cat._ndarray, new_dtype)
if not inplace:
return cat
+ return None
+
+ @overload
+ def as_ordered(self, *, inplace: Literal[False] = ...) -> Categorical:
+ ...
+
+ @overload
+ def as_ordered(self, *, inplace: Literal[True]) -> None:
+ ...
- def as_ordered(self, inplace=False):
+ @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
+ def as_ordered(self, inplace: bool = False) -> Categorical | None:
"""
Set the Categorical to be ordered.
@@ -866,7 +895,16 @@ def as_ordered(self, inplace=False):
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
- def as_unordered(self, inplace=False):
+ @overload
+ def as_unordered(self, *, inplace: Literal[False] = ...) -> Categorical:
+ ...
+
+ @overload
+ def as_unordered(self, *, inplace: Literal[True]) -> None:
+ ...
+
+ @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
+ def as_unordered(self, inplace: bool = False) -> Categorical | None:
"""
Set the Categorical to be unordered.
@@ -973,7 +1011,22 @@ def set_categories(
if not inplace:
return cat
- def rename_categories(self, new_categories, inplace=no_default):
+ @overload
+ def rename_categories(
+ self, new_categories, *, inplace: Literal[False] | NoDefault = ...
+ ) -> Categorical:
+ ...
+
+ @overload
+ def rename_categories(self, new_categories, *, inplace: Literal[True]) -> None:
+ ...
+
+ @deprecate_nonkeyword_arguments(
+ version=None, allowed_args=["self", "new_categories"]
+ )
+ def rename_categories(
+ self, new_categories, inplace: bool | NoDefault = no_default
+ ) -> Categorical | None:
"""
Rename categories.
@@ -1062,6 +1115,7 @@ def rename_categories(self, new_categories, inplace=no_default):
cat.categories = new_categories
if not inplace:
return cat
+ return None
def reorder_categories(self, new_categories, ordered=None, inplace=no_default):
"""
@@ -1124,7 +1178,22 @@ def reorder_categories(self, new_categories, ordered=None, inplace=no_default):
simplefilter("ignore")
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
- def add_categories(self, new_categories, inplace=no_default):
+ @overload
+ def add_categories(
+ self, new_categories, *, inplace: Literal[False] | NoDefault = ...
+ ) -> Categorical:
+ ...
+
+ @overload
+ def add_categories(self, new_categories, *, inplace: Literal[True]) -> None:
+ ...
+
+ @deprecate_nonkeyword_arguments(
+ version=None, allowed_args=["self", "new_categories"]
+ )
+ def add_categories(
+ self, new_categories, inplace: bool | NoDefault = no_default
+ ) -> Categorical | None:
"""
Add new categories.
@@ -1199,6 +1268,7 @@ def add_categories(self, new_categories, inplace=no_default):
NDArrayBacked.__init__(cat, codes, new_dtype)
if not inplace:
return cat
+ return None
def remove_categories(self, removals, inplace=no_default):
"""
@@ -1280,7 +1350,20 @@ def remove_categories(self, removals, inplace=no_default):
new_categories, ordered=self.ordered, rename=False, inplace=inplace
)
- def remove_unused_categories(self, inplace=no_default):
+ @overload
+ def remove_unused_categories(
+ self, *, inplace: Literal[False] | NoDefault = ...
+ ) -> Categorical:
+ ...
+
+ @overload
+ def remove_unused_categories(self, *, inplace: Literal[True]) -> None:
+ ...
+
+ @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
+ def remove_unused_categories(
+ self, inplace: bool | NoDefault = no_default
+ ) -> Categorical | None:
"""
Remove categories which are not used.
@@ -1348,6 +1431,7 @@ def remove_unused_categories(self, inplace=no_default):
NDArrayBacked.__init__(cat, new_codes, new_dtype)
if not inplace:
return cat
+ return None
# ------------------------------------------------------------------
@@ -1531,7 +1615,7 @@ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
f"the numpy op {ufunc.__name__}"
)
- def __setstate__(self, state):
+ def __setstate__(self, state) -> None:
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
return super().__setstate__(state)
@@ -1617,7 +1701,7 @@ def notna(self) -> np.ndarray:
notnull = notna
- def value_counts(self, dropna: bool = True):
+ def value_counts(self, dropna: bool = True) -> Series:
"""
Return a Series containing counts of each category.
@@ -1700,7 +1784,7 @@ def _internal_get_values(self):
return self.categories.astype("object").take(self._codes, fill_value=np.nan)
return np.array(self)
- def check_for_ordered(self, op):
+ def check_for_ordered(self, op) -> None:
"""assert that we are ordered"""
if not self.ordered:
raise TypeError(
@@ -1763,9 +1847,26 @@ def argsort(self, ascending=True, kind="quicksort", **kwargs):
"""
return super().argsort(ascending=ascending, kind=kind, **kwargs)
+ @overload
+ def sort_values(
+ self,
+ *,
+ inplace: Literal[False] = ...,
+ ascending: bool = ...,
+ na_position: str = ...,
+ ) -> Categorical:
+ ...
+
+ @overload
+ def sort_values(
+ self, *, inplace: Literal[True], ascending: bool = ..., na_position: str = ...
+ ) -> None:
+ ...
+
+ @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def sort_values(
self, inplace: bool = False, ascending: bool = True, na_position: str = "last"
- ):
+ ) -> Categorical | None:
"""
Sort the Categorical by category value returning a new
Categorical by default.
@@ -1845,11 +1946,11 @@ def sort_values(
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
- if inplace:
- self._codes[:] = self._codes[sorted_idx]
- else:
+ if not inplace:
codes = self._codes[sorted_idx]
return self._from_backing_data(codes)
+ self._codes[:] = self._codes[sorted_idx]
+ return None
def _rank(
self,
@@ -1954,7 +2055,9 @@ def _unbox_scalar(self, key) -> int:
# ------------------------------------------------------------------
- def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
+ def take_nd(
+ self, indexer, allow_fill: bool = False, fill_value=None
+ ) -> Categorical:
# GH#27745 deprecate alias that other EAs dont have
warn(
"Categorical.take_nd is deprecated, use Categorical.take instead",
@@ -2402,7 +2505,7 @@ def is_dtype_equal(self, other) -> bool:
except (AttributeError, TypeError):
return False
- def describe(self):
+ def describe(self) -> DataFrame:
"""
Describes this Categorical
@@ -2476,7 +2579,18 @@ def isin(self, values) -> npt.NDArray[np.bool_]:
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
- def replace(self, to_replace, value, inplace: bool = False):
+ @overload
+ def replace(
+ self, to_replace, value, *, inplace: Literal[False] = ...
+ ) -> Categorical:
+ ...
+
+ @overload
+ def replace(self, to_replace, value, *, inplace: Literal[True]) -> None:
+ ...
+
+ @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "value"])
+ def replace(self, to_replace, value, inplace: bool = False) -> Categorical | None:
"""
Replaces all instances of one value with another
@@ -2724,7 +2838,7 @@ def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
- def codes(self):
+ def codes(self) -> Series:
"""
Return Series of codes as well as the index.
"""
@@ -2823,6 +2937,7 @@ def factorize_from_iterable(values) -> tuple[np.ndarray, Index]:
if not is_list_like(values):
raise TypeError("Input must be list-like")
+ categories: Index
if is_categorical_dtype(values):
values = extract_array(values)
# The Categorical we want to build has the same categories
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 48de7771cd8d7..a2251c49a2cc5 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -931,7 +931,7 @@ def freq(self):
return self._freq
@freq.setter
- def freq(self, value):
+ def freq(self, value) -> None:
if value is not None:
value = to_offset(value)
self._validate_frequency(self, value)
@@ -1548,7 +1548,7 @@ def __rsub__(self, other):
# We get here with e.g. datetime objects
return -(self - other)
- def __iadd__(self, other):
+ def __iadd__(self: DatetimeLikeArrayT, other) -> DatetimeLikeArrayT:
result = self + other
self[:] = result[:]
@@ -1557,7 +1557,7 @@ def __iadd__(self, other):
self._freq = result.freq
return self
- def __isub__(self, other):
+ def __isub__(self: DatetimeLikeArrayT, other) -> DatetimeLikeArrayT:
result = self - other
self[:] = result[:]
@@ -2041,11 +2041,11 @@ def ceil(self, freq, ambiguous="raise", nonexistent="raise"):
# --------------------------------------------------------------
# Reductions
- def any(self, *, axis: int | None = None, skipna: bool = True):
+ def any(self, *, axis: int | None = None, skipna: bool = True) -> bool:
# GH#34479 discussion of desired behavior long-term
return nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())
- def all(self, *, axis: int | None = None, skipna: bool = True):
+ def all(self, *, axis: int | None = None, skipna: bool = True) -> bool:
# GH#34479 discussion of desired behavior long-term
return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 56aae3039f7d6..d4db5cfd78367 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -99,7 +99,10 @@
)
if TYPE_CHECKING:
- from pandas import Index
+ from pandas import (
+ Index,
+ Series,
+ )
IntervalArrayT = TypeVar("IntervalArrayT", bound="IntervalArray")
@@ -708,7 +711,7 @@ def __getitem__(
raise ValueError("multi-dimensional indexing not allowed")
return self._shallow_copy(left, right)
- def __setitem__(self, key, value):
+ def __setitem__(self, key, value) -> None:
value_left, value_right = self._validate_setitem_value(value)
key = check_array_indexer(self, key)
@@ -837,7 +840,7 @@ def argsort(
ascending=ascending, kind=kind, na_position=na_position, **kwargs
)
- def min(self, *, axis: int | None = None, skipna: bool = True):
+ def min(self, *, axis: int | None = None, skipna: bool = True) -> IntervalOrNA:
nv.validate_minmax_axis(axis, self.ndim)
if not len(self):
@@ -854,7 +857,7 @@ def min(self, *, axis: int | None = None, skipna: bool = True):
indexer = obj.argsort()[0]
return obj[indexer]
- def max(self, *, axis: int | None = None, skipna: bool = True):
+ def max(self, *, axis: int | None = None, skipna: bool = True) -> IntervalOrNA:
nv.validate_minmax_axis(axis, self.ndim)
if not len(self):
@@ -1172,7 +1175,7 @@ def _validate_setitem_value(self, value):
return value_left, value_right
- def value_counts(self, dropna: bool = True):
+ def value_counts(self, dropna: bool = True) -> Series:
"""
Returns a Series containing counts of each interval.
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 2fce5fc747312..128c7e44f5075 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -322,13 +322,13 @@ def round(self, decimals: int = 0, *args, **kwargs):
def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
return type(self)(~self._data, self._mask.copy())
- def __neg__(self):
+ def __neg__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
return type(self)(-self._data, self._mask.copy())
- def __pos__(self):
+ def __pos__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
return self.copy()
- def __abs__(self):
+ def __abs__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
return type(self)(abs(self._data), self._mask.copy())
# ------------------------------------------------------------------
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 4d97345912250..fa7c4e0d0aa70 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -1009,7 +1009,9 @@ def validate_dtype_freq(dtype, freq):
return freq
-def dt64arr_to_periodarr(data, freq, tz=None):
+def dt64arr_to_periodarr(
+ data, freq, tz=None
+) -> tuple[npt.NDArray[np.int64], BaseOffset]:
"""
Convert an datetime-like array to values Period ordinals.
diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py
index 41af7d4ccd506..80713a6fca323 100644
--- a/pandas/core/arrays/sparse/accessor.py
+++ b/pandas/core/arrays/sparse/accessor.py
@@ -1,4 +1,7 @@
"""Sparse accessor"""
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
import numpy as np
@@ -13,6 +16,12 @@
from pandas.core.arrays.sparse.array import SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype
+if TYPE_CHECKING:
+ from pandas import (
+ DataFrame,
+ Series,
+ )
+
class BaseAccessor:
_validation_msg = "Can only use the '.sparse' accessor with Sparse data."
@@ -49,7 +58,7 @@ def _delegate_method(self, name, *args, **kwargs):
raise ValueError
@classmethod
- def from_coo(cls, A, dense_index=False):
+ def from_coo(cls, A, dense_index=False) -> Series:
"""
Create a Series with sparse values from a scipy.sparse.coo_matrix.
@@ -180,7 +189,7 @@ def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
)
return A, rows, columns
- def to_dense(self):
+ def to_dense(self) -> Series:
"""
Convert a Series from sparse values to dense.
@@ -228,7 +237,7 @@ def _validate(self, data):
raise AttributeError(self._validation_msg)
@classmethod
- def from_spmatrix(cls, data, index=None, columns=None):
+ def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame:
"""
Create a new DataFrame from a scipy sparse matrix.
@@ -284,7 +293,7 @@ def from_spmatrix(cls, data, index=None, columns=None):
arrays, columns=columns, index=index, verify_integrity=False
)
- def to_dense(self):
+ def to_dense(self) -> DataFrame:
"""
Convert a DataFrame with sparse values to dense.
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index e7c745e902a49..5653d87a4570b 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -1395,7 +1395,7 @@ def _where(self, mask, value):
# ------------------------------------------------------------------------
# IO
# ------------------------------------------------------------------------
- def __setstate__(self, state):
+ def __setstate__(self, state) -> None:
"""Necessary for making this object picklable"""
if isinstance(state, tuple):
# Compat for pandas < 0.24.0
@@ -1410,7 +1410,7 @@ def __setstate__(self, state):
else:
self.__dict__.update(state)
- def nonzero(self):
+ def nonzero(self) -> tuple[npt.NDArray[np.int32]]:
if self.fill_value == 0:
return (self.sp_index.indices,)
else:
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
index b6bb5faeebdee..859995cb3c230 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -179,7 +179,7 @@ def _is_boolean(self) -> bool:
return is_bool_dtype(self.subtype)
@property
- def kind(self):
+ def kind(self) -> str:
"""
The sparse kind. Either 'integer', or 'block'.
"""
@@ -194,7 +194,7 @@ def subtype(self):
return self._dtype
@property
- def name(self):
+ def name(self) -> str:
return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]"
def __repr__(self) -> str:
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 45683d83a1303..083acf16ec758 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -51,6 +51,8 @@
if TYPE_CHECKING:
import pyarrow
+ from pandas import Series
+
@register_extension_dtype
class StringDtype(StorageExtensionDtype):
@@ -461,7 +463,7 @@ def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
)
return self._wrap_reduction_result(axis, result)
- def value_counts(self, dropna: bool = True):
+ def value_counts(self, dropna: bool = True) -> Series:
from pandas import value_counts
result = value_counts(self._ndarray, dropna=dropna).astype("Int64")
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index c4d1a35315d7d..3e3df5a3200c1 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -178,7 +178,7 @@ def to_numpy(
result[mask] = na_value
return result
- def insert(self, loc: int, item):
+ def insert(self, loc: int, item) -> ArrowStringArray:
if not isinstance(item, str) and item is not libmissing.NA:
raise TypeError("Scalar must be NA or str")
return super().insert(loc, item)
diff --git a/pandas/core/computation/common.py b/pandas/core/computation/common.py
index 8a9583c465f50..ebf4d4ea9154e 100644
--- a/pandas/core/computation/common.py
+++ b/pandas/core/computation/common.py
@@ -5,7 +5,7 @@
from pandas._config import get_option
-def ensure_decoded(s):
+def ensure_decoded(s) -> str:
"""
If we have bytes, decode them to unicode.
"""
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index 4b037ab564a87..90824ce8d856f 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -548,13 +548,13 @@ def visit_UnaryOp(self, node, **kwargs):
def visit_Name(self, node, **kwargs):
return self.term_type(node.id, self.env, **kwargs)
- def visit_NameConstant(self, node, **kwargs):
+ def visit_NameConstant(self, node, **kwargs) -> Term:
return self.const_type(node.value, self.env)
- def visit_Num(self, node, **kwargs):
+ def visit_Num(self, node, **kwargs) -> Term:
return self.const_type(node.n, self.env)
- def visit_Constant(self, node, **kwargs):
+ def visit_Constant(self, node, **kwargs) -> Term:
return self.const_type(node.n, self.env)
def visit_Str(self, node, **kwargs):
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index 9e180f11c4211..e82bec47c6ac5 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -38,7 +38,7 @@
_MIN_ELEMENTS = 1_000_000
-def set_use_numexpr(v=True):
+def set_use_numexpr(v=True) -> None:
# set/unset to use numexpr
global USE_NUMEXPR
if NUMEXPR_INSTALLED:
@@ -51,7 +51,7 @@ def set_use_numexpr(v=True):
_where = _where_numexpr if USE_NUMEXPR else _where_standard
-def set_numexpr_threads(n=None):
+def set_numexpr_threads(n=None) -> None:
# if we are using numexpr, set the threads to n
# otherwise reset
if NUMEXPR_INSTALLED and USE_NUMEXPR:
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index 3a556b57ea5a5..db5f28e2ae6c1 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -94,7 +94,7 @@ def __repr__(self) -> str:
def __call__(self, *args, **kwargs):
return self.value
- def evaluate(self, *args, **kwargs):
+ def evaluate(self, *args, **kwargs) -> Term:
return self
def _resolve_name(self):
@@ -107,7 +107,7 @@ def _resolve_name(self):
)
return res
- def update(self, value):
+ def update(self, value) -> None:
"""
search order for local (i.e., @variable) variables:
@@ -447,7 +447,7 @@ def evaluate(self, env, engine: str, parser, term_type, eval_in_python):
name = env.add_tmp(res)
return term_type(name, env=env)
- def convert_values(self):
+ def convert_values(self) -> None:
"""
Convert datetimes to a comparable value in an expression.
"""
@@ -564,7 +564,7 @@ def __init__(self, op: str, operand) -> None:
f"valid operators are {UNARY_OPS_SYMS}"
) from err
- def __call__(self, env):
+ def __call__(self, env) -> MathCall:
operand = self.operand(env)
# error: Cannot call function of unknown type
return self.func(operand) # type: ignore[operator]
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index f96a9ab4cfb43..5ec2aaab98ba1 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -400,7 +400,7 @@ class StorageExtensionDtype(ExtensionDtype):
def __init__(self, storage=None) -> None:
self.storage = storage
- def __repr__(self):
+ def __repr__(self) -> str:
return f"{self.name}[{self.storage}]"
def __str__(self):
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index d356a858a82fb..769656d1c4755 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -978,7 +978,7 @@ def maybe_upcast(
return upcast_values, fill_value # type: ignore[return-value]
-def invalidate_string_dtypes(dtype_set: set[DtypeObj]):
+def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None:
"""
Change string like dtypes to object for
``DataFrame.select_dtypes()``.
@@ -995,7 +995,7 @@ def invalidate_string_dtypes(dtype_set: set[DtypeObj]):
raise TypeError("string dtypes are not allowed, use 'object' instead")
-def coerce_indexer_dtype(indexer, categories):
+def coerce_indexer_dtype(indexer, categories) -> np.ndarray:
"""coerce the indexer input array to the smallest dtype possible"""
length = len(categories)
if length < _int8_max:
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 519dfd9269df5..378f33e2b65ac 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1041,7 +1041,7 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype) -> bool:
# This exists to silence numpy deprecation warnings, see GH#29553
-def is_numeric_v_string_like(a: ArrayLike, b):
+def is_numeric_v_string_like(a: ArrayLike, b) -> bool:
"""
Check if we are comparing a string-like object to a numeric ndarray.
NumPy doesn't like to compare such objects, especially numeric arrays
@@ -1090,7 +1090,7 @@ def is_numeric_v_string_like(a: ArrayLike, b):
# This exists to silence numpy deprecation warnings, see GH#29553
-def is_datetimelike_v_numeric(a, b):
+def is_datetimelike_v_numeric(a, b) -> bool:
"""
Check if we are comparing a datetime-like object to a numeric object.
By "numeric," we mean an object that is either of an int or float dtype.
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index c61e9aaa59362..059df4009e2f6 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -1,6 +1,8 @@
"""
Utility functions related to concat.
"""
+from __future__ import annotations
+
from typing import (
TYPE_CHECKING,
cast,
@@ -32,6 +34,7 @@
)
if TYPE_CHECKING:
+ from pandas.core.arrays import Categorical
from pandas.core.arrays.sparse import SparseArray
@@ -156,7 +159,7 @@ def is_nonempty(x) -> bool:
def union_categoricals(
to_union, sort_categories: bool = False, ignore_order: bool = False
-):
+) -> Categorical:
"""
Combine list-like of Categorical-like, unioning categories.
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 20fecbb0095c5..16e7559e4d153 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -27,6 +27,7 @@
from pandas._libs.tslibs import (
BaseOffset,
NaT,
+ NaTType,
Period,
Timestamp,
dtypes,
@@ -945,7 +946,7 @@ def name(self) -> str_type:
return f"period[{self.freq.freqstr}]"
@property
- def na_value(self):
+ def na_value(self) -> NaTType:
return NaT
def __hash__(self) -> int:
@@ -972,7 +973,7 @@ def __eq__(self, other: Any) -> bool:
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
- def __setstate__(self, state):
+ def __setstate__(self, state) -> None:
# for pickle compat. __getstate__ is defined in the
# PandasExtensionDtype superclass and uses the public properties to
# pickle -> need to set the settable private ones here (see GH26067)
@@ -1034,7 +1035,9 @@ def __from_arrow__(
for arr in chunks:
data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64))
parr = PeriodArray(data.copy(), freq=self.freq, copy=False)
- parr[~mask] = NaT
+ # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray";
+ # expected type "Union[int, Sequence[int], Sequence[bool], slice]"
+ parr[~mask] = NaT # type: ignore[index]
results.append(parr)
if not results:
@@ -1230,7 +1233,7 @@ def construct_from_string(cls, string: str_type) -> IntervalDtype:
raise TypeError(msg)
@property
- def type(self):
+ def type(self) -> type[Interval]:
return Interval
def __str__(self) -> str_type:
@@ -1260,7 +1263,7 @@ def __eq__(self, other: Any) -> bool:
return is_dtype_equal(self.subtype, other.subtype)
- def __setstate__(self, state):
+ def __setstate__(self, state) -> None:
# for pickle compat. __get_state__ is defined in the
# PandasExtensionDtype superclass and uses the public properties to
# pickle -> need to set the settable private ones here (see GH26067)
diff --git a/pandas/core/exchange/column.py b/pandas/core/exchange/column.py
index ae24c5d295cc9..538c1d061ef22 100644
--- a/pandas/core/exchange/column.py
+++ b/pandas/core/exchange/column.py
@@ -1,7 +1,6 @@
-from typing import (
- Any,
- Tuple,
-)
+from __future__ import annotations
+
+from typing import Any
import numpy as np
@@ -126,7 +125,7 @@ def dtype(self):
else:
return self._dtype_from_pandasdtype(dtype)
- def _dtype_from_pandasdtype(self, dtype) -> Tuple[DtypeKind, int, str, str]:
+ def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]:
"""
See `self.dtype` for details.
"""
@@ -214,7 +213,7 @@ def get_chunks(self, n_chunks=None):
else:
yield self
- def get_buffers(self):
+ def get_buffers(self) -> ColumnBuffers:
"""
Return a dictionary containing the underlying buffers.
The returned dictionary has the following contents:
@@ -253,7 +252,7 @@ def get_buffers(self):
def _get_data_buffer(
self,
- ) -> Tuple[PandasBuffer, Any]: # Any is for self.dtype tuple
+ ) -> tuple[PandasBuffer, Any]: # Any is for self.dtype tuple
"""
Return the buffer containing the data and the buffer's associated dtype.
"""
@@ -296,7 +295,7 @@ def _get_data_buffer(
return buffer, dtype
- def _get_validity_buffer(self) -> Tuple[PandasBuffer, Any]:
+ def _get_validity_buffer(self) -> tuple[PandasBuffer, Any]:
"""
Return the buffer containing the mask values indicating missing data and
the buffer's associated dtype.
@@ -334,7 +333,7 @@ def _get_validity_buffer(self) -> Tuple[PandasBuffer, Any]:
raise NoBufferPresent(msg)
- def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]:
+ def _get_offsets_buffer(self) -> tuple[PandasBuffer, Any]:
"""
Return the buffer containing the offset values for variable-size binary
data (e.g., variable-length strings) and the buffer's associated dtype.
diff --git a/pandas/core/exchange/dataframe.py b/pandas/core/exchange/dataframe.py
index c8a89184b34c6..e5bb3811afed0 100644
--- a/pandas/core/exchange/dataframe.py
+++ b/pandas/core/exchange/dataframe.py
@@ -1,9 +1,15 @@
+from __future__ import annotations
+
from collections import abc
+from typing import TYPE_CHECKING
import pandas as pd
from pandas.core.exchange.column import PandasColumn
from pandas.core.exchange.dataframe_protocol import DataFrame as DataFrameXchg
+if TYPE_CHECKING:
+ from pandas import Index
+
class PandasDataFrameXchg(DataFrameXchg):
"""
@@ -29,11 +35,13 @@ def __init__(
self._nan_as_null = nan_as_null
self._allow_copy = allow_copy
- def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):
+ def __dataframe__(
+ self, nan_as_null: bool = False, allow_copy: bool = True
+ ) -> PandasDataFrameXchg:
return PandasDataFrameXchg(self._df, nan_as_null, allow_copy)
@property
- def metadata(self):
+ def metadata(self) -> dict[str, Index]:
# `index` isn't a regular column, and the protocol doesn't support row
# labels - so we export it as Pandas-specific metadata here.
return {"pandas.index": self._df.index}
@@ -47,7 +55,7 @@ def num_rows(self) -> int:
def num_chunks(self) -> int:
return 1
- def column_names(self):
+ def column_names(self) -> Index:
return self._df.columns
def get_column(self, i: int) -> PandasColumn:
@@ -56,13 +64,13 @@ def get_column(self, i: int) -> PandasColumn:
def get_column_by_name(self, name: str) -> PandasColumn:
return PandasColumn(self._df[name], allow_copy=self._allow_copy)
- def get_columns(self):
+ def get_columns(self) -> list[PandasColumn]:
return [
PandasColumn(self._df[name], allow_copy=self._allow_copy)
for name in self._df.columns
]
- def select_columns(self, indices):
+ def select_columns(self, indices) -> PandasDataFrameXchg:
if not isinstance(indices, abc.Sequence):
raise ValueError("`indices` is not a sequence")
if not isinstance(indices, list):
@@ -72,7 +80,7 @@ def select_columns(self, indices):
self._df.iloc[:, indices], self._nan_as_null, self._allow_copy
)
- def select_columns_by_name(self, names):
+ def select_columns_by_name(self, names) -> PandasDataFrameXchg:
if not isinstance(names, abc.Sequence):
raise ValueError("`names` is not a sequence")
if not isinstance(names, list):
diff --git a/pandas/core/exchange/from_dataframe.py b/pandas/core/exchange/from_dataframe.py
index 805e63ac67f16..cb1967b5701a0 100644
--- a/pandas/core/exchange/from_dataframe.py
+++ b/pandas/core/exchange/from_dataframe.py
@@ -32,7 +32,7 @@
}
-def from_dataframe(df, allow_copy=True):
+def from_dataframe(df, allow_copy=True) -> pd.DataFrame:
"""
Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol.
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index ec9a2e4a4b5c0..ad1f36e0cddd8 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -6,7 +6,10 @@
from __future__ import annotations
import dataclasses
-from typing import Hashable
+from typing import (
+ Hashable,
+ Literal,
+)
@dataclasses.dataclass(order=True, frozen=True)
@@ -92,7 +95,7 @@ class OutputKey:
# TODO(2.0) Remove after pad/backfill deprecation enforced
-def maybe_normalize_deprecated_kernels(kernel):
+def maybe_normalize_deprecated_kernels(kernel) -> Literal["bfill", "ffill"]:
if kernel == "backfill":
kernel = "bfill"
elif kernel == "pad":
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 63c861e084eda..9e26598d85e74 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -603,7 +603,7 @@ def value_counts(
ascending: bool = False,
bins=None,
dropna: bool = True,
- ):
+ ) -> Series:
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
@@ -747,7 +747,7 @@ def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return self.obj._constructor(out, index=mi, name=self.obj.name)
@doc(Series.nlargest)
- def nlargest(self, n: int = 5, keep: str = "first"):
+ def nlargest(self, n: int = 5, keep: str = "first") -> Series:
f = partial(Series.nlargest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
@@ -756,7 +756,7 @@ def nlargest(self, n: int = 5, keep: str = "first"):
return result
@doc(Series.nsmallest)
- def nsmallest(self, n: int = 5, keep: str = "first"):
+ def nsmallest(self, n: int = 5, keep: str = "first") -> Series:
f = partial(Series.nsmallest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
@@ -1600,7 +1600,7 @@ def idxmax(
axis=0,
skipna: bool = True,
numeric_only: bool | lib.NoDefault = lib.no_default,
- ):
+ ) -> DataFrame:
axis = DataFrame._get_axis_number(axis)
if numeric_only is lib.no_default:
# Cannot use self._resolve_numeric_only; we must pass None to
@@ -1639,7 +1639,7 @@ def idxmin(
axis=0,
skipna: bool = True,
numeric_only: bool | lib.NoDefault = lib.no_default,
- ):
+ ) -> DataFrame:
axis = DataFrame._get_axis_number(axis)
if numeric_only is lib.no_default:
# Cannot use self._resolve_numeric_only; we must pass None to
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 7e8a732a2e30d..89e47af4cb614 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -18,6 +18,7 @@ class providing the base-class of operations.
from textwrap import dedent
import types
from typing import (
+ TYPE_CHECKING,
Callable,
Hashable,
Iterable,
@@ -122,6 +123,13 @@ class providing the base-class of operations.
maybe_use_numba,
)
+if TYPE_CHECKING:
+ from pandas.core.window import (
+ ExpandingGroupby,
+ ExponentialMovingWindowGroupby,
+ RollingGroupby,
+ )
+
_common_see_also = """
See Also
--------
@@ -663,7 +671,7 @@ def ngroups(self) -> int:
@final
@property
- def indices(self):
+ def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
"""
Dict {group name -> group indices}.
"""
@@ -2758,7 +2766,7 @@ def resample(self, rule, *args, **kwargs):
@final
@Substitution(name="groupby")
@Appender(_common_see_also)
- def rolling(self, *args, **kwargs):
+ def rolling(self, *args, **kwargs) -> RollingGroupby:
"""
Return a rolling grouper, providing rolling functionality per group.
"""
@@ -2775,7 +2783,7 @@ def rolling(self, *args, **kwargs):
@final
@Substitution(name="groupby")
@Appender(_common_see_also)
- def expanding(self, *args, **kwargs):
+ def expanding(self, *args, **kwargs) -> ExpandingGroupby:
"""
Return an expanding grouper, providing expanding
functionality per group.
@@ -2792,7 +2800,7 @@ def expanding(self, *args, **kwargs):
@final
@Substitution(name="groupby")
@Appender(_common_see_also)
- def ewm(self, *args, **kwargs):
+ def ewm(self, *args, **kwargs) -> ExponentialMovingWindowGroupby:
"""
Return an ewm grouper, providing ewm functionality per group.
"""
@@ -3484,7 +3492,7 @@ def rank(
na_option: str = "keep",
pct: bool = False,
axis: int = 0,
- ):
+ ) -> NDFrameT:
"""
Provide the rank of values within each group.
@@ -3575,7 +3583,7 @@ def rank(
@final
@Substitution(name="groupby")
@Appender(_common_see_also)
- def cumprod(self, axis=0, *args, **kwargs):
+ def cumprod(self, axis=0, *args, **kwargs) -> NDFrameT:
"""
Cumulative product for each group.
@@ -3593,7 +3601,7 @@ def cumprod(self, axis=0, *args, **kwargs):
@final
@Substitution(name="groupby")
@Appender(_common_see_also)
- def cumsum(self, axis=0, *args, **kwargs):
+ def cumsum(self, axis=0, *args, **kwargs) -> NDFrameT:
"""
Cumulative sum for each group.
@@ -3611,7 +3619,7 @@ def cumsum(self, axis=0, *args, **kwargs):
@final
@Substitution(name="groupby")
@Appender(_common_see_also)
- def cummin(self, axis=0, numeric_only=False, **kwargs):
+ def cummin(self, axis=0, numeric_only=False, **kwargs) -> NDFrameT:
"""
Cumulative min for each group.
@@ -3631,7 +3639,7 @@ def cummin(self, axis=0, numeric_only=False, **kwargs):
@final
@Substitution(name="groupby")
@Appender(_common_see_also)
- def cummax(self, axis=0, numeric_only=False, **kwargs):
+ def cummax(self, axis=0, numeric_only=False, **kwargs) -> NDFrameT:
"""
Cumulative max for each group.
@@ -3921,7 +3929,7 @@ def pct_change(self, periods=1, fill_method="ffill", limit=None, freq=None, axis
@final
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
- def head(self, n=5):
+ def head(self, n: int = 5) -> NDFrameT:
"""
Return first n rows of each group.
@@ -3960,7 +3968,7 @@ def head(self, n=5):
@final
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
- def tail(self, n=5):
+ def tail(self, n: int = 5) -> NDFrameT:
"""
Return last n rows of each group.
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 0c9b64dc8cec3..b9f4166b475ca 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -679,10 +679,16 @@ def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]:
elif isinstance(self.grouping_vector, ops.BaseGrouper):
# we have a list of groupers
codes = self.grouping_vector.codes_info
- uniques = self.grouping_vector.result_index._values
+ # error: Incompatible types in assignment (expression has type "Union
+ # [ExtensionArray, ndarray[Any, Any]]", variable has type "Categorical")
+ uniques = (
+ self.grouping_vector.result_index._values # type: ignore[assignment]
+ )
else:
# GH35667, replace dropna=False with use_na_sentinel=False
- codes, uniques = algorithms.factorize(
+ # error: Incompatible types in assignment (expression has type "Union[
+ # ndarray[Any, Any], Index]", variable has type "Categorical")
+ codes, uniques = algorithms.factorize( # type: ignore[assignment]
self.grouping_vector, sort=self._sort, use_na_sentinel=self._dropna
)
return codes, uniques
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index d056b4b03d904..6dc4ccfa8e1ee 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -735,7 +735,7 @@ def groupings(self) -> list[grouper.Grouping]:
def shape(self) -> Shape:
return tuple(ping.ngroups for ping in self.groupings)
- def __iter__(self):
+ def __iter__(self) -> Iterator[Hashable]:
return iter(self.indices)
@property
diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py
index f098066d1c7d7..0f3cdc4195c85 100644
--- a/pandas/core/indexers/utils.py
+++ b/pandas/core/indexers/utils.py
@@ -240,7 +240,7 @@ def validate_indices(indices: np.ndarray, n: int) -> None:
# Indexer Conversion
-def maybe_convert_indices(indices, n: int, verify: bool = True):
+def maybe_convert_indices(indices, n: int, verify: bool = True) -> np.ndarray:
"""
Attempt to convert indices into valid, positive indices.
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 8694ad94dae26..46959aa5cd3e2 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -38,7 +38,10 @@
from pandas.core.indexes.timedeltas import TimedeltaIndex
if TYPE_CHECKING:
- from pandas import Series
+ from pandas import (
+ DataFrame,
+ Series,
+ )
class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):
@@ -241,7 +244,7 @@ def to_pydatetime(self) -> np.ndarray:
def freq(self):
return self._get_values().inferred_freq
- def isocalendar(self):
+ def isocalendar(self) -> DataFrame:
"""
Calculate year, week, and day according to the ISO 8601 standard.
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 87f2ae41cc98e..667ce4664c359 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1552,7 +1552,7 @@ def _summary(self, name=None) -> str_t:
# --------------------------------------------------------------------
# Conversion Methods
- def to_flat_index(self):
+ def to_flat_index(self: _IndexT) -> _IndexT:
"""
Identity method.
@@ -1709,14 +1709,14 @@ def to_frame(
# Name-Centric Methods
@property
- def name(self):
+ def name(self) -> Hashable:
"""
Return Index or MultiIndex name.
"""
return self._name
@name.setter
- def name(self, value: Hashable):
+ def name(self, value: Hashable) -> None:
if self._no_setting_name:
# Used in MultiIndex.levels to avoid silently ignoring name updates.
raise RuntimeError(
@@ -5947,7 +5947,7 @@ def _get_values_for_loc(self, series: Series, loc, key):
return series.iloc[loc]
@final
- def set_value(self, arr, key, value):
+ def set_value(self, arr, key, value) -> None:
"""
Fast lookup of value from 1-dimensional ndarray.
@@ -7008,16 +7008,16 @@ def _unary_method(self, op):
result = op(self._values)
return Index(result, name=self.name)
- def __abs__(self):
+ def __abs__(self) -> Index:
return self._unary_method(operator.abs)
- def __neg__(self):
+ def __neg__(self) -> Index:
return self._unary_method(operator.neg)
- def __pos__(self):
+ def __pos__(self) -> Index:
return self._unary_method(operator.pos)
- def __invert__(self):
+ def __invert__(self) -> Index:
# GH#8875
return self._unary_method(operator.inv)
@@ -7131,7 +7131,7 @@ def _maybe_disable_logical_methods(self, opname: str_t) -> None:
make_invalid_op(opname)(self)
@Appender(IndexOpsMixin.argmin.__doc__)
- def argmin(self, axis=None, skipna=True, *args, **kwargs):
+ def argmin(self, axis=None, skipna=True, *args, **kwargs) -> int:
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
@@ -7143,7 +7143,7 @@ def argmin(self, axis=None, skipna=True, *args, **kwargs):
return super().argmin(skipna=skipna)
@Appender(IndexOpsMixin.argmax.__doc__)
- def argmax(self, axis=None, skipna=True, *args, **kwargs):
+ def argmax(self, axis=None, skipna=True, *args, **kwargs) -> int:
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index c2bcd90ff10fb..9a70a4a1aa615 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -422,6 +422,7 @@ def reindex(
stacklevel=find_stack_level(),
)
+ new_target: Index
if len(self) and indexer is not None:
new_target = self.take(indexer)
else:
@@ -434,8 +435,8 @@ def reindex(
if not isinstance(target, CategoricalIndex) or (cats == -1).any():
new_target, indexer, _ = super()._reindex_non_unique(target)
else:
-
- codes = new_target.codes.copy()
+ # error: "Index" has no attribute "codes"
+ codes = new_target.codes.copy() # type: ignore[attr-defined]
codes[indexer == -1] = cats[missing]
cat = self._data._from_backing_data(codes)
new_target = type(self)._simple_new(cat, name=self.name)
@@ -450,8 +451,8 @@ def reindex(
new_target = type(self)._simple_new(cat, name=self.name)
else:
# e.g. test_reindex_with_categoricalindex, test_reindex_duplicate_target
- new_target = np.asarray(new_target)
- new_target = Index._with_infer(new_target, name=self.name)
+ new_target_array = np.asarray(new_target)
+ new_target = Index._with_infer(new_target_array, name=self.name)
return new_target, indexer
@@ -488,7 +489,7 @@ def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex:
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
return self.categories._is_comparable_dtype(dtype)
- def take_nd(self, *args, **kwargs):
+ def take_nd(self, *args, **kwargs) -> CategoricalIndex:
"""Alias for `take`"""
warnings.warn(
"CategoricalIndex.take_nd is deprecated, use CategoricalIndex.take "
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 811dc72e9b908..8014d010afc1b 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -672,7 +672,7 @@ def _get_insert_freq(self, loc: int, item):
return freq
@doc(NDArrayBackedExtensionIndex.delete)
- def delete(self, loc):
+ def delete(self, loc) -> DatetimeTimedeltaMixin:
result = super().delete(loc)
result._data._freq = self._get_delete_freq(loc)
return result
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 583612b4659b6..fd6b6ba63d7e0 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -308,7 +308,7 @@ def __new__(
copy=False,
name=None,
verify_integrity: bool = True,
- ):
+ ) -> MultiIndex:
# compat with Index
if name is not None:
@@ -503,7 +503,7 @@ def from_tuples(
cls,
tuples: Iterable[tuple[Hashable, ...]],
sortorder: int | None = None,
- names: Sequence[Hashable] | None = None,
+ names: Sequence[Hashable] | Hashable | None = None,
) -> MultiIndex:
"""
Convert list of tuples to MultiIndex.
@@ -562,7 +562,9 @@ def from_tuples(
if len(tuples) == 0:
if names is None:
raise TypeError("Cannot infer number of levels from empty list")
- arrays = [[]] * len(names)
+ # error: Argument 1 to "len" has incompatible type "Hashable";
+ # expected "Sized"
+ arrays = [[]] * len(names) # type: ignore[arg-type]
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = np.asarray(tuples._values)
@@ -1826,7 +1828,9 @@ def to_frame(
result.index = self
return result
- def to_flat_index(self) -> Index:
+ # error: Return type "Index" of "to_flat_index" incompatible with return type
+ # "MultiIndex" in supertype "Index"
+ def to_flat_index(self) -> Index: # type: ignore[override]
"""
Convert a MultiIndex to an Index of Tuples containing the level values.
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index c1cb5ad315298..f270a6e8b555f 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -120,7 +120,9 @@ def inferred_type(self) -> str:
"c": "complex",
}[self.dtype.kind]
- def __new__(cls, data=None, dtype: Dtype | None = None, copy=False, name=None):
+ def __new__(
+ cls, data=None, dtype: Dtype | None = None, copy=False, name=None
+ ) -> NumericIndex:
name = maybe_extract_name(name, data, cls)
subarr = cls._ensure_array(data, dtype, copy)
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 5b384fbc97c1a..2d6d121a089c0 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -8,6 +8,7 @@
Any,
Callable,
Hashable,
+ Iterator,
List,
cast,
)
@@ -426,7 +427,7 @@ def tolist(self) -> list[int]:
return list(self._range)
@doc(Int64Index.__iter__)
- def __iter__(self):
+ def __iter__(self) -> Iterator[int]:
yield from self._range
@doc(Int64Index._shallow_copy)
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index 7d2e4129461a7..3a8ed54d6c634 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -171,13 +171,13 @@ def set_axis(self, axis: int, new_labels: Index) -> None:
axis = self._normalize_axis(axis)
self._axes[axis] = new_labels
- def get_dtypes(self):
+ def get_dtypes(self) -> np.ndarray:
return np.array([arr.dtype for arr in self.arrays], dtype="object")
def __getstate__(self):
return self.arrays, self._axes
- def __setstate__(self, state):
+ def __setstate__(self, state) -> None:
self.arrays = state[0]
self._axes = state[1]
@@ -348,7 +348,7 @@ def where(self: T, other, cond, align: bool) -> T:
def setitem(self: T, indexer, value) -> T:
return self.apply_with_block("setitem", indexer=indexer, value=value)
- def putmask(self, mask, new, align: bool = True):
+ def putmask(self: T, mask, new, align: bool = True) -> T:
if align:
align_keys = ["new", "mask"]
else:
@@ -451,7 +451,7 @@ def replace_list(
regex=regex,
)
- def to_native_types(self, **kwargs):
+ def to_native_types(self: T, **kwargs) -> T:
return self.apply(to_native_types, **kwargs)
@property
@@ -815,7 +815,7 @@ def column_arrays(self) -> list[ArrayLike]:
def iset(
self, loc: int | slice | np.ndarray, value: ArrayLike, inplace: bool = False
- ):
+ ) -> None:
"""
Set new column(s).
@@ -923,7 +923,7 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None:
self.arrays = arrays
self._axes[1] = new_axis
- def idelete(self, indexer):
+ def idelete(self, indexer) -> ArrayManager:
"""
Delete selected locations in-place (new block and array, same BlockManager)
"""
@@ -1240,7 +1240,7 @@ def make_empty(self, axes=None) -> SingleArrayManager:
return type(self)([array], axes)
@classmethod
- def from_array(cls, array, index):
+ def from_array(cls, array, index) -> SingleArrayManager:
return cls([array], [index])
@property
@@ -1305,7 +1305,7 @@ def apply(self, func, **kwargs):
new_array = getattr(self.array, func)(**kwargs)
return type(self)([new_array], self._axes)
- def setitem(self, indexer, value):
+ def setitem(self, indexer, value) -> SingleArrayManager:
"""
Set values with indexer.
@@ -1336,7 +1336,7 @@ def _get_data_subset(self, predicate: Callable) -> SingleArrayManager:
else:
return self.make_empty()
- def set_values(self, values: ArrayLike):
+ def set_values(self, values: ArrayLike) -> None:
"""
Set (replace) the values of the SingleArrayManager in place.
@@ -1372,7 +1372,7 @@ def __init__(self, n: int) -> None:
self.n = n
@property
- def shape(self):
+ def shape(self) -> tuple[int]:
return (self.n,)
def to_array(self, dtype: DtypeObj) -> ArrayLike:
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 49efecec7472e..df327716970f1 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -215,7 +215,7 @@ def mgr_locs(self) -> BlockPlacement:
return self._mgr_locs
@mgr_locs.setter
- def mgr_locs(self, new_mgr_locs: BlockPlacement):
+ def mgr_locs(self, new_mgr_locs: BlockPlacement) -> None:
self._mgr_locs = new_mgr_locs
@final
@@ -504,7 +504,7 @@ def dtype(self) -> DtypeObj:
@final
def astype(
self, dtype: DtypeObj, copy: bool = False, errors: IgnoreRaise = "raise"
- ):
+ ) -> Block:
"""
Coerce to the new dtype.
@@ -536,13 +536,13 @@ def astype(
return newb
@final
- def to_native_types(self, na_rep="nan", quoting=None, **kwargs):
+ def to_native_types(self, na_rep="nan", quoting=None, **kwargs) -> Block:
"""convert to our native types format"""
result = to_native_types(self.values, na_rep=na_rep, quoting=quoting, **kwargs)
return self.make_block(result)
@final
- def copy(self, deep: bool = True):
+ def copy(self, deep: bool = True) -> Block:
"""copy constructor"""
values = self.values
if deep:
@@ -575,7 +575,11 @@ def replace(
if isinstance(values, Categorical):
# TODO: avoid special-casing
blk = self if inplace else self.copy()
- blk.values._replace(to_replace=to_replace, value=value, inplace=True)
+ # error: Item "ExtensionArray" of "Union[ndarray[Any, Any],
+ # ExtensionArray]" has no attribute "_replace"
+ blk.values._replace( # type: ignore[union-attr]
+ to_replace=to_replace, value=value, inplace=True
+ )
return [blk]
if not self._can_hold_element(to_replace):
@@ -725,10 +729,13 @@ def replace_list(
assert not isinstance(mib, bool)
m = mib[blk_num : blk_num + 1]
+ # error: Argument "mask" to "_replace_coerce" of "Block" has
+ # incompatible type "Union[ExtensionArray, ndarray[Any, Any], bool]";
+ # expected "ndarray[Any, dtype[bool_]]"
result = blk._replace_coerce(
to_replace=src,
value=dest,
- mask=m,
+ mask=m, # type: ignore[arg-type]
inplace=inplace,
regex=regex,
)
@@ -815,7 +822,7 @@ def _unwrap_setitem_indexer(self, indexer):
def shape(self) -> Shape:
return self.values.shape
- def iget(self, i: int | tuple[int, int] | tuple[slice, int]):
+ def iget(self, i: int | tuple[int, int] | tuple[slice, int]) -> np.ndarray:
# In the case where we have a tuple[slice, int], the slice will always
# be slice(None)
# Note: only reached with self.ndim == 2
@@ -924,7 +931,7 @@ def _unstack(
# ---------------------------------------------------------------------
- def setitem(self, indexer, value):
+ def setitem(self, indexer, value) -> Block:
"""
Attempt self.values[indexer] = value, possibly creating a new array.
@@ -2156,7 +2163,7 @@ def new_block(values, placement, *, ndim: int) -> Block:
return klass(values, ndim=ndim, placement=placement)
-def check_ndim(values, placement: BlockPlacement, ndim: int):
+def check_ndim(values, placement: BlockPlacement, ndim: int) -> None:
"""
ndim inference and validation.
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 4a352d614e1d9..77197dac3363b 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -381,7 +381,7 @@ def needs_filling(self) -> bool:
return False
@cache_readonly
- def dtype(self):
+ def dtype(self) -> DtypeObj:
blk = self.block
if blk.values.dtype.kind == "V":
raise AssertionError("Block is None, no dtype")
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 626809eab304e..c1d0ab730fe7e 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -167,7 +167,7 @@ def rec_array_to_mgr(
dtype: DtypeObj | None,
copy: bool,
typ: str,
-):
+) -> Manager:
"""
Extract from a masked rec array and create the manager.
"""
diff --git a/pandas/core/ops/invalid.py b/pandas/core/ops/invalid.py
index cc4a1f11edd2b..e069c765d5299 100644
--- a/pandas/core/ops/invalid.py
+++ b/pandas/core/ops/invalid.py
@@ -6,7 +6,7 @@
import numpy as np
-def invalid_comparison(left, right, op):
+def invalid_comparison(left, right, op) -> np.ndarray:
"""
If a comparison has mismatched types and is not necessarily meaningful,
follow python3 conventions by:
diff --git a/pandas/core/ops/mask_ops.py b/pandas/core/ops/mask_ops.py
index 57bacba0d4bee..adc1f63c568bf 100644
--- a/pandas/core/ops/mask_ops.py
+++ b/pandas/core/ops/mask_ops.py
@@ -184,6 +184,6 @@ def kleene_and(
return result, mask
-def raise_for_nan(value, method: str):
+def raise_for_nan(value, method: str) -> None:
if lib.is_float(value) and np.isnan(value):
raise ValueError(f"Cannot perform logical '{method}' with floating NaN")
diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py
index df22919ed19f1..d1f704635ba64 100644
--- a/pandas/core/ops/methods.py
+++ b/pandas/core/ops/methods.py
@@ -43,7 +43,7 @@ def _get_method_wrappers(cls):
return arith_flex, comp_flex
-def add_flex_arithmetic_methods(cls):
+def add_flex_arithmetic_methods(cls) -> None:
"""
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index b4e944861f1bc..d4f4057af7bfd 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -152,7 +152,7 @@ def _indexer_and_to_sort(
return indexer, to_sort
@cache_readonly
- def sorted_labels(self):
+ def sorted_labels(self) -> list[np.ndarray]:
indexer, to_sort = self._indexer_and_to_sort
return [line.take(indexer) for line in to_sort]
@@ -199,7 +199,7 @@ def arange_result(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.bool_]]:
return new_values, mask.any(0)
# TODO: in all tests we have mask.any(0).all(); can we rely on that?
- def get_result(self, values, value_columns, fill_value):
+ def get_result(self, values, value_columns, fill_value) -> DataFrame:
if values.ndim == 1:
values = values[:, np.newaxis]
@@ -346,7 +346,7 @@ def _repeater(self) -> np.ndarray:
return repeater
@cache_readonly
- def new_index(self):
+ def new_index(self) -> MultiIndex:
# Does not depend on values or value_columns
result_codes = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]]
diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py
index 9f9143f4aaa60..459928acc0da3 100644
--- a/pandas/core/reshape/util.py
+++ b/pandas/core/reshape/util.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import numpy as np
from pandas._typing import NumpyIndexT
@@ -5,7 +7,7 @@
from pandas.core.dtypes.common import is_list_like
-def cartesian_product(X):
+def cartesian_product(X) -> list[np.ndarray]:
"""
Numpy version of itertools.product.
Sometimes faster (for large inputs)...
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index d4d61df915acb..7de34c04a31ed 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -266,7 +266,7 @@ def _box_as_indexlike(
def _convert_and_box_cache(
arg: DatetimeScalarOrArrayConvertible,
cache_array: Series,
- name: str | None = None,
+ name: Hashable | None = None,
) -> Index:
"""
Convert array of dates with a cache and wrap the result in an Index.
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index a153761f377b3..3a42a4b1a1663 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -442,7 +442,9 @@ def _get_window_indexer(self) -> BaseIndexer:
"""
return ExponentialMovingWindowIndexer()
- def online(self, engine="numba", engine_kwargs=None):
+ def online(
+ self, engine="numba", engine_kwargs=None
+ ) -> OnlineExponentialMovingWindow:
"""
Return an ``OnlineExponentialMovingWindow`` object to calculate
exponentially moving window aggregations in an online method.
@@ -948,7 +950,7 @@ def __init__(
else:
raise ValueError("'numba' is the only supported engine")
- def reset(self):
+ def reset(self) -> None:
"""
Reset the state captured by `update` calls.
"""
diff --git a/pandas/core/window/online.py b/pandas/core/window/online.py
index 2ef06732f9800..bb973f05687e2 100644
--- a/pandas/core/window/online.py
+++ b/pandas/core/window/online.py
@@ -112,6 +112,6 @@ def run_ewm(self, weighted_avg, deltas, min_periods, ewm_func):
self.last_ewm = result[-1]
return result
- def reset(self):
+ def reset(self) -> None:
self.old_wt = np.ones(self.shape[self.axis - 1])
self.last_ewm = None
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index c577acfaeba8e..6ab57b0cce2a4 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -118,16 +118,16 @@ def _initialize_index_label(self, index_label: IndexLabel | None) -> IndexLabel:
return [index_label]
return index_label
- def _get_index_label_from_obj(self) -> list[str]:
+ def _get_index_label_from_obj(self) -> Sequence[Hashable]:
if isinstance(self.obj.index, ABCMultiIndex):
return self._get_index_label_multiindex()
else:
return self._get_index_label_flat()
- def _get_index_label_multiindex(self) -> list[str]:
+ def _get_index_label_multiindex(self) -> Sequence[Hashable]:
return [name or "" for name in self.obj.index.names]
- def _get_index_label_flat(self) -> list[str]:
+ def _get_index_label_flat(self) -> Sequence[Hashable]:
index_label = self.obj.index.name
return [""] if index_label is None else [index_label]
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 6d497f7d9bb94..6554b4c1f1afd 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -993,8 +993,8 @@ def _get_formatted_index(self, frame: DataFrame) -> list[str]:
else:
return adjoined
- def _get_column_name_list(self) -> list[str]:
- names: list[str] = []
+ def _get_column_name_list(self) -> list[Hashable]:
+ names: list[Hashable] = []
columns = self.frame.columns
if isinstance(columns, MultiIndex):
names.extend("" if name is None else name for name in columns.names)
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index b6494682d308d..163e7dc7bde5e 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -6,6 +6,7 @@
from textwrap import dedent
from typing import (
Any,
+ Hashable,
Iterable,
Mapping,
cast,
@@ -258,6 +259,7 @@ def _write_table(self, indent: int = 0) -> None:
self.write("</table>", indent)
def _write_col_header(self, indent: int) -> None:
+ row: list[Hashable]
is_truncated_horizontally = self.fmt.is_truncated_horizontally
if isinstance(self.columns, MultiIndex):
template = 'colspan="{span:d}" halign="left"'
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index 3d6b5fcb49b85..711d0857a5a1c 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -367,7 +367,7 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
names = list(chunks[0].keys())
warning_columns = []
- result = {}
+ result: dict = {}
for name in names:
arrs = [chunk.pop(name) for chunk in chunks]
# Check each arr for consistent types.
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 16c1a80034d0c..226a19e1f7599 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1930,7 +1930,9 @@ def _do_convert_categoricals(
categories = list(vl.values())
try:
# Try to catch duplicate categories
- cat_data.categories = categories
+ # error: Incompatible types in assignment (expression has
+ # type "List[str]", variable has type "Index")
+ cat_data.categories = categories # type: ignore[assignment]
except ValueError as err:
vc = Series(categories).value_counts()
repeated_cats = list(vc.index[vc > 1])
| Probably the last big PR related to #47521 There are many more missing return annotations but they should be addressed in smaller PRs (they are either difficult to type or lead to many ignore comments). | https://api.github.com/repos/pandas-dev/pandas/pulls/47618 | 2022-07-06T21:59:25Z | 2022-07-07T16:45:19Z | 2022-07-07T16:45:19Z | 2022-09-21T15:28:31Z |
DOC: Fixed pandas capitalization | diff --git a/RELEASE.md b/RELEASE.md
index 42cb82dfcf020..344a097a3e81e 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -1,6 +1,6 @@
Release Notes
=============
-The list of changes to Pandas between each release can be found
+The list of changes to pandas between each release can be found
[here](https://pandas.pydata.org/pandas-docs/stable/whatsnew/index.html). For full
details, see the commit logs at https://github.com/pandas-dev/pandas.
| Fixed wrong capitalization of "pandas" in RELEASE.md.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47617 | 2022-07-06T20:59:23Z | 2022-07-06T21:11:12Z | 2022-07-06T21:11:11Z | 2022-07-06T21:11:18Z |
TST: Added test for strides in column major Dataframes stored in HDFS… | diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 8a933f4981ff3..3a6f699cce94e 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -1019,3 +1019,11 @@ def test_hdfstore_iteritems_deprecated(setup_path):
hdf.put("table", df)
with tm.assert_produces_warning(FutureWarning):
next(hdf.iteritems())
+
+
+def test_hdfstore_strides(setup_path):
+ # GH22073
+ df = DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})
+ with ensure_clean_store(setup_path) as store:
+ store.put("df", df)
+ assert df["a"].values.strides == store["df"]["a"].values.strides
| …tore
- [x] closes #22073.
- [x] Tests added and passed.
- [x] All code checks passed.
Added test for column-major DataFrames stored in HDFStore which were incorrectly returned as row-major.
Tests and linter pass locally. | https://api.github.com/repos/pandas-dev/pandas/pulls/47616 | 2022-07-06T20:52:14Z | 2022-07-07T00:10:56Z | 2022-07-07T00:10:56Z | 2022-07-07T05:46:09Z |
CLN: Add tets and fix docstring for concat with sort=True and outer join | diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index 6820b7b5360a5..b4f47f70c5a84 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -233,6 +233,7 @@ def _unique_indices(inds, dtype) -> Index:
Parameters
----------
inds : list of Index or list objects
+ dtype : dtype to set for the resulting Index
Returns
-------
diff --git a/pandas/tests/reshape/concat/test_index.py b/pandas/tests/reshape/concat/test_index.py
index 66382eb0e95a9..5796c47884db7 100644
--- a/pandas/tests/reshape/concat/test_index.py
+++ b/pandas/tests/reshape/concat/test_index.py
@@ -399,13 +399,36 @@ def test_concat_range_index_result(self):
expected_index = pd.RangeIndex(0, 2)
tm.assert_index_equal(result.index, expected_index, exact=True)
- @pytest.mark.parametrize("dtype", ["Int64", "object"])
- def test_concat_index_keep_dtype(self, dtype):
+ def test_concat_index_keep_dtype(self):
+ # GH#47329
+ df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype="object"))
+ df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="object"))
+ result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
+ expected = DataFrame(
+ [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="object")
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_concat_index_keep_dtype_ea_numeric(self, any_numeric_ea_dtype):
+ # GH#47329
+ df1 = DataFrame(
+ [[0, 1, 1]], columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype)
+ )
+ df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=any_numeric_ea_dtype))
+ result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
+ expected = DataFrame(
+ [[0, 1, 1.0], [0, 1, np.nan]],
+ columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype),
+ )
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("dtype", ["Int8", "Int16", "Int32"])
+ def test_concat_index_find_common(self, dtype):
# GH#47329
df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype=dtype))
- df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=dtype))
+ df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="Int32"))
result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
expected = DataFrame(
- [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype=dtype)
+ [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="Int32")
)
tm.assert_frame_equal(result, expected)
| cc @simonjayhawkins
Added a couple of tests and fixed the docstring | https://api.github.com/repos/pandas-dev/pandas/pulls/47612 | 2022-07-06T14:45:25Z | 2022-07-07T17:04:55Z | 2022-07-07T17:04:55Z | 2022-07-08T20:07:04Z |
DOC: fix shared compression_options and decompression_options | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3967c3df6b7cb..76a473dad79a4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3296,7 +3296,7 @@ def to_latex(
@final
@doc(
storage_options=_shared_docs["storage_options"],
- compression_options=_shared_docs["compression_options"],
+ compression_options=_shared_docs["compression_options"] % "path_or_buf",
)
@deprecate_kwarg(old_arg_name="line_terminator", new_arg_name="lineterminator")
def to_csv(
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 09ee254633b22..4b7a487e9472d 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -423,7 +423,7 @@
_shared_docs[
"compression_options"
] = """compression : str or dict, default 'infer'
- For on-the-fly compression of the output data. If 'infer' and '%s'
+ For on-the-fly compression of the output data. If 'infer' and '%s' is
path-like, then detect compression from the following extensions: '.gz',
'.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'
(otherwise no compression).
@@ -432,7 +432,7 @@
to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'tar'``} and other
key-value pairs are forwarded to
``zipfile.ZipFile``, ``gzip.GzipFile``,
- ``bz2.BZ2File``, ``zstandard.ZstdDecompressor`` or
+ ``bz2.BZ2File``, ``zstandard.ZstdCompressor`` or
``tarfile.TarFile``, respectively.
As an example, the following could be passed for faster compression and to create
a reproducible gzip archive:
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index b4e3a0545879c..35227dcf6a82d 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -852,7 +852,8 @@ def read_csv(
summary="Read a comma-separated values (csv) file into DataFrame.",
_default_sep="','",
storage_options=_shared_docs["storage_options"],
- decompression_options=_shared_docs["decompression_options"],
+ decompression_options=_shared_docs["decompression_options"]
+ % "filepath_or_buffer",
)
)
def read_csv(
@@ -1189,7 +1190,8 @@ def read_table(
summary="Read general delimited file into DataFrame.",
_default_sep=r"'\\t' (tab-stop)",
storage_options=_shared_docs["storage_options"],
- decompression_options=_shared_docs["decompression_options"],
+ decompression_options=_shared_docs["decompression_options"]
+ % "filepath_or_buffer",
)
)
def read_table(
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index 57014ef9c9622..359174166f980 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -79,7 +79,7 @@ def read_sas(
@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"])
-@doc(decompression_options=_shared_docs["decompression_options"])
+@doc(decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer")
def read_sas(
filepath_or_buffer: FilePath | ReadBuffer[bytes],
format: str | None = None,
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 1a230f4ae4164..16c1a80034d0c 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -140,7 +140,7 @@
{_statafile_processing_params2}
{_chunksize_params}
{_iterator_params}
-{_shared_docs["decompression_options"]}
+{_shared_docs["decompression_options"] % "filepath_or_buffer"}
{_shared_docs["storage_options"]}
Returns
| Both compression_options and decompression_options made the same
reference to zstandard.ZstdDecompressor however compression uses
zstandard.ZstdCompressor and so the documentation for
compression_options should refer to this class instead
The shared doc strings compression_options and decompression_options
require an interpolated string for the name of the parameter that takes
a filepath/buffer, with the name of this parameter.
It was missing in a few places.
Also fix a small gramatical error in compression_options
It took me a while to find the documentation for the python zstandard library to see what options are available to pass in as a dictionary, do you think it's worth updating the text `zstandard.ZstdDecompressor ` in the docs to be a link to https://python-zstandard.readthedocs.io/en/latest/decompressor.html?
I saw some test failures when running test_fast.sh inside the provided Docker image, but from what I can tell, these are not related to the changes that I made, and I also see the same failure when running tests from `main`:
<details>
<summary>test failure</summary>
```
pandas/tests/test_expressions.py:133: AssertionError
________________ TestExpressions.test_run_binary[gt-False-df2] _________________
[gw2] linux -- Python 3.8.13 /opt/conda/bin/python
self = <pandas.tests.test_expressions.TestExpressions object at 0x7fcc8c9f3fd0>
df = A B C D
0 0 0 12 0
1 22 22 54 17
2 0 0 21 0
3 0 95 0 0
4 ...3 91
9997 25 0 32 5
9998 0 55 0 62
9999 42 25 30 1
10000 3 0 75 56
[10001 rows x 4 columns]
flex = False, comparison_op = <built-in function gt>
@pytest.mark.parametrize(
"df",
[
_integer,
_integer2,
# randint to get a case with zeros
_integer * np.random.randint(0, 2, size=np.shape(_integer)),
_frame,
_frame2,
_mixed,
_mixed2,
],
)
@pytest.mark.parametrize("flex", [True, False])
def test_run_binary(self, df, flex, comparison_op):
"""
tests solely that the result is the same whether or not numexpr is
enabled. Need to test whether the function does the correct thing
elsewhere.
"""
arith = comparison_op.__name__
with option_context("compute.use_numexpr", False):
other = df.copy() + 1
expr._MIN_ELEMENTS = 0
expr.set_test_mode(True)
result, expected = self.call_op(df, other, flex, arith)
used_numexpr = expr.get_test_result()
> assert used_numexpr, "Did not use numexpr as expected."
E AssertionError: Did not use numexpr as expected.
E assert []
```
</details>
- [x] no related ticket
- [x] Built and inspected docs
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] N/A Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] N/A Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47609 | 2022-07-06T10:55:53Z | 2022-07-06T16:45:15Z | 2022-07-06T16:45:15Z | 2022-07-06T16:45:22Z |
BUG: df.groupby().resample()[[cols]] without key columns raise KeyError | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 0b450fab53137..5b07fc31287e5 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -996,6 +996,8 @@ Groupby/resample/rolling
- Bug in :meth:`.DataFrameGroupBy.describe` and :meth:`.SeriesGroupBy.describe` produces inconsistent results for empty datasets (:issue:`41575`)
- Bug in :meth:`DataFrame.resample` reduction methods when used with ``on`` would attempt to aggregate the provided column (:issue:`47079`)
- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` would not respect ``dropna=False`` when the input DataFrame/Series had a NaN values in a :class:`MultiIndex` (:issue:`46783`)
+- Bug in :meth:`DataFrameGroupBy.resample` raises ``KeyError`` when getting the result from a key list which misses the resample key (:issue:`47362`)
+-
Reshaping
^^^^^^^^^
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 0a62861cdaba7..dc436cd322a8d 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -502,11 +502,11 @@ def _apply_loffset(self, result):
self.loffset = None
return result
- def _get_resampler_for_grouping(self, groupby):
+ def _get_resampler_for_grouping(self, groupby, key=None):
"""
Return the correct class for resampling with groupby.
"""
- return self._resampler_for_grouping(self, groupby=groupby)
+ return self._resampler_for_grouping(self, groupby=groupby, key=key)
def _wrap_result(self, result):
"""
@@ -1132,7 +1132,7 @@ class _GroupByMixin(PandasObject):
_attributes: list[str] # in practice the same as Resampler._attributes
_selection: IndexLabel | None = None
- def __init__(self, obj, parent=None, groupby=None, **kwargs) -> None:
+ def __init__(self, obj, parent=None, groupby=None, key=None, **kwargs) -> None:
# reached via ._gotitem and _get_resampler_for_grouping
if parent is None:
@@ -1145,6 +1145,7 @@ def __init__(self, obj, parent=None, groupby=None, **kwargs) -> None:
self._selection = kwargs.get("selection")
self.binner = parent.binner
+ self.key = key
self._groupby = groupby
self._groupby.mutated = True
@@ -1197,6 +1198,8 @@ def _gotitem(self, key, ndim, subset=None):
# Try to select from a DataFrame, falling back to a Series
try:
+ if isinstance(key, list) and self.key not in key:
+ key.append(self.key)
groupby = self._groupby[key]
except IndexError:
groupby = self._groupby
@@ -1511,7 +1514,7 @@ def get_resampler_for_grouping(
# .resample uses 'on' similar to how .groupby uses 'key'
tg = TimeGrouper(freq=rule, key=on, **kwargs)
resampler = tg._get_resampler(groupby.obj, kind=kind)
- return resampler._get_resampler_for_grouping(groupby=groupby)
+ return resampler._get_resampler_for_grouping(groupby=groupby, key=tg.key)
class TimeGrouper(Grouper):
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index c54d9de009940..8aff217cca5c1 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -470,3 +470,30 @@ def test_resample_groupby_agg_object_dtype_all_nan(consolidate):
index=idx,
)
tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_resample_with_list_of_keys():
+ # GH 47362
+ df = DataFrame(
+ data={
+ "date": date_range(start="2016-01-01", periods=8),
+ "group": [0, 0, 0, 0, 1, 1, 1, 1],
+ "val": [1, 7, 5, 2, 3, 10, 5, 1],
+ }
+ )
+ result = df.groupby("group").resample("2D", on="date")[["val"]].mean()
+ expected = DataFrame(
+ data={
+ "val": [4.0, 3.5, 6.5, 3.0],
+ },
+ index=Index(
+ data=[
+ (0, Timestamp("2016-01-01")),
+ (0, Timestamp("2016-01-03")),
+ (1, Timestamp("2016-01-05")),
+ (1, Timestamp("2016-01-07")),
+ ],
+ name=("group", "date"),
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #47362
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47605 | 2022-07-06T04:44:26Z | 2022-07-11T21:00:02Z | 2022-07-11T21:00:02Z | 2022-07-12T02:00:41Z |
Add test for unique aggregation returning different dtypes | diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index 72a9d8723d34c..1ef0865fff552 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -1585,3 +1585,21 @@ def test_apply_on_empty_dataframe():
result = df.head(0).apply(lambda x: max(x["a"], x["b"]), axis=1)
expected = Series([])
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "test, constant",
+ [
+ ({"a": [1, 2, 3], "b": [1, 1, 1]}, {"a": [1, 2, 3], "b": [1]}),
+ ({"a": [2, 2, 2], "b": [1, 1, 1]}, {"a": [2], "b": [1]}),
+ ],
+)
+def test_unique_agg_type_is_series(test, constant):
+ # GH#22558
+ df1 = DataFrame(test)
+ expected = Series(data=constant, index=["a", "b"], dtype="object")
+ aggregation = {"a": "unique", "b": "unique"}
+
+ result = df1.agg(aggregation)
+
+ tm.assert_series_equal(result, expected)
| - [ ] closes #22558 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47603 | 2022-07-05T23:16:10Z | 2022-07-26T21:11:05Z | 2022-07-26T21:11:05Z | 2022-07-26T21:11:15Z |
BUG: Make xticks from _quarterly_finder() line up better | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 0b450fab53137..746d58fb6b437 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -970,6 +970,7 @@ Plotting
- Bug in :meth:`DataFrame.plot.scatter` that prevented specifying ``norm`` (:issue:`45809`)
- The function :meth:`DataFrame.plot.scatter` now accepts ``color`` as an alias for ``c`` and ``size`` as an alias for ``s`` for consistency to other plotting functions (:issue:`44670`)
- Fix showing "None" as ylabel in :meth:`Series.plot` when not setting ylabel (:issue:`46129`)
+- Bug in :meth:`DataFrame.plot` that led to xticks and vertical grids being improperly placed when plotting a quarterly series (:issue:`47602`)
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 31f014d25d67d..873084393371c 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -867,7 +867,8 @@ def _quarterly_finder(vmin, vmax, freq):
info_fmt[year_start] = "%F"
else:
- years = dates_[year_start] // 4 + 1
+ # https://github.com/pandas-dev/pandas/pull/47602
+ years = dates_[year_start] // 4 + 1970
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(years % maj_anndef == 0)]
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index 656969bfad703..3ec8f4bd71c2b 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -15,8 +15,10 @@
from pandas import (
Index,
Period,
+ PeriodIndex,
Series,
Timestamp,
+ arrays,
date_range,
)
import pandas._testing as tm
@@ -375,3 +377,32 @@ def get_view_interval(self):
tdc = converter.TimeSeries_TimedeltaFormatter()
monkeypatch.setattr(tdc, "axis", mock_axis())
tdc(0.0, 0)
+
+
+@pytest.mark.parametrize("year_span", [11.25, 30, 80, 150, 400, 800, 1500, 2500, 3500])
+# The range is limited to 11.25 at the bottom by if statements in
+# the _quarterly_finder() function
+def test_quarterly_finder(year_span):
+ vmin = -1000
+ vmax = vmin + year_span * 4
+ span = vmax - vmin + 1
+ if span < 45: # the quarterly finder is only invoked if the span is >= 45
+ return
+ nyears = span / 4
+ (min_anndef, maj_anndef) = converter._get_default_annual_spacing(nyears)
+ result = converter._quarterly_finder(vmin, vmax, "Q")
+ quarters = PeriodIndex(
+ arrays.PeriodArray(np.array([x[0] for x in result]), freq="Q")
+ )
+ majors = np.array([x[1] for x in result])
+ minors = np.array([x[2] for x in result])
+ major_quarters = quarters[majors]
+ minor_quarters = quarters[minors]
+ check_major_years = major_quarters.year % maj_anndef == 0
+ check_minor_years = minor_quarters.year % min_anndef == 0
+ check_major_quarters = major_quarters.quarter == 1
+ check_minor_quarters = minor_quarters.quarter == 1
+ assert np.all(check_major_years)
+ assert np.all(check_minor_years)
+ assert np.all(check_major_quarters)
+ assert np.all(check_minor_quarters)
| When I plot a series of quarterly data that spans more than 11 years, my vertical grid lines get placed a year before where they should, i.e., one year before a year that is divisible by the default annual spacing. Changing one line in the _quarterly_finder() function, from +1 to +1970, fixes this for me. Can anyone please confirm the issue and that this fixes it? | https://api.github.com/repos/pandas-dev/pandas/pulls/47602 | 2022-07-05T21:42:50Z | 2022-07-15T19:20:56Z | 2022-07-15T19:20:56Z | 2022-07-15T19:52:27Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.