title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
REF: remove dtstruct_to_dt64 | diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index fb77e2b5f3a0c..00e2c8b8b6be6 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -40,7 +40,6 @@ from pandas._libs.tslibs.np_datetime cimport (
NPY_FR_ns,
astype_overflowsafe,
check_dts_bounds,
- dt64_to_dtstruct,
dtstruct_to_dt64,
get_datetime64_unit,
get_datetime64_value,
@@ -248,7 +247,7 @@ cdef _TSObject convert_to_tsobject(object ts, tzinfo tz, str unit,
elif is_datetime64_object(ts):
obj.value = get_datetime64_nanos(ts)
if obj.value != NPY_NAT:
- dt64_to_dtstruct(obj.value, &obj.dts)
+ pandas_datetime_to_datetimestruct(obj.value, NPY_FR_ns, &obj.dts)
elif is_integer_object(ts):
try:
ts = <int64_t>ts
@@ -266,7 +265,7 @@ cdef _TSObject convert_to_tsobject(object ts, tzinfo tz, str unit,
ts = ts * cast_from_unit(None, unit)
obj.value = ts
- dt64_to_dtstruct(ts, &obj.dts)
+ pandas_datetime_to_datetimestruct(ts, NPY_FR_ns, &obj.dts)
elif is_float_object(ts):
if ts != ts or ts == NPY_NAT:
obj.value = NPY_NAT
@@ -289,7 +288,7 @@ cdef _TSObject convert_to_tsobject(object ts, tzinfo tz, str unit,
ts = cast_from_unit(ts, unit)
obj.value = ts
- dt64_to_dtstruct(ts, &obj.dts)
+ pandas_datetime_to_datetimestruct(ts, NPY_FR_ns, &obj.dts)
elif PyDateTime_Check(ts):
return convert_datetime_to_tsobject(ts, tz, nanos)
elif PyDate_Check(ts):
diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd
index 2f775912da141..290483a741fe7 100644
--- a/pandas/_libs/tslibs/np_datetime.pxd
+++ b/pandas/_libs/tslibs/np_datetime.pxd
@@ -76,7 +76,6 @@ cdef bint cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1
cdef check_dts_bounds(npy_datetimestruct *dts, NPY_DATETIMEUNIT unit=?)
cdef int64_t dtstruct_to_dt64(npy_datetimestruct* dts) nogil
-cdef void dt64_to_dtstruct(int64_t dt64, npy_datetimestruct* out) nogil
cdef int64_t pydatetime_to_dt64(datetime val, npy_datetimestruct *dts)
cdef void pydatetime_to_dtstruct(datetime dt, npy_datetimestruct *dts)
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 24ba329120a51..1aab5dcd6f70b 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -217,14 +217,6 @@ cdef inline int64_t dtstruct_to_dt64(npy_datetimestruct* dts) nogil:
return npy_datetimestruct_to_datetime(NPY_FR_ns, dts)
-cdef inline void dt64_to_dtstruct(int64_t dt64,
- npy_datetimestruct* out) nogil:
- """Convenience function to call pandas_datetime_to_datetimestruct
- with the by-far-most-common frequency NPY_FR_ns"""
- pandas_datetime_to_datetimestruct(dt64, NPY_FR_ns, out)
- return
-
-
# just exposed for testing at the moment
def py_td64_to_tdstruct(int64_t td64, NPY_DATETIMEUNIT unit):
cdef:
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 0fa85f54944eb..e187df6d6f627 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -49,8 +49,6 @@ from pandas._libs.tslibs.np_datetime cimport (
NPY_FR_us,
astype_overflowsafe,
check_dts_bounds,
- dt64_to_dtstruct,
- dtstruct_to_dt64,
get_timedelta64_value,
npy_datetimestruct,
npy_datetimestruct_to_datetime,
@@ -813,7 +811,7 @@ cdef void get_date_info(int64_t ordinal, int freq, npy_datetimestruct *dts) nogi
pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, dts)
- dt64_to_dtstruct(nanos, &dts2)
+ pandas_datetime_to_datetimestruct(nanos, NPY_DATETIMEUNIT.NPY_FR_ns, &dts2)
dts.hour = dts2.hour
dts.min = dts2.min
dts.sec = dts2.sec
@@ -1149,7 +1147,7 @@ cdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) except? -1:
get_date_info(ordinal, freq, &dts)
check_dts_bounds(&dts)
- return dtstruct_to_dt64(&dts)
+ return npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT.NPY_FR_ns, &dts)
cdef str period_format(int64_t value, int freq, object fmt=None):
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index 598127b0eae7c..b63b4cf1df66b 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -30,7 +30,6 @@ from .nattype cimport (
from .np_datetime cimport (
NPY_DATETIMEUNIT,
NPY_FR_ns,
- dt64_to_dtstruct,
npy_datetimestruct,
pandas_datetime_to_datetimestruct,
)
| Broken off from a less-easy branch. | https://api.github.com/repos/pandas-dev/pandas/pulls/47492 | 2022-06-23T22:07:49Z | 2022-06-24T21:12:47Z | 2022-06-24T21:12:47Z | 2022-06-25T04:06:35Z |
ENH: Move PyperclipException and PyperclipWindowsException to error/_… | diff --git a/doc/source/reference/testing.rst b/doc/source/reference/testing.rst
index 2c419e8df9517..c3ce267ff9dc7 100644
--- a/doc/source/reference/testing.rst
+++ b/doc/source/reference/testing.rst
@@ -43,6 +43,8 @@ Exceptions and warnings
errors.ParserError
errors.ParserWarning
errors.PerformanceWarning
+ errors.PyperclipException
+ errors.PyperclipWindowsException
errors.SettingWithCopyError
errors.SettingWithCopyWarning
errors.SpecificationError
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index b8d9df16311d7..04d52e2c5853c 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -3,6 +3,8 @@
"""
from __future__ import annotations
+import ctypes
+
from pandas._config.config import OptionError # noqa:F401
from pandas._libs.tslibs import ( # noqa:F401
@@ -374,3 +376,22 @@ class IndexingError(Exception):
>>> s.loc["a", "c", "d"] # doctest: +SKIP
... # IndexingError: Too many indexers
"""
+
+
+class PyperclipException(RuntimeError):
+ """
+ Exception is raised when trying to use methods like to_clipboard() and
+ read_clipboard() on an unsupported OS/platform.
+ """
+
+
+class PyperclipWindowsException(PyperclipException):
+ """
+ Exception is raised when pandas is unable to get access to the clipboard handle
+ due to some other window process is accessing it.
+ """
+
+ def __init__(self, message: str) -> None:
+ # attr only exists on Windows, so typing fails on other platforms
+ message += f" ({ctypes.WinError()})" # type: ignore[attr-defined]
+ super().__init__(message)
diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py
index 6a39b20869497..27fb06dfb6023 100644
--- a/pandas/io/clipboard/__init__.py
+++ b/pandas/io/clipboard/__init__.py
@@ -58,6 +58,11 @@
import time
import warnings
+from pandas.errors import (
+ PyperclipException,
+ PyperclipWindowsException,
+)
+
# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
# Thus, we need to detect the presence of $DISPLAY manually
# and not load PyQt4 if it is absent.
@@ -87,18 +92,6 @@ def _executable_exists(name):
)
-# Exceptions
-class PyperclipException(RuntimeError):
- pass
-
-
-class PyperclipWindowsException(PyperclipException):
- def __init__(self, message) -> None:
- # attr only exists on Windows, so typing fails on other platforms
- message += f" ({ctypes.WinError()})" # type: ignore[attr-defined]
- super().__init__(message)
-
-
def _stringifyText(text) -> str:
acceptedTypes = (str, int, float, bool)
if not isinstance(text, acceptedTypes):
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py
index 73e563fd2b743..cb34cb6678a67 100644
--- a/pandas/tests/io/test_clipboard.py
+++ b/pandas/tests/io/test_clipboard.py
@@ -3,6 +3,11 @@
import numpy as np
import pytest
+from pandas.errors import (
+ PyperclipException,
+ PyperclipWindowsException,
+)
+
from pandas import (
DataFrame,
get_option,
@@ -11,6 +16,8 @@
import pandas._testing as tm
from pandas.io.clipboard import (
+ CheckedCall,
+ _stringifyText,
clipboard_get,
clipboard_set,
)
@@ -110,6 +117,81 @@ def df(request):
raise ValueError
+@pytest.fixture
+def mock_ctypes(monkeypatch):
+ """
+ Mocks WinError to help with testing the clipboard.
+ """
+
+ def _mock_win_error():
+ return "Window Error"
+
+ # Set raising to False because WinError won't exist on non-windows platforms
+ with monkeypatch.context() as m:
+ m.setattr("ctypes.WinError", _mock_win_error, raising=False)
+ yield
+
+
+@pytest.mark.usefixtures("mock_ctypes")
+def test_checked_call_with_bad_call(monkeypatch):
+ """
+ Give CheckCall a function that returns a falsey value and
+ mock get_errno so it returns false so an exception is raised.
+ """
+
+ def _return_false():
+ return False
+
+ monkeypatch.setattr("pandas.io.clipboard.get_errno", lambda: True)
+ msg = f"Error calling {_return_false.__name__} \\(Window Error\\)"
+
+ with pytest.raises(PyperclipWindowsException, match=msg):
+ CheckedCall(_return_false)()
+
+
+@pytest.mark.usefixtures("mock_ctypes")
+def test_checked_call_with_valid_call(monkeypatch):
+ """
+ Give CheckCall a function that returns a truthy value and
+ mock get_errno so it returns true so an exception is not raised.
+ The function should return the results from _return_true.
+ """
+
+ def _return_true():
+ return True
+
+ monkeypatch.setattr("pandas.io.clipboard.get_errno", lambda: False)
+
+ # Give CheckedCall a callable that returns a truthy value s
+ checked_call = CheckedCall(_return_true)
+ assert checked_call() is True
+
+
+@pytest.mark.parametrize(
+ "text",
+ [
+ "String_test",
+ True,
+ 1,
+ 1.0,
+ 1j,
+ ],
+)
+def test_stringify_text(text):
+ valid_types = (str, int, float, bool)
+
+ if isinstance(text, valid_types):
+ result = _stringifyText(text)
+ assert result == str(text)
+ else:
+ msg = (
+ "only str, int, float, and bool values "
+ f"can be copied to the clipboard, not {type(text).__name__}"
+ )
+ with pytest.raises(PyperclipException, match=msg):
+ _stringifyText(text)
+
+
@pytest.fixture
def mock_clipboard(monkeypatch, request):
"""Fixture mocking clipboard IO.
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index 7e3d5b43f3014..e0ce798fec021 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -28,6 +28,7 @@
"SettingWithCopyWarning",
"NumExprClobberingError",
"IndexingError",
+ "PyperclipException",
],
)
def test_exception_importable(exc):
| - [x] xref #27656. this GitHub issue is being done in multiple parts
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
I noticed that there weren't a lot of test around the functions that used PyperclipException & PyperclipWindowsException. So, I took a stab at adding some. Let me know if this unwarranted.
Also, the github only mentioned PyperclipException, so not sure if we also want to move PyperclipWindowsException, but I did so just in case. | https://api.github.com/repos/pandas-dev/pandas/pulls/47491 | 2022-06-23T19:37:34Z | 2022-06-25T17:13:18Z | 2022-06-25T17:13:18Z | 2022-06-25T17:13:32Z |
Backport PR #47473 on branch 1.4.x (DOC: Start v1.4.4 release notes) | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 47a46c86c3a44..817edd82ccada 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -16,6 +16,7 @@ Version 1.4
.. toctree::
:maxdepth: 2
+ v1.4.4
v1.4.3
v1.4.2
v1.4.1
diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index be5ac74201be8..39e9a55b5c384 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -68,4 +68,4 @@ Other
Contributors
~~~~~~~~~~~~
-.. contributors:: v1.4.2..v1.4.3|HEAD
+.. contributors:: v1.4.2..v1.4.3
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
new file mode 100644
index 0000000000000..b462f0c6a8ffe
--- /dev/null
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -0,0 +1,45 @@
+.. _whatsnew_144:
+
+What's new in 1.4.4 (July ??, 2022)
+-----------------------------------
+
+These are the changes in pandas 1.4.4. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_144.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_144.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_144.other:
+
+Other
+~~~~~
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_144.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.4.3..v1.4.4|HEAD
| Backport PR #47473: DOC: Start v1.4.4 release notes | https://api.github.com/repos/pandas-dev/pandas/pulls/47486 | 2022-06-23T12:56:12Z | 2022-06-23T18:13:19Z | 2022-06-23T18:13:19Z | 2022-06-23T18:13:20Z |
CI: Run all code checks even if one fails | diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index e8f54b33a92c0..a7bacad1f3152 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -65,37 +65,39 @@ jobs:
id: build
uses: ./.github/actions/build_pandas
+ # The following checks are independent of each other and should still be run if one fails
- name: Check for no warnings when building single-page docs
run: ci/code_checks.sh single-docs
- if: ${{ steps.build.outcome == 'success' }}
+ if: ${{ steps.build.outcome == 'success' && always() }}
- name: Run checks on imported code
run: ci/code_checks.sh code
- if: ${{ steps.build.outcome == 'success' }}
+ if: ${{ steps.build.outcome == 'success' && always() }}
- name: Run doctests
run: ci/code_checks.sh doctests
- if: ${{ steps.build.outcome == 'success' }}
+ if: ${{ steps.build.outcome == 'success' && always() }}
- name: Run docstring validation
run: ci/code_checks.sh docstrings
- if: ${{ steps.build.outcome == 'success' }}
+ if: ${{ steps.build.outcome == 'success' && always() }}
- name: Use existing environment for type checking
run: |
echo $PATH >> $GITHUB_PATH
echo "PYTHONHOME=$PYTHONHOME" >> $GITHUB_ENV
echo "PYTHONPATH=$PYTHONPATH" >> $GITHUB_ENV
+ if: ${{ steps.build.outcome == 'success' && always() }}
- name: Typing
uses: pre-commit/action@v2.0.3
with:
extra_args: --hook-stage manual --all-files
- if: ${{ steps.build.outcome == 'success' }}
+ if: ${{ steps.build.outcome == 'success' && always() }}
- name: Run docstring validation script tests
run: pytest scripts
- if: ${{ steps.build.outcome == 'success' }}
+ if: ${{ steps.build.outcome == 'success' && always() }}
asv-benchmarks:
name: ASV Benchmarks
| xref https://github.com/pandas-dev/pandas/pull/47469#issuecomment-1163790491=
Solution gleaned from: https://stackoverflow.com/a/58859404
Want to `always()` run all these steps _If_ pandas was built correctly
Note: Cannot use `continue-on-error` because if a check fails the build will still pass which we don't want https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepscontinue-on-error= | https://api.github.com/repos/pandas-dev/pandas/pulls/47482 | 2022-06-23T02:35:21Z | 2022-06-23T16:51:34Z | 2022-06-23T16:51:33Z | 2022-07-06T14:09:19Z |
BUG: Fix issues with numeric_only deprecation | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index b4aea4240e458..a4462a292f4f9 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1610,17 +1610,20 @@ def idxmax(
numeric_only_arg = numeric_only
def func(df):
- res = df._reduce(
- nanops.nanargmax,
- "argmax",
- axis=axis,
- skipna=skipna,
- numeric_only=numeric_only_arg,
- )
- indices = res._values
- index = df._get_axis(axis)
- result = [index[i] if i >= 0 else np.nan for i in indices]
- return df._constructor_sliced(result, index=res.index)
+ with warnings.catch_warnings():
+ # Suppress numeric_only warnings here, will warn below
+ warnings.filterwarnings("ignore", ".*numeric_only in DataFrame.argmax")
+ res = df._reduce(
+ nanops.nanargmax,
+ "argmax",
+ axis=axis,
+ skipna=skipna,
+ numeric_only=numeric_only_arg,
+ )
+ indices = res._values
+ index = df._get_axis(axis)
+ result = [index[i] if i >= 0 else np.nan for i in indices]
+ return df._constructor_sliced(result, index=res.index)
func.__name__ = "idxmax"
result = self._python_apply_general(func, self._obj_with_exclusions)
@@ -1646,17 +1649,20 @@ def idxmin(
numeric_only_arg = numeric_only
def func(df):
- res = df._reduce(
- nanops.nanargmin,
- "argmin",
- axis=axis,
- skipna=skipna,
- numeric_only=numeric_only_arg,
- )
- indices = res._values
- index = df._get_axis(axis)
- result = [index[i] if i >= 0 else np.nan for i in indices]
- return df._constructor_sliced(result, index=res.index)
+ with warnings.catch_warnings():
+ # Suppress numeric_only warnings here, will warn below
+ warnings.filterwarnings("ignore", ".*numeric_only in DataFrame.argmin")
+ res = df._reduce(
+ nanops.nanargmin,
+ "argmin",
+ axis=axis,
+ skipna=skipna,
+ numeric_only=numeric_only_arg,
+ )
+ indices = res._values
+ index = df._get_axis(axis)
+ result = [index[i] if i >= 0 else np.nan for i in indices]
+ return df._constructor_sliced(result, index=res.index)
func.__name__ = "idxmin"
result = self._python_apply_general(func, self._obj_with_exclusions)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index c294082edce71..c2098fbe93a56 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1716,8 +1716,9 @@ def _cython_agg_general(
kwd_name = "numeric_only"
if how in ["any", "all"]:
kwd_name = "bool_only"
+ kernel = "sum" if how == "add" else how
raise NotImplementedError(
- f"{type(self).__name__}.{how} does not implement {kwd_name}."
+ f"{type(self).__name__}.{kernel} does not implement {kwd_name}."
)
elif not is_ser:
data = data.get_numeric_data(copy=False)
@@ -2194,10 +2195,16 @@ def std(
return np.sqrt(self._numba_agg_general(sliding_var, engine_kwargs, ddof))
else:
+ # Resolve numeric_only so that var doesn't warn
+ numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0)
+ if numeric_only_bool and self.obj.ndim == 1:
+ raise NotImplementedError(
+ f"{type(self).__name__}.std does not implement numeric_only."
+ )
result = self._get_cythonized_result(
libgroupby.group_var,
cython_dtype=np.dtype(np.float64),
- numeric_only=numeric_only,
+ numeric_only=numeric_only_bool,
needs_counts=True,
post_processing=lambda vals, inference: np.sqrt(vals),
ddof=ddof,
@@ -2296,7 +2303,13 @@ def sem(self, ddof: int = 1, numeric_only: bool | lib.NoDefault = lib.no_default
Series or DataFrame
Standard error of the mean of values within each group.
"""
- result = self.std(ddof=ddof, numeric_only=numeric_only)
+ # Reolve numeric_only so that std doesn't warn
+ numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0)
+ if numeric_only_bool and self.obj.ndim == 1:
+ raise NotImplementedError(
+ f"{type(self).__name__}.sem does not implement numeric_only."
+ )
+ result = self.std(ddof=ddof, numeric_only=numeric_only_bool)
self._maybe_warn_numeric_only_depr("sem", result, numeric_only)
if result.ndim == 1:
@@ -3167,6 +3180,10 @@ def quantile(
b 3.0
"""
numeric_only_bool = self._resolve_numeric_only(numeric_only, axis=0)
+ if numeric_only_bool and self.obj.ndim == 1:
+ raise NotImplementedError(
+ f"{type(self).__name__}.quantile does not implement numeric_only"
+ )
def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, np.dtype | None]:
if is_object_dtype(vals):
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 25266d1e6ab80..7f24143920b84 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -306,7 +306,7 @@ def test_idxmax(self, gb):
# non-cython calls should not include the grouper
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
- msg = "The default value of numeric_only"
+ msg = "The default value of numeric_only in DataFrameGroupBy.idxmax"
with tm.assert_produces_warning(FutureWarning, match=msg):
result = gb.idxmax()
tm.assert_frame_equal(result, expected)
@@ -317,7 +317,7 @@ def test_idxmin(self, gb):
# non-cython calls should not include the grouper
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
- msg = "The default value of numeric_only"
+ msg = "The default value of numeric_only in DataFrameGroupBy.idxmin"
with tm.assert_produces_warning(FutureWarning, match=msg):
result = gb.idxmin()
tm.assert_frame_equal(result, expected)
@@ -1356,6 +1356,77 @@ def test_deprecate_numeric_only(
method(*args, **kwargs)
+def test_deprecate_numeric_only_series(groupby_func, request):
+ # GH#46560
+ if groupby_func in ("backfill", "mad", "pad", "tshift"):
+ pytest.skip("method is deprecated")
+ elif groupby_func == "corrwith":
+ msg = "corrwith is not implemented on SeriesGroupBy"
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
+
+ ser = Series(list("xyz"))
+ gb = ser.groupby([0, 0, 1])
+
+ if groupby_func == "corrwith":
+ args = (ser,)
+ elif groupby_func == "corr":
+ args = (ser,)
+ elif groupby_func == "cov":
+ args = (ser,)
+ elif groupby_func == "nth":
+ args = (0,)
+ elif groupby_func == "fillna":
+ args = (True,)
+ elif groupby_func == "take":
+ args = ([0],)
+ elif groupby_func == "quantile":
+ args = (0.5,)
+ else:
+ args = ()
+ method = getattr(gb, groupby_func)
+
+ try:
+ _ = method(*args)
+ except (TypeError, ValueError) as err:
+ # ops that only work on numeric dtypes
+ assert groupby_func in (
+ "corr",
+ "cov",
+ "cummax",
+ "cummin",
+ "cumprod",
+ "cumsum",
+ "diff",
+ "idxmax",
+ "idxmin",
+ "mean",
+ "median",
+ "pct_change",
+ "prod",
+ "quantile",
+ "sem",
+ "skew",
+ "std",
+ "var",
+ )
+ assert (
+ "could not convert" in str(err).lower()
+ or "unsupported operand type" in str(err)
+ or "not allowed for this dtype" in str(err)
+ or "can't multiply sequence by non-int" in str(err)
+ or "cannot be performed against 'object' dtypes" in str(err)
+ or "is not supported for object dtype" in str(err)
+ ), str(err)
+
+ msgs = (
+ "got an unexpected keyword argument 'numeric_only'",
+ f"{groupby_func} does not implement numeric_only",
+ f"{groupby_func} is not supported for object dtype",
+ )
+ with pytest.raises((NotImplementedError, TypeError), match=f"({'|'.join(msgs)})"):
+ _ = method(*args, numeric_only=True)
+
+
@pytest.mark.parametrize("dtype", [int, float, object])
@pytest.mark.parametrize(
"kwargs",
| Part of #46560
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
ref: https://github.com/pandas-dev/pandas/pull/47265#issuecomment-1150415159
Issues were introduced as part of 1.5, no need for a whatsnew note. | https://api.github.com/repos/pandas-dev/pandas/pulls/47481 | 2022-06-23T02:08:20Z | 2022-06-23T22:55:06Z | 2022-06-23T22:55:06Z | 2022-06-24T18:23:50Z |
Revert "Backport PR #47318 on branch 1.4.x (CI: Pin PYTEST_WORKERS=1 for Windows builds due to memory errors)" | diff --git a/.github/workflows/macos-windows.yml b/.github/workflows/macos-windows.yml
index 923ac8f2e0fd6..560a421ec74ec 100644
--- a/.github/workflows/macos-windows.yml
+++ b/.github/workflows/macos-windows.yml
@@ -15,6 +15,7 @@ on:
env:
PANDAS_CI: 1
PYTEST_TARGET: pandas
+ PYTEST_WORKERS: auto
PATTERN: "not slow and not db and not network and not single_cpu"
@@ -35,9 +36,6 @@ jobs:
# https://github.community/t/concurrecy-not-work-for-push/183068/7
group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-${{ matrix.os }}
cancel-in-progress: true
- env:
- # GH 47443: PYTEST_WORKERS > 1 crashes Windows builds with memory related errors
- PYTEST_WORKERS: ${{ matrix.os == 'macos-latest' && 'auto' || '1' }}
steps:
- name: Checkout
| Reverts pandas-dev/pandas#47445
checking ci with this reverted. | https://api.github.com/repos/pandas-dev/pandas/pulls/47479 | 2022-06-23T01:22:00Z | 2022-06-23T09:55:07Z | null | 2022-06-24T09:58:09Z |
Backport PR #47476 on branch 1.4.x (DOC: v1.4.3 release date) | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index 2550a12ebbb9d..be5ac74201be8 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -1,7 +1,7 @@
.. _whatsnew_143:
-What's new in 1.4.3 (April ??, 2022)
-------------------------------------
+What's new in 1.4.3 (June 23, 2022)
+-----------------------------------
These are the changes in pandas 1.4.3. See :ref:`release` for a full changelog
including other versions of pandas.
@@ -12,10 +12,10 @@ including other versions of pandas.
.. _whatsnew_143.concat:
-Behaviour of ``concat`` with empty or all-NA DataFrame columns
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Behavior of ``concat`` with empty or all-NA DataFrame columns
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The behaviour change in version 1.4.0 to stop ignoring the data type
+The behavior change in version 1.4.0 to stop ignoring the data type
of empty or all-NA columns with float or object dtype in :func:`concat`
(:ref:`whatsnew_140.notable_bug_fixes.concat_with_empty_or_all_na`) has been
reverted (:issue:`45637`).
@@ -30,7 +30,7 @@ Fixed regressions
- Fixed regression in representation of ``dtypes`` attribute of :class:`MultiIndex` (:issue:`46900`)
- Fixed regression when setting values with :meth:`DataFrame.loc` updating :class:`RangeIndex` when index was set as new column and column was updated afterwards (:issue:`47128`)
- Fixed regression in :meth:`DataFrame.fillna` and :meth:`DataFrame.update` creating a copy when updating inplace (:issue:`47188`)
-- Fixed regression in :meth:`DataFrame.nsmallest` led to wrong results when ``np.nan`` in the sorting column (:issue:`46589`)
+- Fixed regression in :meth:`DataFrame.nsmallest` led to wrong results when the sorting column has ``np.nan`` values (:issue:`46589`)
- Fixed regression in :func:`read_fwf` raising ``ValueError`` when ``widths`` was specified with ``usecols`` (:issue:`46580`)
- Fixed regression in :func:`concat` not sorting columns for mixed column names (:issue:`47127`)
- Fixed regression in :meth:`.Groupby.transform` and :meth:`.Groupby.agg` failing with ``engine="numba"`` when the index was a :class:`MultiIndex` (:issue:`46867`)
@@ -39,7 +39,7 @@ Fixed regressions
- Fixed regression in :func:`read_csv` with ``index_col=False`` identifying first row as index names when ``header=None`` (:issue:`46955`)
- Fixed regression in :meth:`.DataFrameGroupBy.agg` when used with list-likes or dict-likes and ``axis=1`` that would give incorrect results; now raises ``NotImplementedError`` (:issue:`46995`)
- Fixed regression in :meth:`DataFrame.resample` and :meth:`DataFrame.rolling` when used with list-likes or dict-likes and ``axis=1`` that would raise an unintuitive error message; now raises ``NotImplementedError`` (:issue:`46904`)
-- Fixed regression in :func:`assert_index_equal` when ``check_order=False`` and :class:`Index` has extension or object dtype (:issue:`47207`)
+- Fixed regression in :func:`testing.assert_index_equal` when ``check_order=False`` and :class:`Index` has extension or object dtype (:issue:`47207`)
- Fixed regression in :func:`read_excel` returning ints as floats on certain input sheets (:issue:`46988`)
- Fixed regression in :meth:`DataFrame.shift` when ``axis`` is ``columns`` and ``fill_value`` is absent, ``freq`` is ignored (:issue:`47039`)
- Fixed regression in :meth:`DataFrame.to_json` causing a segmentation violation when :class:`DataFrame` is created with an ``index`` parameter of the type :class:`PeriodIndex` (:issue:`46683`)
@@ -50,9 +50,8 @@ Fixed regressions
Bug fixes
~~~~~~~~~
-- Bug in :meth:`pd.eval`, :meth:`DataFrame.eval` and :meth:`DataFrame.query` where passing empty ``local_dict`` or ``global_dict`` was treated as passing ``None`` (:issue:`47084`)
-- Most I/O methods do no longer suppress ``OSError`` and ``ValueError`` when closing file handles (:issue:`47136`)
--
+- Bug in :func:`pandas.eval`, :meth:`DataFrame.eval` and :meth:`DataFrame.query` where passing empty ``local_dict`` or ``global_dict`` was treated as passing ``None`` (:issue:`47084`)
+- Most I/O methods no longer suppress ``OSError`` and ``ValueError`` when closing file handles (:issue:`47136`)
.. ---------------------------------------------------------------------------
@@ -61,7 +60,6 @@ Bug fixes
Other
~~~~~
- The minimum version of Cython needed to compile pandas is now ``0.29.30`` (:issue:`41935`)
--
.. ---------------------------------------------------------------------------
| Backport PR #47476: DOC: v1.4.3 release date | https://api.github.com/repos/pandas-dev/pandas/pulls/47478 | 2022-06-23T00:19:17Z | 2022-06-23T00:57:30Z | 2022-06-23T00:57:30Z | 2022-06-23T00:57:30Z |
DOC: v1.4.3 release date | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index 2550a12ebbb9d..be5ac74201be8 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -1,7 +1,7 @@
.. _whatsnew_143:
-What's new in 1.4.3 (April ??, 2022)
-------------------------------------
+What's new in 1.4.3 (June 23, 2022)
+-----------------------------------
These are the changes in pandas 1.4.3. See :ref:`release` for a full changelog
including other versions of pandas.
@@ -12,10 +12,10 @@ including other versions of pandas.
.. _whatsnew_143.concat:
-Behaviour of ``concat`` with empty or all-NA DataFrame columns
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Behavior of ``concat`` with empty or all-NA DataFrame columns
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The behaviour change in version 1.4.0 to stop ignoring the data type
+The behavior change in version 1.4.0 to stop ignoring the data type
of empty or all-NA columns with float or object dtype in :func:`concat`
(:ref:`whatsnew_140.notable_bug_fixes.concat_with_empty_or_all_na`) has been
reverted (:issue:`45637`).
@@ -30,7 +30,7 @@ Fixed regressions
- Fixed regression in representation of ``dtypes`` attribute of :class:`MultiIndex` (:issue:`46900`)
- Fixed regression when setting values with :meth:`DataFrame.loc` updating :class:`RangeIndex` when index was set as new column and column was updated afterwards (:issue:`47128`)
- Fixed regression in :meth:`DataFrame.fillna` and :meth:`DataFrame.update` creating a copy when updating inplace (:issue:`47188`)
-- Fixed regression in :meth:`DataFrame.nsmallest` led to wrong results when ``np.nan`` in the sorting column (:issue:`46589`)
+- Fixed regression in :meth:`DataFrame.nsmallest` led to wrong results when the sorting column has ``np.nan`` values (:issue:`46589`)
- Fixed regression in :func:`read_fwf` raising ``ValueError`` when ``widths`` was specified with ``usecols`` (:issue:`46580`)
- Fixed regression in :func:`concat` not sorting columns for mixed column names (:issue:`47127`)
- Fixed regression in :meth:`.Groupby.transform` and :meth:`.Groupby.agg` failing with ``engine="numba"`` when the index was a :class:`MultiIndex` (:issue:`46867`)
@@ -39,7 +39,7 @@ Fixed regressions
- Fixed regression in :func:`read_csv` with ``index_col=False`` identifying first row as index names when ``header=None`` (:issue:`46955`)
- Fixed regression in :meth:`.DataFrameGroupBy.agg` when used with list-likes or dict-likes and ``axis=1`` that would give incorrect results; now raises ``NotImplementedError`` (:issue:`46995`)
- Fixed regression in :meth:`DataFrame.resample` and :meth:`DataFrame.rolling` when used with list-likes or dict-likes and ``axis=1`` that would raise an unintuitive error message; now raises ``NotImplementedError`` (:issue:`46904`)
-- Fixed regression in :func:`assert_index_equal` when ``check_order=False`` and :class:`Index` has extension or object dtype (:issue:`47207`)
+- Fixed regression in :func:`testing.assert_index_equal` when ``check_order=False`` and :class:`Index` has extension or object dtype (:issue:`47207`)
- Fixed regression in :func:`read_excel` returning ints as floats on certain input sheets (:issue:`46988`)
- Fixed regression in :meth:`DataFrame.shift` when ``axis`` is ``columns`` and ``fill_value`` is absent, ``freq`` is ignored (:issue:`47039`)
- Fixed regression in :meth:`DataFrame.to_json` causing a segmentation violation when :class:`DataFrame` is created with an ``index`` parameter of the type :class:`PeriodIndex` (:issue:`46683`)
@@ -50,9 +50,8 @@ Fixed regressions
Bug fixes
~~~~~~~~~
-- Bug in :meth:`pd.eval`, :meth:`DataFrame.eval` and :meth:`DataFrame.query` where passing empty ``local_dict`` or ``global_dict`` was treated as passing ``None`` (:issue:`47084`)
-- Most I/O methods do no longer suppress ``OSError`` and ``ValueError`` when closing file handles (:issue:`47136`)
--
+- Bug in :func:`pandas.eval`, :meth:`DataFrame.eval` and :meth:`DataFrame.query` where passing empty ``local_dict`` or ``global_dict`` was treated as passing ``None`` (:issue:`47084`)
+- Most I/O methods no longer suppress ``OSError`` and ``ValueError`` when closing file handles (:issue:`47136`)
.. ---------------------------------------------------------------------------
@@ -61,7 +60,6 @@ Bug fixes
Other
~~~~~
- The minimum version of Cython needed to compile pandas is now ``0.29.30`` (:issue:`41935`)
--
.. ---------------------------------------------------------------------------
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/47476 | 2022-06-22T23:27:29Z | 2022-06-23T00:18:51Z | 2022-06-23T00:18:51Z | 2022-06-23T00:18:54Z |
REGR: maybe_convert_objects ignoring uints | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 7aa1c1e84aa09..f19cc88fa1690 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -857,6 +857,7 @@ Conversion
- Bug in :meth:`DataFrame.to_records` returning inconsistent numpy types if the index was a :class:`MultiIndex` (:issue:`47263`)
- Bug in :meth:`DataFrame.to_dict` for ``orient="list"`` or ``orient="index"`` was not returning native types (:issue:`46751`)
- Bug in :meth:`DataFrame.apply` that returns a :class:`DataFrame` instead of a :class:`Series` when applied to an empty :class:`DataFrame` and ``axis=1`` (:issue:`39111`)
+- Bug when inferring the dtype from an iterable that is *not* a NumPy ``ndarray`` consisting of all NumPy unsigned integer scalars did not result in an unsigned integer dtype (:issue:`47294`)
Strings
^^^^^^^
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 2136c410ef4a0..e353d224708b7 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -17,6 +17,7 @@ from cpython.number cimport PyNumber_Check
from cpython.object cimport (
Py_EQ,
PyObject_RichCompareBool,
+ PyTypeObject,
)
from cpython.ref cimport Py_INCREF
from cpython.sequence cimport PySequence_Check
@@ -54,6 +55,11 @@ from numpy cimport (
cnp.import_array()
+cdef extern from "Python.h":
+ # Note: importing extern-style allows us to declare these as nogil
+ # functions, whereas `from cpython cimport` does not.
+ bint PyObject_TypeCheck(object obj, PyTypeObject* type) nogil
+
cdef extern from "numpy/arrayobject.h":
# cython's numpy.dtype specification is incorrect, which leads to
# errors in issubclass(self.dtype.type, np.bool_), so we directly
@@ -71,6 +77,9 @@ cdef extern from "numpy/arrayobject.h":
object fields
tuple names
+ PyTypeObject PySignedIntegerArrType_Type
+ PyTypeObject PyUnsignedIntegerArrType_Type
+
cdef extern from "numpy/ndarrayobject.h":
bint PyArray_CheckScalar(obj) nogil
@@ -1283,9 +1292,9 @@ cdef class Seen:
In addition to setting a flag that an integer was seen, we
also set two flags depending on the type of integer seen:
- 1) sint_ : a negative (signed) number in the
+ 1) sint_ : a signed numpy integer type or a negative (signed) number in the
range of [-2**63, 0) was encountered
- 2) uint_ : a positive number in the range of
+ 2) uint_ : an unsigned numpy integer type or a positive number in the range of
[2**63, 2**64) was encountered
Parameters
@@ -1294,8 +1303,18 @@ cdef class Seen:
Value with which to set the flags.
"""
self.int_ = True
- self.sint_ = self.sint_ or (oINT64_MIN <= val < 0)
- self.uint_ = self.uint_ or (oINT64_MAX < val <= oUINT64_MAX)
+ self.sint_ = (
+ self.sint_
+ or (oINT64_MIN <= val < 0)
+ # Cython equivalent of `isinstance(val, np.signedinteger)`
+ or PyObject_TypeCheck(val, &PySignedIntegerArrType_Type)
+ )
+ self.uint_ = (
+ self.uint_
+ or (oINT64_MAX < val <= oUINT64_MAX)
+ # Cython equivalent of `isinstance(val, np.unsignedinteger)`
+ or PyObject_TypeCheck(val, &PyUnsignedIntegerArrType_Type)
+ )
@property
def numeric_(self):
@@ -2542,7 +2561,6 @@ def maybe_convert_objects(ndarray[object] objects,
floats[i] = <float64_t>val
complexes[i] = <double complex>val
if not seen.null_:
- val = int(val)
seen.saw_int(val)
if ((seen.uint_ and seen.sint_) or
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index b12476deccbfc..8fe6abd3b0ed5 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -700,25 +700,32 @@ def test_convert_int_overflow(self, value):
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
- def test_maybe_convert_objects_uint64(self):
- # see gh-4471
- arr = np.array([2**63], dtype=object)
- exp = np.array([2**63], dtype=np.uint64)
- tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
-
- # NumPy bug: can't compare uint64 to int64, as that
- # results in both casting to float64, so we should
- # make sure that this function is robust against it
- arr = np.array([np.uint64(2**63)], dtype=object)
- exp = np.array([2**63], dtype=np.uint64)
- tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
-
- arr = np.array([2, -1], dtype=object)
- exp = np.array([2, -1], dtype=np.int64)
- tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
-
- arr = np.array([2**63, -1], dtype=object)
- exp = np.array([2**63, -1], dtype=object)
+ @pytest.mark.parametrize(
+ "value, expected_dtype",
+ [
+ # see gh-4471
+ ([2**63], np.uint64),
+ # NumPy bug: can't compare uint64 to int64, as that
+ # results in both casting to float64, so we should
+ # make sure that this function is robust against it
+ ([np.uint64(2**63)], np.uint64),
+ ([2, -1], np.int64),
+ ([2**63, -1], object),
+ # GH#47294
+ ([np.uint8(1)], np.uint8),
+ ([np.uint16(1)], np.uint16),
+ ([np.uint32(1)], np.uint32),
+ ([np.uint64(1)], np.uint64),
+ ([np.uint8(2), np.uint16(1)], np.uint16),
+ ([np.uint32(2), np.uint16(1)], np.uint32),
+ ([np.uint32(2), -1], object),
+ ([np.uint32(2), 1], np.uint64),
+ ([np.uint32(2), np.int32(1)], object),
+ ],
+ )
+ def test_maybe_convert_objects_uint(self, value, expected_dtype):
+ arr = np.array(value, dtype=object)
+ exp = np.array(value, dtype=expected_dtype)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_maybe_convert_objects_datetime(self):
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 45f36834510ed..cd547819dbe94 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -57,7 +57,9 @@ class mystring(str):
expected = DataFrame({"a": [1], "b": [2], mystring("c"): [3]}, index=index)
tm.assert_equal(df, expected)
- @pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
+ @pytest.mark.parametrize(
+ "dtype", ["int32", "int64", "uint32", "uint64", "float32", "float64"]
+ )
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
@@ -210,6 +212,7 @@ def test_setitem_dict_preserves_dtypes(self):
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
+ "d": Series([1, 2, 3], dtype="uint32"),
}
)
df = DataFrame(
@@ -217,10 +220,16 @@ def test_setitem_dict_preserves_dtypes(self):
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
+ "d": Series([], dtype="uint32"),
}
)
for idx, b in enumerate([1, 2, 3]):
- df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
+ df.loc[df.shape[0]] = {
+ "a": int(idx),
+ "b": float(b),
+ "c": float(b),
+ "d": np.uint32(b),
+ }
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index f06641002e039..d00cf198b3296 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -434,6 +434,25 @@ def test_constructor_int_overflow(self, values):
assert result[0].dtype == object
assert result[0][0] == value
+ @pytest.mark.parametrize(
+ "values",
+ [
+ np.array([1], dtype=np.uint16),
+ np.array([1], dtype=np.uint32),
+ np.array([1], dtype=np.uint64),
+ [np.uint16(1)],
+ [np.uint32(1)],
+ [np.uint64(1)],
+ ],
+ )
+ def test_constructor_numpy_uints(self, values):
+ # GH#47294
+ value = values[0]
+ result = DataFrame(values)
+
+ assert result[0].dtype == value.dtype
+ assert result[0][0] == value
+
def test_constructor_ordereddict(self):
import random
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index 57c4af1a0fe1c..39b5e0ffc526c 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -540,10 +540,18 @@ def test_union_duplicates(index, request):
mi1 = MultiIndex.from_arrays([values, [1] * len(values)])
mi2 = MultiIndex.from_arrays([[values[0]] + values, [1] * (len(values) + 1)])
result = mi1.union(mi2)
- tm.assert_index_equal(result, mi2.sort_values())
+ expected = mi2.sort_values()
+ if mi2.levels[0].dtype == np.uint64 and (mi2.get_level_values(0) < 2**63).all():
+ # GH#47294 - union uses lib.fast_zip, converting data to Python integers
+ # and loses type information. Result is then unsigned only when values are
+ # sufficiently large to require unsigned dtype.
+ expected = expected.set_levels(
+ [expected.levels[0].astype(int), expected.levels[1]]
+ )
+ tm.assert_index_equal(result, expected)
result = mi2.union(mi1)
- tm.assert_index_equal(result, mi2.sort_values())
+ tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
diff --git a/pandas/tests/indexes/numeric/test_numeric.py b/pandas/tests/indexes/numeric/test_numeric.py
index 7d2bcdf20c795..23262cb2eb768 100644
--- a/pandas/tests/indexes/numeric/test_numeric.py
+++ b/pandas/tests/indexes/numeric/test_numeric.py
@@ -509,6 +509,20 @@ def test_constructor_coercion_signed_to_unsigned(
with pytest.raises(OverflowError, match=msg):
Index([-1], dtype=any_unsigned_int_numpy_dtype)
+ def test_constructor_np_signed(self, any_signed_int_numpy_dtype):
+ # GH#47475
+ scalar = np.dtype(any_signed_int_numpy_dtype).type(1)
+ result = Index([scalar])
+ expected = Int64Index([1])
+ tm.assert_index_equal(result, expected)
+
+ def test_constructor_np_unsigned(self, any_unsigned_int_numpy_dtype):
+ # GH#47475
+ scalar = np.dtype(any_unsigned_int_numpy_dtype).type(1)
+ result = Index([scalar])
+ expected = UInt64Index([1])
+ tm.assert_index_equal(result, expected)
+
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 3dce22a06c1b2..cec06d054d766 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -745,6 +745,25 @@ def test_constructor_signed_int_overflow_deprecation(self):
expected = Series([1, 200, 50], dtype="uint8")
tm.assert_series_equal(ser, expected)
+ @pytest.mark.parametrize(
+ "values",
+ [
+ np.array([1], dtype=np.uint16),
+ np.array([1], dtype=np.uint32),
+ np.array([1], dtype=np.uint64),
+ [np.uint16(1)],
+ [np.uint32(1)],
+ [np.uint64(1)],
+ ],
+ )
+ def test_constructor_numpy_uints(self, values):
+ # GH#47294
+ value = values[0]
+ result = Series(values)
+
+ assert result[0].dtype == value.dtype
+ assert result[0] == value
+
def test_constructor_unsigned_dtype_overflow(self, any_unsigned_int_numpy_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
| - [x] closes #47294 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
I think this is closely related to #37258, though it does not impact any code examples there.
cc @TomAugspurger, @jbrockmendel, @jorisvandenbossche | https://api.github.com/repos/pandas-dev/pandas/pulls/47475 | 2022-06-22T23:19:59Z | 2022-07-10T15:54:05Z | 2022-07-10T15:54:04Z | 2022-07-11T12:10:29Z |
BUG: to_sql with method=callable not returning int raising TypeError | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index 0af25daf0468a..6ee140f59e096 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -24,7 +24,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
- The :class:`errors.FutureWarning` raised when passing arguments (other than ``filepath_or_buffer``) as positional in :func:`read_csv` is now raised at the correct stacklevel (:issue:`47385`)
--
+- Bug in :meth:`DataFrame.to_sql` when ``method`` was a ``callable`` that did not return an ``int`` and would raise a ``TypeError`` (:issue:`46891`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f896169d0ae44..647cc6b533275 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2752,7 +2752,7 @@ def to_sql(
-------
None or int
Number of rows affected by to_sql. None is returned if the callable
- passed into ``method`` does not return the number of rows.
+ passed into ``method`` does not return an integer number of rows.
The number of returned rows affected is the sum of the ``rowcount``
attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable which may not
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 24290b8370ed2..987a19ee0cf47 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -37,6 +37,7 @@
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
is_dict_like,
+ is_integer,
is_list_like,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
@@ -668,7 +669,7 @@ def to_sql(
-------
None or int
Number of rows affected by to_sql. None is returned if the callable
- passed into ``method`` does not return the number of rows.
+ passed into ``method`` does not return an integer number of rows.
.. versionadded:: 1.4.0
@@ -933,7 +934,7 @@ def insert(
raise ValueError("chunksize argument should be non-zero")
chunks = (nrows // chunksize) + 1
- total_inserted = 0
+ total_inserted = None
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
@@ -943,10 +944,12 @@ def insert(
chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list))
num_inserted = exec_insert(conn, keys, chunk_iter)
- if num_inserted is None:
- total_inserted = None
- else:
- total_inserted += num_inserted
+ # GH 46891
+ if is_integer(num_inserted):
+ if total_inserted is None:
+ total_inserted = num_inserted
+ else:
+ total_inserted += num_inserted
return total_inserted
def _query_iterator(
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index e28901fa1a1ed..dd3464ccf9f64 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -620,7 +620,8 @@ def test_read_procedure(conn, request):
@pytest.mark.db
@pytest.mark.parametrize("conn", postgresql_connectable)
-def test_copy_from_callable_insertion_method(conn, request):
+@pytest.mark.parametrize("expected_count", [2, "Success!"])
+def test_copy_from_callable_insertion_method(conn, expected_count, request):
# GH 8953
# Example in io.rst found under _io.sql.method
# not available in sqlite, mysql
@@ -641,10 +642,18 @@ def psql_insert_copy(table, conn, keys, data_iter):
sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV"
cur.copy_expert(sql=sql_query, file=s_buf)
+ return expected_count
conn = request.getfixturevalue(conn)
expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
- expected.to_sql("test_frame", conn, index=False, method=psql_insert_copy)
+ result_count = expected.to_sql(
+ "test_frame", conn, index=False, method=psql_insert_copy
+ )
+ # GH 46891
+ if not isinstance(expected_count, int):
+ assert result_count is None
+ else:
+ assert result_count == expected_count
result = sql.read_sql_table("test_frame", conn)
tm.assert_frame_equal(result, expected)
| - [x] closes #46891 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/v1.4.4.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47474 | 2022-06-22T22:08:22Z | 2022-07-03T15:13:05Z | 2022-07-03T15:13:04Z | 2022-07-05T16:45:18Z |
DOC: Start v1.4.4 release notes | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index ccec4f90183bc..926b73d0f3fd9 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -24,6 +24,7 @@ Version 1.4
.. toctree::
:maxdepth: 2
+ v1.4.4
v1.4.3
v1.4.2
v1.4.1
diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index be5ac74201be8..39e9a55b5c384 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -68,4 +68,4 @@ Other
Contributors
~~~~~~~~~~~~
-.. contributors:: v1.4.2..v1.4.3|HEAD
+.. contributors:: v1.4.2..v1.4.3
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
new file mode 100644
index 0000000000000..b462f0c6a8ffe
--- /dev/null
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -0,0 +1,45 @@
+.. _whatsnew_144:
+
+What's new in 1.4.4 (July ??, 2022)
+-----------------------------------
+
+These are the changes in pandas 1.4.4. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_144.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_144.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_144.other:
+
+Other
+~~~~~
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_144.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.4.3..v1.4.4|HEAD
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/47473 | 2022-06-22T21:32:24Z | 2022-06-23T12:56:03Z | 2022-06-23T12:56:03Z | 2022-06-23T12:56:08Z |
Backport PR #47372 on branch 1.4.x (REGR: revert behaviour change for concat with empty/all-NaN data) | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 7340f2475e1f6..9f9bde65b482f 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -271,6 +271,9 @@ the given ``dayfirst`` value when the value is a delimited date string (e.g.
Ignoring dtypes in concat with empty or all-NA columns
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. note::
+ This behaviour change has been reverted in pandas 1.4.3.
+
When using :func:`concat` to concatenate two or more :class:`DataFrame` objects,
if one of the DataFrames was empty or had all-NA values, its dtype was
*sometimes* ignored when finding the concatenated dtype. These are now
@@ -301,9 +304,15 @@ object, the ``np.nan`` is retained.
*New behavior*:
-.. ipython:: python
+.. code-block:: ipython
+
+ In [4]: res
+ Out[4]:
+ bar
+ 0 2013-01-01 00:00:00
+ 1 NaN
+
- res
.. _whatsnew_140.notable_bug_fixes.value_counts_and_mode_do_not_coerce_to_nan:
diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index 4034655ccd325..f1532871d33c6 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -10,6 +10,17 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
+.. _whatsnew_143.concat:
+
+Behaviour of ``concat`` with empty or all-NA DataFrame columns
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The behaviour change in version 1.4.0 to stop ignoring the data type
+of empty or all-NA columns with float or object dtype in :func:`concat`
+(:ref:`whatsnew_140.notable_bug_fixes.concat_with_empty_or_all_na`) has been
+reverted (:issue:`45637`).
+
+
.. _whatsnew_143.regressions:
Fixed regressions
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index dd3fcb260fdbd..d6c89824b619b 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -14,6 +14,7 @@
import pandas._libs.missing as libmissing
from pandas._libs.tslibs import (
NaT,
+ Period,
iNaT,
)
from pandas._typing import (
@@ -668,3 +669,40 @@ def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool:
# fallback, default to allowing NaN, None, NA, NaT
return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))
+
+
+def isna_all(arr: ArrayLike) -> bool:
+ """
+ Optimized equivalent to isna(arr).all()
+ """
+ total_len = len(arr)
+
+ # Usually it's enough to check but a small fraction of values to see if
+ # a block is NOT null, chunks should help in such cases.
+ # parameters 1000 and 40 were chosen arbitrarily
+ chunk_len = max(total_len // 40, 1000)
+
+ dtype = arr.dtype
+ if dtype.kind == "f":
+ checker = nan_checker
+
+ elif dtype.kind in ["m", "M"] or dtype.type is Period:
+ # error: Incompatible types in assignment (expression has type
+ # "Callable[[Any], Any]", variable has type "ufunc")
+ checker = lambda x: np.asarray(x.view("i8")) == iNaT # type: ignore[assignment]
+
+ else:
+ # error: Incompatible types in assignment (expression has type "Callable[[Any],
+ # Any]", variable has type "ufunc")
+ checker = lambda x: _isna_array( # type: ignore[assignment]
+ x, inf_as_na=INF_AS_NA
+ )
+
+ return all(
+ # error: Argument 1 to "__call__" of "ufunc" has incompatible type
+ # "Union[ExtensionArray, Any]"; expected "Union[Union[int, float, complex, str,
+ # bytes, generic], Sequence[Union[int, float, complex, str, bytes, generic]],
+ # Sequence[Sequence[Any]], _SupportsArray]"
+ checker(arr[i : i + chunk_len]).all() # type: ignore[arg-type]
+ for i in range(0, total_len, chunk_len)
+ )
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 782842d167570..2c21708aede0f 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import copy
import itertools
from typing import (
TYPE_CHECKING,
@@ -13,6 +14,7 @@
NaT,
internals as libinternals,
)
+from pandas._libs.missing import NA
from pandas._typing import (
ArrayLike,
DtypeObj,
@@ -30,17 +32,26 @@
is_1d_only_ea_obj,
is_datetime64tz_dtype,
is_dtype_equal,
+ is_scalar,
+ needs_i8_conversion,
)
from pandas.core.dtypes.concat import (
cast_to_common_type,
concat_compat,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
+from pandas.core.dtypes.missing import (
+ is_valid_na_for_dtype,
+ isna,
+ isna_all,
+)
+import pandas.core.algorithms as algos
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
+from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.internals.array_manager import (
ArrayManager,
@@ -192,29 +203,19 @@ def concatenate_managers(
if isinstance(mgrs_indexers[0][0], ArrayManager):
return _concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy)
- # Assertions disabled for performance
- # for tup in mgrs_indexers:
- # # caller is responsible for ensuring this
- # indexers = tup[1]
- # assert concat_axis not in indexers
-
- if concat_axis == 0:
- return _concat_managers_axis0(mgrs_indexers, axes, copy)
-
mgrs_indexers = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers)
- # Assertion disabled for performance
- # assert all(not x[1] for x in mgrs_indexers)
-
- concat_plans = [_get_mgr_concatenation_plan(mgr) for mgr, _ in mgrs_indexers]
- concat_plan = _combine_concat_plans(concat_plans)
+ concat_plans = [
+ _get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
+ ]
+ concat_plan = _combine_concat_plans(concat_plans, concat_axis)
blocks = []
for placement, join_units in concat_plan:
unit = join_units[0]
blk = unit.block
- if len(join_units) == 1:
+ if len(join_units) == 1 and not join_units[0].indexers:
values = blk.values
if copy:
values = values.copy()
@@ -238,7 +239,7 @@ def concatenate_managers(
fastpath = blk.values.dtype == values.dtype
else:
- values = _concatenate_join_units(join_units, copy=copy)
+ values = _concatenate_join_units(join_units, concat_axis, copy=copy)
fastpath = False
if fastpath:
@@ -251,42 +252,6 @@ def concatenate_managers(
return BlockManager(tuple(blocks), axes)
-def _concat_managers_axis0(
- mgrs_indexers, axes: list[Index], copy: bool
-) -> BlockManager:
- """
- concat_managers specialized to concat_axis=0, with reindexing already
- having been done in _maybe_reindex_columns_na_proxy.
- """
- had_reindexers = {
- i: len(mgrs_indexers[i][1]) > 0 for i in range(len(mgrs_indexers))
- }
- mgrs_indexers = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers)
-
- mgrs = [x[0] for x in mgrs_indexers]
-
- offset = 0
- blocks = []
- for i, mgr in enumerate(mgrs):
- # If we already reindexed, then we definitely don't need another copy
- made_copy = had_reindexers[i]
-
- for blk in mgr.blocks:
- if made_copy:
- nb = blk.copy(deep=False)
- elif copy:
- nb = blk.copy()
- else:
- # by slicing instead of copy(deep=False), we get a new array
- # object, see test_concat_copy
- nb = blk.getitem_block(slice(None))
- nb._mgr_locs = nb._mgr_locs.add(offset)
- blocks.append(nb)
-
- offset += len(mgr.items)
- return BlockManager(tuple(blocks), axes)
-
-
def _maybe_reindex_columns_na_proxy(
axes: list[Index], mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]]
) -> list[tuple[BlockManager, dict[int, np.ndarray]]]:
@@ -297,33 +262,36 @@ def _maybe_reindex_columns_na_proxy(
Columns added in this reindexing have dtype=np.void, indicating they
should be ignored when choosing a column's final dtype.
"""
- new_mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]] = []
-
+ new_mgrs_indexers = []
for mgr, indexers in mgrs_indexers:
- # For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this
- # is a cheap reindexing.
- for i, indexer in indexers.items():
- mgr = mgr.reindex_indexer(
- axes[i],
- indexers[i],
- axis=i,
+ # We only reindex for axis=0 (i.e. columns), as this can be done cheaply
+ if 0 in indexers:
+ new_mgr = mgr.reindex_indexer(
+ axes[0],
+ indexers[0],
+ axis=0,
copy=False,
- only_slice=True, # only relevant for i==0
+ only_slice=True,
allow_dups=True,
- use_na_proxy=True, # only relevant for i==0
+ use_na_proxy=True,
)
- new_mgrs_indexers.append((mgr, {}))
+ new_indexers = indexers.copy()
+ del new_indexers[0]
+ new_mgrs_indexers.append((new_mgr, new_indexers))
+ else:
+ new_mgrs_indexers.append((mgr, indexers))
return new_mgrs_indexers
-def _get_mgr_concatenation_plan(mgr: BlockManager):
+def _get_mgr_concatenation_plan(mgr: BlockManager, indexers: dict[int, np.ndarray]):
"""
- Construct concatenation plan for given block manager.
+ Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
+ indexers : dict of {axis: indexer}
Returns
-------
@@ -333,11 +301,15 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
# Calculate post-reindex shape , save for item axis which will be separate
# for each block anyway.
mgr_shape_list = list(mgr.shape)
+ for ax, indexer in indexers.items():
+ mgr_shape_list[ax] = len(indexer)
mgr_shape = tuple(mgr_shape_list)
+ assert 0 not in indexers
+
if mgr.is_single_block:
blk = mgr.blocks[0]
- return [(blk.mgr_locs, JoinUnit(blk, mgr_shape))]
+ return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
blknos = mgr.blknos
blklocs = mgr.blklocs
@@ -348,6 +320,8 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
assert placements.is_slice_like
assert blkno != -1
+ join_unit_indexers = indexers.copy()
+
shape_list = list(mgr_shape)
shape_list[0] = len(placements)
shape = tuple(shape_list)
@@ -372,14 +346,13 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
)
)
- if not unit_no_ax0_reindexing:
- # create block from subset of columns
- blk = blk.getitem_block(ax0_blk_indexer)
+ # Omit indexer if no item reindexing is required.
+ if unit_no_ax0_reindexing:
+ join_unit_indexers.pop(0, None)
+ else:
+ join_unit_indexers[0] = ax0_blk_indexer
- # Assertions disabled for performance
- # assert blk._mgr_locs.as_slice == placements.as_slice
- # assert blk.shape[0] == shape[0]
- unit = JoinUnit(blk, shape)
+ unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
@@ -387,82 +360,192 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
class JoinUnit:
- def __init__(self, block: Block, shape: Shape):
+ def __init__(self, block: Block, shape: Shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
+ # Note: block is None implies indexers is None, but not vice-versa
+ if indexers is None:
+ indexers = {}
self.block = block
+ self.indexers = indexers
self.shape = shape
def __repr__(self) -> str:
- return f"{type(self).__name__}({repr(self.block)})"
+ return f"{type(self).__name__}({repr(self.block)}, {self.indexers})"
+
+ @cache_readonly
+ def needs_filling(self) -> bool:
+ for indexer in self.indexers.values():
+ # FIXME: cache results of indexer == -1 checks.
+ if (indexer == -1).any():
+ return True
+
+ return False
+
+ @cache_readonly
+ def dtype(self):
+ blk = self.block
+ if blk.values.dtype.kind == "V":
+ raise AssertionError("Block is None, no dtype")
+
+ if not self.needs_filling:
+ return blk.dtype
+ return ensure_dtype_can_hold_na(blk.dtype)
+
+ def _is_valid_na_for(self, dtype: DtypeObj) -> bool:
+ """
+ Check that we are all-NA of a type/dtype that is compatible with this dtype.
+ Augments `self.is_na` with an additional check of the type of NA values.
+ """
+ if not self.is_na:
+ return False
+ if self.block.dtype.kind == "V":
+ return True
+
+ if self.dtype == object:
+ values = self.block.values
+ return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))
+
+ na_value = self.block.fill_value
+ if na_value is NaT and not is_dtype_equal(self.dtype, dtype):
+ # e.g. we are dt64 and other is td64
+ # fill_values match but we should not cast self.block.values to dtype
+ # TODO: this will need updating if we ever have non-nano dt64/td64
+ return False
+
+ if na_value is NA and needs_i8_conversion(dtype):
+ # FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat
+ # e.g. self.dtype == "Int64" and dtype is td64, we dont want
+ # to consider these as matching
+ return False
+
+ # TODO: better to use can_hold_element?
+ return is_valid_na_for_dtype(na_value, dtype)
@cache_readonly
def is_na(self) -> bool:
blk = self.block
if blk.dtype.kind == "V":
return True
- return False
-
- def get_reindexed_values(self, empty_dtype: DtypeObj) -> ArrayLike:
- values: ArrayLike
- if self.is_na:
- return make_na_array(empty_dtype, self.shape)
+ if not blk._can_hold_na:
+ return False
+ values = blk.values
+ if values.size == 0:
+ return True
+ if isinstance(values.dtype, SparseDtype):
+ return False
+
+ if values.ndim == 1:
+ # TODO(EA2D): no need for special case with 2D EAs
+ val = values[0]
+ if not is_scalar(val) or not isna(val):
+ # ideally isna_all would do this short-circuiting
+ return False
+ return isna_all(values)
else:
+ val = values[0][0]
+ if not is_scalar(val) or not isna(val):
+ # ideally isna_all would do this short-circuiting
+ return False
+ return all(isna_all(row) for row in values)
+
+ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
+ values: ArrayLike
- if not self.block._can_consolidate:
+ if upcasted_na is None and self.block.dtype.kind != "V":
+ # No upcasting is necessary
+ fill_value = self.block.fill_value
+ values = self.block.get_values()
+ else:
+ fill_value = upcasted_na
+
+ if self._is_valid_na_for(empty_dtype):
+ # note: always holds when self.block.dtype.kind == "V"
+ blk_dtype = self.block.dtype
+
+ if blk_dtype == np.dtype("object"):
+ # we want to avoid filling with np.nan if we are
+ # using None; we already know that we are all
+ # nulls
+ values = self.block.values.ravel(order="K")
+ if len(values) and values[0] is None:
+ fill_value = None
+
+ if is_datetime64tz_dtype(empty_dtype):
+ i8values = np.full(self.shape, fill_value.value)
+ return DatetimeArray(i8values, dtype=empty_dtype)
+
+ elif is_1d_only_ea_dtype(empty_dtype):
+ empty_dtype = cast(ExtensionDtype, empty_dtype)
+ cls = empty_dtype.construct_array_type()
+
+ missing_arr = cls._from_sequence([], dtype=empty_dtype)
+ ncols, nrows = self.shape
+ assert ncols == 1, ncols
+ empty_arr = -1 * np.ones((nrows,), dtype=np.intp)
+ return missing_arr.take(
+ empty_arr, allow_fill=True, fill_value=fill_value
+ )
+ elif isinstance(empty_dtype, ExtensionDtype):
+ # TODO: no tests get here, a handful would if we disabled
+ # the dt64tz special-case above (which is faster)
+ cls = empty_dtype.construct_array_type()
+ missing_arr = cls._empty(shape=self.shape, dtype=empty_dtype)
+ missing_arr[:] = fill_value
+ return missing_arr
+ else:
+ # NB: we should never get here with empty_dtype integer or bool;
+ # if we did, the missing_arr.fill would cast to gibberish
+ missing_arr = np.empty(self.shape, dtype=empty_dtype)
+ missing_arr.fill(fill_value)
+ return missing_arr
+
+ if (not self.indexers) and (not self.block._can_consolidate):
# preserve these for validation in concat_compat
return self.block.values
- # No dtype upcasting is done here, it will be performed during
- # concatenation itself.
- values = self.block.values
+ if self.block.is_bool:
+ # External code requested filling/upcasting, bool values must
+ # be upcasted to object to avoid being upcasted to numeric.
+ values = self.block.astype(np.dtype("object")).values
+ else:
+ # No dtype upcasting is done here, it will be performed during
+ # concatenation itself.
+ values = self.block.values
- return values
+ if not self.indexers:
+ # If there's no indexing to be done, we want to signal outside
+ # code that this array must be copied explicitly. This is done
+ # by returning a view and checking `retval.base`.
+ values = values.view()
+ else:
+ for ax, indexer in self.indexers.items():
+ values = algos.take_nd(values, indexer, axis=ax)
-def make_na_array(dtype: DtypeObj, shape: Shape) -> ArrayLike:
- """
- Construct an np.ndarray or ExtensionArray of the given dtype and shape
- holding all-NA values.
- """
- if is_datetime64tz_dtype(dtype):
- # NaT here is analogous to dtype.na_value below
- i8values = np.full(shape, NaT.value)
- return DatetimeArray(i8values, dtype=dtype)
-
- elif is_1d_only_ea_dtype(dtype):
- dtype = cast(ExtensionDtype, dtype)
- cls = dtype.construct_array_type()
-
- missing_arr = cls._from_sequence([], dtype=dtype)
- nrows = shape[-1]
- taker = -1 * np.ones((nrows,), dtype=np.intp)
- return missing_arr.take(taker, allow_fill=True, fill_value=dtype.na_value)
- elif isinstance(dtype, ExtensionDtype):
- # TODO: no tests get here, a handful would if we disabled
- # the dt64tz special-case above (which is faster)
- cls = dtype.construct_array_type()
- missing_arr = cls._empty(shape=shape, dtype=dtype)
- missing_arr[:] = dtype.na_value
- return missing_arr
- else:
- # NB: we should never get here with dtype integer or bool;
- # if we did, the missing_arr.fill would cast to gibberish
- missing_arr = np.empty(shape, dtype=dtype)
- fill_value = _dtype_to_na_value(dtype)
- missing_arr.fill(fill_value)
- return missing_arr
+ return values
-def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike:
+def _concatenate_join_units(
+ join_units: list[JoinUnit], concat_axis: int, copy: bool
+) -> ArrayLike:
"""
- Concatenate values from several join units along axis=1.
+ Concatenate values from several join units along selected axis.
"""
+ if concat_axis == 0 and len(join_units) > 1:
+ # Concatenating join units along ax0 is handled in _merge_blocks.
+ raise AssertionError("Concatenating join units along axis0")
empty_dtype = _get_empty_dtype(join_units)
- to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype) for ju in join_units]
+ has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)
+ upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)
+
+ to_concat = [
+ ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)
+ for ju in join_units
+ ]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
@@ -492,12 +575,12 @@ def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike
concat_values = ensure_block_shape(concat_values, 2)
else:
- concat_values = concat_compat(to_concat, axis=1)
+ concat_values = concat_compat(to_concat, axis=concat_axis)
return concat_values
-def _dtype_to_na_value(dtype: DtypeObj):
+def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):
"""
Find the NA value to go with this dtype.
"""
@@ -511,6 +594,9 @@ def _dtype_to_na_value(dtype: DtypeObj):
# different from missing.na_value_for_dtype
return None
elif dtype.kind in ["i", "u"]:
+ if not has_none_blocks:
+ # different from missing.na_value_for_dtype
+ return None
return np.nan
elif dtype.kind == "O":
return np.nan
@@ -535,12 +621,14 @@ def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj:
empty_dtype = join_units[0].block.dtype
return empty_dtype
- needs_can_hold_na = any(unit.is_na for unit in join_units)
+ has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)
- dtypes = [unit.block.dtype for unit in join_units if not unit.is_na]
+ dtypes = [unit.dtype for unit in join_units if not unit.is_na]
+ if not len(dtypes):
+ dtypes = [unit.dtype for unit in join_units if unit.block.dtype.kind != "V"]
dtype = find_common_type(dtypes)
- if needs_can_hold_na:
+ if has_none_blocks:
dtype = ensure_dtype_can_hold_na(dtype)
return dtype
@@ -572,6 +660,9 @@ def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:
# unless we're an extension dtype.
all(not ju.is_na or ju.block.is_extension for ju in join_units)
and
+ # no blocks with indexers (as then the dimensions do not fit)
+ all(not ju.indexers for ju in join_units)
+ and
# only use this path when there is something to concatenate
len(join_units) > 1
)
@@ -591,17 +682,28 @@ def _trim_join_unit(join_unit: JoinUnit, length: int) -> JoinUnit:
Extra items that didn't fit are returned as a separate block.
"""
+ if 0 not in join_unit.indexers:
+ extra_indexers = join_unit.indexers
+
+ if join_unit.block is None:
+ extra_block = None
+ else:
+ extra_block = join_unit.block.getitem_block(slice(length, None))
+ join_unit.block = join_unit.block.getitem_block(slice(length))
+ else:
+ extra_block = join_unit.block
- extra_block = join_unit.block.getitem_block(slice(length, None))
- join_unit.block = join_unit.block.getitem_block(slice(length))
+ extra_indexers = copy.copy(join_unit.indexers)
+ extra_indexers[0] = extra_indexers[0][length:]
+ join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
- return JoinUnit(block=extra_block, shape=extra_shape)
+ return JoinUnit(block=extra_block, indexers=extra_indexers, shape=extra_shape)
-def _combine_concat_plans(plans):
+def _combine_concat_plans(plans, concat_axis: int):
"""
Combine multiple concatenation plans into one.
@@ -611,6 +713,18 @@ def _combine_concat_plans(plans):
for p in plans[0]:
yield p[0], [p[1]]
+ elif concat_axis == 0:
+ offset = 0
+ for plan in plans:
+ last_plc = None
+
+ for plc, unit in plan:
+ yield plc.add(offset), [unit]
+ last_plc = plc
+
+ if last_plc is not None:
+ offset += last_plc.as_slice.stop
+
else:
# singleton list so we can modify it as a side-effect within _next_or_none
num_ended = [0]
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 208a1a1757be2..a15cc2e8af66f 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -349,6 +349,20 @@ def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
self.assert_frame_equal(result, expected)
+ def test_setitem_with_expansion_row(self, data, na_value):
+ df = pd.DataFrame({"data": data[:1]})
+
+ df.loc[1, "data"] = data[1]
+ expected = pd.DataFrame({"data": data[:2]})
+ self.assert_frame_equal(df, expected)
+
+ # https://github.com/pandas-dev/pandas/issues/47284
+ df.loc[2, "data"] = na_value
+ expected = pd.DataFrame(
+ {"data": pd.Series([data[0], data[1], na_value], dtype=data.dtype)}
+ )
+ self.assert_frame_equal(df, expected)
+
def test_setitem_series(self, data, full_indexer):
# https://github.com/pandas-dev/pandas/issues/32395
ser = pd.Series(data, name="data")
diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py
index 5cfad472e0134..f8e6e07050aca 100644
--- a/pandas/tests/frame/methods/test_append.py
+++ b/pandas/tests/frame/methods/test_append.py
@@ -159,7 +159,7 @@ def test_append_empty_dataframe(self):
expected = df1.copy()
tm.assert_frame_equal(result, expected)
- def test_append_dtypes(self):
+ def test_append_dtypes(self, using_array_manager):
# GH 5754
# row appends of different dtypes (so need to do by-item)
@@ -183,7 +183,10 @@ def test_append_dtypes(self):
expected = DataFrame(
{"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")}
)
- expected = expected.astype(object)
+ if using_array_manager:
+ # TODO(ArrayManager) decide on exact casting rules in concat
+ # With ArrayManager, all-NaN float is not ignored
+ expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
@@ -192,7 +195,9 @@ def test_append_dtypes(self):
expected = DataFrame(
{"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")}
)
- expected = expected.astype(object)
+ if using_array_manager:
+ # With ArrayManager, all-NaN float is not ignored
+ expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": np.nan}, index=range(1))
@@ -201,7 +206,9 @@ def test_append_dtypes(self):
expected = DataFrame(
{"bar": Series([np.nan, Timestamp("20130101")], dtype="M8[ns]")}
)
- expected = expected.astype(object)
+ if using_array_manager:
+ # With ArrayManager, all-NaN float is not ignored
+ expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py
index a7b3c77e6ea0a..cc2f2ab7f7c1c 100644
--- a/pandas/tests/reshape/concat/test_concat.py
+++ b/pandas/tests/reshape/concat/test_concat.py
@@ -12,6 +12,7 @@
import pytest
from pandas.errors import PerformanceWarning
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
@@ -744,3 +745,50 @@ def test_concat_retain_attrs(data):
df2.attrs = {1: 1}
df = concat([df1, df2])
assert df.attrs[1] == 1
+
+
+@td.skip_array_manager_invalid_test
+@pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"])
+@pytest.mark.parametrize("empty_dtype", [None, "float64", "object"])
+def test_concat_ignore_emtpy_object_float(empty_dtype, df_dtype):
+ # https://github.com/pandas-dev/pandas/issues/45637
+ df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype)
+ empty = DataFrame(columns=["foo", "bar"], dtype=empty_dtype)
+ result = concat([empty, df])
+ expected = df
+ if df_dtype == "int64":
+ # TODO what exact behaviour do we want for integer eventually?
+ if empty_dtype == "float64":
+ expected = df.astype("float64")
+ else:
+ expected = df.astype("object")
+ tm.assert_frame_equal(result, expected)
+
+
+@td.skip_array_manager_invalid_test
+@pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"])
+@pytest.mark.parametrize("empty_dtype", [None, "float64", "object"])
+def test_concat_ignore_all_na_object_float(empty_dtype, df_dtype):
+ df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype)
+ empty = DataFrame({"foo": [np.nan], "bar": [np.nan]}, dtype=empty_dtype)
+ result = concat([empty, df], ignore_index=True)
+
+ if df_dtype == "int64":
+ # TODO what exact behaviour do we want for integer eventually?
+ if empty_dtype == "object":
+ df_dtype = "object"
+ else:
+ df_dtype = "float64"
+ expected = DataFrame({"foo": [None, 1, 2], "bar": [None, 1, 2]}, dtype=df_dtype)
+ tm.assert_frame_equal(result, expected)
+
+
+@td.skip_array_manager_invalid_test
+def test_concat_ignore_empty_from_reindex():
+ # https://github.com/pandas-dev/pandas/pull/43507#issuecomment-920375856
+ df1 = DataFrame({"a": [1], "b": [pd.Timestamp("2012-01-01")]})
+ df2 = DataFrame({"a": [2]})
+
+ result = concat([df1, df2.reindex(columns=df1.columns)], ignore_index=True)
+ expected = df1 = DataFrame({"a": [1, 2], "b": [pd.Timestamp("2012-01-01"), pd.NaT]})
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 1249194d3a36d..7e62500df3e8c 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -682,7 +682,7 @@ def _constructor(self):
assert isinstance(result, NotADataFrame)
- def test_join_append_timedeltas(self):
+ def test_join_append_timedeltas(self, using_array_manager):
# timedelta64 issues with join/merge
# GH 5695
@@ -696,9 +696,11 @@ def test_join_append_timedeltas(self):
{
"d": [datetime(2013, 11, 5, 5, 56), datetime(2013, 11, 5, 5, 56)],
"t": [timedelta(0, 22500), timedelta(0, 22500)],
- },
- dtype=object,
+ }
)
+ if using_array_manager:
+ # TODO(ArrayManager) decide on exact casting rules in concat
+ expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
def test_join_append_timedeltas2(self):
| Backport PR #47372 | https://api.github.com/repos/pandas-dev/pandas/pulls/47472 | 2022-06-22T21:19:20Z | 2022-06-22T22:52:53Z | 2022-06-22T22:52:53Z | 2022-06-22T22:52:57Z |
BUG: GH33912 fixed issue with precision of the the left end | diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 00b2b30eb3122..e451a747557c3 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -582,7 +582,8 @@ def _format_labels(
breaks = [formatter(b) for b in bins]
if right and include_lowest:
# adjust lhs of first interval by precision to account for being right closed
- breaks[0] = adjust(breaks[0])
+ # Applied formatter to address GH33912
+ breaks[0] = formatter(adjust(breaks[0]))
return IntervalIndex.from_breaks(breaks, inclusive=inclusive)
| - [ ] closes #33912
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47470 | 2022-06-22T20:21:42Z | 2022-08-15T16:43:17Z | null | 2022-08-15T16:43:18Z |
TYP: Fix typing errors on main | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 72f6a7bce4d0e..7f9d77c4193ca 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1064,7 +1064,13 @@ def checked_add_with_arr(
elif arr_mask is not None:
not_nan = np.logical_not(arr_mask)
elif b_mask is not None:
- not_nan = np.logical_not(b2_mask)
+ # error: Argument 1 to "__call__" of "_UFunc_Nin1_Nout1" has
+ # incompatible type "Optional[ndarray[Any, dtype[bool_]]]";
+ # expected "Union[_SupportsArray[dtype[Any]], _NestedSequence
+ # [_SupportsArray[dtype[Any]]], bool, int, float, complex, str
+ # , bytes, _NestedSequence[Union[bool, int, float, complex, str
+ # , bytes]]]"
+ not_nan = np.logical_not(b2_mask) # type: ignore[arg-type]
else:
not_nan = np.empty(arr.shape, dtype=bool)
not_nan.fill(True)
diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py
index de7945a96c69e..217fbafce719c 100644
--- a/pandas/core/array_algos/quantile.py
+++ b/pandas/core/array_algos/quantile.py
@@ -143,7 +143,10 @@ def _nanpercentile_1d(
return np.percentile(
values,
qs,
- **{np_percentile_argname: interpolation},
+ # error: No overload variant of "percentile" matches argument
+ # types "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]"
+ # , "Dict[str, str]" [call-overload]
+ **{np_percentile_argname: interpolation}, # type: ignore[call-overload]
)
@@ -212,5 +215,8 @@ def _nanpercentile(
values,
qs,
axis=1,
- **{np_percentile_argname: interpolation},
+ # error: No overload variant of "percentile" matches argument types
+ # "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]",
+ # "int", "Dict[str, str]" [call-overload]
+ **{np_percentile_argname: interpolation}, # type: ignore[call-overload]
)
diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py
index e241fc119ae02..d2875be0f58cd 100644
--- a/pandas/core/arraylike.py
+++ b/pandas/core/arraylike.py
@@ -265,9 +265,8 @@ def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any)
return result
# Determine if we should defer.
- # error: "Type[ndarray[Any, Any]]" has no attribute "__array_ufunc__"
no_defer = (
- np.ndarray.__array_ufunc__, # type: ignore[attr-defined]
+ np.ndarray.__array_ufunc__,
cls.__array_ufunc__,
)
diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index c1380fcdbba06..27d74f2140434 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -496,7 +496,15 @@ def _indexing_key_to_indices(
if isinstance(key, slice):
indices = np.arange(n)[key]
elif is_integer(key):
- indices = np.arange(n)[[key]]
+ # error: Invalid index type "List[Union[int, ndarray[Any, Any]]]"
+ # for "ndarray[Any, dtype[signedinteger[Any]]]"; expected type
+ # "Union[SupportsIndex, _SupportsArray[dtype[Union[bool_,
+ # integer[Any]]]], _NestedSequence[_SupportsArray[dtype[Union
+ # [bool_, integer[Any]]]]], _NestedSequence[Union[bool, int]]
+ # , Tuple[Union[SupportsIndex, _SupportsArray[dtype[Union[bool_
+ # , integer[Any]]]], _NestedSequence[_SupportsArray[dtype[Union
+ # [bool_, integer[Any]]]]], _NestedSequence[Union[bool, int]]], ...]]"
+ indices = np.arange(n)[[key]] # type: ignore[index]
elif is_bool_dtype(key):
key = np.asarray(key)
if len(key) != n:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 97dae33bc0311..5f060542526d3 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -487,10 +487,7 @@ def _generate_range(
np.linspace(0, end.value - start.value, periods, dtype="int64")
+ start.value
)
- # error: Non-overlapping equality check
- # (left operand type: "dtype[signedinteger[Any]]",
- # right operand type: "Literal['i8']")
- if i8values.dtype != "i8": # type: ignore[comparison-overlap]
+ if i8values.dtype != "i8":
# 2022-01-09 I (brock) am not sure if it is possible for this
# to overflow and cast to e.g. f8, but if it does we need to cast
i8values = i8values.astype("i8")
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 69814863afefc..1015a54826ac8 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -687,21 +687,7 @@ def __getitem__(
if is_scalar(left) and isna(left):
return self._fill_value
return Interval(left, right, inclusive=self.inclusive)
- # error: Argument 1 to "ndim" has incompatible type
- # "Union[ndarray[Any, Any], ExtensionArray]"; expected
- # "Union[Sequence[Sequence[Sequence[Sequence[Sequence[Any]]]]],
- # Union[Union[_SupportsArray[dtype[Any]],
- # Sequence[_SupportsArray[dtype[Any]]],
- # Sequence[Sequence[_SupportsArray[dtype[Any]]]],
- # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]],
- # Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]],
- # Union[bool, int, float, complex, str, bytes,
- # Sequence[Union[bool, int, float, complex, str, bytes]],
- # Sequence[Sequence[Union[bool, int, float, complex, str, bytes]]],
- # Sequence[Sequence[Sequence[Union[bool, int, float, complex, str, bytes]]]],
- # Sequence[Sequence[Sequence[Sequence[Union[bool, int, float,
- # complex, str, bytes]]]]]]]]"
- if np.ndim(left) > 1: # type: ignore[arg-type]
+ if np.ndim(left) > 1:
# GH#30588 multi-dimensional indexer disallowed
raise ValueError("multi-dimensional indexing not allowed")
return self._shallow_copy(left, right)
@@ -1679,7 +1665,14 @@ def isin(self, values) -> np.ndarray:
# complex128 ndarray is much more performant.
left = self._combined.view("complex128")
right = values._combined.view("complex128")
- return np.in1d(left, right)
+ # error: Argument 1 to "in1d" has incompatible type
+ # "Union[ExtensionArray, ndarray[Any, Any],
+ # ndarray[Any, dtype[Any]]]"; expected
+ # "Union[_SupportsArray[dtype[Any]],
+ # _NestedSequence[_SupportsArray[dtype[Any]]], bool,
+ # int, float, complex, str, bytes, _NestedSequence[
+ # Union[bool, int, float, complex, str, bytes]]]"
+ return np.in1d(left, right) # type: ignore[arg-type]
elif needs_i8_conversion(self.left.dtype) ^ needs_i8_conversion(
values.left.dtype
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index d2c082c472e5e..78c82d9a4e478 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -110,13 +110,7 @@ def __init__(
self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False
) -> None:
# values is supposed to already be validated in the subclass
- if not (
- isinstance(mask, np.ndarray)
- and
- # error: Non-overlapping equality check
- # (left operand type: "dtype[bool_]", right operand type: "Type[bool_]")
- mask.dtype == np.bool_ # type: ignore[comparison-overlap]
- ):
+ if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_):
raise TypeError(
"mask should be boolean numpy array. Use "
"the 'pd.array' function instead"
@@ -1157,7 +1151,12 @@ def any(self, *, skipna: bool = True, **kwargs):
nv.validate_any((), kwargs)
values = self._data.copy()
- np.putmask(values, self._mask, self._falsey_value)
+ # error: Argument 3 to "putmask" has incompatible type "object";
+ # expected "Union[_SupportsArray[dtype[Any]],
+ # _NestedSequence[_SupportsArray[dtype[Any]]],
+ # bool, int, float, complex, str, bytes,
+ # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
+ np.putmask(values, self._mask, self._falsey_value) # type: ignore[arg-type]
result = values.any()
if skipna:
return result
@@ -1233,7 +1232,12 @@ def all(self, *, skipna: bool = True, **kwargs):
nv.validate_all((), kwargs)
values = self._data.copy()
- np.putmask(values, self._mask, self._truthy_value)
+ # error: Argument 3 to "putmask" has incompatible type "object";
+ # expected "Union[_SupportsArray[dtype[Any]],
+ # _NestedSequence[_SupportsArray[dtype[Any]]],
+ # bool, int, float, complex, str, bytes,
+ # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
+ np.putmask(values, self._mask, self._truthy_value) # type: ignore[arg-type]
result = values.all()
if skipna:
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 8215abf294221..0c34229fb5080 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -944,7 +944,16 @@ def __getitem__(
if is_integer(key):
return self._get_val_at(key)
elif isinstance(key, tuple):
- data_slice = self.to_dense()[key]
+ # error: Invalid index type "Tuple[Union[int, ellipsis], ...]"
+ # for "ndarray[Any, Any]"; expected type
+ # "Union[SupportsIndex, _SupportsArray[dtype[Union[bool_,
+ # integer[Any]]]], _NestedSequence[_SupportsArray[dtype[
+ # Union[bool_, integer[Any]]]]], _NestedSequence[Union[
+ # bool, int]], Tuple[Union[SupportsIndex, _SupportsArray[
+ # dtype[Union[bool_, integer[Any]]]], _NestedSequence[
+ # _SupportsArray[dtype[Union[bool_, integer[Any]]]]],
+ # _NestedSequence[Union[bool, int]]], ...]]"
+ data_slice = self.to_dense()[key] # type: ignore[index]
elif isinstance(key, slice):
# Avoid densifying when handling contiguous slices
@@ -1184,7 +1193,10 @@ def _concat_same_type(
data = np.concatenate(values)
indices_arr = np.concatenate(indices)
- sp_index = IntIndex(length, indices_arr)
+ # error: Argument 2 to "IntIndex" has incompatible type
+ # "ndarray[Any, dtype[signedinteger[_32Bit]]]";
+ # expected "Sequence[int]"
+ sp_index = IntIndex(length, indices_arr) # type: ignore[arg-type]
else:
# when concatenating block indices, we don't claim that you'll
@@ -1374,7 +1386,8 @@ def __setstate__(self, state):
if isinstance(state, tuple):
# Compat for pandas < 0.24.0
nd_state, (fill_value, sp_index) = state
- sparse_values = np.array([])
+ # error: Need type annotation for "sparse_values"
+ sparse_values = np.array([]) # type: ignore[var-annotated]
sparse_values.__setstate__(nd_state)
self._sparse_values = sparse_values
diff --git a/pandas/core/dtypes/astype.py b/pandas/core/dtypes/astype.py
index 7dc2c81746454..8d1427976276c 100644
--- a/pandas/core/dtypes/astype.py
+++ b/pandas/core/dtypes/astype.py
@@ -113,9 +113,7 @@ def astype_nansafe(
).reshape(shape)
elif is_datetime64_dtype(arr.dtype):
- # error: Non-overlapping equality check (left
- # operand type: "dtype[Any]", right operand type: "Type[signedinteger[Any]]")
- if dtype == np.int64: # type: ignore[comparison-overlap]
+ if dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
@@ -127,9 +125,7 @@ def astype_nansafe(
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr.dtype):
- # error: Non-overlapping equality check (left
- # operand type: "dtype[Any]", right operand type: "Type[signedinteger[Any]]")
- if dtype == np.int64: # type: ignore[comparison-overlap]
+ if dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index bdbad2560b2d7..a192337daf59b 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -534,9 +534,7 @@ def is_string_or_object_np_dtype(dtype: np.dtype) -> bool:
"""
Faster alternative to is_string_dtype, assumes we have a np.dtype object.
"""
- # error: Non-overlapping equality check (left operand type:
- # "dtype[Any]", right operand type: "Type[object]")
- return dtype == object or dtype.kind in "SU" # type: ignore[comparison-overlap]
+ return dtype == object or dtype.kind in "SU"
def is_string_dtype(arr_or_dtype) -> bool:
diff --git a/pandas/core/exchange/buffer.py b/pandas/core/exchange/buffer.py
index 65f2ac6dabef5..098c596bff4cd 100644
--- a/pandas/core/exchange/buffer.py
+++ b/pandas/core/exchange/buffer.py
@@ -57,8 +57,7 @@ def __dlpack__(self):
Represent this structure as DLPack interface.
"""
if _NUMPY_HAS_DLPACK:
- # error: "ndarray[Any, Any]" has no attribute "__dlpack__"
- return self._x.__dlpack__() # type: ignore[attr-defined]
+ return self._x.__dlpack__()
raise NotImplementedError("__dlpack__")
def __dlpack_device__(self) -> Tuple[DlpackDeviceType, Optional[int]]:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 535b06650c665..7b5c374dc25d9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2481,7 +2481,9 @@ def to_records(
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
- formats.append(dtype_mapping)
+ # error: Argument 1 to "append" of "list" has incompatible
+ # type "Union[type, dtype[Any], str]"; expected "dtype[Any]"
+ formats.append(dtype_mapping) # type: ignore[arg-type]
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 7e6233d247251..0393c9d07cc74 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4834,7 +4834,13 @@ def _join_non_unique(
right = other._values.take(right_idx)
if isinstance(join_array, np.ndarray):
- np.putmask(join_array, mask, right)
+ # error: Argument 3 to "putmask" has incompatible type
+ # "Union[ExtensionArray, ndarray[Any, Any]]"; expected
+ # "Union[_SupportsArray[dtype[Any]], _NestedSequence[
+ # _SupportsArray[dtype[Any]]], bool, int, float, complex,
+ # str, bytes, _NestedSequence[Union[bool, int, float,
+ # complex, str, bytes]]]"
+ np.putmask(join_array, mask, right) # type: ignore[arg-type]
else:
join_array._putmask(mask, right)
@@ -5348,7 +5354,10 @@ def __getitem__(self, key):
if hasattr(result, "_ndarray"):
# i.e. NDArrayBackedExtensionArray
# Unpack to ndarray for MPL compat
- return result._ndarray
+ # error: Item "ndarray[Any, Any]" of
+ # "Union[ExtensionArray, ndarray[Any, Any]]"
+ # has no attribute "_ndarray"
+ return result._ndarray # type: ignore[union-attr]
return result
# NB: Using _constructor._simple_new would break if MultiIndex
@@ -6886,7 +6895,9 @@ def insert(self, loc: int, item) -> Index:
new_values = np.insert(arr, loc, casted)
else:
- new_values = np.insert(arr, loc, None)
+ # error: No overload variant of "insert" matches argument types
+ # "ndarray[Any, Any]", "int", "None"
+ new_values = np.insert(arr, loc, None) # type: ignore[call-overload]
loc = loc if loc >= 0 else loc - 1
new_values[loc] = item
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 101eac992e95d..0a8df9d64d512 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -363,7 +363,10 @@ def _validate_codes(self, level: list, code: list):
"""
null_mask = isna(level)
if np.any(null_mask):
- code = np.where(null_mask[code], -1, code)
+ # error: Incompatible types in assignment
+ # (expression has type "ndarray[Any, dtype[Any]]",
+ # variable has type "List[Any]")
+ code = np.where(null_mask[code], -1, code) # type: ignore[assignment]
return code
def _verify_integrity(self, codes: list | None = None, levels: list | None = None):
@@ -1577,7 +1580,13 @@ def is_monotonic_increasing(self) -> bool:
self._get_level_values(i)._values for i in reversed(range(len(self.levels)))
]
try:
- sort_order = np.lexsort(values)
+ # error: Argument 1 to "lexsort" has incompatible type
+ # "List[Union[ExtensionArray, ndarray[Any, Any]]]";
+ # expected "Union[_SupportsArray[dtype[Any]],
+ # _NestedSequence[_SupportsArray[dtype[Any]]], bool,
+ # int, float, complex, str, bytes, _NestedSequence[Union
+ # [bool, int, float, complex, str, bytes]]]"
+ sort_order = np.lexsort(values) # type: ignore[arg-type]
return Index(sort_order).is_monotonic_increasing
except TypeError:
diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py
index c938a018574f9..1160d3b2a8e3a 100644
--- a/pandas/core/internals/ops.py
+++ b/pandas/core/internals/ops.py
@@ -125,9 +125,7 @@ def _get_same_shape_values(
# argument type "Tuple[Union[ndarray, slice], slice]"
lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload]
assert lvals.shape[0] == 1, lvals.shape
- # error: No overload variant of "__getitem__" of "ExtensionArray" matches
- # argument type "Tuple[int, slice]"
- lvals = lvals[0, :] # type: ignore[call-overload]
+ lvals = lvals[0, :]
else:
# lvals are 1D, rvals are 2D
assert rvals.shape[0] == 1, rvals.shape
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 6224b35f7e680..57b0a95f803b1 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -333,7 +333,13 @@ def func(yvalues: np.ndarray) -> None:
**kwargs,
)
- np.apply_along_axis(func, axis, data)
+ # error: Argument 1 to "apply_along_axis" has incompatible type
+ # "Callable[[ndarray[Any, Any]], None]"; expected "Callable[...,
+ # Union[_SupportsArray[dtype[<nothing>]], Sequence[_SupportsArray
+ # [dtype[<nothing>]]], Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]],
+ # Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]],
+ # Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]]]]]"
+ np.apply_along_axis(func, axis, data) # type: ignore[arg-type]
return
@@ -772,13 +778,23 @@ def interpolate_2d(
"""
if limit_area is not None:
np.apply_along_axis(
- partial(
+ # error: Argument 1 to "apply_along_axis" has incompatible type
+ # "partial[None]"; expected
+ # "Callable[..., Union[_SupportsArray[dtype[<nothing>]],
+ # Sequence[_SupportsArray[dtype[<nothing>]]],
+ # Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]],
+ # Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]],
+ # Sequence[Sequence[Sequence[Sequence[_
+ # SupportsArray[dtype[<nothing>]]]]]]]]"
+ partial( # type: ignore[arg-type]
_interpolate_with_limit_area,
method=method,
limit=limit,
limit_area=limit_area,
),
- axis,
+ # error: Argument 2 to "apply_along_axis" has incompatible type
+ # "Union[str, int]"; expected "SupportsIndex"
+ axis, # type: ignore[arg-type]
values,
)
return
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index aa426d24db75d..06127c8ecb932 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -133,7 +133,9 @@ def melt(
if is_extension_array_dtype(id_data):
id_data = concat([id_data] * K, ignore_index=True)
else:
- id_data = np.tile(id_data._values, K)
+ # error: Incompatible types in assignment (expression has type
+ # "ndarray[Any, dtype[Any]]", variable has type "Series")
+ id_data = np.tile(id_data._values, K) # type: ignore[assignment]
mdata[col] = id_data
mcolumns = id_vars + var_name + [value_name]
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cf602115f683f..6ebee74810305 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2024,7 +2024,9 @@ def count(self, level=None):
lev = lev.insert(cnt, lev._na_value)
obs = level_codes[notna(self._values)]
- out = np.bincount(obs, minlength=len(lev) or None)
+ # error: Argument "minlength" to "bincount" has incompatible type
+ # "Optional[int]"; expected "SupportsIndex"
+ out = np.bincount(obs, minlength=len(lev) or None) # type: ignore[arg-type]
return self._constructor(out, index=lev, dtype="int64").__finalize__(
self, method="count"
)
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 886d81af8ba6b..b45f43adbe952 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -409,10 +409,7 @@ def _prep_values(self, values: ArrayLike) -> np.ndarray:
if inf.any():
values = np.where(inf, np.nan, values)
- # error: Incompatible return value type
- # (got "Union[ExtensionArray, ndarray[Any, Any],
- # ndarray[Any, dtype[floating[_64Bit]]]]", expected "ndarray[Any, Any]")
- return values # type: ignore[return-value]
+ return values
def _insert_on_column(self, result: DataFrame, obj: DataFrame) -> None:
# if we have an 'on' column we want to put it back into
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 1f42329389a18..24669e84443a6 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -3860,24 +3860,10 @@ def _highlight_between(
Return an array of css props based on condition of data values within given range.
"""
if np.iterable(left) and not isinstance(left, str):
- # error: Argument 1 to "_validate_apply_axis_arg"
- # has incompatible type "Union[str, float, Period,
- # Timedelta, Interval[Any], datetime64, timedelta64,
- # datetime, Sequence[Any], ndarray[Any, Any], NDFrame, None]";
- # expected "Union[NDFrame, Sequence[Any], ndarray[Any, Any]]"
- left = _validate_apply_axis_arg(
- left, "left", None, data # type: ignore[arg-type]
- )
+ left = _validate_apply_axis_arg(left, "left", None, data)
if np.iterable(right) and not isinstance(right, str):
- # error: Argument 1 to "_validate_apply_axis_arg"
- # has incompatible type "Union[str, float, Period,
- # Timedelta, Interval[Any], datetime64, timedelta64,
- # datetime, Sequence[Any], ndarray[Any, Any], NDFrame, None]";
- # expected "Union[NDFrame, Sequence[Any], ndarray[Any, Any]]"
- right = _validate_apply_axis_arg(
- right, "right", None, data # type: ignore[arg-type]
- )
+ right = _validate_apply_axis_arg(right, "right", None, data)
# get ops with correct boundary attribution
if inclusive == "both":
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index 7781860f0d61e..3d6b5fcb49b85 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -400,7 +400,15 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
arrs # type: ignore[arg-type]
)
else:
- result[name] = np.concatenate(arrs)
+ # error: Argument 1 to "concatenate" has incompatible
+ # type "List[Union[ExtensionArray, ndarray[Any, Any]]]"
+ # ; expected "Union[_SupportsArray[dtype[Any]],
+ # Sequence[_SupportsArray[dtype[Any]]],
+ # Sequence[Sequence[_SupportsArray[dtype[Any]]]],
+ # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]
+ # , Sequence[Sequence[Sequence[Sequence[
+ # _SupportsArray[dtype[Any]]]]]]]"
+ result[name] = np.concatenate(arrs) # type: ignore[arg-type]
if warning_columns:
warning_names = ",".join(warning_columns)
diff --git a/pandas/tests/extension/date/array.py b/pandas/tests/extension/date/array.py
index d29ed293e71ed..eca935cdc9128 100644
--- a/pandas/tests/extension/date/array.py
+++ b/pandas/tests/extension/date/array.py
@@ -109,7 +109,9 @@ def __init__(
self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31)
self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12)
- for (i,), (y, m, d) in np.ndenumerate(np.char.split(dates, sep="-")):
+ # error: "object_" object is not iterable
+ obj = np.char.split(dates, sep="-")
+ for (i,), (y, m, d) in np.ndenumerate(obj): # type: ignore[misc]
self._year[i] = int(y)
self._month[i] = int(m)
self._day[i] = int(d)
|
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47469 | 2022-06-22T19:40:32Z | 2022-06-23T00:37:56Z | 2022-06-23T00:37:56Z | 2022-06-23T02:09:05Z |
ENH/TST: Add BaseInterfaceTests tests for ArrowExtensionArray PT2 | diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 27d74f2140434..18d965ff26e10 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -31,6 +31,7 @@
)
from pandas.core.dtypes.missing import isna
+from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.base import ExtensionArray
from pandas.core.indexers import (
check_array_indexer,
@@ -45,13 +46,22 @@
from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning
from pandas.core.arrays.arrow.dtype import ArrowDtype
+ ARROW_CMP_FUNCS = {
+ "eq": pc.equal,
+ "ne": pc.not_equal,
+ "lt": pc.less,
+ "gt": pc.greater,
+ "le": pc.less_equal,
+ "ge": pc.greater_equal,
+ }
+
if TYPE_CHECKING:
from pandas import Series
ArrowExtensionArrayT = TypeVar("ArrowExtensionArrayT", bound="ArrowExtensionArray")
-class ArrowExtensionArray(ExtensionArray):
+class ArrowExtensionArray(OpsMixin, ExtensionArray):
"""
Base class for ExtensionArray backed by Arrow ChunkedArray.
"""
@@ -179,6 +189,34 @@ def __arrow_array__(self, type=None):
"""Convert myself to a pyarrow ChunkedArray."""
return self._data
+ def _cmp_method(self, other, op):
+ from pandas.arrays import BooleanArray
+
+ pc_func = ARROW_CMP_FUNCS[op.__name__]
+ if isinstance(other, ArrowExtensionArray):
+ result = pc_func(self._data, other._data)
+ elif isinstance(other, (np.ndarray, list)):
+ result = pc_func(self._data, other)
+ elif is_scalar(other):
+ try:
+ result = pc_func(self._data, pa.scalar(other))
+ except (pa.lib.ArrowNotImplementedError, pa.lib.ArrowInvalid):
+ mask = isna(self) | isna(other)
+ valid = ~mask
+ result = np.zeros(len(self), dtype="bool")
+ result[valid] = op(np.array(self)[valid], other)
+ return BooleanArray(result, mask)
+ else:
+ return NotImplementedError(
+ f"{op.__name__} not implemented for {type(other)}"
+ )
+
+ if pa_version_under2p0:
+ result = result.to_pandas().values
+ else:
+ result = result.to_numpy()
+ return BooleanArray._from_sequence(result)
+
def equals(self, other) -> bool:
if not isinstance(other, ArrowExtensionArray):
return False
@@ -589,7 +627,7 @@ def _replace_with_indices(
# fast path for a contiguous set of indices
arrays = [
chunk[:start],
- pa.array(value, type=chunk.type),
+ pa.array(value, type=chunk.type, from_pandas=True),
chunk[stop + 1 :],
]
arrays = [arr for arr in arrays if len(arr)]
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index a07f748fa0c8c..c4d1a35315d7d 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -34,7 +34,6 @@
)
from pandas.core.dtypes.missing import isna
-from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.arrow import ArrowExtensionArray
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
@@ -51,15 +50,6 @@
from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning
- ARROW_CMP_FUNCS = {
- "eq": pc.equal,
- "ne": pc.not_equal,
- "lt": pc.less,
- "gt": pc.greater,
- "le": pc.less_equal,
- "ge": pc.greater_equal,
- }
-
ArrowStringScalarOrNAT = Union[str, libmissing.NAType]
@@ -74,9 +64,7 @@ def _chk_pyarrow_available() -> None:
# fallback for the ones that pyarrow doesn't yet support
-class ArrowStringArray(
- OpsMixin, ArrowExtensionArray, BaseStringArray, ObjectStringArrayMixin
-):
+class ArrowStringArray(ArrowExtensionArray, BaseStringArray, ObjectStringArrayMixin):
"""
Extension array for string data in a ``pyarrow.ChunkedArray``.
@@ -190,32 +178,6 @@ def to_numpy(
result[mask] = na_value
return result
- def _cmp_method(self, other, op):
- from pandas.arrays import BooleanArray
-
- pc_func = ARROW_CMP_FUNCS[op.__name__]
- if isinstance(other, ArrowStringArray):
- result = pc_func(self._data, other._data)
- elif isinstance(other, (np.ndarray, list)):
- result = pc_func(self._data, other)
- elif is_scalar(other):
- try:
- result = pc_func(self._data, pa.scalar(other))
- except (pa.lib.ArrowNotImplementedError, pa.lib.ArrowInvalid):
- mask = isna(self) | isna(other)
- valid = ~mask
- result = np.zeros(len(self), dtype="bool")
- result[valid] = op(np.array(self)[valid], other)
- return BooleanArray(result, mask)
- else:
- return NotImplemented
-
- if pa_version_under2p0:
- result = result.to_pandas().values
- else:
- result = result.to_numpy()
- return BooleanArray._from_sequence(result)
-
def insert(self, loc: int, item):
if not isinstance(item, str) and item is not libmissing.NA:
raise TypeError("Scalar must be NA or str")
diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py
index 22595c4e461d7..26b94ebe5a8da 100644
--- a/pandas/tests/extension/arrow/arrays.py
+++ b/pandas/tests/extension/arrow/arrays.py
@@ -23,7 +23,6 @@
take,
)
from pandas.api.types import is_scalar
-from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.arrow import ArrowExtensionArray as _ArrowExtensionArray
from pandas.core.construction import extract_array
@@ -72,7 +71,7 @@ def construct_array_type(cls) -> type_t[ArrowStringArray]:
return ArrowStringArray
-class ArrowExtensionArray(OpsMixin, _ArrowExtensionArray):
+class ArrowExtensionArray(_ArrowExtensionArray):
_data: pa.ChunkedArray
@classmethod
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 95cb7045ac68d..06ce6901effc7 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -18,6 +18,7 @@
timedelta,
)
+import numpy as np
import pytest
from pandas.compat import (
@@ -93,6 +94,18 @@ def data_missing(data):
return type(data)._from_sequence([None, data[0]])
+@pytest.fixture(params=["data", "data_missing"])
+def all_data(request, data, data_missing):
+ """Parametrized fixture returning 'data' or 'data_missing' integer arrays.
+
+ Used to test dtype conversion with and without missing values.
+ """
+ if request.param == "data":
+ return data
+ elif request.param == "data_missing":
+ return data_missing
+
+
@pytest.fixture
def na_value():
"""The scalar missing value for this type. Default 'None'"""
@@ -271,6 +284,518 @@ class TestBaseIndex(base.BaseIndexTests):
pass
+class TestBaseInterface(base.BaseInterfaceTests):
+ def test_contains(self, data, data_missing, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ unit = getattr(data.dtype.pyarrow_dtype, "unit", None)
+ if pa_version_under2p0 and tz not in (None, "UTC") and unit == "us":
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(
+ f"Not supported by pyarrow < 2.0 "
+ f"with timestamp type {tz} and {unit}"
+ )
+ )
+ )
+ super().test_contains(data, data_missing)
+
+ @pytest.mark.xfail(reason="pyarrow.ChunkedArray does not support views.")
+ def test_view(self, data):
+ super().test_view(data)
+
+
+class TestBaseMissing(base.BaseMissingTests):
+ def test_fillna_limit_pad(self, data_missing, using_array_manager, request):
+ if using_array_manager and pa.types.is_duration(
+ data_missing.dtype.pyarrow_dtype
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_fillna_limit_pad(data_missing)
+
+ def test_fillna_limit_backfill(self, data_missing, using_array_manager, request):
+ if using_array_manager and pa.types.is_duration(
+ data_missing.dtype.pyarrow_dtype
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_fillna_limit_backfill(data_missing)
+
+ def test_fillna_series(self, data_missing, using_array_manager, request):
+ if using_array_manager and pa.types.is_duration(
+ data_missing.dtype.pyarrow_dtype
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_fillna_series(data_missing)
+
+ def test_fillna_series_method(
+ self, data_missing, fillna_method, using_array_manager, request
+ ):
+ if using_array_manager and pa.types.is_duration(
+ data_missing.dtype.pyarrow_dtype
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_fillna_series_method(data_missing, fillna_method)
+
+ def test_fillna_frame(self, data_missing, using_array_manager, request):
+ if using_array_manager and pa.types.is_duration(
+ data_missing.dtype.pyarrow_dtype
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_fillna_frame(data_missing)
+
+
+class TestBaseSetitem(base.BaseSetitemTests):
+ def test_setitem_scalar_series(self, data, box_in_series, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ super().test_setitem_scalar_series(data, box_in_series)
+
+ def test_setitem_sequence(self, data, box_in_series, using_array_manager, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif (
+ using_array_manager
+ and pa.types.is_duration(data.dtype.pyarrow_dtype)
+ and box_in_series
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_sequence(data, box_in_series)
+
+ def test_setitem_sequence_mismatched_length_raises(
+ self, data, as_array, using_array_manager, request
+ ):
+ if using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_sequence_mismatched_length_raises(data, as_array)
+
+ def test_setitem_empty_indexer(
+ self, data, box_in_series, using_array_manager, request
+ ):
+ if (
+ using_array_manager
+ and pa.types.is_duration(data.dtype.pyarrow_dtype)
+ and box_in_series
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_empty_indexer(data, box_in_series)
+
+ def test_setitem_sequence_broadcasts(
+ self, data, box_in_series, using_array_manager, request
+ ):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif (
+ using_array_manager
+ and pa.types.is_duration(data.dtype.pyarrow_dtype)
+ and box_in_series
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_sequence_broadcasts(data, box_in_series)
+
+ @pytest.mark.parametrize("setter", ["loc", "iloc"])
+ def test_setitem_scalar(self, data, setter, using_array_manager, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_scalar(data, setter)
+
+ def test_setitem_loc_scalar_mixed(self, data, using_array_manager, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_loc_scalar_mixed(data)
+
+ def test_setitem_loc_scalar_single(self, data, using_array_manager, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_loc_scalar_single(data)
+
+ def test_setitem_loc_scalar_multiple_homogoneous(
+ self, data, using_array_manager, request
+ ):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_loc_scalar_multiple_homogoneous(data)
+
+ def test_setitem_iloc_scalar_mixed(self, data, using_array_manager, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_iloc_scalar_mixed(data)
+
+ def test_setitem_iloc_scalar_single(self, data, using_array_manager, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_iloc_scalar_single(data)
+
+ def test_setitem_iloc_scalar_multiple_homogoneous(
+ self, data, using_array_manager, request
+ ):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_iloc_scalar_multiple_homogoneous(data)
+
+ @pytest.mark.parametrize(
+ "mask",
+ [
+ np.array([True, True, True, False, False]),
+ pd.array([True, True, True, False, False], dtype="boolean"),
+ pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
+ ],
+ ids=["numpy-array", "boolean-array", "boolean-array-na"],
+ )
+ def test_setitem_mask(
+ self, data, mask, box_in_series, using_array_manager, request
+ ):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif (
+ using_array_manager
+ and pa.types.is_duration(data.dtype.pyarrow_dtype)
+ and box_in_series
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_mask(data, mask, box_in_series)
+
+ def test_setitem_mask_boolean_array_with_na(
+ self, data, box_in_series, using_array_manager, request
+ ):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ unit = getattr(data.dtype.pyarrow_dtype, "unit", None)
+ if pa_version_under2p0 and tz not in (None, "UTC") and unit == "us":
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif (
+ using_array_manager
+ and pa.types.is_duration(data.dtype.pyarrow_dtype)
+ and box_in_series
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_mask_boolean_array_with_na(data, box_in_series)
+
+ @pytest.mark.parametrize(
+ "idx",
+ [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
+ ids=["list", "integer-array", "numpy-array"],
+ )
+ def test_setitem_integer_array(
+ self, data, idx, box_in_series, using_array_manager, request
+ ):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif (
+ using_array_manager
+ and pa.types.is_duration(data.dtype.pyarrow_dtype)
+ and box_in_series
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_integer_array(data, idx, box_in_series)
+
+ @pytest.mark.parametrize("as_callable", [True, False])
+ @pytest.mark.parametrize("setter", ["loc", None])
+ def test_setitem_mask_aligned(
+ self, data, as_callable, setter, using_array_manager, request
+ ):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_mask_aligned(data, as_callable, setter)
+
+ @pytest.mark.parametrize("setter", ["loc", None])
+ def test_setitem_mask_broadcast(self, data, setter, using_array_manager, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_mask_broadcast(data, setter)
+
+ def test_setitem_tuple_index(self, data, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ super().test_setitem_tuple_index(data)
+
+ def test_setitem_slice(self, data, box_in_series, using_array_manager, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif (
+ using_array_manager
+ and pa.types.is_duration(data.dtype.pyarrow_dtype)
+ and box_in_series
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_slice(data, box_in_series)
+
+ def test_setitem_loc_iloc_slice(self, data, using_array_manager, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_loc_iloc_slice(data)
+
+ def test_setitem_slice_array(self, data, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ super().test_setitem_slice_array(data)
+
+ def test_setitem_with_expansion_dataframe_column(
+ self, data, full_indexer, using_array_manager, request
+ ):
+ # Is there a way to get the full_indexer id "null_slice"?
+ is_null_slice = full_indexer(pd.Series(dtype=object)) == slice(None)
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC") and not is_null_slice:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif (
+ using_array_manager
+ and pa.types.is_duration(data.dtype.pyarrow_dtype)
+ and not is_null_slice
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_with_expansion_dataframe_column(data, full_indexer)
+
+ def test_setitem_with_expansion_row(
+ self, data, na_value, using_array_manager, request
+ ):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_with_expansion_row(data, na_value)
+
+ def test_setitem_frame_2d_values(self, data, using_array_manager, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ if pa_version_under2p0 and tz not in (None, "UTC"):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(f"Not supported by pyarrow < 2.0 with timestamp type {tz}")
+ )
+ )
+ elif using_array_manager and pa.types.is_duration(data.dtype.pyarrow_dtype):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Checking ndim when using arraymanager with duration type"
+ )
+ )
+ super().test_setitem_frame_2d_values(data)
+
+ @pytest.mark.xfail(reason="GH 45419: pyarrow.ChunkedArray does not support views")
+ def test_setitem_preserves_views(self, data):
+ super().test_setitem_preserves_views(data)
+
+
def test_arrowdtype_construct_from_string_type_with_unsupported_parameters():
with pytest.raises(NotImplementedError, match="Passing pyarrow type"):
ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]")
| xref https://github.com/pandas-dev/pandas/pull/47377#issuecomment-1163230392=
* Min pyarrow version with timezones type coerces to integer when setting (xfailing)
* ArrayManger tests with duration types oddly checks for a ndim attribute (xfailing) | https://api.github.com/repos/pandas-dev/pandas/pulls/47468 | 2022-06-22T18:15:20Z | 2022-06-24T18:27:51Z | 2022-06-24T18:27:51Z | 2022-06-24T18:29:24Z |
DEPS/TST: tzdata is optional, not required | diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml
index 65918005ad6f1..5f7beb24674e7 100644
--- a/ci/deps/actions-310.yaml
+++ b/ci/deps/actions-310.yaml
@@ -47,6 +47,7 @@ dependencies:
- scipy
- sqlalchemy
- tabulate
+ - tzdata>=2022a
- xarray
- xlrd
- xlsxwriter
diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml
index a57c7279e2e9b..f4b4d0c79aada 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-38-minimum_versions.yaml
@@ -49,6 +49,7 @@ dependencies:
- scipy=1.7.1
- sqlalchemy=1.4.16
- tabulate=0.8.9
+ - tzdata=2022a
- xarray=0.19.0
- xlrd=2.0.1
- xlsxwriter=1.4.3
diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml
index 8605a9f4520d7..99b55a4c83046 100644
--- a/ci/deps/actions-39.yaml
+++ b/ci/deps/actions-39.yaml
@@ -47,6 +47,7 @@ dependencies:
- scipy
- sqlalchemy
- tabulate
+ - tzdata>=2022a
- xarray
- xlrd
- xlsxwriter
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index 605a69b26a646..e24528e611c12 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -270,6 +270,23 @@ For example, :func:`pandas.read_hdf` requires the ``pytables`` package, while
optional dependency is not installed, pandas will raise an ``ImportError`` when
the method requiring that dependency is called.
+Timezones
+^^^^^^^^^
+
+========================= ========================= =============================================================
+Dependency Minimum Version Notes
+========================= ========================= =============================================================
+tzdata 2022.1(pypi)/ Allows the use of ``zoneinfo`` timezones with pandas.
+ 2022a(for system tzdata) **Note**: You only need to install the pypi package if your
+ system does not already provide the IANA tz database.
+ However, the minimum tzdata version still applies, even if it
+ is not enforced through an error.
+
+ If you would like to keep your system tzdata version updated,
+ it is recommended to use the ``tzdata`` package from
+ conda-forge.
+========================= ========================= =============================================================
+
Visualization
^^^^^^^^^^^^^
diff --git a/environment.yml b/environment.yml
index ec2e0a3860432..121df3feb9cae 100644
--- a/environment.yml
+++ b/environment.yml
@@ -48,6 +48,7 @@ dependencies:
- scipy
- sqlalchemy
- tabulate
+ - tzdata>=2022a
- xarray
- xlrd
- xlsxwriter
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index 22a154be5fcad..5992f31e96988 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -3,6 +3,8 @@ from datetime import (
timezone,
)
+from pandas.compat._optional import import_optional_dependency
+
try:
# py39+
import zoneinfo
@@ -67,6 +69,9 @@ cdef inline bint is_utc_zoneinfo(tzinfo tz):
utc_zoneinfo = ZoneInfo("UTC")
except zoneinfo.ZoneInfoNotFoundError:
return False
+ # Warn if tzdata is too old, even if there is a system tzdata to alert
+ # users about the mismatch between local/system tzdata
+ import_optional_dependency("tzdata", errors="warn", min_version="2022.1")
return tz is utc_zoneinfo
diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index ad6c6fb839f10..3801a1648f1e7 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -44,6 +44,7 @@
"xlwt": "1.3.0",
"xlsxwriter": "1.4.3",
"zstandard": "0.15.2",
+ "tzdata": "2022.1",
}
# A mapping from import name to package name (on PyPI) for packages where
diff --git a/pandas/conftest.py b/pandas/conftest.py
index e176707d8a8f1..babd88b60b366 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -83,6 +83,14 @@
# Import "zoneinfo" could not be resolved (reportMissingImports)
import zoneinfo # type: ignore[no-redef]
+ # Although zoneinfo can be imported in Py39, it is effectively
+ # "not available" without tzdata/IANA tz data.
+ # We will set zoneinfo to not found in this case
+ try:
+ zoneinfo.ZoneInfo("UTC") # type: ignore[attr-defined]
+ except zoneinfo.ZoneInfoNotFoundError: # type: ignore[attr-defined]
+ zoneinfo = None
+
# Until https://github.com/numpy/numpy/issues/19078 is sorted out, just suppress
suppress_npdev_promotion_warning = pytest.mark.filterwarnings(
"ignore:Promotion of numbers and bools:FutureWarning"
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 5a9647456cb0b..10285810d8e7a 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -39,6 +39,7 @@ s3fs
scipy
sqlalchemy
tabulate
+tzdata>=2022.1
xarray
xlrd
xlsxwriter
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index 8cb539d3b02c8..a27d089cdb9e8 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -21,6 +21,7 @@
import yaml
EXCLUDE = {"python", "c-compiler", "cxx-compiler"}
+REMAP_VERSION = {"tzdata": "2022.1"}
RENAME = {"pytables": "tables", "geopandas-base": "geopandas", "pytorch": "torch"}
@@ -41,7 +42,8 @@ def conda_package_to_pip(package: str):
pkg, version = package.split(compare)
if pkg in EXCLUDE:
return
-
+ if pkg in REMAP_VERSION:
+ return "".join((pkg, compare, REMAP_VERSION[pkg]))
if pkg in RENAME:
return "".join((RENAME[pkg], compare, version))
diff --git a/scripts/validate_min_versions_in_sync.py b/scripts/validate_min_versions_in_sync.py
index 4dbf6a4cdcef8..1b937673672f8 100755
--- a/scripts/validate_min_versions_in_sync.py
+++ b/scripts/validate_min_versions_in_sync.py
@@ -21,6 +21,7 @@
pathlib.Path("ci/deps").absolute().glob("actions-*-minimum_versions.yaml")
)
CODE_PATH = pathlib.Path("pandas/compat/_optional.py").resolve()
+EXCLUDE_DEPS = {"tzdata"}
# pandas package is not available
# in pre-commit environment
sys.path.append("pandas/compat")
@@ -34,6 +35,8 @@
def get_versions_from_code() -> dict[str, str]:
install_map = _optional.INSTALL_MAPPING
versions = _optional.VERSIONS
+ for item in EXCLUDE_DEPS:
+ versions.pop(item)
return {
install_map.get(k, k).casefold(): v
for k, v in versions.items()
@@ -55,6 +58,8 @@ def get_versions_from_ci(content: list[str]) -> tuple[dict[str, str], dict[str,
elif seen_required and line.strip():
package, version = line.strip().split("=")
package = package[2:]
+ if package in EXCLUDE_DEPS:
+ continue
if not seen_optional:
required_deps[package] = version
else:
| - [ ] closes #47332 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47467 | 2022-06-22T17:56:29Z | 2022-08-12T00:44:10Z | 2022-08-12T00:44:10Z | 2022-08-12T00:45:22Z |
Fix signature of initObjToJSON | diff --git a/pandas/_libs/src/ujson/python/ujson.c b/pandas/_libs/src/ujson/python/ujson.c
index def06cdf2db84..5d4a5693c0ff6 100644
--- a/pandas/_libs/src/ujson/python/ujson.c
+++ b/pandas/_libs/src/ujson/python/ujson.c
@@ -43,7 +43,7 @@ Numeric decoder derived from TCL library
/* objToJSON */
PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs);
-void initObjToJSON(void);
+void *initObjToJSON(void);
/* JSONToObj */
PyObject *JSONToObj(PyObject *self, PyObject *args, PyObject *kwargs);
| This patch is required to make Pandas compile with `-Wl,--fatal-warnings`. It is also needed to prevent an "indirect call signature mismatch" error when using pandas in wasm. This is the only patch Pyodide currently applies to Pandas. | https://api.github.com/repos/pandas-dev/pandas/pulls/47466 | 2022-06-22T15:24:55Z | 2022-06-23T20:20:20Z | 2022-06-23T20:20:20Z | 2022-06-23T20:20:27Z |
Update ecosystem.rst | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 256c3ee36e80c..fc3b857422c88 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -87,7 +87,7 @@ Featuretools is a Python library for automated feature engineering built on top
Compose is a machine learning tool for labeling data and prediction engineering. It allows you to structure the labeling process by parameterizing prediction problems and transforming time-driven relational data into target values with cutoff times that can be used for supervised learning.
-`STUMPY <https://github.com/TDAmeritrade/stumpy>`__
+`Stumpy <https://github.com/TDAmeritrade/stumpy>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
STUMPY is a powerful and scalable Python library for modern time series analysis.
@@ -146,7 +146,7 @@ also goes beyond matplotlib and pandas with the option to perform statistical
estimation while plotting, aggregating across observations and visualizing the
fit of statistical models to emphasize patterns in a dataset.
-`plotnine <https://github.com/has2k1/plotnine/>`__
+`Plotnine <https://github.com/has2k1/plotnine/>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Hadley Wickham's `ggplot2 <https://ggplot2.tidyverse.org/>`__ is a foundational exploratory visualization package for the R language.
@@ -187,7 +187,7 @@ By printing out a dataframe, Lux automatically `recommends a set of visualizatio
Spun off from the main pandas library, the `qtpandas <https://github.com/draperjames/qtpandas>`__
library enables DataFrame visualization and manipulation in PyQt4 and PySide applications.
-`D-Tale <https://github.com/man-group/dtale>`__
+`D-tale <https://github.com/man-group/dtale>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
D-Tale is a lightweight web client for visualizing pandas data structures. It
@@ -206,7 +206,7 @@ invoked with the following command
D-Tale integrates seamlessly with Jupyter notebooks, Python terminals, Kaggle
& Google Colab. Here are some demos of the `grid <http://alphatechadmin.pythonanywhere.com/dtale/main/1>`__.
-`hvplot <https://hvplot.holoviz.org/index.html>`__
+`Hvplot <https://hvplot.holoviz.org/index.html>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
hvPlot is a high-level plotting API for the PyData ecosystem built on `HoloViews <https://holoviews.org/>`__.
@@ -218,7 +218,7 @@ It can be loaded as a native pandas plotting backend via
.. _ecosystem.ide:
-IDE
+IdE
---
`IPython <https://ipython.org/documentation.html>`__
@@ -323,7 +323,7 @@ PyDatastream is a Python interface to the
REST API to return indexed pandas DataFrames with financial data.
This package requires valid credentials for this API (non free).
-`pandaSDMX <https://pandasdmx.readthedocs.io/en/v1.0/>`__
+`Pandasdmx <https://pandasdmx.readthedocs.io/en/v1.0/>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pandaSDMX is a library to retrieve and acquire statistical data
and metadata disseminated in
| Fix capitalization for some headings
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47464 | 2022-06-22T14:40:24Z | 2022-06-24T17:36:20Z | null | 2022-06-24T17:36:21Z |
Revert "TST: fix groupby-empty xfails (#44092)" | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index b4aea4240e458..dc7e04d28ca43 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1622,7 +1622,6 @@ def func(df):
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
- func.__name__ = "idxmax"
result = self._python_apply_general(func, self._obj_with_exclusions)
self._maybe_warn_numeric_only_depr("idxmax", result, numeric_only)
return result
@@ -1658,7 +1657,6 @@ def func(df):
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
- func.__name__ = "idxmin"
result = self._python_apply_general(func, self._obj_with_exclusions)
self._maybe_warn_numeric_only_depr("idxmin", result, numeric_only)
return result
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index d056b4b03d904..d0774fd9391bc 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -809,18 +809,6 @@ def apply(
mutated = True
result_values.append(res)
- # getattr pattern for __name__ is needed for functools.partial objects
- if len(group_keys) == 0 and getattr(f, "__name__", None) not in [
- "idxmin",
- "idxmax",
- "nanargmin",
- "nanargmax",
- ]:
- # If group_keys is empty, then no function calls have been made,
- # so we will not have raised even if this is an invalid dtype.
- # So do one dummy call here to raise appropriate TypeError.
- f(data.iloc[:0])
-
return result_values, mutated
@cache_readonly
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 97e616ef14cef..c593ff91c4a16 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1922,23 +1922,17 @@ def test_pivot_table_values_key_error():
)
@pytest.mark.filterwarnings("ignore:Dropping invalid columns:FutureWarning")
@pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning")
-def test_empty_groupby(columns, keys, values, method, op, request, using_array_manager):
+def test_empty_groupby(columns, keys, values, method, op, request):
# GH8093 & GH26411
override_dtype = None
if (
isinstance(values, Categorical)
and not isinstance(columns, list)
- and op in ["sum", "prod", "skew", "mad"]
+ and op in ["sum", "prod"]
):
# handled below GH#41291
-
- if using_array_manager and op == "mad":
- right_msg = "Cannot interpret 'CategoricalDtype.* as a data type"
- msg = "Regex pattern \"'Categorical' does not implement.*" + right_msg
- mark = pytest.mark.xfail(raises=AssertionError, match=msg)
- request.node.add_marker(mark)
-
+ pass
elif (
isinstance(values, Categorical)
and len(keys) == 1
@@ -1957,7 +1951,11 @@ def test_empty_groupby(columns, keys, values, method, op, request, using_array_m
raises=TypeError, match="'Categorical' does not implement"
)
request.node.add_marker(mark)
- elif isinstance(values, Categorical) and len(keys) == 1 and op in ["sum", "prod"]:
+ elif (
+ isinstance(values, Categorical)
+ and len(keys) == 1
+ and op in ["mad", "min", "max", "sum", "prod", "skew"]
+ ):
mark = pytest.mark.xfail(
raises=AssertionError, match="(DataFrame|Series) are different"
)
@@ -1971,20 +1969,6 @@ def test_empty_groupby(columns, keys, values, method, op, request, using_array_m
raises=AssertionError, match="(DataFrame|Series) are different"
)
request.node.add_marker(mark)
-
- elif (
- op == "mad"
- and not isinstance(columns, list)
- and isinstance(values, pd.DatetimeIndex)
- and values.tz is not None
- and using_array_manager
- ):
- mark = pytest.mark.xfail(
- raises=TypeError,
- match=r"Cannot interpret 'datetime64\[ns, US/Eastern\]' as a data type",
- )
- request.node.add_marker(mark)
-
elif isinstance(values, BooleanArray) and op in ["sum", "prod"]:
# We expect to get Int64 back for these
override_dtype = "Int64"
@@ -2015,29 +1999,19 @@ def get_result():
if columns == "C":
# i.e. SeriesGroupBy
- if op in ["prod", "sum", "skew"]:
+ if op in ["prod", "sum"]:
# ops that require more than just ordered-ness
if df.dtypes[0].kind == "M":
# GH#41291
# datetime64 -> prod and sum are invalid
- if op == "skew":
- msg = "does not support reduction 'skew'"
- else:
- msg = "datetime64 type does not support"
+ msg = "datetime64 type does not support"
with pytest.raises(TypeError, match=msg):
get_result()
return
- if op in ["prod", "sum", "skew", "mad"]:
- if isinstance(values, Categorical):
+ elif isinstance(values, Categorical):
# GH#41291
- if op == "mad":
- # mad calls mean, which Categorical doesn't implement
- msg = "does not support reduction 'mean'"
- elif op == "skew":
- msg = f"does not support reduction '{op}'"
- else:
- msg = "category type does not support"
+ msg = "category type does not support"
with pytest.raises(TypeError, match=msg):
get_result()
@@ -2084,34 +2058,6 @@ def get_result():
tm.assert_equal(result, expected)
return
- if (
- op in ["mad", "min", "max", "skew"]
- and isinstance(values, Categorical)
- and len(keys) == 1
- ):
- # Categorical doesn't implement, so with numeric_only=True
- # these are dropped and we get an empty DataFrame back
- result = get_result()
- expected = df.set_index(keys)[[]]
-
- # with numeric_only=True, these are dropped, and we get
- # an empty DataFrame back
- if len(keys) != 1:
- # Categorical is special without 'observed=True'
- lev = Categorical([0], dtype=values.dtype)
- mi = MultiIndex.from_product([lev, lev], names=keys)
- expected = DataFrame([], columns=[], index=mi)
- else:
- # all columns are dropped, but we end up with one row
- # Categorical is special without 'observed=True'
- lev = Categorical([0], dtype=values.dtype)
- ci = Index(lev, name=keys[0])
- expected = DataFrame([], columns=[], index=ci)
- # expected = df.set_index(keys)[columns]
-
- tm.assert_equal(result, expected)
- return
-
result = get_result()
expected = df.set_index(keys)[columns]
if override_dtype is not None:
| This reverts commit 884d00f8af4fa73a24a5e522dad4e216caca95e0.
closes #46496 | https://api.github.com/repos/pandas-dev/pandas/pulls/47463 | 2022-06-22T14:08:55Z | 2022-06-22T15:40:17Z | null | 2022-06-22T15:40:18Z |
Revert "ENH/TST: Add BaseInterfaceTests tests for ArrowExtensionArray" | diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 043f682f7dfa8..c1380fcdbba06 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -31,7 +31,6 @@
)
from pandas.core.dtypes.missing import isna
-from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.base import ExtensionArray
from pandas.core.indexers import (
check_array_indexer,
@@ -46,22 +45,13 @@
from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning
from pandas.core.arrays.arrow.dtype import ArrowDtype
- ARROW_CMP_FUNCS = {
- "eq": pc.equal,
- "ne": pc.not_equal,
- "lt": pc.less,
- "gt": pc.greater,
- "le": pc.less_equal,
- "ge": pc.greater_equal,
- }
-
if TYPE_CHECKING:
from pandas import Series
ArrowExtensionArrayT = TypeVar("ArrowExtensionArrayT", bound="ArrowExtensionArray")
-class ArrowExtensionArray(OpsMixin, ExtensionArray):
+class ArrowExtensionArray(ExtensionArray):
"""
Base class for ExtensionArray backed by Arrow ChunkedArray.
"""
@@ -189,34 +179,6 @@ def __arrow_array__(self, type=None):
"""Convert myself to a pyarrow ChunkedArray."""
return self._data
- def _cmp_method(self, other, op):
- from pandas.arrays import BooleanArray
-
- pc_func = ARROW_CMP_FUNCS[op.__name__]
- if isinstance(other, ArrowExtensionArray):
- result = pc_func(self._data, other._data)
- elif isinstance(other, (np.ndarray, list)):
- result = pc_func(self._data, other)
- elif is_scalar(other):
- try:
- result = pc_func(self._data, pa.scalar(other))
- except (pa.lib.ArrowNotImplementedError, pa.lib.ArrowInvalid):
- mask = isna(self) | isna(other)
- valid = ~mask
- result = np.zeros(len(self), dtype="bool")
- result[valid] = op(np.array(self)[valid], other)
- return BooleanArray(result, mask)
- else:
- return NotImplementedError(
- f"{op.__name__} not implemented for {type(other)}"
- )
-
- if pa_version_under2p0:
- result = result.to_pandas().values
- else:
- result = result.to_numpy()
- return BooleanArray._from_sequence(result)
-
def equals(self, other) -> bool:
if not isinstance(other, ArrowExtensionArray):
return False
@@ -619,7 +581,7 @@ def _replace_with_indices(
# fast path for a contiguous set of indices
arrays = [
chunk[:start],
- pa.array(value, type=chunk.type, from_pandas=True),
+ pa.array(value, type=chunk.type),
chunk[stop + 1 :],
]
arrays = [arr for arr in arrays if len(arr)]
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index c4d1a35315d7d..a07f748fa0c8c 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -34,6 +34,7 @@
)
from pandas.core.dtypes.missing import isna
+from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.arrow import ArrowExtensionArray
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
@@ -50,6 +51,15 @@
from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning
+ ARROW_CMP_FUNCS = {
+ "eq": pc.equal,
+ "ne": pc.not_equal,
+ "lt": pc.less,
+ "gt": pc.greater,
+ "le": pc.less_equal,
+ "ge": pc.greater_equal,
+ }
+
ArrowStringScalarOrNAT = Union[str, libmissing.NAType]
@@ -64,7 +74,9 @@ def _chk_pyarrow_available() -> None:
# fallback for the ones that pyarrow doesn't yet support
-class ArrowStringArray(ArrowExtensionArray, BaseStringArray, ObjectStringArrayMixin):
+class ArrowStringArray(
+ OpsMixin, ArrowExtensionArray, BaseStringArray, ObjectStringArrayMixin
+):
"""
Extension array for string data in a ``pyarrow.ChunkedArray``.
@@ -178,6 +190,32 @@ def to_numpy(
result[mask] = na_value
return result
+ def _cmp_method(self, other, op):
+ from pandas.arrays import BooleanArray
+
+ pc_func = ARROW_CMP_FUNCS[op.__name__]
+ if isinstance(other, ArrowStringArray):
+ result = pc_func(self._data, other._data)
+ elif isinstance(other, (np.ndarray, list)):
+ result = pc_func(self._data, other)
+ elif is_scalar(other):
+ try:
+ result = pc_func(self._data, pa.scalar(other))
+ except (pa.lib.ArrowNotImplementedError, pa.lib.ArrowInvalid):
+ mask = isna(self) | isna(other)
+ valid = ~mask
+ result = np.zeros(len(self), dtype="bool")
+ result[valid] = op(np.array(self)[valid], other)
+ return BooleanArray(result, mask)
+ else:
+ return NotImplemented
+
+ if pa_version_under2p0:
+ result = result.to_pandas().values
+ else:
+ result = result.to_numpy()
+ return BooleanArray._from_sequence(result)
+
def insert(self, loc: int, item):
if not isinstance(item, str) and item is not libmissing.NA:
raise TypeError("Scalar must be NA or str")
diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py
index 26b94ebe5a8da..22595c4e461d7 100644
--- a/pandas/tests/extension/arrow/arrays.py
+++ b/pandas/tests/extension/arrow/arrays.py
@@ -23,6 +23,7 @@
take,
)
from pandas.api.types import is_scalar
+from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.arrow import ArrowExtensionArray as _ArrowExtensionArray
from pandas.core.construction import extract_array
@@ -71,7 +72,7 @@ def construct_array_type(cls) -> type_t[ArrowStringArray]:
return ArrowStringArray
-class ArrowExtensionArray(_ArrowExtensionArray):
+class ArrowExtensionArray(OpsMixin, _ArrowExtensionArray):
_data: pa.ChunkedArray
@classmethod
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 9eeaf39959f29..95cb7045ac68d 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -93,18 +93,6 @@ def data_missing(data):
return type(data)._from_sequence([None, data[0]])
-@pytest.fixture(params=["data", "data_missing"])
-def all_data(request, data, data_missing):
- """Parametrized fixture returning 'data' or 'data_missing' integer arrays.
-
- Used to test dtype conversion with and without missing values.
- """
- if request.param == "data":
- return data
- elif request.param == "data_missing":
- return data_missing
-
-
@pytest.fixture
def na_value():
"""The scalar missing value for this type. Default 'None'"""
@@ -283,36 +271,6 @@ class TestBaseIndex(base.BaseIndexTests):
pass
-class TestBaseInterface(base.BaseInterfaceTests):
- def test_contains(self, data, data_missing, request):
- tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
- unit = getattr(data.dtype.pyarrow_dtype, "unit", None)
- if pa_version_under2p0 and tz not in (None, "UTC") and unit == "us":
- request.node.add_marker(
- pytest.mark.xfail(
- reason=(
- f"Not supported by pyarrow < 2.0 "
- f"with timestamp type {tz} and {unit}"
- )
- )
- )
- super().test_contains(data, data_missing)
-
- @pytest.mark.xfail(reason="pyarrow.ChunkedArray does not support views.")
- def test_view(self, data):
- super().test_view(data)
-
-
-class TestBaseMissing(base.BaseMissingTests):
- pass
-
-
-class TestBaseSetitemTests(base.BaseSetitemTests):
- @pytest.mark.xfail(reason="GH 45419: pyarrow.ChunkedArray does not support views")
- def test_setitem_preserves_views(self, data):
- super().test_setitem_preserves_views(data)
-
-
def test_arrowdtype_construct_from_string_type_with_unsupported_parameters():
with pytest.raises(NotImplementedError, match="Passing pyarrow type"):
ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]")
| Reverts pandas-dev/pandas#47377 | https://api.github.com/repos/pandas-dev/pandas/pulls/47462 | 2022-06-22T13:39:49Z | 2022-06-22T15:06:03Z | 2022-06-22T15:06:03Z | 2022-06-22T15:06:07Z |
Backport PR #47393 on branch 1.4.x (CI/TST: Don't require length for construct_1d_arraylike_from_scalar cast to float64) | diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 2595cff5c43c4..957fcf4ac10fc 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -533,7 +533,10 @@ def sanitize_array(
if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
# possibility of nan -> garbage
try:
- subarr = _try_cast(data, dtype, copy, True)
+ # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int
+ # casting aligning with IntCastingNaNError below
+ with np.errstate(invalid="ignore"):
+ subarr = _try_cast(data, dtype, copy, True)
except IntCastingNaNError:
warnings.warn(
"In a future version, passing float-dtype values containing NaN "
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index e70fd443d61e8..1f7789e72be2c 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1914,7 +1914,9 @@ def construct_1d_arraylike_from_scalar(
value = maybe_unbox_datetimelike_tz_deprecation(value, dtype)
subarr = np.empty(length, dtype=dtype)
- subarr.fill(value)
+ if length:
+ # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes
+ subarr.fill(value)
return subarr
@@ -2218,7 +2220,10 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any:
if isinstance(element, np.ndarray) and element.dtype.kind == "f":
# If all can be losslessly cast to integers, then we can hold them
# We do something similar in putmask_smart
- casted = element.astype(dtype)
+
+ # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int
+ with np.errstate(invalid="ignore"):
+ casted = element.astype(dtype)
comp = casted == element
if comp.all():
return element
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 3a39713f18d65..d3ec9fec4640d 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1201,23 +1201,27 @@ def _maybe_coerce_merge_keys(self) -> None:
# check whether ints and floats
elif is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype):
- if not (lk == lk.astype(rk.dtype))[~np.isnan(lk)].all():
- warnings.warn(
- "You are merging on int and float "
- "columns where the float values "
- "are not equal to their int representation.",
- UserWarning,
- )
+ # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int
+ with np.errstate(invalid="ignore"):
+ if not (lk == lk.astype(rk.dtype))[~np.isnan(lk)].all():
+ warnings.warn(
+ "You are merging on int and float "
+ "columns where the float values "
+ "are not equal to their int representation.",
+ UserWarning,
+ )
continue
elif is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype):
- if not (rk == rk.astype(lk.dtype))[~np.isnan(rk)].all():
- warnings.warn(
- "You are merging on int and float "
- "columns where the float values "
- "are not equal to their int representation.",
- UserWarning,
- )
+ # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int
+ with np.errstate(invalid="ignore"):
+ if not (rk == rk.astype(lk.dtype))[~np.isnan(rk)].all():
+ warnings.warn(
+ "You are merging on int and float "
+ "columns where the float values "
+ "are not equal to their int representation.",
+ UserWarning,
+ )
continue
# let's infer and see if we are ok
| Backport PR #47393: CI/TST: Don't require length for construct_1d_arraylike_from_scalar cast to float64 | https://api.github.com/repos/pandas-dev/pandas/pulls/47460 | 2022-06-22T12:07:21Z | 2022-06-22T20:52:47Z | 2022-06-22T20:52:47Z | 2022-06-22T20:52:48Z |
PERF improve performance of is_lexsorted | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 76f6e864a174f..373323649eacb 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -739,6 +739,7 @@ Performance improvements
- Performance improvement in :func:`factorize` (:issue:`46109`)
- Performance improvement in :class:`DataFrame` and :class:`Series` constructors for extension dtype scalars (:issue:`45854`)
- Performance improvement in :func:`read_excel` when ``nrows`` argument provided (:issue:`32727`)
+- Performance improvement in :meth:`MultiIndex.is_monotonic_increasing` (:issue:`47458`)
.. ---------------------------------------------------------------------------
.. _whatsnew_150.bug_fixes:
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index d33eba06988e9..29f9a22c9b36e 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -180,6 +180,8 @@ def is_lexsorted(list_of_arrays: list) -> bint:
else:
result = False
break
+ if not result:
+ break
free(vecs)
return result
| If `result` is False, there's no need to keep the outer loop going, right?
Timing result, with `failure` taken from `test_is_lexsorted`:
```
%load_ext cython
%%cython -a
cimport cython
from cython cimport Py_ssize_t
from numpy cimport int64_t, import_array, ndarray, PyArray_DATA
import numpy as np
from libc.stdlib cimport free, malloc
from libc.stdio cimport printf
import_array()
@cython.wraparound(False)
@cython.boundscheck(False)
def main_is_lexsorted(list_of_arrays: list) -> bint:
cdef:
Py_ssize_t i
Py_ssize_t n, nlevels
int64_t k, cur, pre
ndarray arr
bint result = True
nlevels = len(list_of_arrays)
n = len(list_of_arrays[0])
cdef int64_t **vecs = <int64_t**>malloc(nlevels * sizeof(int64_t*))
for i in range(nlevels):
arr = list_of_arrays[i]
assert arr.dtype.name == 'int64'
vecs[i] = <int64_t*>PyArray_DATA(arr)
# Assume uniqueness??
with nogil:
for i in range(1, n):
for k in range(nlevels):
cur = vecs[k][i]
pre = vecs[k][i -1]
if cur == pre:
continue
elif cur > pre:
break
else:
result = False
break
free(vecs)
return result
@cython.wraparound(False)
@cython.boundscheck(False)
def branch_is_lexsorted(list_of_arrays: list) -> bint:
cdef:
Py_ssize_t i
Py_ssize_t n, nlevels
int64_t k, cur, pre
ndarray arr
bint result = True
nlevels = len(list_of_arrays)
n = len(list_of_arrays[0])
cdef int64_t **vecs = <int64_t**>malloc(nlevels * sizeof(int64_t*))
for i in range(nlevels):
arr = list_of_arrays[i]
assert arr.dtype.name == 'int64'
vecs[i] = <int64_t*>PyArray_DATA(arr)
# Assume uniqueness??
with nogil:
for i in range(1, n):
for k in range(nlevels):
cur = vecs[k][i]
pre = vecs[k][i -1]
if cur == pre:
continue
elif cur > pre:
break
else:
result = False
break
if not result:
break
free(vecs)
return result
%%timeit
main_is_lexsorted(failure)
# 16.6 µs ± 411 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)
%%timeit
branch_is_lexsorted(failure)
# 15.8 µs ± 113 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)
```
More extreme example:
```
failure = [np.ones((100_000,), dtype='int64')]
failure[0][1] = 0
%%timeit
main_is_lexsorted(failure)
# 556 µs ± 26.8 µs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)
%%timeit
branch_is_lexsorted(failure)
# 8.07 µs ± 72.3 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/47459 | 2022-06-22T10:13:39Z | 2022-06-24T02:11:20Z | 2022-06-24T02:11:20Z | 2022-06-24T06:41:25Z |
Backport PR #47431 on branch 1.4.x (Fix segmentation fault when JSON serializing a PeriodIndex) | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index a4d81533df23d..4034655ccd325 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -30,6 +30,7 @@ Fixed regressions
- Fixed regression in :func:`assert_index_equal` when ``check_order=False`` and :class:`Index` has extension or object dtype (:issue:`47207`)
- Fixed regression in :func:`read_excel` returning ints as floats on certain input sheets (:issue:`46988`)
- Fixed regression in :meth:`DataFrame.shift` when ``axis`` is ``columns`` and ``fill_value`` is absent, ``freq`` is ignored (:issue:`47039`)
+- Fixed regression in :meth:`DataFrame.to_json` causing a segmentation violation when :class:`DataFrame` is created with an ``index`` parameter of the type :class:`PeriodIndex` (:issue:`46683`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index c4609992342c3..5ad8029b38754 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -228,8 +228,10 @@ static PyObject *get_values(PyObject *obj) {
PyErr_Clear();
} else if (PyObject_HasAttrString(values, "__array__")) {
// We may have gotten a Categorical or Sparse array so call np.array
+ PyObject *array_values = PyObject_CallMethod(values, "__array__",
+ NULL);
Py_DECREF(values);
- values = PyObject_CallMethod(values, "__array__", NULL);
+ values = array_values;
} else if (!PyArray_CheckExact(values)) {
// Didn't get a numpy array, so keep trying
Py_DECREF(values);
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index 41a417f6b3ef4..982d751692eb9 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -24,6 +24,7 @@
DatetimeIndex,
Index,
NaT,
+ PeriodIndex,
Series,
Timedelta,
Timestamp,
@@ -1242,3 +1243,9 @@ def test_encode_timedelta_iso(self, td):
expected = f'"{td.isoformat()}"'
assert result == expected
+
+ def test_encode_periodindex(self):
+ # GH 46683
+ p = PeriodIndex(["2022-04-06", "2022-04-07"], freq="D")
+ df = DataFrame(index=p)
+ assert df.to_json() == "{}"
| Backport PR #47431: Fix segmentation fault when JSON serializing a PeriodIndex | https://api.github.com/repos/pandas-dev/pandas/pulls/47457 | 2022-06-22T09:18:45Z | 2022-06-22T11:07:32Z | 2022-06-22T11:07:32Z | 2022-06-22T11:07:32Z |
Cache Conda env | diff --git a/.github/actions/build_pandas/action.yml b/.github/actions/build_pandas/action.yml
index 39d5998b4ee74..23bb988ef4d73 100644
--- a/.github/actions/build_pandas/action.yml
+++ b/.github/actions/build_pandas/action.yml
@@ -6,8 +6,8 @@ runs:
- name: Environment Detail
run: |
- conda info
- conda list
+ micromamba info
+ micromamba list
shell: bash -el {0}
- name: Build Pandas
diff --git a/.github/actions/setup-conda/action.yml b/.github/actions/setup-conda/action.yml
index 87a0bd2ed1715..002d0020c2df1 100644
--- a/.github/actions/setup-conda/action.yml
+++ b/.github/actions/setup-conda/action.yml
@@ -6,8 +6,8 @@ inputs:
environment-name:
description: Name to use for the Conda environment
default: test
- python-version:
- description: Python version to install
+ extra-specs:
+ description: Extra packages to install
required: false
pyarrow-version:
description: If set, overrides the PyArrow version in the Conda environment to the given string.
@@ -24,14 +24,13 @@ runs:
if: ${{ inputs.pyarrow-version }}
- name: Install ${{ inputs.environment-file }}
- uses: conda-incubator/setup-miniconda@v2.1.1
+ uses: mamba-org/provision-with-micromamba@v12
with:
environment-file: ${{ inputs.environment-file }}
- activate-environment: ${{ inputs.environment-name }}
- python-version: ${{ inputs.python-version }}
- channel-priority: ${{ runner.os == 'macOS' && 'flexible' || 'strict' }}
+ environment-name: ${{ inputs.environment-name }}
+ extra-specs: ${{ inputs.extra-specs }}
channels: conda-forge
- mamba-version: "0.24"
- use-mamba: true
- use-only-tar-bz2: true
+ channel-priority: ${{ runner.os == 'macOS' && 'flexible' || 'strict' }}
condarc-file: ci/condarc.yml
+ cache-env: true
+ cache-downloads: true
diff --git a/.github/workflows/asv-bot.yml b/.github/workflows/asv-bot.yml
index 022c12cf6ff6c..dbf0ab0acb9ec 100644
--- a/.github/workflows/asv-bot.yml
+++ b/.github/workflows/asv-bot.yml
@@ -33,12 +33,6 @@ jobs:
with:
fetch-depth: 0
- - name: Cache conda
- uses: actions/cache@v3
- with:
- path: ~/conda_pkgs_dir
- key: ${{ runner.os }}-conda-${{ hashFiles('${{ env.ENV_FILE }}') }}
-
# Although asv sets up its own env, deps are still needed
# during discovery process
- name: Set up Conda
diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index e8f54b33a92c0..85a7f26d7b505 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -52,12 +52,6 @@ jobs:
with:
fetch-depth: 0
- - name: Cache conda
- uses: actions/cache@v3
- with:
- path: ~/conda_pkgs_dir
- key: ${{ runner.os }}-conda-${{ hashFiles('${{ env.ENV_FILE }}') }}
-
- name: Set up Conda
uses: ./.github/actions/setup-conda
@@ -115,12 +109,6 @@ jobs:
with:
fetch-depth: 0
- - name: Cache conda
- uses: actions/cache@v3
- with:
- path: ~/conda_pkgs_dir
- key: ${{ runner.os }}-conda-${{ hashFiles('${{ env.ENV_FILE }}') }}
-
- name: Set up Conda
uses: ./.github/actions/setup-conda
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml
index 5ae2280c5069f..89312cdaaa80a 100644
--- a/.github/workflows/sdist.yml
+++ b/.github/workflows/sdist.yml
@@ -62,9 +62,10 @@ jobs:
- name: Set up Conda
uses: ./.github/actions/setup-conda
with:
- environment-file: ""
+ environment-file: false
environment-name: pandas-sdist
- python-version: ${{ matrix.python-version }}
+ extra-specs: |
+ python =${{ matrix.python-version }}
- name: Install pandas from sdist
run: |
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 961ba57d36b94..8d6cae6278dcf 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -134,15 +134,6 @@ jobs:
with:
fetch-depth: 0
- - name: Cache conda
- uses: actions/cache@v3
- env:
- CACHE_NUMBER: 0
- with:
- path: ~/conda_pkgs_dir
- key: ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-${{
- hashFiles('${{ env.ENV_FILE }}') }}
-
- name: Extra installs
# xsel for clipboard tests
run: sudo apt-get update && sudo apt-get install -y xsel ${{ env.EXTRA_APT }}
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47454 | 2022-06-22T07:02:08Z | 2022-06-27T16:55:06Z | 2022-06-27T16:55:06Z | 2022-06-27T16:55:45Z |
Manual Backport PR #47287 on branch 1.4.x (DEPS: Sync environment.yml with CI dep files)" | diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index 87b80204d0c19..eaf9fafbff993 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -166,3 +166,32 @@ jobs:
- name: Build image
run: docker build --pull --no-cache --tag pandas-dev-env .
+
+ requirements-dev-text-installable:
+ name: Test install requirements-dev.txt
+ runs-on: ubuntu-latest
+
+ concurrency:
+ # https://github.community/t/concurrecy-not-work-for-push/183068/7
+ group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-requirements-dev-text-installable
+ cancel-in-progress: true
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Setup Python
+ id: setup_python
+ uses: actions/setup-python@v3
+ with:
+ python-version: '3.8'
+ cache: 'pip'
+ cache-dependency-path: 'requirements-dev.txt'
+
+ - name: Install requirements-dev.txt
+ run: pip install -r requirements-dev.txt
+
+ - name: Check Pip Cache Hit
+ run: echo ${{ steps.setup_python.outputs.cache-hit }}
diff --git a/.github/workflows/posix.yml b/.github/workflows/ubuntu.yml
similarity index 98%
rename from .github/workflows/posix.yml
rename to .github/workflows/ubuntu.yml
index 35c40f2a4aa54..1a57f021e6c4c 100644
--- a/.github/workflows/posix.yml
+++ b/.github/workflows/ubuntu.yml
@@ -1,4 +1,4 @@
-name: Posix
+name: Ubuntu
on:
push:
@@ -145,7 +145,7 @@ jobs:
- name: Extra installs
# xsel for clipboard tests
- run: sudo apt-get update && sudo apt-get install -y libc6-dev-i386 xsel ${{ env.EXTRA_APT }}
+ run: sudo apt-get update && sudo apt-get install -y xsel ${{ env.EXTRA_APT }}
- uses: conda-incubator/setup-miniconda@v2.1.1
with:
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml
index 7a879b5ac9648..27a2715c20e86 100644
--- a/ci/deps/actions-310.yaml
+++ b/ci/deps/actions-310.yaml
@@ -31,8 +31,7 @@ dependencies:
- jinja2
- lxml
- matplotlib
- # TODO: uncomment after numba supports py310
- #- numba
+ - numba
- numexpr
- openpyxl
- odfpy
diff --git a/environment.yml b/environment.yml
index 83b00c0dd6421..c5382811a8467 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,21 +1,85 @@
+# Local development dependencies including docs building, website upload, ASV benchmark
name: pandas-dev
channels:
- conda-forge
dependencies:
- # required
- - numpy>=1.18.5
- python=3.8
- - python-dateutil>=2.8.1
+
+ # test dependencies
+ - cython=0.29.30
+ - pytest>=6.0
+ - pytest-cov
+ - pytest-xdist>=1.31
+ - psutil
+ - pytest-asyncio>=0.17
+ - boto3
+
+ # required dependencies
+ - python-dateutil
+ - numpy
- pytz
+ # optional dependencies
+ - beautifulsoup4
+ - blosc
+ - brotlipy
+ - bottleneck
+ - fastparquet
+ - fsspec
+ - html5lib
+ - hypothesis
+ - gcsfs
+ - jinja2
+ - lxml
+ - matplotlib
+ - numba>=0.53.1
+ - numexpr>=2.8.0 # pin for "Run checks on imported code" job
+ - openpyxl
+ - odfpy
+ - pandas-gbq
+ - psycopg2
+ - pyarrow
+ - pymysql
+ - pyreadstat
+ - pytables
+ - python-snappy
+ - pyxlsb
+ - s3fs
+ - scipy
+ - sqlalchemy
+ - tabulate
+ - xarray
+ - xlrd
+ - xlsxwriter
+ - xlwt
+ - zstandard
+
+ # downstream packages
+ - aiobotocore<2.0.0 # GH#44311 pinned to fix docbuild
+ - botocore
+ - cftime
+ - dask
+ - ipython
+ - geopandas-base
+ - seaborn
+ - scikit-learn
+ - statsmodels
+ - coverage
+ - pandas-datareader
+ - pyyaml
+ - py
+ - pytorch
+
+ # local testing dependencies
+ - moto
+ - flask
+
# benchmarks
- asv
- # building
# The compiler packages are meta-packages and install the correct compiler (activation) packages on the respective platforms.
- c-compiler
- cxx-compiler
- - cython>=0.29.30
# code checks
- black=22.3.0
@@ -24,18 +88,19 @@ dependencies:
- flake8-bugbear=21.3.2 # used by flake8, find likely bugs
- flake8-comprehensions=3.7.0 # used by flake8, linting of unnecessary comprehensions
- isort>=5.2.1 # check that imports are in the right order
- - mypy=0.930
- - pre-commit>=2.9.2
+ - mypy=0.960
+ - pre-commit>=2.15.0
- pycodestyle # used by flake8
- pyupgrade
# documentation
- gitpython # obtain contributors from git for whatsnew
- gitdb
- - numpydoc < 1.2 # 2021-02-09 1.2dev breaking CI
+ - natsort # DataFrame.sort_values doctest
+ - numpydoc
- pandas-dev-flaker=0.4.0
- pydata-sphinx-theme=0.8.0
- - pytest-cython
+ - pytest-cython # doctest
- sphinx
- sphinx-panels
- types-python-dateutil
@@ -47,77 +112,14 @@ dependencies:
- nbconvert>=6.4.5
- nbsphinx
- pandoc
-
- # Dask and its dependencies (that dont install with dask)
- - dask-core
- - toolz>=0.7.3
- - partd>=0.3.10
- - cloudpickle>=0.2.1
-
- # web (jinja2 is also needed, but it's also an optional pandas dependency)
- - markdown
- - feedparser
- - pyyaml
- - requests
-
- # testing
- - boto3
- - botocore>=1.11
- - hypothesis>=5.5.3
- - moto # mock S3
- - flask
- - pytest>=6.0
- - pytest-cov
- - pytest-xdist>=1.31
- - pytest-asyncio>=0.17
- - pytest-instafail
-
- # downstream tests
- - seaborn
- - statsmodels
-
- # unused (required indirectly may be?)
- ipywidgets
- nbformat
- notebook>=6.0.3
-
- # optional
- - blosc
- - bottleneck>=1.3.1
- ipykernel
- - ipython>=7.11.1
- - jinja2 # pandas.Styler
- - matplotlib>=3.3.2 # pandas.plotting, Series.plot, DataFrame.plot
- - numexpr>=2.7.1
- - scipy>=1.4.1
- - numba>=0.50.1
-
- # optional for io
- # ---------------
- # pd.read_html
- - beautifulsoup4>=4.8.2
- - html5lib
- - lxml
-
- # pd.read_excel, DataFrame.to_excel, pd.ExcelWriter, pd.ExcelFile
- - openpyxl
- - xlrd
- - xlsxwriter
- - xlwt
- - odfpy
-
- - fastparquet>=0.4.0 # pandas.read_parquet, DataFrame.to_parquet
- - pyarrow>2.0.1 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather
- - python-snappy # required by pyarrow
- - pytables>=3.6.1 # pandas.read_hdf, DataFrame.to_hdf
- - s3fs>=0.4.0 # file IO when using 's3://...' path
- - aiobotocore<2.0.0 # GH#44311 pinned to fix docbuild
- - fsspec>=0.7.4 # for generic remote file operations
- - gcsfs>=0.6.0 # file IO when using 'gcs://...' path
- - sqlalchemy # pandas.read_sql, DataFrame.to_sql
- - xarray<0.19 # DataFrame.to_xarray
- - cftime # Needed for downstream xarray.CFTimeIndex test
- - pyreadstat # pandas.read_spss
- - tabulate>=0.8.3 # DataFrame.to_markdown
- - natsort # DataFrame.sort_values
+ # web
+ - jinja2 # in optional dependencies, but documented here as needed
+ - markdown
+ - feedparser
+ - pyyaml
+ - requests
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 32e3e19688a63..5493f84fb0be1 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1078,12 +1078,7 @@ def checked_add_with_arr(
elif arr_mask is not None:
not_nan = np.logical_not(arr_mask)
elif b_mask is not None:
- # Argument 1 to "__call__" of "_UFunc_Nin1_Nout1" has incompatible type
- # "Optional[ndarray[Any, dtype[bool_]]]"; expected
- # "Union[_SupportsArray[dtype[Any]], _NestedSequence[_SupportsArray[dtype[An
- # y]]], bool, int, float, complex, str, bytes, _NestedSequence[Union[bool,
- # int, float, complex, str, bytes]]]" [arg-type]
- not_nan = np.logical_not(b2_mask) # type: ignore[arg-type]
+ not_nan = np.logical_not(b2_mask)
else:
not_nan = np.empty(arr.shape, dtype=bool)
not_nan.fill(True)
diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py
index 64cd43a3e77cb..4b0db5eccb6f1 100644
--- a/pandas/core/array_algos/quantile.py
+++ b/pandas/core/array_algos/quantile.py
@@ -184,5 +184,8 @@ def _nanpercentile(
return result
else:
return np.percentile(
- values, qs, axis=1, **{np_percentile_argname: interpolation}
+ values,
+ qs,
+ axis=1,
+ **{np_percentile_argname: interpolation},
)
diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py
index b6e9bf1420b21..e241fc119ae02 100644
--- a/pandas/core/arraylike.py
+++ b/pandas/core/arraylike.py
@@ -265,7 +265,11 @@ def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any)
return result
# Determine if we should defer.
- no_defer = (np.ndarray.__array_ufunc__, cls.__array_ufunc__)
+ # error: "Type[ndarray[Any, Any]]" has no attribute "__array_ufunc__"
+ no_defer = (
+ np.ndarray.__array_ufunc__, # type: ignore[attr-defined]
+ cls.__array_ufunc__,
+ )
for item in inputs:
higher_priority = (
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 9a1435c3f033d..eb2c6927f56d1 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -661,10 +661,20 @@ def __getitem__(
if is_scalar(left) and isna(left):
return self._fill_value
return Interval(left, right, self.closed)
- # error: Argument 1 to "ndim" has incompatible type "Union[ndarray,
- # ExtensionArray]"; expected "Union[Union[int, float, complex, str, bytes,
- # generic], Sequence[Union[int, float, complex, str, bytes, generic]],
- # Sequence[Sequence[Any]], _SupportsArray]"
+ # error: Argument 1 to "ndim" has incompatible type
+ # "Union[ndarray[Any, Any], ExtensionArray]"; expected
+ # "Union[Sequence[Sequence[Sequence[Sequence[Sequence[Any]]]]],
+ # Union[Union[_SupportsArray[dtype[Any]],
+ # Sequence[_SupportsArray[dtype[Any]]],
+ # Sequence[Sequence[_SupportsArray[dtype[Any]]]],
+ # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]],
+ # Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]],
+ # Union[bool, int, float, complex, str, bytes,
+ # Sequence[Union[bool, int, float, complex, str, bytes]],
+ # Sequence[Sequence[Union[bool, int, float, complex, str, bytes]]],
+ # Sequence[Sequence[Sequence[Union[bool, int, float, complex, str, bytes]]]],
+ # Sequence[Sequence[Sequence[Sequence[Union[bool, int, float,
+ # complex, str, bytes]]]]]]]]"
if np.ndim(left) > 1: # type: ignore[arg-type]
# GH#30588 multi-dimensional indexer disallowed
raise ValueError("multi-dimensional indexing not allowed")
@@ -1639,13 +1649,7 @@ def isin(self, values) -> np.ndarray:
# complex128 ndarray is much more performant.
left = self._combined.view("complex128")
right = values._combined.view("complex128")
- # Argument 1 to "in1d" has incompatible type "Union[ExtensionArray,
- # ndarray[Any, Any], ndarray[Any, dtype[Any]]]"; expected
- # "Union[_SupportsArray[dtype[Any]], _NestedSequence[_SupportsArray[
- # dtype[Any]]], bool, int, float, complex, str, bytes,
- # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
- # [arg-type]
- return np.in1d(left, right) # type: ignore[arg-type]
+ return np.in1d(left, right)
elif needs_i8_conversion(self.left.dtype) ^ needs_i8_conversion(
values.left.dtype
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 7d232654e121e..f271e6c47222e 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -140,7 +140,13 @@ class BaseMaskedArray(OpsMixin, ExtensionArray):
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
# values is supposed to already be validated in the subclass
- if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_):
+ if not (
+ isinstance(mask, np.ndarray)
+ and
+ # error: Non-overlapping equality check
+ # (left operand type: "dtype[bool_]", right operand type: "Type[bool_]")
+ mask.dtype == np.bool_ # type: ignore[comparison-overlap]
+ ):
raise TypeError(
"mask should be boolean numpy array. Use "
"the 'pd.array' function instead"
@@ -943,11 +949,7 @@ def any(self, *, skipna: bool = True, **kwargs):
nv.validate_any((), kwargs)
values = self._data.copy()
- # Argument 3 to "putmask" has incompatible type "object"; expected
- # "Union[_SupportsArray[dtype[Any]], _NestedSequence[
- # _SupportsArray[dtype[Any]]], bool, int, float, complex, str, bytes, _Nested
- # Sequence[Union[bool, int, float, complex, str, bytes]]]" [arg-type]
- np.putmask(values, self._mask, self._falsey_value) # type: ignore[arg-type]
+ np.putmask(values, self._mask, self._falsey_value)
result = values.any()
if skipna:
return result
@@ -1023,11 +1025,7 @@ def all(self, *, skipna: bool = True, **kwargs):
nv.validate_all((), kwargs)
values = self._data.copy()
- # Argument 3 to "putmask" has incompatible type "object"; expected
- # "Union[_SupportsArray[dtype[Any]], _NestedSequence[
- # _SupportsArray[dtype[Any]]], bool, int, float, complex, str, bytes, _Neste
- # dSequence[Union[bool, int, float, complex, str, bytes]]]" [arg-type]
- np.putmask(values, self._mask, self._truthy_value) # type: ignore[arg-type]
+ np.putmask(values, self._mask, self._truthy_value)
result = values.all()
if skipna:
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index ebfa769eb559d..28501b53a4d02 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -925,15 +925,7 @@ def __getitem__(
if is_integer(key):
return self._get_val_at(key)
elif isinstance(key, tuple):
- # Invalid index type "Tuple[Union[int, ellipsis], ...]" for
- # "ndarray[Any, Any]"; expected type "Union[SupportsIndex,
- # _SupportsArray[dtype[Union[bool_, integer[Any]]]], _NestedSequence[_Su
- # pportsArray[dtype[Union[bool_, integer[Any]]]]],
- # _NestedSequence[Union[bool, int]], Tuple[Union[SupportsIndex,
- # _SupportsArray[dtype[Union[bool_, integer[Any]]]],
- # _NestedSequence[_SupportsArray[dtype[Union[bool_, integer[Any]]]]], _N
- # estedSequence[Union[bool, int]]], ...]]" [index]
- data_slice = self.to_dense()[key] # type: ignore[index]
+ data_slice = self.to_dense()[key]
elif isinstance(key, slice):
# Avoid densifying when handling contiguous slices
@@ -1173,9 +1165,7 @@ def _concat_same_type(
data = np.concatenate(values)
indices_arr = np.concatenate(indices)
- # Argument 2 to "IntIndex" has incompatible type "ndarray[Any,
- # dtype[signedinteger[_32Bit]]]"; expected "Sequence[int]"
- sp_index = IntIndex(length, indices_arr) # type: ignore[arg-type]
+ sp_index = IntIndex(length, indices_arr)
else:
# when concatenating block indices, we don't claim that you'll
@@ -1353,8 +1343,7 @@ def __setstate__(self, state):
if isinstance(state, tuple):
# Compat for pandas < 0.24.0
nd_state, (fill_value, sp_index) = state
- # Need type annotation for "sparse_values" [var-annotated]
- sparse_values = np.array([]) # type: ignore[var-annotated]
+ sparse_values = np.array([])
sparse_values.__setstate__(nd_state)
self._sparse_values = sparse_values
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 6776064342db0..8c3a032d93a2d 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -534,7 +534,9 @@ def is_string_or_object_np_dtype(dtype: np.dtype) -> bool:
"""
Faster alternative to is_string_dtype, assumes we have a np.dtype object.
"""
- return dtype == object or dtype.kind in "SU"
+ # error: Non-overlapping equality check (left operand type:
+ # "dtype[Any]", right operand type: "Type[object]")
+ return dtype == object or dtype.kind in "SU" # type: ignore[comparison-overlap]
def is_string_dtype(arr_or_dtype) -> bool:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 391c12905adae..61be23fcfb0f2 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2427,9 +2427,7 @@ def to_records(
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
- # Argument 1 to "append" of "list" has incompatible type
- # "Union[type, dtype[Any], str]"; expected "dtype[_SCT]" [arg-type]
- formats.append(dtype_mapping) # type: ignore[arg-type]
+ formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 7175b85e966d7..de83fe12007a2 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4552,12 +4552,7 @@ def _join_non_unique(
right = other._values.take(right_idx)
if isinstance(join_array, np.ndarray):
- # Argument 3 to "putmask" has incompatible type "Union[ExtensionArray,
- # ndarray[Any, Any]]"; expected "Union[_SupportsArray[dtype[Any]],
- # _NestedSequence[_SupportsArray[dtype[Any]]], bool, int, f
- # loat, complex, str, bytes, _NestedSequence[Union[bool, int, float,
- # complex, str, bytes]]]" [arg-type]
- np.putmask(join_array, mask, right) # type: ignore[arg-type]
+ np.putmask(join_array, mask, right)
else:
join_array._putmask(mask, right)
@@ -5057,11 +5052,9 @@ def __getitem__(self, key):
if result.ndim > 1:
deprecate_ndim_indexing(result)
if hasattr(result, "_ndarray"):
- # error: Item "ndarray[Any, Any]" of "Union[ExtensionArray,
- # ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr]
# i.e. NDArrayBackedExtensionArray
# Unpack to ndarray for MPL compat
- return result._ndarray # type: ignore[union-attr]
+ return result._ndarray
return result
# NB: Using _constructor._simple_new would break if MultiIndex
@@ -6602,9 +6595,7 @@ def insert(self, loc: int, item) -> Index:
new_values = np.insert(arr, loc, casted)
else:
- # No overload variant of "insert" matches argument types
- # "ndarray[Any, Any]", "int", "None" [call-overload]
- new_values = np.insert(arr, loc, None) # type: ignore[call-overload]
+ new_values = np.insert(arr, loc, None)
loc = loc if loc >= 0 else loc - 1
new_values[loc] = item
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 68db372ff4e51..4c65f50a444d7 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -365,9 +365,7 @@ def _validate_codes(self, level: list, code: list):
"""
null_mask = isna(level)
if np.any(null_mask):
- # Incompatible types in assignment (expression has type
- # "ndarray[Any, dtype[Any]]", variable has type "List[Any]")
- code = np.where(null_mask[code], -1, code) # type: ignore[assignment]
+ code = np.where(null_mask[code], -1, code)
return code
def _verify_integrity(self, codes: list | None = None, levels: list | None = None):
diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py
index 1160d3b2a8e3a..c938a018574f9 100644
--- a/pandas/core/internals/ops.py
+++ b/pandas/core/internals/ops.py
@@ -125,7 +125,9 @@ def _get_same_shape_values(
# argument type "Tuple[Union[ndarray, slice], slice]"
lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload]
assert lvals.shape[0] == 1, lvals.shape
- lvals = lvals[0, :]
+ # error: No overload variant of "__getitem__" of "ExtensionArray" matches
+ # argument type "Tuple[int, slice]"
+ lvals = lvals[0, :] # type: ignore[call-overload]
else:
# lvals are 1D, rvals are 2D
assert rvals.shape[0] == 1, rvals.shape
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index e09701e69c62c..d589a8fbbca70 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -333,15 +333,7 @@ def func(yvalues: np.ndarray) -> None:
**kwargs,
)
- # Argument 1 to "apply_along_axis" has incompatible type
- # "Callable[[ndarray[Any, Any]], None]"; expected
- # "Callable[..., Union[_SupportsArray[dtype[<nothing>]],
- # Sequence[_SupportsArray[dtype[<nothing>
- # ]]], Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]],
- # Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]],
- # Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]]]]]"
- # interp each column independently
- np.apply_along_axis(func, axis, data) # type: ignore[arg-type]
+ np.apply_along_axis(func, axis, data)
return
@@ -779,23 +771,14 @@ def interpolate_2d(
Modifies values in-place.
"""
if limit_area is not None:
- # Argument 1 to "apply_along_axis" has incompatible type "partial[None]";
- # expected "Callable[..., Union[_SupportsArray[dtype[<nothing>]],
- # Sequence[_SupportsArray[dtype[<nothing>]]], Sequence[Sequence
- # [_SupportsArray[dtype[<nothing>]]]],
- # Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]],
- # Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]]]]]"
-
- # Argument 2 to "apply_along_axis" has incompatible type "Union[str, int]";
- # expected "SupportsIndex" [arg-type]
np.apply_along_axis(
partial(
_interpolate_with_limit_area,
method=method,
limit=limit,
limit_area=limit_area,
- ), # type: ignore[arg-type]
- axis, # type: ignore[arg-type]
+ ),
+ axis,
values,
)
return
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index 262cd9774f694..aa426d24db75d 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -133,9 +133,7 @@ def melt(
if is_extension_array_dtype(id_data):
id_data = concat([id_data] * K, ignore_index=True)
else:
- # Incompatible types in assignment (expression has type
- # "ndarray[Any, dtype[Any]]", variable has type "Series") [assignment]
- id_data = np.tile(id_data._values, K) # type: ignore[assignment]
+ id_data = np.tile(id_data._values, K)
mdata[col] = id_data
mcolumns = id_vars + var_name + [value_name]
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 43ad67d36ad4b..b957e3a238c2e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1990,9 +1990,7 @@ def count(self, level=None):
lev = lev.insert(cnt, lev._na_value)
obs = level_codes[notna(self._values)]
- # Argument "minlength" to "bincount" has incompatible type "Optional[int]";
- # expected "SupportsIndex" [arg-type]
- out = np.bincount(obs, minlength=len(lev) or None) # type: ignore[arg-type]
+ out = np.bincount(obs, minlength=len(lev) or None)
return self._constructor(out, index=lev, dtype="int64").__finalize__(
self, method="count"
)
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 6d74c6db1f7ed..712495aadde56 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -354,7 +354,10 @@ def _prep_values(self, values: ArrayLike) -> np.ndarray:
if inf.any():
values = np.where(inf, np.nan, values)
- return values
+ # error: Incompatible return value type
+ # (got "Union[ExtensionArray, ndarray[Any, Any],
+ # ndarray[Any, dtype[floating[_64Bit]]]]", expected "ndarray[Any, Any]")
+ return values # type: ignore[return-value]
def _insert_on_column(self, result: DataFrame, obj: DataFrame) -> None:
# if we have an 'on' column we want to put it back into
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 4a8169c0609fd..5c0c4518bc2fb 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -3612,11 +3612,21 @@ def _highlight_between(
Return an array of css props based on condition of data values within given range.
"""
if np.iterable(left) and not isinstance(left, str):
+ # error: Argument 1 to "_validate_apply_axis_arg"
+ # has incompatible type "Union[str, float, Period,
+ # Timedelta, Interval[Any], datetime64, timedelta64,
+ # datetime, Sequence[Any], ndarray[Any, Any], NDFrame, None]";
+ # expected "Union[NDFrame, Sequence[Any], ndarray[Any, Any]]"
left = _validate_apply_axis_arg(
left, "left", None, data # type: ignore[arg-type]
)
if np.iterable(right) and not isinstance(right, str):
+ # error: Argument 1 to "_validate_apply_axis_arg"
+ # has incompatible type "Union[str, float, Period,
+ # Timedelta, Interval[Any], datetime64, timedelta64,
+ # datetime, Sequence[Any], ndarray[Any, Any], NDFrame, None]";
+ # expected "Union[NDFrame, Sequence[Any], ndarray[Any, Any]]"
right = _validate_apply_axis_arg(
right, "right", None, data # type: ignore[arg-type]
)
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index fc0f572c79e6b..b0e9c81132ee4 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -367,7 +367,7 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
numpy_dtypes, # type: ignore[arg-type]
[],
)
- if common_type == object:
+ if common_type == np.dtype(object):
warning_columns.append(str(name))
dtype = dtypes.pop()
@@ -384,14 +384,7 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
arrs # type: ignore[arg-type]
)
else:
- # Argument 1 to "concatenate" has incompatible type
- # "List[Union[ExtensionArray, ndarray[Any, Any]]]"; expected
- # "Union[_SupportsArray[dtype[Any]],
- # Sequence[_SupportsArray[dtype[Any]]],
- # Sequence[Sequence[_SupportsArray[dtype[Any]]]],
- # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]],
- # Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]]"
- result[name] = np.concatenate(arrs) # type: ignore[arg-type]
+ result[name] = np.concatenate(arrs)
if warning_columns:
warning_names = ",".join(warning_columns)
diff --git a/pandas/tests/extension/date/array.py b/pandas/tests/extension/date/array.py
index b14b9921be3d3..d29ed293e71ed 100644
--- a/pandas/tests/extension/date/array.py
+++ b/pandas/tests/extension/date/array.py
@@ -109,10 +109,7 @@ def __init__(
self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31)
self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12)
- # "object_" object is not iterable [misc]
- for (i,), (y, m, d) in np.ndenumerate( # type: ignore[misc]
- np.char.split(dates, sep="-")
- ):
+ for (i,), (y, m, d) in np.ndenumerate(np.char.split(dates, sep="-")):
self._year[i] = int(y)
self._month[i] = int(m)
self._day[i] = int(d)
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 05a9f0426440d..041b35e0ef2b2 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,24 +1,80 @@
# This file is auto-generated from environment.yml, do not modify.
# See that file for comments about the need/usage of each dependency.
-numpy>=1.18.5
-python-dateutil>=2.8.1
+cython==0.29.30
+pytest>=6.0
+pytest-cov
+pytest-xdist>=1.31
+psutil
+pytest-asyncio>=0.17
+boto3
+python-dateutil
+numpy
pytz
+beautifulsoup4
+blosc
+brotlipy
+bottleneck
+fastparquet
+fsspec
+html5lib
+hypothesis
+gcsfs
+jinja2
+lxml
+matplotlib
+numba>=0.53.1
+numexpr>=2.8.0
+openpyxl
+odfpy
+pandas-gbq
+psycopg2
+pyarrow
+pymysql
+pyreadstat
+tables
+python-snappy
+pyxlsb
+s3fs
+scipy
+sqlalchemy
+tabulate
+xarray
+xlrd
+xlsxwriter
+xlwt
+zstandard
+aiobotocore<2.0.0
+botocore
+cftime
+dask
+ipython
+geopandas
+seaborn
+scikit-learn
+statsmodels
+coverage
+pandas-datareader
+pyyaml
+py
+torch
+moto
+flask
asv
-cython>=0.29.30
black==22.3.0
cpplint
flake8==4.0.1
flake8-bugbear==21.3.2
flake8-comprehensions==3.7.0
isort>=5.2.1
-mypy==0.930
-pre-commit>=2.9.2
+mypy==0.960
+pre-commit>=2.15.0
pycodestyle
pyupgrade
gitpython
gitdb
-numpydoc < 1.2
+natsort
+numpydoc
pandas-dev-flaker==0.4.0
pydata-sphinx-theme==0.8.0
pytest-cython
@@ -31,58 +87,13 @@ types-setuptools
nbconvert>=6.4.5
nbsphinx
pandoc
-dask
-toolz>=0.7.3
-partd>=0.3.10
-cloudpickle>=0.2.1
-markdown
-feedparser
-pyyaml
-requests
-boto3
-botocore>=1.11
-hypothesis>=5.5.3
-moto
-flask
-pytest>=6.0
-pytest-cov
-pytest-xdist>=1.31
-pytest-asyncio>=0.17
-pytest-instafail
-seaborn
-statsmodels
ipywidgets
nbformat
notebook>=6.0.3
-blosc
-bottleneck>=1.3.1
ipykernel
-ipython>=7.11.1
jinja2
-matplotlib>=3.3.2
-numexpr>=2.7.1
-scipy>=1.4.1
-numba>=0.50.1
-beautifulsoup4>=4.8.2
-html5lib
-lxml
-openpyxl
-xlrd
-xlsxwriter
-xlwt
-odfpy
-fastparquet>=0.4.0
-pyarrow>2.0.1
-python-snappy
-tables>=3.6.1
-s3fs>=0.4.0
-aiobotocore<2.0.0
-fsspec>=0.7.4
-gcsfs>=0.6.0
-sqlalchemy
-xarray<0.19
-cftime
-pyreadstat
-tabulate>=0.8.3
-natsort
+markdown
+feedparser
+pyyaml
+requests
setuptools>=51.0.0
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index 2ea50fa3ac8d4..8cb539d3b02c8 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -21,7 +21,7 @@
import yaml
EXCLUDE = {"python", "c-compiler", "cxx-compiler"}
-RENAME = {"pytables": "tables", "dask-core": "dask"}
+RENAME = {"pytables": "tables", "geopandas-base": "geopandas", "pytorch": "torch"}
def conda_package_to_pip(package: str):
| PR #47287 | https://api.github.com/repos/pandas-dev/pandas/pulls/47453 | 2022-06-22T02:32:11Z | 2022-07-18T16:52:19Z | null | 2022-07-18T16:52:24Z |
DOC: Remove unused/flaky statsmodels intersphinx_mapping | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 49025288f0449..2a6ec8947c8d7 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -447,7 +447,6 @@
"py": ("https://pylib.readthedocs.io/en/latest/", None),
"python": ("https://docs.python.org/3/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/", None),
- "statsmodels": ("https://www.statsmodels.org/devel/", None),
"pyarrow": ("https://arrow.apache.org/docs/", None),
}
| The doc build can occasionally fail with
```
2022-06-21T19:46:59.4564919Z WARNING: failed to reach any of the inventories with the following issues:
2022-06-21T19:46:59.4567149Z intersphinx inventory 'https://www.statsmodels.org/devel/objects.inv' not fetchable due to <class 'requests.exceptions.ConnectionError'>: HTTPSConnectionPool(host='www.statsmodels.org', port=443): Max retries exceeded with url: /devel/objects.inv (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7fd05f1ee8b0>: Failed to establish a new connection: [Errno -3] Temporary failure in name resolution'))
```
Looking through our docs, we don't have a sphinx reference to the statsmodel docs so probably safe to remove to avoid flakiness.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47452 | 2022-06-21T20:50:05Z | 2022-06-21T22:00:43Z | 2022-06-21T22:00:43Z | 2022-06-21T22:00:46Z |
Improve error message for DataFrame.from_dict when wrong orient is provided | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index 39e9a55b5c384..70b451a231453 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -52,6 +52,7 @@ Bug fixes
~~~~~~~~~
- Bug in :func:`pandas.eval`, :meth:`DataFrame.eval` and :meth:`DataFrame.query` where passing empty ``local_dict`` or ``global_dict`` was treated as passing ``None`` (:issue:`47084`)
- Most I/O methods no longer suppress ``OSError`` and ``ValueError`` when closing file handles (:issue:`47136`)
+- Improving error message raised by :meth:`DataFrame.from_dict` when passing an invalid ``orient`` parameter (:issue:`47450`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 7b5c374dc25d9..b4a278185b01b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1720,7 +1720,10 @@ def from_dict(
if columns is not None:
raise ValueError(f"cannot use columns parameter with orient='{orient}'")
else: # pragma: no cover
- raise ValueError("only recognize index or columns for orient")
+ raise ValueError(
+ f"Expected 'index', 'columns' or 'tight' for orient parameter. "
+ f"Got '{orient}' instead"
+ )
if orient != "tight":
return cls(data, index=index, columns=columns, dtype=dtype)
@@ -1817,7 +1820,7 @@ def to_dict(self, orient: str = "dict", into=dict):
Parameters
----------
- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
+ orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
diff --git a/pandas/tests/frame/constructors/test_from_dict.py b/pandas/tests/frame/constructors/test_from_dict.py
index 72107d849f598..7c2b009673bb7 100644
--- a/pandas/tests/frame/constructors/test_from_dict.py
+++ b/pandas/tests/frame/constructors/test_from_dict.py
@@ -17,11 +17,6 @@ class TestFromDict:
# Note: these tests are specific to the from_dict method, not for
# passing dictionaries to DataFrame.__init__
- def test_from_dict_scalars_requires_index(self):
- msg = "If using all scalar values, you must pass an index"
- with pytest.raises(ValueError, match=msg):
- DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)]))
-
def test_constructor_list_of_odicts(self):
data = [
OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
@@ -189,3 +184,16 @@ def test_frame_dict_constructor_empty_series(self):
# it works!
DataFrame({"foo": s1, "bar": s2, "baz": s3})
DataFrame.from_dict({"foo": s1, "baz": s3, "bar": s2})
+
+ def test_from_dict_scalars_requires_index(self):
+ msg = "If using all scalar values, you must pass an index"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)]))
+
+ def test_from_dict_orient_invalid(self):
+ msg = (
+ "Expected 'index', 'columns' or 'tight' for orient parameter. "
+ "Got 'abc' instead"
+ )
+ with pytest.raises(ValueError, match=msg):
+ DataFrame.from_dict({"foo": 1, "baz": 3, "bar": 2}, orient="abc")
| - [X] closes #47450
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47451 | 2022-06-21T20:17:07Z | 2022-06-24T10:24:57Z | 2022-06-24T10:24:56Z | 2022-06-25T04:55:40Z |
Backport PR #47327 on branch 1.4.x (REGR: Fix fillna making a copy when dict was given as fill value and inplace is set) | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index a4d81533df23d..d031426a2abbf 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -18,6 +18,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.to_csv` raising error when :class:`DataFrame` contains extension dtype categorical column (:issue:`46297`, :issue:`46812`)
- Fixed regression in representation of ``dtypes`` attribute of :class:`MultiIndex` (:issue:`46900`)
- Fixed regression when setting values with :meth:`DataFrame.loc` updating :class:`RangeIndex` when index was set as new column and column was updated afterwards (:issue:`47128`)
+- Fixed regression in :meth:`DataFrame.fillna` and :meth:`DataFrame.update` creating a copy when updating inplace (:issue:`47188`)
- Fixed regression in :meth:`DataFrame.nsmallest` led to wrong results when ``np.nan`` in the sorting column (:issue:`46589`)
- Fixed regression in :func:`read_fwf` raising ``ValueError`` when ``widths`` was specified with ``usecols`` (:issue:`46580`)
- Fixed regression in :func:`concat` not sorting columns for mixed column names (:issue:`47127`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 391c12905adae..5b25f5be01d29 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7594,7 +7594,7 @@ def update(
if mask.all():
continue
- self[col] = expressions.where(mask, this, that)
+ self.loc[:, col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d924093203d7e..6357a670e6ba6 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6482,7 +6482,9 @@ def fillna(
if k not in result:
continue
downcast_k = downcast if not is_dict else downcast.get(k)
- result[k] = result[k].fillna(v, limit=limit, downcast=downcast_k)
+ result.loc[:, k] = result[k].fillna(
+ v, limit=limit, downcast=downcast_k
+ )
return result if not inplace else None
elif not is_list_like(value):
diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py
index f4957efcd228a..33bd32ad65371 100644
--- a/pandas/tests/frame/methods/test_fillna.py
+++ b/pandas/tests/frame/methods/test_fillna.py
@@ -265,6 +265,7 @@ def test_fillna_downcast_false(self, frame_or_series):
result = obj.fillna("", downcast=False)
tm.assert_equal(result, obj)
+ @td.skip_array_manager_invalid_test
@pytest.mark.parametrize("columns", [["A", "A", "B"], ["A", "A"]])
def test_fillna_dictlike_value_duplicate_colnames(self, columns):
# GH#43476
@@ -654,6 +655,17 @@ def test_fillna_inplace_with_columns_limit_and_value(self):
df.fillna(axis=1, value=100, limit=1, inplace=True)
tm.assert_frame_equal(df, expected)
+ @td.skip_array_manager_invalid_test
+ @pytest.mark.parametrize("val", [-1, {"x": -1, "y": -1}])
+ def test_inplace_dict_update_view(self, val):
+ # GH#47188
+ df = DataFrame({"x": [np.nan, 2], "y": [np.nan, 2]})
+ result_view = df[:]
+ df.fillna(val, inplace=True)
+ expected = DataFrame({"x": [-1, 2.0], "y": [-1.0, 2]})
+ tm.assert_frame_equal(df, expected)
+ tm.assert_frame_equal(result_view, expected)
+
def test_fillna_nonconsolidated_frame():
# https://github.com/pandas-dev/pandas/issues/36495
diff --git a/pandas/tests/frame/methods/test_update.py b/pandas/tests/frame/methods/test_update.py
index 408113e9bc417..d3257ac09a0ab 100644
--- a/pandas/tests/frame/methods/test_update.py
+++ b/pandas/tests/frame/methods/test_update.py
@@ -1,6 +1,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
import pandas as pd
from pandas import (
DataFrame,
@@ -146,3 +148,14 @@ def test_update_with_different_dtype(self):
expected = DataFrame({"a": [1, 3], "b": [np.nan, 2], "c": ["foo", np.nan]})
tm.assert_frame_equal(df, expected)
+
+ @td.skip_array_manager_invalid_test
+ def test_update_modify_view(self):
+ # GH#47188
+ df = DataFrame({"A": ["1", np.nan], "B": ["100", np.nan]})
+ df2 = DataFrame({"A": ["a", "x"], "B": ["100", "200"]})
+ result_view = df2[:]
+ df2.update(df)
+ expected = DataFrame({"A": ["1", "x"], "B": ["100", "200"]})
+ tm.assert_frame_equal(df2, expected)
+ tm.assert_frame_equal(result_view, expected)
| Backport PR #47327 | https://api.github.com/repos/pandas-dev/pandas/pulls/47448 | 2022-06-21T18:47:56Z | 2022-06-22T23:31:10Z | 2022-06-22T23:31:10Z | 2022-06-22T23:31:14Z |
Deprecate non-keyword arguments for rsplit | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index a5cb716317689..4c23e5e5b1906 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -705,6 +705,7 @@ Other Deprecations
- Deprecated the ``closed`` argument in :meth:`interval_range` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`)
- Deprecated the methods :meth:`DataFrame.mad`, :meth:`Series.mad`, and the corresponding groupby methods (:issue:`11787`)
- Deprecated positional arguments to :meth:`Index.join` except for ``other``, use keyword-only arguments instead of positional arguments (:issue:`46518`)
+- Deprecated positional arguments to :meth:`StringMethods.rsplit` and :meth:`StringMethods.split` except for ``pat``, use keyword-only arguments instead of positional arguments (:issue:`47423`)
- Deprecated indexing on a timezone-naive :class:`DatetimeIndex` using a string representing a timezone-aware datetime (:issue:`46903`, :issue:`36148`)
- Deprecated the ``closed`` argument in :class:`Interval` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`)
- Deprecated the ``closed`` argument in :class:`IntervalIndex` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`)
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index abd380299ba02..ae92926195273 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -18,7 +18,10 @@
DtypeObj,
F,
)
-from pandas.util._decorators import Appender
+from pandas.util._decorators import (
+ Appender,
+ deprecate_nonkeyword_arguments,
+)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
@@ -843,6 +846,7 @@ def cat(
""",
}
)
+ @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "pat"])
@forbid_nonstring_types(["bytes"])
def split(
self,
@@ -874,6 +878,7 @@ def split(
"regex_examples": "",
}
)
+ @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "pat"])
@forbid_nonstring_types(["bytes"])
def rsplit(self, pat=None, n=-1, expand=False):
result = self._data.array._str_rsplit(pat, n=n)
diff --git a/pandas/tests/strings/test_split_partition.py b/pandas/tests/strings/test_split_partition.py
index 74458c13e8df7..7d73414a672c8 100644
--- a/pandas/tests/strings/test_split_partition.py
+++ b/pandas/tests/strings/test_split_partition.py
@@ -130,6 +130,23 @@ def test_rsplit_max_number(any_string_dtype):
tm.assert_series_equal(result, exp)
+@pytest.mark.parametrize("method", ["split", "rsplit"])
+def test_posargs_deprecation(method):
+ # GH 47423; Deprecate passing n as positional.
+ s = Series(["foo,bar,lorep"])
+
+ msg = (
+ f"In a future version of pandas all arguments of StringMethods.{method} "
+ "except for the argument 'pat' will be keyword-only"
+ )
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = getattr(s.str, method)(",", 3)
+
+ expected = Series([["foo", "bar", "lorep"]])
+ tm.assert_series_equal(result, expected)
+
+
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
| - [x] closes #47423
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47446 | 2022-06-21T17:26:03Z | 2022-06-23T20:49:58Z | 2022-06-23T20:49:58Z | 2022-06-23T20:49:58Z |
Backport PR #47318 on branch 1.4.x (CI: Pin PYTEST_WORKERS=1 for Windows builds due to memory errors) | diff --git a/.github/workflows/macos-windows.yml b/.github/workflows/macos-windows.yml
index 560a421ec74ec..923ac8f2e0fd6 100644
--- a/.github/workflows/macos-windows.yml
+++ b/.github/workflows/macos-windows.yml
@@ -15,7 +15,6 @@ on:
env:
PANDAS_CI: 1
PYTEST_TARGET: pandas
- PYTEST_WORKERS: auto
PATTERN: "not slow and not db and not network and not single_cpu"
@@ -36,6 +35,9 @@ jobs:
# https://github.community/t/concurrecy-not-work-for-push/183068/7
group: ${{ github.event_name == 'push' && github.run_number || github.ref }}-${{ matrix.env_file }}-${{ matrix.os }}
cancel-in-progress: true
+ env:
+ # GH 47443: PYTEST_WORKERS > 1 crashes Windows builds with memory related errors
+ PYTEST_WORKERS: ${{ matrix.os == 'macos-latest' && 'auto' || '1' }}
steps:
- name: Checkout
| Backport PR #47318: CI: Pin PYTEST_WORKERS=1 for Windows builds due to memory errors | https://api.github.com/repos/pandas-dev/pandas/pulls/47445 | 2022-06-21T17:01:53Z | 2022-06-21T18:41:45Z | 2022-06-21T18:41:45Z | 2022-06-21T18:41:45Z |
PDEP-1: Purpose and guidelines for pandas enhancement proposals | diff --git a/web/pandas/about/roadmap.md b/web/pandas/about/roadmap.md
index 3c6c4d4fdf9a2..6e922d01518ba 100644
--- a/web/pandas/about/roadmap.md
+++ b/web/pandas/about/roadmap.md
@@ -15,10 +15,35 @@ fundamental changes to the project that are likely to take months or
years of developer time. Smaller-scoped items will continue to be
tracked on our [issue tracker](https://github.com/pandas-dev/pandas/issues).
-See [Roadmap evolution](#roadmap-evolution) for proposing
-changes to this document.
+The roadmap is defined as a set of major enhancement proposals named PDEPs.
+For more information about PDEPs, and how to submit one, please refer to
+[PEDP-1](/pdeps/accepted/0001-puropose-and-guidelines.html).
-## Extensibility
+## PDEPs
+
+{% for pdep_type in ["Under discussion", "Accepted", "Implemented", "Rejected"] %}
+
+<h3 id="pdeps-{{pdep_type}}">{{ pdep_type.replace("_", " ").capitalize() }}</h3>
+
+<ul>
+{% for pdep in pdeps[pdep_type] %}
+ <li><a href="{{ pdep.url }}">{{ pdep.title }}</a></li>
+{% else %}
+ <li>There are currently no PDEPs with this status</li>
+{% endfor %}
+</ul>
+
+{% endfor %}
+
+## Roadmap points pending a PDEP
+
+<div class="alert alert-warning" role="alert">
+ pandas is in the process of moving roadmap points to PDEPs (implemented in
+ August 2022). During the transition, some roadmap points will exist as PDEPs,
+ while others will exist as sections below.
+</div>
+
+### Extensibility
Pandas `extending.extension-types` allow
for extending NumPy types with custom data types and array storage.
@@ -33,7 +58,7 @@ library, making their behavior more consistent with the handling of
NumPy arrays. We'll do this by cleaning up pandas' internals and
adding new methods to the extension array interface.
-## String data type
+### String data type
Currently, pandas stores text data in an `object` -dtype NumPy array.
The current implementation has two primary drawbacks: First, `object`
@@ -54,7 +79,7 @@ work, we may need to implement certain operations expected by pandas
users (for example the algorithm used in, `Series.str.upper`). That work
may be done outside of pandas.
-## Apache Arrow interoperability
+### Apache Arrow interoperability
[Apache Arrow](https://arrow.apache.org) is a cross-language development
platform for in-memory data. The Arrow logical types are closely aligned
@@ -65,7 +90,7 @@ data types within pandas. This will let us take advantage of its I/O
capabilities and provide for better interoperability with other
languages and libraries using Arrow.
-## Block manager rewrite
+### Block manager rewrite
We'd like to replace pandas current internal data structures (a
collection of 1 or 2-D arrays) with a simpler collection of 1-D arrays.
@@ -92,7 +117,7 @@ See [these design
documents](https://dev.pandas.io/pandas2/internal-architecture.html#removal-of-blockmanager-new-dataframe-internals)
for more.
-## Decoupling of indexing and internals
+### Decoupling of indexing and internals
The code for getting and setting values in pandas' data structures
needs refactoring. In particular, we must clearly separate code that
@@ -150,7 +175,7 @@ which are actually expected (typically `KeyError`).
and when small differences in behavior are expected (e.g. getting with `.loc` raises for
missing labels, setting still doesn't), they can be managed with a specific parameter.
-## Numba-accelerated operations
+### Numba-accelerated operations
[Numba](https://numba.pydata.org) is a JIT compiler for Python code.
We'd like to provide ways for users to apply their own Numba-jitted
@@ -162,7 +187,7 @@ window contexts). This will improve the performance of
user-defined-functions in these operations by staying within compiled
code.
-## Documentation improvements
+### Documentation improvements
We'd like to improve the content, structure, and presentation of the
pandas documentation. Some specific goals include
@@ -177,7 +202,7 @@ pandas documentation. Some specific goals include
subsections of the documentation to make navigation and finding
content easier.
-## Performance monitoring
+### Performance monitoring
Pandas uses [airspeed velocity](https://asv.readthedocs.io/en/stable/)
to monitor for performance regressions. ASV itself is a fabulous tool,
@@ -197,29 +222,3 @@ We'd like to fund improvements and maintenance of these tools to
<https://pyperf.readthedocs.io/en/latest/system.html>
- Build a GitHub bot to request ASV runs *before* a PR is merged.
Currently, the benchmarks are only run nightly.
-
-## Roadmap Evolution
-
-Pandas continues to evolve. The direction is primarily determined by
-community interest. Everyone is welcome to review existing items on the
-roadmap and to propose a new item.
-
-Each item on the roadmap should be a short summary of a larger design
-proposal. The proposal should include
-
-1. Short summary of the changes, which would be appropriate for
- inclusion in the roadmap if accepted.
-2. Motivation for the changes.
-3. An explanation of why the change is in scope for pandas.
-4. Detailed design: Preferably with example-usage (even if not
- implemented yet) and API documentation
-5. API Change: Any API changes that may result from the proposal.
-
-That proposal may then be submitted as a GitHub issue, where the pandas
-maintainers can review and comment on the design. The [pandas mailing
-list](https://mail.python.org/mailman/listinfo/pandas-dev) should be
-notified of the proposal.
-
-When there's agreement that an implementation would be welcome, the
-roadmap should be updated to include the summary and a link to the
-discussion issue.
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index 1330addf9a229..aa4deaea98a6c 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -11,6 +11,7 @@ main:
- pandas_web.Preprocessors.blog_add_posts
- pandas_web.Preprocessors.maintainers_add_info
- pandas_web.Preprocessors.home_add_releases
+ - pandas_web.Preprocessors.roadmap_pdeps
markdown_extensions:
- toc
- tables
@@ -177,3 +178,5 @@ sponsors:
- name: "Gousto"
url: https://www.gousto.co.uk/
kind: partner
+roadmap:
+ pdeps_path: pdeps
diff --git a/web/pandas/pdeps/0001-purpose-and-guidelines.md b/web/pandas/pdeps/0001-purpose-and-guidelines.md
new file mode 100644
index 0000000000000..085f675974b2e
--- /dev/null
+++ b/web/pandas/pdeps/0001-purpose-and-guidelines.md
@@ -0,0 +1,128 @@
+# PDEP-1: Purpose and guidelines
+
+- Created: 3 August 2022
+- Status: Under discussion
+- Discussion: [#47444](https://github.com/pandas-dev/pandas/pull/47444)
+- Author: [Marc Garcia](https://github.com/datapythonista)
+- Revision: 1
+
+## PDEP definition, purpose and scope
+
+A PDEP (pandas enhancement proposal) is a proposal for a **major** change in
+pandas, in a similar way as a Python [PEP](https://peps.python.org/pep-0001/)
+or a NumPy [NEP](https://numpy.org/neps/nep-0000.html).
+
+Bug fixes and conceptually minor changes (e.g. adding a parameter to a function)
+are out of the scope of PDEPs. A PDEP should be used for changes that are not
+immediate and not obvious, and are expected to require a significant amount of
+discussion and require detailed documentation before being implemented.
+
+PDEP are appropriate for user facing changes, internal changes and organizational
+discussions. Examples of topics worth a PDEP could include moving a module from
+pandas to a separate repository, a refactoring of the pandas block manager or
+a proposal of a new code of conduct.
+
+## PDEP guidelines
+
+### Target audience
+
+A PDEP is a public document available to anyone, but the main stakeholders to
+consider when writing a PDEP are:
+
+- The core development team, who will have the final decision on whether a PDEP
+ is approved or not
+- Contributors to pandas and other related projects, and experienced users. Their
+ feedback is highly encouraged and appreciated, to make sure all points of views
+ are taken into consideration
+- The wider pandas community, in particular users, who may or may not have feedback
+ on the proposal, but should know and be able to understand the future direction of
+ the project
+
+### PDEP authors
+
+Anyone can propose a PDEP, but in most cases developers of pandas itself and related
+projects are expected to author PDEPs. If you are unsure if you should be opening
+an issue or creating a PDEP, it's probably safe to start by
+[opening an issue](https://github.com/pandas-dev/pandas/issues/new/choose), which can
+be eventually moved to a PDEP.
+
+### Workflow
+
+The possible states of a PDEP are:
+
+- Under discussion
+- Accepted
+- Implemented
+- Rejected
+
+Next is described the workflow that PDEPs can follow.
+
+#### Submitting a PDEP
+
+Proposing a PDEP is done by creating a PR adding a new file to `web/pdeps/`.
+The file is a markdown file, you can use `web/pdeps/0001.md` as a reference
+for the expected format.
+
+The initial status of a PDEP will be `Status: Under discussion`. This will be changed
+to `Status: Accepted` when the PDEP is ready and have the approval of the core team.
+
+#### Accepted PDEP
+
+A PDEP can only be accepted by the core development team, if the proposal is considered
+worth implementing. Decisions will be made based on the process detailed in the
+[pandas governance document](https://github.com/pandas-dev/pandas-governance/blob/master/governance.md).
+In general, more than one approval will be needed before the PR is merged. And
+there should not be any `Request changes` review at the time of merging.
+
+Once a PDEP is accepted, any contributions can be made toward the implementation of the PDEP,
+with an open-ended completion timeline. Development of pandas is difficult to understand and
+forecast, being that the contributors to pandas are a mix of volunteers and developers paid from different sources,
+with different priorities. For companies, institutions or individuals with interest in seeing a
+PDEP being implemented, or to in general see progress to the pandas roadmap, please check how
+you can help in the [contributing page](/contribute.html).
+
+#### Implemented PDEP
+
+Once a PDEP is implemented and available in the main branch of pandas, its
+status will be changed to `Status: Implemented`, so there is visibility that the PDEP
+is not part of the roadmap and future plans, but a change that has already
+happened. The first pandas version in which the PDEP implementation is
+available will also be included in the PDEP header with for example
+`Implemented: v2.0.0`.
+
+#### Rejected PDEP
+
+A PDEP can be rejected when the final decision is that its implementation is
+not in the best interests of the project. Rejected PDEPs are as useful as accepted
+PDEPs, since there are discussions that are worth having, and decisions about
+changes to pandas being made. They will be merged with `Status: Rejected`, so
+there is visibility on what was discussed and what was the outcome of the
+discussion. A PDEP can be rejected for different reasons, for example good ideas
+that aren't backward-compatible, and the breaking changes aren't considered worth
+implementing.
+
+#### Invalid PDEP
+
+For submitted PDEPs that do not contain proper documentation, are out of scope, or
+are not useful to the community for any other reason, the PR will be closed after
+discussion with the author, instead of merging them as rejected. This is to avoid
+adding noise to the list of rejected PDEPs, which should contain documentation as
+good as an accepted PDEP, but where the final decision was to not implement the changes.
+
+## Evolution of PDEPs
+
+Most PDEPs aren't expected to change after accepted. Once there is agreement in the changes,
+and they are implemented, the PDEP will be only useful to understand why the development happened,
+and the details of the discussion.
+
+But in some cases, a PDEP can be updated. For example, a PDEP defining a procedure or
+a policy, like this one (PDEP-1). Or cases when after attempting the implementation,
+new knowledge is obtained that makes the original PDEP obsolete, and changes are
+required. When there are specific changes to be made to the original PDEP, this will
+be edited, its `Revision: X` label will be increased by one, and a note will be added
+to the `PDEP-N history` section. This will let readers understand that the PDEP has
+changed and avoid confusion.
+
+### PDEP-1 History
+
+- 3 August 2022: Initial version
diff --git a/web/pandas/static/css/pandas.css b/web/pandas/static/css/pandas.css
index d5112dd220355..96ea6a6f2ae52 100644
--- a/web/pandas/static/css/pandas.css
+++ b/web/pandas/static/css/pandas.css
@@ -8,15 +8,19 @@ h1 {
color: #130654;
}
h2 {
- font-size: 1.45rem;
+ font-size: 1.8rem;
font-weight: 700;
- color: black;
+ color: #130654;
}
h3 {
font-size: 1.3rem;
font-weight: 600;
color: black;
}
+h3 a {
+ color: black;
+ text-decoration: underline dotted !important;
+}
a {
color: #130654;
}
diff --git a/web/pandas_web.py b/web/pandas_web.py
index 7dd63175e69ac..16e9024d8d1d8 100755
--- a/web/pandas_web.py
+++ b/web/pandas_web.py
@@ -24,10 +24,12 @@
The rest of the items in the file will be added directly to the context.
"""
import argparse
+import collections
import datetime
import importlib
import operator
import os
+import pathlib
import re
import shutil
import sys
@@ -185,6 +187,61 @@ def home_add_releases(context):
)
return context
+ @staticmethod
+ def roadmap_pdeps(context):
+ """
+ PDEP's (pandas enhancement proposals) are not part of the bar
+ navigation. They are included as lists in the "Roadmap" page
+ and linked from there. This preprocessor obtains the list of
+ PDEP's in different status from the directory tree and GitHub.
+ """
+ KNOWN_STATUS = {"Under discussion", "Accepted", "Implemented", "Rejected"}
+ context["pdeps"] = collections.defaultdict(list)
+
+ # accepted, rejected and implemented
+ pdeps_path = (
+ pathlib.Path(context["source_path"]) / context["roadmap"]["pdeps_path"]
+ )
+ for pdep in sorted(pdeps_path.iterdir()):
+ if pdep.suffix != ".md":
+ continue
+ with pdep.open() as f:
+ title = f.readline()[2:] # removing markdown title "# "
+ status = None
+ for line in f:
+ if line.startswith("- Status: "):
+ status = line.strip().split(": ", 1)[1]
+ break
+ if status not in KNOWN_STATUS:
+ raise RuntimeError(
+ f'PDEP "{pdep}" status "{status}" is unknown. '
+ f"Should be one of: {KNOWN_STATUS}"
+ )
+ html_file = pdep.with_suffix(".html").name
+ context["pdeps"][status].append(
+ {
+ "title": title,
+ "url": f"/pdeps/{html_file}",
+ }
+ )
+
+ # under discussion
+ github_repo_url = context["main"]["github_repo_url"]
+ resp = requests.get(
+ "https://api.github.com/search/issues?"
+ f"q=is:pr is:open label:PDEP repo:{github_repo_url}"
+ )
+ if context["ignore_io_errors"] and resp.status_code == 403:
+ return context
+ resp.raise_for_status()
+
+ for pdep in resp.json()["items"]:
+ context["pdeps"]["under_discussion"].append(
+ {"title": pdep["title"], "url": pdep["url"]}
+ )
+
+ return context
+
def get_callable(obj_as_str: str) -> object:
"""
| Closes #28568
Initial PDEP to define purpose and guidelines for pandas enhancement proposals (equivalent to PEPs or NEPs). **Feedback very welcome**.
This PR also makes the PDEPs public in the [roadmap page](https://pandas.pydata.org/about/roadmap.html) of our website.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47444 | 2022-06-21T08:40:26Z | 2022-08-03T05:53:58Z | 2022-08-03T05:53:58Z | 2022-08-03T09:16:38Z |
CI: start testing Python 3.11 | diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml
index d93b92a9662ec..580cafd6e4949 100644
--- a/.github/workflows/python-dev.yml
+++ b/.github/workflows/python-dev.yml
@@ -1,9 +1,21 @@
-# This file is purposely frozen(does not run). DO NOT DELETE IT
-# Unfreeze(by commentingthe if: false() condition) once the
-# next Python Dev version has released beta 1 and both Cython and numpy support it
-# After that Python has released, migrate the workflows to the
-# posix GHA workflows and "freeze" this file by
-# uncommenting the if: false() condition
+# This workflow may or may not run depending on the state of the next
+# unreleased Python version. DO NOT DELETE IT.
+#
+# In general, this file will remain frozen(present, but not running) until:
+# - The next unreleased Python version has released beta 1
+# - This version should be available on Github Actions.
+# - Our required build/runtime dependencies(numpy, pytz, Cython, python-dateutil)
+# support that unreleased Python version.
+# To unfreeze, comment out the ``if: false`` condition, and make sure you update
+# the name of the workflow and Python version in actions/setup-python to: '3.12-dev'
+#
+# After it has been unfrozen, this file should remain unfrozen(present, and running) until:
+# - The next Python version has been officially released.
+# OR
+# - Most/All of our optional dependencies support Python 3.11 AND
+# - The next Python version has released a rc(we are guaranteed a stable ABI).
+# To freeze this file, uncomment out the ``if: false`` condition, and migrate the jobs
+# to the corresponding posix/windows-macos/sdist etc. workflows.
# Feel free to modify this comment as necessary.
name: Python Dev
@@ -32,7 +44,7 @@ permissions:
jobs:
build:
- if: false # Comment this line out to "unfreeze"
+ # if: false # Uncomment this to freeze the workflow, comment it to unfreeze
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
@@ -53,27 +65,27 @@ jobs:
fetch-depth: 0
- name: Set up Python Dev Version
- uses: actions/setup-python@v3
+ uses: actions/setup-python@v4
with:
python-version: '3.11-dev'
- name: Install dependencies
- shell: bash -el {0}
run: |
- python3 -m pip install --upgrade pip setuptools wheel
- python3 -m pip install -i https://pypi.anaconda.org/scipy-wheels-nightly/simple numpy
- python3 -m pip install git+https://github.com/nedbat/coveragepy.git
- python3 -m pip install cython python-dateutil pytz hypothesis pytest>=6.2.5 pytest-xdist pytest-cov pytest-asyncio>=0.17
- python3 -m pip list
+ python --version
+ python -m pip install --upgrade pip setuptools wheel
+ python -m pip install git+https://github.com/numpy/numpy.git
+ python -m pip install git+https://github.com/nedbat/coveragepy.git
+ python -m pip install python-dateutil pytz cython hypothesis==6.52.1 pytest>=6.2.5 pytest-xdist pytest-cov pytest-asyncio>=0.17
+ python -m pip list
- name: Build Pandas
run: |
- python3 setup.py build_ext -q -j2
- python3 -m pip install -e . --no-build-isolation --no-use-pep517
+ python setup.py build_ext -q -j2
+ python -m pip install -e . --no-build-isolation --no-use-pep517
- name: Build Version
run: |
- python3 -c "import pandas; pandas.show_versions();"
+ python -c "import pandas; pandas.show_versions();"
- name: Test
uses: ./.github/actions/run-tests
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 91d05ea66402b..80f66c945ba27 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -36,6 +36,7 @@
PY39 = sys.version_info >= (3, 9)
PY310 = sys.version_info >= (3, 10)
+PY311 = sys.version_info >= (3, 11)
PYPY = platform.python_implementation() == "PyPy"
IS64 = sys.maxsize > 2**32
diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py
index 3d5f1d3733254..f0669f52acee2 100644
--- a/pandas/tests/arrays/categorical/test_api.py
+++ b/pandas/tests/arrays/categorical/test_api.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+from pandas.compat import PY311
+
from pandas import (
Categorical,
CategoricalIndex,
@@ -61,7 +63,11 @@ def test_set_ordered(self):
assert not cat2.ordered
# removed in 0.19.0
- msg = "can't set attribute"
+ msg = (
+ "property 'ordered' of 'Categorical' object has no setter"
+ if PY311
+ else "can't set attribute"
+ )
with pytest.raises(AttributeError, match=msg):
cat.ordered = True
with pytest.raises(AttributeError, match=msg):
@@ -515,7 +521,12 @@ def test_codes_immutable(self):
tm.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
- with pytest.raises(AttributeError, match="can't set attribute"):
+ msg = (
+ "property 'codes' of 'Categorical' object has no setter"
+ if PY311
+ else "can't set attribute"
+ )
+ with pytest.raises(AttributeError, match=msg):
c.codes = np.array([0, 1, 2, 0, 1], dtype="int8")
# changes in the codes array should raise
diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index 086cb18dbe463..1d161630b1356 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -1137,7 +1137,10 @@ def test_timestamp_constructor_retain_fold(tz, fold):
_tzs = ["dateutil/Europe/London"]
if PY39:
- _tzs = ["dateutil/Europe/London", zoneinfo.ZoneInfo("Europe/London")]
+ try:
+ _tzs = ["dateutil/Europe/London", zoneinfo.ZoneInfo("Europe/London")]
+ except zoneinfo.ZoneInfoNotFoundError:
+ pass
@pytest.mark.parametrize("tz", _tzs)
diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py
index aa0e91cecd4fc..42cf0168f6599 100644
--- a/pandas/tests/indexes/multi/test_get_set.py
+++ b/pandas/tests/indexes/multi/test_get_set.py
@@ -1,6 +1,8 @@
import numpy as np
import pytest
+from pandas.compat import PY311
+
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
@@ -139,9 +141,15 @@ def test_set_levels_codes_directly(idx):
minor_codes = [(x + 1) % 1 for x in minor_codes]
new_codes = [major_codes, minor_codes]
- msg = "[Cc]an't set attribute"
+ msg = "Can't set attribute"
with pytest.raises(AttributeError, match=msg):
idx.levels = new_levels
+
+ msg = (
+ "property 'codes' of 'MultiIndex' object has no setter"
+ if PY311
+ else "can't set attribute"
+ )
with pytest.raises(AttributeError, match=msg):
idx.codes = new_codes
diff --git a/pandas/tests/indexes/period/test_freq_attr.py b/pandas/tests/indexes/period/test_freq_attr.py
index 3bf3e700e5e72..e1ecffa4982bd 100644
--- a/pandas/tests/indexes/period/test_freq_attr.py
+++ b/pandas/tests/indexes/period/test_freq_attr.py
@@ -1,5 +1,7 @@
import pytest
+from pandas.compat import PY311
+
from pandas import (
offsets,
period_range,
@@ -17,5 +19,10 @@ def test_freq_setter_deprecated(self):
idx.freq
# warning for setter
- with pytest.raises(AttributeError, match="can't set attribute"):
+ msg = (
+ "property 'freq' of 'PeriodArray' object has no setter"
+ if PY311
+ else "can't set attribute"
+ )
+ with pytest.raises(AttributeError, match=msg):
idx.freq = offsets.Day()
diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py
index 47f1052808e0c..f52af109626e9 100644
--- a/pandas/tests/io/parser/common/test_read_errors.py
+++ b/pandas/tests/io/parser/common/test_read_errors.py
@@ -12,6 +12,7 @@
import numpy as np
import pytest
+from pandas.compat import PY311
from pandas.errors import (
EmptyDataError,
ParserError,
@@ -224,13 +225,19 @@ def test_read_csv_wrong_num_columns(all_parsers):
parser.read_csv(StringIO(data))
-def test_null_byte_char(all_parsers):
+def test_null_byte_char(request, all_parsers):
# see gh-2741
data = "\x00,foo"
names = ["a", "b"]
parser = all_parsers
- if parser.engine == "c":
+ if parser.engine == "c" or (parser.engine == "python" and PY311):
+ if parser.engine == "python" and PY311:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="In Python 3.11, this is read as an empty character not null"
+ )
+ )
expected = DataFrame([[np.nan, "foo"]], columns=names)
out = parser.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(out, expected)
diff --git a/pandas/tests/io/parser/test_quoting.py b/pandas/tests/io/parser/test_quoting.py
index a1aba949e74fe..025a612dc47d2 100644
--- a/pandas/tests/io/parser/test_quoting.py
+++ b/pandas/tests/io/parser/test_quoting.py
@@ -8,6 +8,7 @@
import pytest
+from pandas.compat import PY311
from pandas.errors import ParserError
from pandas import DataFrame
@@ -80,11 +81,16 @@ def test_null_quote_char(all_parsers, quoting, quote_char):
if quoting != csv.QUOTE_NONE:
# Sanity checking.
- msg = "quotechar must be set if quoting enabled"
+ msg = (
+ '"quotechar" must be a 1-character string'
+ if PY311 and all_parsers.engine == "python" and quote_char == ""
+ else "quotechar must be set if quoting enabled"
+ )
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
- else:
+ elif not (PY311 and all_parsers.engine == "python"):
+ # Python 3.11+ doesn't support null/blank quote chars in their csv parsers
expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
| - [ ] xref #46680 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47442 | 2022-06-21T05:57:11Z | 2022-08-17T22:33:04Z | 2022-08-17T22:33:04Z | 2022-08-17T23:01:36Z |
DOC: Update v1.3.0 changelog to reflect breaking change in join dtypes | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index a392aeb5274c2..59fa3acc3883d 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -709,6 +709,7 @@ Other API changes
- :meth:`ExtensionDtype.construct_array_type` is now a required method instead of an optional one for :class:`ExtensionDtype` subclasses (:issue:`24860`)
- Calling ``hash`` on non-hashable pandas objects will now raise ``TypeError`` with the built-in error message (e.g. ``unhashable type: 'Series'``). Previously it would raise a custom message such as ``'Series' objects are mutable, thus they cannot be hashed``. Furthermore, ``isinstance(<Series>, abc.collections.Hashable)`` will now return ``False`` (:issue:`40013`)
- :meth:`.Styler.from_custom_template` now has two new arguments for template names, and removed the old ``name``, due to template inheritance having been introducing for better parsing (:issue:`42053`). Subclassing modifications to Styler attributes are also needed.
+- DataFrame join dtypes are taken from left side not right, which may result in a different dtype on the resulting index. See (:issue:`47384`).
.. _whatsnew_130.api_breaking.build:
| Minor update to old docs to reflect a change in how dtype is chosen in joins. Docs only.
- [ * ] closes #47384
- [ n/a ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ n/a ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ n/a ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ n/a ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47441 | 2022-06-21T05:07:17Z | 2022-07-19T20:36:42Z | null | 2022-07-22T05:41:34Z |
REF: stop passing self as arg from ExtensionArray methods | diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index f17d343024915..3f896ed4c51f9 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -45,7 +45,7 @@
ExtensionDtype,
PeriodDtype,
)
-from pandas.core.dtypes.missing import array_equivalent
+from pandas.core.dtypes.missing import array_equivalent, isna
from pandas.core import missing
from pandas.core.algorithms import (
@@ -197,7 +197,9 @@ def argmin(self, axis: int = 0, skipna: bool = True): # type: ignore[override]
validate_bool_kwarg(skipna, "skipna")
if not skipna and self._hasna:
raise NotImplementedError
- return nargminmax(self, "argmin", axis=axis)
+ values = self._values_for_argsort()
+ mask = np.asarray(isna(values))
+ return nargminmax(values, mask, "argmin", axis=axis)
# Signature of "argmax" incompatible with supertype "ExtensionArray"
def argmax(self, axis: int = 0, skipna: bool = True): # type: ignore[override]
@@ -205,7 +207,9 @@ def argmax(self, axis: int = 0, skipna: bool = True): # type: ignore[override]
validate_bool_kwarg(skipna, "skipna")
if not skipna and self._hasna:
raise NotImplementedError
- return nargminmax(self, "argmax", axis=axis)
+ values = self._values_for_argsort()
+ mask = np.asarray(isna(values))
+ return nargminmax(values, mask, "argmax", axis=axis)
def unique(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT:
new_data = unique(self._ndarray)
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 882cc76cf2d77..6bd17db994a65 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -151,6 +151,9 @@ class ExtensionArray:
_reduce
_values_for_argsort
_values_for_factorize
+ _values_for_mode
+ _values_for_rank
+ _values_for_format_object_summary
Notes
-----
@@ -739,7 +742,9 @@ def argmin(self, skipna: bool = True) -> int:
validate_bool_kwarg(skipna, "skipna")
if not skipna and self._hasna:
raise NotImplementedError
- return nargminmax(self, "argmin")
+ values = self._values_for_argsort()
+ mask = np.asarray(isna(values))
+ return nargminmax(values, mask, "argmin")
def argmax(self, skipna: bool = True) -> int:
"""
@@ -767,7 +772,9 @@ def argmax(self, skipna: bool = True) -> int:
validate_bool_kwarg(skipna, "skipna")
if not skipna and self._hasna:
raise NotImplementedError
- return nargminmax(self, "argmax")
+ values = self._values_for_argsort()
+ mask = np.asarray(isna(values))
+ return nargminmax(values, mask, "argmax")
def fillna(
self: ExtensionArrayT,
@@ -1281,6 +1288,16 @@ def view(self, dtype: Dtype | None = None) -> ArrayLike:
# Printing
# ------------------------------------------------------------------------
+ def _values_for_format_object_summary(self) -> np.ndarray:
+ """
+ Return values for object summary
+
+ Returns
+ -------
+ ndarray
+ """
+ return np.array(self)
+
def __repr__(self) -> str:
if self.ndim > 1:
return self._repr_2d()
@@ -1290,8 +1307,9 @@ def __repr__(self) -> str:
# the short repr has no trailing newline, while the truncated
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
+ values = self._values_for_format_object_summary()
data = format_object_summary(
- self, self._formatter(), indent_for_name=False
+ values, self._formatter(), indent_for_name=False
).rstrip(", \n")
class_name = f"<{type(self).__name__}>\n"
return f"{class_name}{data}\nLength: {len(self)}, dtype: {self.dtype}"
@@ -1567,6 +1585,16 @@ def _fill_mask_inplace(
self[mask] = new_values[mask]
return
+ def _values_for_rank(self) -> np.ndarray:
+ """
+ Return values for ranking
+
+ Returns
+ -------
+ ndarray
+ """
+ return np.array(self)
+
def _rank(
self,
*,
@@ -1584,8 +1612,10 @@ def _rank(
# TODO: we only have tests that get here with dt64 and td64
# TODO: all tests that get here use the defaults for all the kwds
+
+ values = self._values_for_rank()
return rank(
- self,
+ values,
axis=axis,
method=method,
na_option=na_option,
@@ -1638,6 +1668,16 @@ def _quantile(
res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation)
return type(self)._from_sequence(res_values)
+ def _values_for_mode(self) -> np.ndarray:
+ """
+ Return values for mode
+
+ Returns
+ -------
+ ndarray
+ """
+ return np.array(self)
+
def _mode(self: ExtensionArrayT, dropna: bool = True) -> ExtensionArrayT:
"""
Returns the mode(s) of the ExtensionArray.
@@ -1656,7 +1696,9 @@ def _mode(self: ExtensionArrayT, dropna: bool = True) -> ExtensionArrayT:
"""
# error: Incompatible return value type (got "Union[ExtensionArray,
# ndarray[Any, Any]]", expected "ExtensionArrayT")
- return mode(self, dropna=dropna) # type: ignore[return-value]
+
+ values = self._values_for_mode()
+ return mode(values, dropna=dropna) # type: ignore[return-value]
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
if any(
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 16facfc915e40..b9278efbd13aa 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -22,6 +22,7 @@
)
from pandas._libs.hashtable import unique_label_indices
from pandas._typing import (
+ ArrayLike,
IndexKeyFunc,
Level,
NaPosition,
@@ -447,14 +448,15 @@ def nargsort(
return ensure_platform_int(indexer)
-def nargminmax(values: ExtensionArray, method: str, axis: int = 0):
+def nargminmax(values: ArrayLike, mask: np.ndarray, method: str, axis: int = 0):
"""
Implementation of np.argmin/argmax but for ExtensionArray and which
handles missing values.
Parameters
----------
- values : ExtensionArray
+ values : ArrayLike
+ mask: np.ndarray
method : {"argmax", "argmin"}
axis : int, default 0
@@ -465,19 +467,16 @@ def nargminmax(values: ExtensionArray, method: str, axis: int = 0):
assert method in {"argmax", "argmin"}
func = np.argmax if method == "argmax" else np.argmin
- mask = np.asarray(isna(values))
- arr_values = values._values_for_argsort()
-
- if arr_values.ndim > 1:
+ if values.ndim > 1:
if mask.any():
if axis == 1:
- zipped = zip(arr_values, mask)
+ zipped = zip(values, mask)
else:
- zipped = zip(arr_values.T, mask.T)
+ zipped = zip(values.T, mask.T)
return np.array([_nanargminmax(v, m, func) for v, m in zipped])
- return func(arr_values, axis=axis)
+ return func(values, axis=axis)
- return _nanargminmax(arr_values, mask, func)
+ return _nanargminmax(values, mask, func)
def _nanargminmax(values: np.ndarray, mask: npt.NDArray[np.bool_], func) -> int:
| - [x] closes #46843
- [ ] All [code checks passed].
- [x] Added [type annotations] to new arguments/methods/functions.
🐼
| https://api.github.com/repos/pandas-dev/pandas/pulls/47440 | 2022-06-21T01:45:26Z | 2022-08-15T16:42:22Z | null | 2022-08-18T19:27:31Z |
REF: if statements merge + typehint correction | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 39a940169e1f3..2852fddb133d4 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4536,9 +4536,8 @@ def predicate(arr: ArrayLike) -> bool:
if not dtype_predicate(dtype, include):
return False
- if exclude:
- if dtype_predicate(dtype, exclude):
- return False
+ if exclude and dtype_predicate(dtype, exclude):
+ return False
return True
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index 65720e675a77a..7ee46b23a9d93 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -515,7 +515,12 @@ def _generate_col_header_row(self, iter: tuple, max_cols: int, col_lengths: dict
return index_blanks + column_name + column_headers
- def _generate_index_names_row(self, iter: tuple, max_cols: int, col_lengths: dict):
+ def _generate_index_names_row(
+ self,
+ iter: tuple | list,
+ max_cols: int,
+ col_lengths: dict,
+ ):
"""
Generate the row containing index names
@@ -525,7 +530,7 @@ def _generate_index_names_row(self, iter: tuple, max_cols: int, col_lengths: dic
Parameters
----------
- iter : tuple
+ iter : tuple or list
Looping variables from outer scope
max_cols : int
Permissible number of columns
diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index 181b0fe115f4c..466971f8b37be 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -408,9 +408,8 @@ def _iterparse_nodes(self) -> list[dict[str, str | None]]:
for event, elem in iterparse(self.path_or_buffer, events=("start", "end")):
curr_elem = elem.tag.split("}")[1] if "}" in elem.tag else elem.tag
- if event == "start":
- if curr_elem == row_node:
- row = {}
+ if event == "start" and curr_elem == row_node:
+ row = {}
if row is not None:
for col in self.iterparse[row_node]:
@@ -656,9 +655,8 @@ def _iterparse_nodes(self) -> list[dict[str, str | None]]:
for event, elem in iterparse(self.path_or_buffer, events=("start", "end")):
curr_elem = elem.tag.split("}")[1] if "}" in elem.tag else elem.tag
- if event == "start":
- if curr_elem == row_node:
- row = {}
+ if event == "start" and curr_elem == row_node:
+ row = {}
if row is not None:
for col in self.iterparse[row_node]:
| - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- if statements merge on `pandas/core/frame.py` and `pandas/io/xml.py`
- Function `_generate_index_names_row()` on `pandas/io/formats/style_render.py` expected a tuple as a parameter but its only reference uses a list, so i altered the typehint
| https://api.github.com/repos/pandas-dev/pandas/pulls/47439 | 2022-06-21T01:06:11Z | 2022-08-15T16:41:07Z | null | 2022-08-15T16:41:07Z |
CLN: some simple code cleanup | diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 4279a9707e692..f9b3145eab170 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -96,7 +96,7 @@ def __init__(
unique_declarations = frozenset(declaration_dict.items())
style = css_converter(unique_declarations)
- return super().__init__(row=row, col=col, val=val, style=style, **kwargs)
+ super().__init__(row=row, col=col, val=val, style=style, **kwargs)
class CSSToExcelConverter:
diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py
index 8900aa0060559..afe1c236858ab 100644
--- a/pandas/tests/apply/test_series_apply.py
+++ b/pandas/tests/apply/test_series_apply.py
@@ -668,7 +668,7 @@ def test_map_abc_mapping_with_missing(non_dict_mapping_subclass):
# https://github.com/pandas-dev/pandas/issues/29733
# Check collections.abc.Mapping support as mapper for Series.map
class NonDictMappingWithMissing(non_dict_mapping_subclass):
- def __missing__(key):
+ def __missing__(self, key):
return "missing"
s = Series([1, 2, 3])
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 8fe6abd3b0ed5..14f37bca71f82 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -236,7 +236,7 @@ def test_is_sequence():
assert not is_seq(np.int64)
class A:
- def __getitem__(self):
+ def __getitem__(self, item):
return 1
assert not is_seq(A())
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index d00cf198b3296..fb5da7e81adbd 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1390,7 +1390,7 @@ def __init__(self, lst) -> None:
def __getitem__(self, n):
return self._lst.__getitem__(n)
- def __len__(self, n):
+ def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, "a"]), DummyContainer([2, "b"])]
| i've changed some inconsistencies on some magic methods's parameters and removed a return from a object initialization | https://api.github.com/repos/pandas-dev/pandas/pulls/47438 | 2022-06-21T00:21:23Z | 2022-08-03T04:05:53Z | 2022-08-03T04:05:52Z | 2022-08-03T04:06:39Z |
Check data manager | diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py
index 5008e64dd0e99..3e16d9593b5a5 100644
--- a/pandas/tests/frame/methods/test_fillna.py
+++ b/pandas/tests/frame/methods/test_fillna.py
@@ -13,6 +13,7 @@
TimedeltaIndex,
Timestamp,
date_range,
+ get_option,
)
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float
@@ -673,6 +674,13 @@ def test_fillna_inplace_with_columns_limit_and_value(self):
df.fillna(axis=1, value=100, limit=1, inplace=True)
tm.assert_frame_equal(df, expected)
+ def test_mode(self):
+ mode = get_option("mode.data_manager")
+ if mode == "array":
+ raise ValueError(get_option("mode.data_manager"))
+ else:
+ raise AttributeError(get_option("mode.data_manager"))
+
def test_fillna_nonconsolidated_frame():
# https://github.com/pandas-dev/pandas/issues/36495
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47437 | 2022-06-20T21:54:57Z | 2022-06-20T22:14:06Z | null | 2022-06-24T11:26:26Z |
DOC: clarify to_csv float format docstring | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f32b347de32c3..a9fe349d53e4d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3302,7 +3302,7 @@ def to_csv(
path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
sep: str = ",",
na_rep: str = "",
- float_format: str | None = None,
+ float_format: str | Callable | None = None,
columns: Sequence[Hashable] | None = None,
header: bool_t | list[str] = True,
index: bool_t = True,
@@ -3341,8 +3341,9 @@ def to_csv(
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
- float_format : str, default None
- Format string for floating point numbers.
+ float_format : str, Callable, default None
+ Format string for floating point numbers. If a Callable is given, it takes
+ precedence over other numeric formatting parameters, like decimal.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py
index f1ecad2f711bc..df7bc04202e39 100644
--- a/pandas/tests/frame/methods/test_to_csv.py
+++ b/pandas/tests/frame/methods/test_to_csv.py
@@ -833,6 +833,18 @@ def test_to_csv_float_format(self):
)
tm.assert_frame_equal(rs, xp)
+ def test_to_csv_float_format_over_decimal(self):
+ # GH#47436
+ df = DataFrame({"a": [0.5, 1.0]})
+ result = df.to_csv(
+ decimal=",",
+ float_format=lambda x: np.format_float_positional(x, trim="-"),
+ index=False,
+ )
+ expected_rows = ["a", "0.5", "1"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert result == expected
+
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({"A": [1, 2, 3], "B": ["foo", "bar", "baz"]})
| - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
I stumbled across this myself a few days ago. I think this would help
| https://api.github.com/repos/pandas-dev/pandas/pulls/47436 | 2022-06-20T20:23:34Z | 2022-06-21T21:58:44Z | 2022-06-21T21:58:44Z | 2022-06-21T22:01:46Z |
REF: simplify Timestamp.replace | diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 8a2810825fc1d..46a4308e8aafd 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -81,7 +81,6 @@ from pandas._libs.tslibs.nattype cimport (
from pandas._libs.tslibs.np_datetime cimport (
NPY_DATETIMEUNIT,
NPY_FR_ns,
- check_dts_bounds,
cmp_dtstructs,
cmp_scalar,
get_datetime64_unit,
@@ -2259,15 +2258,11 @@ default 'raise'
'fold': fold}
ts_input = datetime(**kwargs)
- ts = convert_datetime_to_tsobject(ts_input, tzobj, nanos=0, reso=self._reso)
- # TODO: passing nanos=dts.ps // 1000 causes a RecursionError in
- # TestTimestampConstructors.test_constructor; not clear why
- value = ts.value + (dts.ps // 1000)
- if value != NPY_NAT:
- check_dts_bounds(&dts, self._reso)
-
+ ts = convert_datetime_to_tsobject(
+ ts_input, tzobj, nanos=dts.ps // 1000, reso=self._reso
+ )
return create_timestamp_from_ts(
- value, dts, tzobj, self._freq, fold, reso=self._reso
+ ts.value, dts, tzobj, self._freq, fold, reso=self._reso
)
def to_julian_date(self) -> np.float64:
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47435 | 2022-06-20T19:09:51Z | 2022-06-21T19:09:38Z | 2022-06-21T19:09:38Z | 2022-06-21T20:50:03Z |
Fix `vertical-align: middle;` for `Styler.to_excel` with xlsxwriter engine | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index a76b682f135db..56b09348189ee 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -967,8 +967,9 @@ Styler
^^^^^^
- Bug when attempting to apply styling functions to an empty DataFrame subset (:issue:`45313`)
- Bug in :class:`CSSToExcelConverter` leading to ``TypeError`` when border color provided without border style for ``xlsxwriter`` engine (:issue:`42276`)
-- Bug in :meth:`.Styler.set_sticky` leading to white text on white background in dark mode (:issue:`46984`)
-- Bug in :meth:`.Styler.to_latex` causing ``UnboundLocalError`` when ``clines="all;data"`` and the ``DataFrame`` has no rows. (:issue:`47203`)
+- Bug in :meth:`Styler.set_sticky` leading to white text on white background in dark mode (:issue:`46984`)
+- Bug in :meth:`Styler.to_latex` causing ``UnboundLocalError`` when ``clines="all;data"`` and the ``DataFrame`` has no rows. (:issue:`47203`)
+- Bug in :meth:`Styler.to_excel` when using ``vertical-align: middle;`` with ``xlsxwriter`` engine (:issue:`30107`)
Metadata
^^^^^^^^
diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py
index 302d0281019f5..a3edccd3a5779 100644
--- a/pandas/io/excel/_xlsxwriter.py
+++ b/pandas/io/excel/_xlsxwriter.py
@@ -165,6 +165,10 @@ def convert(cls, style_dict, num_format_str=None):
"doubleAccounting": 34,
}[props["underline"]]
+ # GH 30107 - xlsxwriter uses different name
+ if props.get("valign") == "center":
+ props["valign"] = "vcenter"
+
return props
diff --git a/pandas/tests/io/excel/test_style.py b/pandas/tests/io/excel/test_style.py
index c31e8ec022dcd..9bbe61c90d973 100644
--- a/pandas/tests/io/excel/test_style.py
+++ b/pandas/tests/io/excel/test_style.py
@@ -70,6 +70,7 @@ def test_styler_to_excel_unstyled(engine):
["alignment", "vertical"],
{"xlsxwriter": None, "openpyxl": "bottom"}, # xlsxwriter Fails
),
+ ("vertical-align: middle;", ["alignment", "vertical"], "center"),
# Border widths
("border-left: 2pt solid red", ["border", "left", "style"], "medium"),
("border-left: 1pt dotted red", ["border", "left", "style"], "dotted"),
| - [x] closes #30107
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47434 | 2022-06-20T16:18:41Z | 2022-06-27T16:17:07Z | 2022-06-27T16:17:06Z | 2022-06-27T16:17:14Z |
DOC clarify inplace operation section in 1.5 whats_new | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index a4fa31f7fc368..499e410cb81fd 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -572,31 +572,37 @@ As ``group_keys=True`` is the default value of :meth:`DataFrame.groupby` and
raise a ``FutureWarning``. This can be silenced and the previous behavior
retained by specifying ``group_keys=False``.
-.. _whatsnew_150.notable_bug_fixes.setitem_column_try_inplace:
+.. _whatsnew_150.deprecations.setitem_column_try_inplace:
_ see also _whatsnew_130.notable_bug_fixes.setitem_column_try_inplace
-Try operating inplace when setting values with ``loc`` and ``iloc``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Inplace operation when setting values with ``loc`` and ``iloc``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Most of the time setting values with ``frame.iloc`` attempts to set values
-in-place, only falling back to inserting a new array if necessary. In the past,
-setting entire columns has been an exception to this rule:
+inplace, only falling back to inserting a new array if necessary. There are
+some cases where this rule is not followed, for example when setting an entire
+column from an array with different dtype:
.. ipython:: python
- values = np.arange(4).reshape(2, 2)
- df = pd.DataFrame(values)
- ser = df[0]
+ df = pd.DataFrame({'price': [11.1, 12.2]}, index=['book1', 'book2'])
+ original_prices = df['price']
+ new_prices = np.array([98, 99])
*Old behavior*:
.. code-block:: ipython
- In [3]: df.iloc[:, 0] = np.array([10, 11])
- In [4]: ser
+ In [3]: df.iloc[:, 0] = new_prices
+ In [4]: df.iloc[:, 0]
Out[4]:
- 0 0
- 1 2
- Name: 0, dtype: int64
+ book1 98
+ book2 99
+ Name: price, dtype: int64
+ In [5]: original_prices
+ Out[5]:
+ book1 11.1
+ book2 12.2
+ Name: price, float: 64
This behavior is deprecated. In a future version, setting an entire column with
iloc will attempt to operate inplace.
@@ -605,39 +611,52 @@ iloc will attempt to operate inplace.
.. code-block:: ipython
- In [3]: df.iloc[:, 0] = np.array([10, 11])
- In [4]: ser
+ In [3]: df.iloc[:, 0] = new_prices
+ In [4]: df.iloc[:, 0]
Out[4]:
- 0 10
- 1 11
- Name: 0, dtype: int64
+ book1 98.0
+ book2 99.0
+ Name: price, dtype: float64
+ In [5]: original_prices
+ Out[5]:
+ book1 98.0
+ book2 99.0
+ Name: price, dtype: float64
To get the old behavior, use :meth:`DataFrame.__setitem__` directly:
-*Future behavior*:
-
.. code-block:: ipython
- In [5]: df[0] = np.array([21, 31])
- In [4]: ser
- Out[4]:
- 0 10
- 1 11
- Name: 0, dtype: int64
-
-In the case where ``df.columns`` is not unique, use :meth:`DataFrame.isetitem`:
-
-*Future behavior*:
+ In [3]: df[df.columns[0]] = new_prices
+ In [4]: df.iloc[:, 0]
+ Out[4]
+ book1 98
+ book2 99
+ Name: price, dtype: int64
+ In [5]: original_prices
+ Out[5]:
+ book1 11.1
+ book2 12.2
+ Name: price, dtype: float64
+
+To get the old behaviour when ``df.columns`` is not unique and you want to
+change a single column by index, you can use :meth:`DataFrame.isetitem`, which
+has been added in pandas 1.5:
.. code-block:: ipython
- In [5]: df.columns = ["A", "A"]
- In [5]: df.isetitem(0, np.array([21, 31]))
- In [4]: ser
+ In [3]: df_with_duplicated_cols = pd.concat([df, df], axis='columns')
+ In [3]: df_with_duplicated_cols.isetitem(0, new_prices)
+ In [4]: df_with_duplicated_cols.iloc[:, 0]
Out[4]:
- 0 10
- 1 11
- Name: 0, dtype: int64
+ book1 98
+ book2 99
+ Name: price, dtype: int64
+ In [5]: original_prices
+ Out[5]:
+ book1 11.1
+ book2 12.2
+ Name: 0, dtype: float64
.. _whatsnew_150.deprecations.numeric_only_default:
| xref https://github.com/pandas-dev/pandas/issues/47381
Coming from scikit-learn tests failing on scikit-learn with the pandas development version, I found the whats_new entry not very helpful at all. This does a few things:
- use an example with different dtypes so that the behaviour is as advertised for old behaviour (i.e. `ser` is not updated in place). This was not the case before, since when dtype matches, `ser` is updated in place so the code snippet was not showing the right behaviour.
- make it clearer that there is no behaviour change in pandas 1.5
Some of the wording may be not completely accurate, as I don't have a very good grasp of the pandas internals, feel free to suggest improvements!
| https://api.github.com/repos/pandas-dev/pandas/pulls/47433 | 2022-06-20T15:39:46Z | 2022-06-30T20:11:34Z | 2022-06-30T20:11:34Z | 2022-07-01T06:31:12Z |
Fix segmentation fault when JSON serializing a PeriodIndex | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index a4d81533df23d..4034655ccd325 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -30,6 +30,7 @@ Fixed regressions
- Fixed regression in :func:`assert_index_equal` when ``check_order=False`` and :class:`Index` has extension or object dtype (:issue:`47207`)
- Fixed regression in :func:`read_excel` returning ints as floats on certain input sheets (:issue:`46988`)
- Fixed regression in :meth:`DataFrame.shift` when ``axis`` is ``columns`` and ``fill_value`` is absent, ``freq`` is ignored (:issue:`47039`)
+- Fixed regression in :meth:`DataFrame.to_json` causing a segmentation violation when :class:`DataFrame` is created with an ``index`` parameter of the type :class:`PeriodIndex` (:issue:`46683`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 73d2a1f786f8b..260f1ffb6165f 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -238,8 +238,10 @@ static PyObject *get_values(PyObject *obj) {
PyErr_Clear();
} else if (PyObject_HasAttrString(values, "__array__")) {
// We may have gotten a Categorical or Sparse array so call np.array
+ PyObject *array_values = PyObject_CallMethod(values, "__array__",
+ NULL);
Py_DECREF(values);
- values = PyObject_CallMethod(values, "__array__", NULL);
+ values = array_values;
} else if (!PyArray_CheckExact(values)) {
// Didn't get a numpy array, so keep trying
Py_DECREF(values);
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index e82a888f47388..ae13d8d5fb180 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -23,6 +23,7 @@
DatetimeIndex,
Index,
NaT,
+ PeriodIndex,
Series,
Timedelta,
Timestamp,
@@ -1240,3 +1241,9 @@ def test_encode_timedelta_iso(self, td):
expected = f'"{td.isoformat()}"'
assert result == expected
+
+ def test_encode_periodindex(self):
+ # GH 46683
+ p = PeriodIndex(["2022-04-06", "2022-04-07"], freq="D")
+ df = DataFrame(index=p)
+ assert df.to_json() == "{}"
| Fixes #46683
- [x] closes #46683
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47431 | 2022-06-20T15:16:16Z | 2022-06-22T09:18:14Z | 2022-06-22T09:18:14Z | 2022-06-22T10:16:51Z |
Jess | - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47430 | 2022-06-20T14:38:50Z | 2022-06-20T14:43:33Z | null | 2022-06-20T14:43:33Z | |
DOC: Add an interactive shell powered by JupyterLite to the website | diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml
index 5ffd4135802bd..f9a941b87387c 100644
--- a/.github/workflows/docbuild-and-upload.yml
+++ b/.github/workflows/docbuild-and-upload.yml
@@ -46,6 +46,11 @@ jobs:
- name: Build documentation
run: doc/make.py --warnings-are-errors
+ - name: Build the interactive terminal
+ run: |
+ cd web/interactive_terminal
+ jupyter lite build
+
- name: Install ssh key
run: |
mkdir -m 700 -p ~/.ssh
diff --git a/.gitignore b/.gitignore
index 87224f1d6060f..07b1f056d511b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -122,3 +122,7 @@ doc/build/html/index.html
doc/tmp.sv
env/
doc/source/savefig/
+
+# Interactive terminal generated files #
+########################################
+.jupyterlite.doit.db
diff --git a/environment.yml b/environment.yml
index 98631d8485736..0a6055d80c071 100644
--- a/environment.yml
+++ b/environment.yml
@@ -123,3 +123,8 @@ dependencies:
- feedparser
- pyyaml
- requests
+
+ # build the interactive terminal
+ - jupyterlab >=3.4,<4
+ - pip:
+ - jupyterlite==0.1.0b9
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 2b8aee80882a5..f5dfeb8e7ff30 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -96,4 +96,6 @@ markdown
feedparser
pyyaml
requests
+jupyterlab >=3.4,<4
+jupyterlite==0.1.0b9
setuptools>=51.0.0
diff --git a/web/interactive_terminal/README.md b/web/interactive_terminal/README.md
new file mode 100644
index 0000000000000..865cf282676c9
--- /dev/null
+++ b/web/interactive_terminal/README.md
@@ -0,0 +1,35 @@
+# The interactive `pandas` terminal
+
+An interactive terminal to easily try `pandas` in the browser, powered by JupyterLite.
+
+
+
+## Build
+
+The interactive terminal is built with the `jupyterlite` CLI.
+
+First make sure `jupyterlite` is installed:
+
+```bash
+python -m pip install jupyterlite
+```
+
+Then in `web/interactive_terminal`, run the following command:
+
+```bash
+jupyter lite build
+```
+
+## Configuration
+
+This folder contains configuration files for the interactive terminal powered by JupyterLite:
+
+- `jupyter_lite_config.json`: build time configuration, used when building the assets with the `jupyter lite build` command
+- `jupyter-lite.json` run time configuration applied when launching the application in the browser
+
+The interactive `pandas` terminal application enables a couple of optimizations to only include the `repl` app in the generated static assets.
+To learn more about it, check out the JupyterLite documentation:
+
+- Optimizations: https://jupyterlite.readthedocs.io/en/latest/howto/configure/advanced/optimizations.html
+- JupyterLite schema: https://jupyterlite.readthedocs.io/en/latest/reference/schema-v0.html
+- CLI reference: https://jupyterlite.readthedocs.io/en/latest/reference/cli.html
diff --git a/web/interactive_terminal/jupyter-lite.json b/web/interactive_terminal/jupyter-lite.json
new file mode 100644
index 0000000000000..473fb5a3dcc1a
--- /dev/null
+++ b/web/interactive_terminal/jupyter-lite.json
@@ -0,0 +1,13 @@
+{
+ "jupyter-lite-schema-version": 0,
+ "jupyter-config-data": {
+ "appName": "Pandas REPL",
+ "appUrl": "./repl",
+ "disabledExtensions": [
+ "@jupyter-widgets/jupyterlab-manager"
+ ],
+ "enableMemoryStorage": true,
+ "settingsStorageDrivers": ["memoryStorageDriver"],
+ "contentsStorageDrivers": ["memoryStorageDriver"]
+ }
+ }
diff --git a/web/interactive_terminal/jupyter_lite_config.json b/web/interactive_terminal/jupyter_lite_config.json
new file mode 100644
index 0000000000000..8a8c4eb1ae051
--- /dev/null
+++ b/web/interactive_terminal/jupyter_lite_config.json
@@ -0,0 +1,7 @@
+{
+ "LiteBuildConfig": {
+ "apps": ["repl"],
+ "no_unused_shared_packages": true,
+ "output_dir": "../build/lite"
+ }
+ }
diff --git a/web/pandas/getting_started.md b/web/pandas/getting_started.md
index d4f40a1153fb4..dc43a6a273832 100644
--- a/web/pandas/getting_started.md
+++ b/web/pandas/getting_started.md
@@ -1,5 +1,18 @@
# Getting started
+## Try it in your browser
+
+You can try `pandas` in your browser with the following interactive shell
+without installing anything on your computer.
+
+*Note it can take up to 30 seconds before the shell finishes loading and is ready to run commands.*
+
+<iframe
+ src="./lite/repl/index.html?toolbar=1&kernel=python&code=import%20pandas%20as%20pd&code=df%20=%20pd.DataFrame(%7B'num_legs':%20%5B2,%204%5D,%20'num_wings':%20%5B2,%200%5D%7D,%20index=%5B'falcon',%20'dog'%5D)"
+ width="100%"
+ height="500px"
+></iframe>
+
## Installation instructions
The next steps provides the easiest and recommended way to set up your
| This allows for easily trying `pandas` in a web browser without installing anything.
Since the default kernel is based on Pyodide, it also includes a couple of other libraries by default such as `matplotlib`.

Other popular libraries have also adopted JupyterLite to power their documentation with an interactive shell:
- `numpy`: https://numpy.org/
- `sympy`: https://www.sympy.org/en/shell.html
---
- [x] closes #46682
- [x] ~[Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature~
- [x] ~All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).~
- [x] ~Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.~
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
### TODO
- [x] Embed the JupyterLite REPL application
- [x] Create a separate repo for the JupyterLite deployment with a csv file and other custom settings
- [x] Add example code that can be copied and pasted
- [ ] Move the demo repo to the `pandas-dev` organization | https://api.github.com/repos/pandas-dev/pandas/pulls/47428 | 2022-06-20T11:49:21Z | 2022-06-23T21:15:23Z | 2022-06-23T21:15:23Z | 2022-06-24T07:25:23Z |
TST: GH26650, added new test to validate numpy matmul function with dataframes | diff --git a/pandas/tests/series/test_ufunc.py b/pandas/tests/series/test_ufunc.py
index 13af94feaf744..624496ea26a81 100644
--- a/pandas/tests/series/test_ufunc.py
+++ b/pandas/tests/series/test_ufunc.py
@@ -439,3 +439,14 @@ def test_outer():
with pytest.raises(NotImplementedError, match=tm.EMPTY_STRING_PATTERN):
np.subtract.outer(s, o)
+
+
+def test_np_matmul():
+ # GH26650
+ df1 = pd.DataFrame(data=[[-1, 1, 10]])
+ df2 = pd.DataFrame(data=[-1, 1, 10])
+ expected_result = pd.DataFrame(data=[102])
+ tm.assert_frame_equal(
+ expected_result,
+ np.matmul(df1, df2),
+ )
| - [X] closes #26650
- [X] [Tests added and passed]
- [X] All [code checks passed]
| https://api.github.com/repos/pandas-dev/pandas/pulls/47427 | 2022-06-20T10:09:27Z | 2022-06-21T17:43:44Z | 2022-06-21T17:43:44Z | 2022-06-22T19:58:19Z |
BUG: DataFrame.setitem raising when rhs is ea dtype Series | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 5a97e1d5a04ea..09fa08339bd43 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1977,7 +1977,11 @@ def _setitem_single_column(self, loc: int, value, plane_indexer):
# We will not operate in-place, but will attempt to in the future.
# To determine whether we need to issue a FutureWarning, see if the
# setting in-place would work, i.e. behavior will change.
- warn = can_hold_element(orig_values, value)
+ if isinstance(value, ABCSeries):
+ warn = can_hold_element(orig_values, value._values)
+ else:
+ warn = can_hold_element(orig_values, value)
+
# Don't issue the warning yet, as we can still trim a few cases where
# behavior will not change.
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index cf6d351aa78a0..a59d7d4f3bd45 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -683,6 +683,13 @@ def test_boolean_mask_nullable_int64(self):
)
tm.assert_frame_equal(result, expected)
+ def test_setitem_ea_dtype_rhs_series(self):
+ # GH#47425
+ df = DataFrame({"a": [1, 2]})
+ df["a"] = Series([1, 2], dtype="Int64")
+ expected = DataFrame({"a": [1, 2]}, dtype="Int64")
+ tm.assert_frame_equal(df, expected)
+
# TODO(ArrayManager) set column with 2d column array, see #44788
@td.skip_array_manager_not_yet_implemented
def test_setitem_npmatrix_2d(self):
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
This is a regression on main, so no whatsnew needed.
Is it on purpose, that np_can_hold_element raises when feeding a ea dtype Series as element?
| https://api.github.com/repos/pandas-dev/pandas/pulls/47425 | 2022-06-19T19:53:00Z | 2022-06-24T21:18:28Z | 2022-06-24T21:18:28Z | 2022-07-11T17:39:23Z |
ENH: TDA.total_seconds support non-nano | diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
index 68452ce011f9d..599ddfec5a268 100644
--- a/pandas/_libs/tslibs/__init__.py
+++ b/pandas/_libs/tslibs/__init__.py
@@ -29,6 +29,7 @@
"astype_overflowsafe",
"get_unit_from_dtype",
"periods_per_day",
+ "periods_per_second",
]
from pandas._libs.tslibs import dtypes
@@ -36,6 +37,7 @@
from pandas._libs.tslibs.dtypes import (
Resolution,
periods_per_day,
+ periods_per_second,
)
from pandas._libs.tslibs.nattype import (
NaT,
diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd
index dc2a1b186edcf..356bd9dc3d7a0 100644
--- a/pandas/_libs/tslibs/dtypes.pxd
+++ b/pandas/_libs/tslibs/dtypes.pxd
@@ -7,7 +7,7 @@ cdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit)
cdef NPY_DATETIMEUNIT abbrev_to_npy_unit(str abbrev)
cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) nogil
cpdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=*) except? -1
-cdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1
+cpdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1
cdef int64_t get_conversion_factor(NPY_DATETIMEUNIT from_unit, NPY_DATETIMEUNIT to_unit) except? -1
cdef dict attrname_to_abbrevs
diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi
index 5c343f89f38ea..f8c6a36a63c7e 100644
--- a/pandas/_libs/tslibs/dtypes.pyi
+++ b/pandas/_libs/tslibs/dtypes.pyi
@@ -6,6 +6,7 @@ _attrname_to_abbrevs: dict[str, str]
_period_code_map: dict[str, int]
def periods_per_day(reso: int) -> int: ...
+def periods_per_second(reso: int) -> int: ...
class PeriodDtypeBase:
_dtype_code: int # PeriodDtypeCode
diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index a340fe477e982..6cbe31cb1dc16 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -403,7 +403,7 @@ cpdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=NPY_DATETIMEUNIT.NPY_FR_ns)
return day_units
-cdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1:
+cpdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1:
if reso == NPY_DATETIMEUNIT.NPY_FR_ns:
return 1_000_000_000
elif reso == NPY_DATETIMEUNIT.NPY_FR_us:
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 68f0d73b8556c..b2dabc5276ba6 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -21,6 +21,7 @@
Timedelta,
astype_overflowsafe,
iNaT,
+ periods_per_second,
to_offset,
)
from pandas._libs.tslibs.conversion import precision_from_unit
@@ -818,10 +819,11 @@ def total_seconds(self) -> npt.NDArray[np.float64]:
dtype='timedelta64[ns]', freq=None)
>>> idx.total_seconds()
- Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0],
+ Float64Index([0.0, 86400.0, 172800.0, 259200.0, 345600.0],
dtype='float64')
"""
- return self._maybe_mask_results(1e-9 * self.asi8, fill_value=None)
+ pps = periods_per_second(self._reso)
+ return self._maybe_mask_results(self.asi8 / pps, fill_value=None)
def to_pytimedelta(self) -> npt.NDArray[np.object_]:
"""
@@ -832,7 +834,7 @@ def to_pytimedelta(self) -> npt.NDArray[np.object_]:
-------
timedeltas : ndarray[object]
"""
- return tslibs.ints_to_pytimedelta(self._ndarray)
+ return ints_to_pytimedelta(self._ndarray)
days = _field_accessor("days", "days", "Number of days for each element.")
seconds = _field_accessor(
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index c8b850d35035a..5983c2f644949 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -33,7 +33,7 @@ def test_non_nano(self, unit, reso):
assert tda[0]._reso == reso
@pytest.mark.parametrize("field", TimedeltaArray._field_ops)
- def test_fields(self, unit, reso, field):
+ def test_fields(self, unit, field):
arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")
tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype)
@@ -44,6 +44,28 @@ def test_fields(self, unit, reso, field):
expected = getattr(tda_nano, field)
tm.assert_numpy_array_equal(result, expected)
+ def test_to_pytimedelta(self, unit):
+ arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")
+ tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype)
+
+ as_nano = arr.astype("m8[ns]")
+ tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype)
+
+ result = tda.to_pytimedelta()
+ expected = tda_nano.to_pytimedelta()
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_total_seconds(self, unit):
+ arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")
+ tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype)
+
+ as_nano = arr.astype("m8[ns]")
+ tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype)
+
+ result = tda.total_seconds()
+ expected = tda_nano.total_seconds()
+ tm.assert_numpy_array_equal(result, expected)
+
class TestTimedeltaArray:
@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py
index d61a2fca33f56..5891c28c11a68 100644
--- a/pandas/tests/tslibs/test_api.py
+++ b/pandas/tests/tslibs/test_api.py
@@ -54,6 +54,7 @@ def test_namespace():
"astype_overflowsafe",
"get_unit_from_dtype",
"periods_per_day",
+ "periods_per_second",
]
expected = set(submodules + api)
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47421 | 2022-06-19T15:13:31Z | 2022-06-27T22:00:19Z | 2022-06-27T22:00:19Z | 2022-06-27T23:00:55Z |
Add tests | diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index 5d1efea657426..57c4af1a0fe1c 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -544,3 +544,24 @@ def test_union_duplicates(index, request):
result = mi2.union(mi1)
tm.assert_index_equal(result, mi2.sort_values())
+
+
+@pytest.mark.parametrize(
+ "levels1, levels2, codes1, codes2, names",
+ [
+ (
+ [["a", "b", "c"], [0, ""]],
+ [["c", "d", "b"], [""]],
+ [[0, 1, 2], [1, 1, 1]],
+ [[0, 1, 2], [0, 0, 0]],
+ ["name1", "name2"],
+ ),
+ ],
+)
+def test_intersection_lexsort_depth(levels1, levels2, codes1, codes2, names):
+ # GH#25169
+ mi1 = MultiIndex(levels=levels1, codes=codes1, names=names)
+ mi2 = MultiIndex(levels=levels2, codes=codes2, names=names)
+ mi_int = mi1.intersection(mi2)
+
+ assert mi_int.lexsort_depth == 0
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 01dfeaef1a5a1..4c38a2219372d 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -25,6 +25,7 @@
IndexSlice,
MultiIndex,
Period,
+ PeriodIndex,
Series,
SparseDtype,
Timedelta,
@@ -2876,6 +2877,22 @@ def test_loc_set_int_dtype():
tm.assert_frame_equal(df, expected)
+def test_loc_periodindex_3_levels():
+ # GH#24091
+ p_index = PeriodIndex(
+ ["20181101 1100", "20181101 1200", "20181102 1300", "20181102 1400"],
+ name="datetime",
+ freq="B",
+ )
+ mi_series = DataFrame(
+ [["A", "B", 1.0], ["A", "C", 2.0], ["Z", "Q", 3.0], ["W", "F", 4.0]],
+ index=p_index,
+ columns=["ONE", "TWO", "VALUES"],
+ )
+ mi_series = mi_series.set_index(["ONE", "TWO"], append=True)["VALUES"]
+ assert mi_series.loc[(p_index[0], "A", "B")] == 1.0
+
+
class TestLocSeries:
@pytest.mark.parametrize("val,expected", [(2**63 - 1, 3), (2**63, 4)])
def test_loc_uint64(self, val, expected):
| - [x] closes #24091
- [x] closes #25169
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47420 | 2022-06-19T14:05:20Z | 2022-06-21T20:30:53Z | 2022-06-21T20:30:52Z | 2022-06-21T23:50:28Z |
BUG FIX: incorrect type when casting to nullable type in multiindex dataframe | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 49e5bc24786dd..c5d1ade924d15 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -11917,6 +11917,11 @@ def _reindex_for_setitem(value: DataFrame | Series, index: Index) -> ArrayLike:
# reindex if necessary
if value.index.equals(index) or not len(index):
+ if isinstance(value, DataFrame):
+ dtype_list = value.dtypes.unique()
+ if len(dtype_list) == 1:
+ dtype = dtype_list[0].name.lower()
+ return value._values.astype(dtype).copy()
return value._values.copy()
# GH#4107
diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py
index 08e15545cb998..bf74a52e81dd3 100644
--- a/pandas/tests/indexing/multiindex/test_multiindex.py
+++ b/pandas/tests/indexing/multiindex/test_multiindex.py
@@ -227,3 +227,17 @@ def test_multiindex_repeated_keys(self):
],
Series([1, 1, 2, 2], MultiIndex.from_arrays([["a", "a", "b", "b"]])),
)
+
+ @pytest.mark.parametrize("data_type", ["int64", "int32", "float64", "float32"])
+ def test_multiindex_dataframe_incorrect_type(self, data_type):
+ # GH 46896
+ df = DataFrame(
+ columns=MultiIndex.from_tuples([("a", "c"), ("a", "d")]),
+ data=[[1, 2], [3, 4]],
+ )
+ df["a"] = df["a"].astype(data_type)
+
+ result = df.dtypes
+ expected = Series(data=[data_type, data_type], index=[["a", "a"], ["c", "d"]])
+
+ tm.assert_series_equal(result, expected)
| - [ ] closes #46896
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47419 | 2022-06-19T13:15:14Z | 2022-08-16T10:51:20Z | null | 2022-08-16T10:57:46Z |
annotation, missing test case, perf DTA.mode | diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index dffe02ef15148..2b7f9b9659354 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -519,7 +519,7 @@ cdef ndarray[int64_t] _get_dst_hours(
trans_idx = mismatch.nonzero()[0]
if trans_idx.size == 1:
- # TODO: not reached in tests 2022-05-02; possible?
+ # see test_tz_localize_to_utc_ambiguous_infer
stamp = _render_tstamp(vals[trans_idx[0]], reso=reso)
raise pytz.AmbiguousTimeError(
f"Cannot infer dst time from {stamp} as there "
@@ -541,7 +541,7 @@ cdef ndarray[int64_t] _get_dst_hours(
delta = np.diff(result_a[grp])
if grp.size == 1 or np.all(delta > 0):
- # TODO: not reached in tests 2022-05-02; possible?
+ # see test_tz_localize_to_utc_ambiguous_infer
stamp = _render_tstamp(vals[grp[0]], reso=reso)
raise pytz.AmbiguousTimeError(stamp)
@@ -549,7 +549,7 @@ cdef ndarray[int64_t] _get_dst_hours(
# for standard
switch_idxs = (delta <= 0).nonzero()[0]
if switch_idxs.size > 1:
- # TODO: not reached in tests 2022-05-02; possible?
+ # see test_tz_localize_to_utc_ambiguous_infer
raise pytz.AmbiguousTimeError(
f"There are {switch_idxs.size} dst switches when "
"there should only be 1."
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 4b1d50c23c110..d577abd87bca3 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1088,12 +1088,13 @@ def _cmp_method(self, other, op):
@final
def _add_datetimelike_scalar(self, other):
- # Overridden by TimedeltaArray
if not is_timedelta64_dtype(self.dtype):
raise TypeError(
f"cannot add {type(self).__name__} and {type(other).__name__}"
)
+ self = cast("TimedeltaArray", self)
+
from pandas.core.arrays import DatetimeArray
assert other is not NaT
@@ -1111,7 +1112,7 @@ def _add_datetimelike_scalar(self, other):
return DatetimeArray(result, dtype=dtype, freq=self.freq)
@final
- def _add_datetime_arraylike(self, other):
+ def _add_datetime_arraylike(self, other) -> DatetimeArray:
if not is_timedelta64_dtype(self.dtype):
raise TypeError(
f"cannot add {type(self).__name__} and {type(other).__name__}"
@@ -1176,7 +1177,7 @@ def _sub_datetime_arraylike(self, other):
return new_values.view("timedelta64[ns]")
@final
- def _sub_period(self, other: Period):
+ def _sub_period(self, other: Period) -> npt.NDArray[np.object_]:
if not is_period_dtype(self.dtype):
raise TypeError(f"cannot subtract Period from a {type(self).__name__}")
@@ -1184,8 +1185,8 @@ def _sub_period(self, other: Period):
# of DateOffsets. Null entries are filled with pd.NaT
self._check_compatible_with(other)
asi8 = self.asi8
- new_data = asi8 - other.ordinal
- new_data = np.array([self.freq.base * x for x in new_data])
+ new_i8_data = asi8 - other.ordinal # TODO: checked_add_with_arr
+ new_data = np.array([self.freq.base * x for x in new_i8_data])
if self._hasna:
new_data[self._isnan] = NaT
@@ -1193,7 +1194,7 @@ def _sub_period(self, other: Period):
return new_data
@final
- def _add_period(self, other: Period):
+ def _add_period(self, other: Period) -> PeriodArray:
if not is_timedelta64_dtype(self.dtype):
raise TypeError(f"cannot add Period to a {type(self).__name__}")
@@ -1683,12 +1684,11 @@ def median(self, *, axis: int | None = None, skipna: bool = True, **kwargs):
return self._wrap_reduction_result(axis, result)
def _mode(self, dropna: bool = True):
- values = self
+ mask = None
if dropna:
- mask = values.isna()
- values = values[~mask]
+ mask = self.isna()
- i8modes = mode(values.view("i8"))
+ i8modes = mode(self.view("i8"), mask=mask)
npmodes = i8modes.view(self._ndarray.dtype)
npmodes = cast(np.ndarray, npmodes)
return self._from_backing_data(npmodes)
diff --git a/pandas/tests/tslibs/test_tzconversion.py b/pandas/tests/tslibs/test_tzconversion.py
new file mode 100644
index 0000000000000..c1a56ffb71b02
--- /dev/null
+++ b/pandas/tests/tslibs/test_tzconversion.py
@@ -0,0 +1,23 @@
+import numpy as np
+import pytest
+import pytz
+
+from pandas._libs.tslibs.tzconversion import tz_localize_to_utc
+
+
+class TestTZLocalizeToUTC:
+ def test_tz_localize_to_utc_ambiguous_infer(self):
+ # val is a timestamp that is ambiguous when localized to US/Eastern
+ val = 1_320_541_200_000_000_000
+ vals = np.array([val, val - 1, val], dtype=np.int64)
+
+ with pytest.raises(pytz.AmbiguousTimeError, match="2011-11-06 01:00:00"):
+ tz_localize_to_utc(vals, pytz.timezone("US/Eastern"), ambiguous="infer")
+
+ with pytest.raises(pytz.AmbiguousTimeError, match="are no repeated times"):
+ tz_localize_to_utc(vals[:1], pytz.timezone("US/Eastern"), ambiguous="infer")
+
+ vals[1] += 1
+ msg = "There are 2 dst switches when there should only be 1"
+ with pytest.raises(pytz.AmbiguousTimeError, match=msg):
+ tz_localize_to_utc(vals, pytz.timezone("US/Eastern"), ambiguous="infer")
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47418 | 2022-06-19T03:08:17Z | 2022-06-21T19:07:52Z | 2022-06-21T19:07:52Z | 2022-06-21T20:50:35Z |
REF: GH38174 - Refactoring DataFrame.combine_first | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 39a940169e1f3..403d21ed29cb1 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7822,12 +7822,10 @@ def combine_first(self, other: DataFrame) -> DataFrame:
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
- import pandas.core.computation.expressions as expressions
def combiner(x, y):
mask = extract_array(isna(x))
- x_values = extract_array(x, extract_numpy=True)
y_values = extract_array(y, extract_numpy=True)
# If the column y in other DataFrame is not in first DataFrame,
@@ -7835,7 +7833,9 @@ def combiner(x, y):
if y.name not in self.columns:
return y_values
- return expressions.where(mask, y_values, x_values)
+ values = self._mgr.where(y_values, mask, align=True)
+
+ return self._constructor(data=values)
combined = self.combine(other, combiner, overwrite=False)
| - [ ] closes #38174
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47417 | 2022-06-19T02:08:22Z | 2022-08-01T18:46:22Z | null | 2022-08-01T19:11:19Z |
CI: remove comment-bot | diff --git a/.github/workflows/comment_bot.yml b/.github/workflows/comment_bot.yml
deleted file mode 100644
index 3824e015e8336..0000000000000
--- a/.github/workflows/comment_bot.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-name: Comment-bot
-
-on:
- issue_comment:
- types:
- - created
- - edited
-
-jobs:
- autotune:
- name: "Fixup pre-commit formatting"
- if: startsWith(github.event.comment.body, '@github-actions pre-commit')
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - uses: r-lib/actions/pr-fetch@v2
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
- - name: Cache multiple paths
- uses: actions/cache@v3
- with:
- path: |
- ~/.cache/pre-commit
- ~/.cache/pip
- key: pre-commit-dispatched-${{ runner.os }}-build
- - uses: actions/setup-python@v3
- with:
- python-version: 3.8
- - name: Install-pre-commit
- run: python -m pip install --upgrade pre-commit
- - name: Run pre-commit
- run: pre-commit run --from-ref=origin/main --to-ref=HEAD --all-files || (exit 0)
- - name: Commit results
- run: |
- git config user.name "$(git log -1 --pretty=format:%an)"
- git config user.email "$(git log -1 --pretty=format:%ae)"
- git commit -a -m 'Fixes from pre-commit [automated commit]' || echo "No changes to commit"
- - uses: r-lib/actions/pr-push@v2
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 1d745d21dacae..e76197e302ca4 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -326,13 +326,7 @@ Autofixing formatting errors
----------------------------
We use several styling checks (e.g. ``black``, ``flake8``, ``isort``) which are run after
-you make a pull request. If there is a scenario where any of these checks fail then you
-can comment::
-
- @github-actions pre-commit
-
-on that pull request. This will trigger a workflow which will autofix formatting
-errors.
+you make a pull request.
To automatically fix formatting errors on each commit you make, you can
set up pre-commit yourself. First, create a Python :ref:`environment
| as discussed | https://api.github.com/repos/pandas-dev/pandas/pulls/47416 | 2022-06-18T08:56:34Z | 2022-06-21T17:45:51Z | 2022-06-21T17:45:51Z | 2022-06-21T17:45:58Z |
BUG: .fillna(-1) does not work in pd.eval | diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index 3a556b57ea5a5..cd5096f405770 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -555,6 +555,7 @@ class UnaryOp(Op):
def __init__(self, op: str, operand) -> None:
super().__init__(op, (operand,))
self.operand = operand
+ self.value = operand
try:
self.func = _unary_ops_dict[op]
| - [x] closes #46471
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47415 | 2022-06-18T07:19:39Z | 2022-07-21T17:20:51Z | null | 2022-07-23T05:53:00Z |
BUG: iterparse of read_xml not parsing duplicate element and attribute names | diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index 181b0fe115f4c..78fbeaad09300 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -413,11 +413,21 @@ def _iterparse_nodes(self) -> list[dict[str, str | None]]:
row = {}
if row is not None:
- for col in self.iterparse[row_node]:
- if curr_elem == col:
- row[col] = elem.text.strip() if elem.text else None
- if col in elem.attrib:
- row[col] = elem.attrib[col]
+ if self.names:
+ for col, nm in zip(self.iterparse[row_node], self.names):
+ if curr_elem == col:
+ elem_val = elem.text.strip() if elem.text else None
+ if elem_val not in row.values() and nm not in row:
+ row[nm] = elem_val
+ if col in elem.attrib:
+ if elem.attrib[col] not in row.values() and nm not in row:
+ row[nm] = elem.attrib[col]
+ else:
+ for col in self.iterparse[row_node]:
+ if curr_elem == col:
+ row[col] = elem.text.strip() if elem.text else None
+ if col in elem.attrib:
+ row[col] = elem.attrib[col]
if event == "end":
if curr_elem == row_node and row is not None:
@@ -661,11 +671,21 @@ def _iterparse_nodes(self) -> list[dict[str, str | None]]:
row = {}
if row is not None:
- for col in self.iterparse[row_node]:
- if curr_elem == col:
- row[col] = elem.text.strip() if elem.text else None
- if col in elem.attrib:
- row[col] = elem.attrib[col]
+ if self.names:
+ for col, nm in zip(self.iterparse[row_node], self.names):
+ if curr_elem == col:
+ elem_val = elem.text.strip() if elem.text else None
+ if elem_val not in row.values() and nm not in row:
+ row[nm] = elem_val
+ if col in elem.attrib:
+ if elem.attrib[col] not in row.values() and nm not in row:
+ row[nm] = elem.attrib[col]
+ else:
+ for col in self.iterparse[row_node]:
+ if curr_elem == col:
+ row[col] = elem.text.strip() if elem.text else None
+ if col in elem.attrib:
+ row[col] = elem.attrib[col]
if event == "end":
if curr_elem == row_node and row is not None:
@@ -1020,7 +1040,8 @@ def read_xml(
names : list-like, optional
Column names for DataFrame of parsed XML data. Use this parameter to
- rename original element names and distinguish same named elements.
+ rename original element names and distinguish same named elements and
+ attributes.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
index 277b6442a0a8c..eb2230bbf7fd5 100644
--- a/pandas/tests/io/xml/test_xml.py
+++ b/pandas/tests/io/xml/test_xml.py
@@ -789,6 +789,41 @@ def test_names_option_output(datapath, parser):
tm.assert_frame_equal(df_iter, df_expected)
+def test_repeat_names(parser):
+ xml = """\
+<shapes>
+ <shape type="2D">
+ <name>circle</name>
+ <type>curved</type>
+ </shape>
+ <shape type="3D">
+ <name>sphere</name>
+ <type>curved</type>
+ </shape>
+</shapes>"""
+ df_xpath = read_xml(
+ xml, xpath=".//shape", parser=parser, names=["type_dim", "shape", "type_edge"]
+ )
+
+ df_iter = read_xml_iterparse(
+ xml,
+ parser=parser,
+ iterparse={"shape": ["type", "name", "type"]},
+ names=["type_dim", "shape", "type_edge"],
+ )
+
+ df_expected = DataFrame(
+ {
+ "type_dim": ["2D", "3D"],
+ "shape": ["circle", "sphere"],
+ "type_edge": ["curved", "curved"],
+ }
+ )
+
+ tm.assert_frame_equal(df_xpath, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
+
+
def test_names_option_wrong_length(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
| - [X] closes #47343 (Replace xxxx with the Github issue number)
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47414 | 2022-06-18T04:29:50Z | 2022-06-21T23:10:46Z | 2022-06-21T23:10:45Z | 2022-06-22T03:09:13Z |
CLN: Remove .github markdown files | diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md
deleted file mode 100644
index 87a5b7905fc6d..0000000000000
--- a/.github/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Contributor Code of Conduct
-
-As contributors and maintainers of this project, and in the interest of
-fostering an open and welcoming community, we pledge to respect all people who
-contribute through reporting issues, posting feature requests, updating
-documentation, submitting pull requests or patches, and other activities.
-
-We are committed to making participation in this project a harassment-free
-experience for everyone, regardless of level of experience, gender, gender
-identity and expression, sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing other's private information, such as physical or electronic
- addresses, without explicit permission
-* Other unethical or unprofessional conduct
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct, or to ban temporarily or
-permanently any contributor for other behaviors that they deem inappropriate,
-threatening, offensive, or harmful.
-
-By adopting this Code of Conduct, project maintainers commit themselves to
-fairly and consistently applying these principles to every aspect of managing
-this project. Project maintainers who do not follow or enforce the Code of
-Conduct may be permanently removed from the project team.
-
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-A working group of community members is committed to promptly addressing any
-reported issues. The working group is made up of pandas contributors and users.
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting the working group by e-mail (pandas-coc@googlegroups.com).
-Messages sent to this e-mail address will not be publicly visible but only to
-the working group members. The working group currently includes
-
-- Safia Abdalla
-- Tom Augspurger
-- Joris Van den Bossche
-- Camille Scott
-- Nathaniel Smith
-
-All complaints will be reviewed and investigated and will result in a response
-that is deemed necessary and appropriate to the circumstances. Maintainers are
-obligated to maintain confidentiality with regard to the reporter of an
-incident.
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage],
-version 1.3.0, available at
-[https://www.contributor-covenant.org/version/1/3/0/][version],
-and the [Swift Code of Conduct][swift].
-
-[homepage]: https://www.contributor-covenant.org
-[version]: https://www.contributor-covenant.org/version/1/3/0/
-[swift]: https://swift.org/community/#code-of-conduct
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
deleted file mode 100644
index d27eab5b9c95c..0000000000000
--- a/.github/CONTRIBUTING.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Contributing to pandas
-
-A detailed overview on how to contribute can be found in the **[contributing guide](https://pandas.pydata.org/docs/dev/development/contributing.html)**.
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
deleted file mode 100644
index 27dfded808b95..0000000000000
--- a/.github/FUNDING.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-custom: https://pandas.pydata.org/donate.html
-github: [numfocus]
-tidelift: pypi/pandas
diff --git a/.github/SECURITY.md b/.github/SECURITY.md
deleted file mode 100644
index f3b059a5d4f13..0000000000000
--- a/.github/SECURITY.md
+++ /dev/null
@@ -1 +0,0 @@
-To report a security vulnerability to pandas, please go to https://tidelift.com/security and see the instructions there.
| - [x] closes #27901 (Replace xxxx with the Github issue number)
These files were moved in https://github.com/pandas-dev/.github/pull/1 | https://api.github.com/repos/pandas-dev/pandas/pulls/47412 | 2022-06-18T01:40:05Z | 2022-06-18T15:19:54Z | 2022-06-18T15:19:54Z | 2022-06-18T18:39:46Z |
REF: Avoid ravel in DTA._format_native_types | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index f94314297dc62..dc7504b1073f5 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -105,7 +105,7 @@ def _test_parse_iso8601(ts: str):
@cython.wraparound(False)
@cython.boundscheck(False)
def format_array_from_datetime(
- ndarray[int64_t] values,
+ ndarray values,
tzinfo tz=None,
str format=None,
object na_rep=None,
@@ -129,14 +129,21 @@ def format_array_from_datetime(
np.ndarray[object]
"""
cdef:
- int64_t val, ns, N = len(values)
+ int64_t val, ns, N = values.size
bint show_ms = False, show_us = False, show_ns = False
bint basic_format = False
- ndarray[object] result = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
_Timestamp ts
- str res
+ object res
npy_datetimestruct dts
+ # Note that `result` (and thus `result_flat`) is C-order and
+ # `it` iterates C-order as well, so the iteration matches
+ # See discussion at
+ # github.com/pandas-dev/pandas/pull/46886#discussion_r860261305
+ ndarray result = cnp.PyArray_EMPTY(values.ndim, values.shape, cnp.NPY_OBJECT, 0)
+ object[::1] res_flat = result.ravel() # should NOT be a copy
+ cnp.flatiter it = cnp.PyArray_IterNew(values)
+
if na_rep is None:
na_rep = 'NaT'
@@ -150,10 +157,11 @@ def format_array_from_datetime(
show_ms = reso_obj == Resolution.RESO_MS
for i in range(N):
- val = values[i]
+ # Analogous to: utc_val = values[i]
+ val = (<int64_t*>cnp.PyArray_ITER_DATA(it))[0]
if val == NPY_NAT:
- result[i] = na_rep
+ res = na_rep
elif basic_format:
pandas_datetime_to_datetimestruct(val, reso, &dts)
@@ -168,22 +176,31 @@ def format_array_from_datetime(
elif show_ms:
res += f'.{dts.us // 1000:03d}'
- result[i] = res
else:
ts = Timestamp._from_value_and_reso(val, reso=reso, tz=tz)
if format is None:
- result[i] = str(ts)
+ res = str(ts)
else:
# invalid format string
# requires dates > 1900
try:
# Note: dispatches to pydatetime
- result[i] = ts.strftime(format)
+ res = ts.strftime(format)
except ValueError:
- result[i] = str(ts)
+ res = str(ts)
+
+ # Note: we can index result directly instead of using PyArray_MultiIter_DATA
+ # like we do for the other functions because result is known C-contiguous
+ # and is the first argument to PyArray_MultiIterNew2. The usual pattern
+ # does not seem to work with object dtype.
+ # See discussion at
+ # github.com/pandas-dev/pandas/pull/46886#discussion_r860261305
+ res_flat[i] = res
+
+ cnp.PyArray_ITER_NEXT(it)
return result
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 6ecb89b02afe3..5f060542526d3 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -687,7 +687,6 @@ def astype(self, dtype, copy: bool = True):
# -----------------------------------------------------------------
# Rendering Methods
- @dtl.ravel_compat
def _format_native_types(
self, *, na_rep="NaT", date_format=None, **kwargs
) -> npt.NDArray[np.object_]:
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47411 | 2022-06-18T01:02:34Z | 2022-06-18T04:12:47Z | 2022-06-18T04:12:47Z | 2022-06-18T17:34:04Z |
TYP: read_sas | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index a5cb716317689..c35f01470763b 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -461,6 +461,7 @@ Other API changes
October 2022. (:issue:`46312`)
- :func:`read_json` now raises ``FileNotFoundError`` (previously ``ValueError``) when input is a string ending in ``.json``, ``.json.gz``, ``.json.bz2``, etc. but no such file exists. (:issue:`29102`)
- Operations with :class:`Timestamp` or :class:`Timedelta` that would previously raise ``OverflowError`` instead raise ``OutOfBoundsDatetime`` or ``OutOfBoundsTimedelta`` where appropriate (:issue:`47268`)
+- When :func:`read_sas` previously returned ``None``, it now returns an empty :class:`DataFrame` (:issue:`47410`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index a992c1af5ddaf..5298178b4efcd 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -163,12 +163,12 @@ def __init__(
self,
path_or_buf: FilePath | ReadBuffer[bytes],
index=None,
- convert_dates=True,
- blank_missing=True,
- chunksize=None,
- encoding=None,
- convert_text=True,
- convert_header_text=True,
+ convert_dates: bool = True,
+ blank_missing: bool = True,
+ chunksize: int | None = None,
+ encoding: str | None = None,
+ convert_text: bool = True,
+ convert_header_text: bool = True,
compression: CompressionOptions = "infer",
) -> None:
@@ -361,9 +361,9 @@ def _get_properties(self) -> None:
self.encoding or self.default_encoding
)
- def __next__(self):
+ def __next__(self) -> DataFrame:
da = self.read(nrows=self.chunksize or 1)
- if da is None:
+ if da.empty:
self.close()
raise StopIteration
return da
@@ -732,7 +732,7 @@ def _process_format_subheader(self, offset: int, length: int) -> None:
self.column_formats.append(column_format)
self.columns.append(col)
- def read(self, nrows: int | None = None) -> DataFrame | None:
+ def read(self, nrows: int | None = None) -> DataFrame:
if (nrows is None) and (self.chunksize is not None):
nrows = self.chunksize
@@ -744,7 +744,7 @@ def read(self, nrows: int | None = None) -> DataFrame | None:
raise EmptyDataError("No columns to parse from file")
if nrows > 0 and self._current_row_in_file_index >= self.row_count:
- return None
+ return DataFrame()
m = self.row_count - self._current_row_in_file_index
if nrows > m:
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index db09983cacfbc..500e88eb0ef76 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -280,7 +280,7 @@ def __init__(
self.close()
raise
- def close(self):
+ def close(self) -> None:
self.handles.close()
def _get_row(self):
@@ -463,7 +463,7 @@ def _missing_double(self, vec):
return miss
@Appender(_read_method_doc)
- def read(self, nrows=None):
+ def read(self, nrows: int | None = None) -> pd.DataFrame:
if nrows is None:
nrows = self.nobs
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index ff50df886e627..052e674d1a488 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -38,17 +38,17 @@ class ReaderBase(metaclass=ABCMeta):
"""
@abstractmethod
- def read(self, nrows=None):
+ def read(self, nrows: int | None = None) -> DataFrame:
pass
@abstractmethod
- def close(self):
+ def close(self) -> None:
pass
- def __enter__(self):
+ def __enter__(self) -> ReaderBase:
return self
- def __exit__(self, exc_type, exc_value, traceback):
+ def __exit__(self, exc_type, exc_value, traceback) -> None:
self.close()
diff --git a/pyright_reportGeneralTypeIssues.json b/pyright_reportGeneralTypeIssues.json
index f6f9bed1af065..83d76e752a057 100644
--- a/pyright_reportGeneralTypeIssues.json
+++ b/pyright_reportGeneralTypeIssues.json
@@ -105,8 +105,7 @@
"pandas/io/parsers/base_parser.py",
"pandas/io/parsers/c_parser_wrapper.py",
"pandas/io/pytables.py",
- "pandas/io/sas/sas7bdat.py",
- "pandas/io/sas/sasreader.py",
+ "pandas/io/sas/sas_xport.py",
"pandas/io/sql.py",
"pandas/io/stata.py",
"pandas/io/xml.py",
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/47410 | 2022-06-18T00:52:06Z | 2022-06-24T21:30:55Z | 2022-06-24T21:30:55Z | 2022-09-21T15:29:51Z |
BUG: iterparse of read_xml not parsing duplicate element and attribute names | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index f11b6af24e4e4..a9219cd811b6f 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -3287,6 +3287,45 @@ output (as shown below for demonstration) for easier parse into ``DataFrame``:
df = pd.read_xml(xml, stylesheet=xsl)
df
+For very large XML files that can range in hundreds of megabytes to gigabytes, :func:`pandas.read_xml`
+supports parsing such sizeable files using `lxml's iterparse`_ and `etree's iterparse`_
+which are memory-efficient methods to iterate through an XML tree and extract specific elements and attributes.
+without holding entire tree in memory.
+
+ .. versionadded:: 1.5.0
+
+.. _`lxml's iterparse`: https://lxml.de/3.2/parsing.html#iterparse-and-iterwalk
+.. _`etree's iterparse`: https://docs.python.org/3/library/xml.etree.elementtree.html#xml.etree.ElementTree.iterparse
+
+To use this feature, you must pass a physical XML file path into ``read_xml`` and use the ``iterparse`` argument.
+Files should not be compressed or point to online sources but stored on local disk. Also, ``iterparse`` should be
+a dictionary where the key is the repeating nodes in document (which become the rows) and the value is a list of
+any element or attribute that is a descendant (i.e., child, grandchild) of repeating node. Since XPath is not
+used in this method, descendants do not need to share same relationship with one another. Below shows example
+of reading in Wikipedia's very large (12 GB+) latest article data dump.
+
+.. code-block:: ipython
+
+ In [1]: df = pd.read_xml(
+ ... "/path/to/downloaded/enwikisource-latest-pages-articles.xml",
+ ... iterparse = {"page": ["title", "ns", "id"]}
+ ... )
+ ... df
+ Out[2]:
+ title ns id
+ 0 Gettysburg Address 0 21450
+ 1 Main Page 0 42950
+ 2 Declaration by United Nations 0 8435
+ 3 Constitution of the United States of America 0 8435
+ 4 Declaration of Independence (Israel) 0 17858
+ ... ... ... ...
+ 3578760 Page:Black cat 1897 07 v2 n10.pdf/17 104 219649
+ 3578761 Page:Black cat 1897 07 v2 n10.pdf/43 104 219649
+ 3578762 Page:Black cat 1897 07 v2 n10.pdf/44 104 219649
+ 3578763 The History of Tom Jones, a Foundling/Book IX 0 12084291
+ 3578764 Page:Shakespeare of Stratford (1926) Yale.djvu/91 104 21450
+
+ [3578765 rows x 3 columns]
.. _io.xml:
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index c4a760efd9a40..9f119afe0e623 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -162,6 +162,43 @@ apply converter methods, and parse dates (:issue:`43567`).
df
df.dtypes
+.. _whatsnew_150.read_xml_iterparse:
+
+read_xml now supports large XML using ``iterparse``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+For very large XML files that can range in hundreds of megabytes to gigabytes, :func:`pandas.read_xml`
+now supports parsing such sizeable files using `lxml's iterparse`_ and `etree's iterparse`_
+which are memory-efficient methods to iterate through XML trees and extract specific elements
+and attributes without holding entire tree in memory (:issue:`#45442`).
+
+.. code-block:: ipython
+
+ In [1]: df = pd.read_xml(
+ ... "/path/to/downloaded/enwikisource-latest-pages-articles.xml",
+ ... iterparse = {"page": ["title", "ns", "id"]})
+ ... )
+ df
+ Out[2]:
+ title ns id
+ 0 Gettysburg Address 0 21450
+ 1 Main Page 0 42950
+ 2 Declaration by United Nations 0 8435
+ 3 Constitution of the United States of America 0 8435
+ 4 Declaration of Independence (Israel) 0 17858
+ ... ... ... ...
+ 3578760 Page:Black cat 1897 07 v2 n10.pdf/17 104 219649
+ 3578761 Page:Black cat 1897 07 v2 n10.pdf/43 104 219649
+ 3578762 Page:Black cat 1897 07 v2 n10.pdf/44 104 219649
+ 3578763 The History of Tom Jones, a Foundling/Book IX 0 12084291
+ 3578764 Page:Shakespeare of Stratford (1926) Yale.djvu/91 104 21450
+
+ [3578765 rows x 3 columns]
+
+
+.. _`lxml's iterparse`: https://lxml.de/3.2/parsing.html#iterparse-and-iterwalk
+.. _`etree's iterparse`: https://docs.python.org/3/library/xml.etree.elementtree.html#xml.etree.ElementTree.iterparse
+
.. _whatsnew_150.api_breaking.api_breaking2:
api_breaking_change2
@@ -439,6 +476,7 @@ I/O
- Bug in :func:`DataFrame.to_excel` and :class:`ExcelWriter` would raise when writing an empty DataFrame to a ``.ods`` file (:issue:`45793`)
- Bug in Parquet roundtrip for Interval dtype with ``datetime64[ns]`` subtype (:issue:`45881`)
- Bug in :func:`read_excel` when reading a ``.ods`` file with newlines between xml elements(:issue:`45598`)
+- Bug in :func:`read_xml` when reading XML with duplicate element and attribute names (:issue:`47343`)
Period
^^^^^^
diff --git a/pandas/io/xml.py b/pandas/io/xml.py
index 3e4a54fe19032..cc008e7eef6f4 100644
--- a/pandas/io/xml.py
+++ b/pandas/io/xml.py
@@ -5,7 +5,10 @@
from __future__ import annotations
import io
-from typing import Sequence
+from typing import (
+ Any,
+ Sequence,
+)
from pandas._typing import (
CompressionOptions,
@@ -35,6 +38,7 @@
from pandas.io.common import (
file_exists,
get_handle,
+ infer_compression,
is_fsspec_url,
is_url,
stringify_path,
@@ -97,6 +101,13 @@ class _XMLFrameParser:
URL, file, file-like object, or a raw string containing XSLT,
`etree` does not support XSLT but retained for consistency.
+ iterparse : dict, optional
+ Dict with row element as key and list of descendant elements
+ and/or attributes as value to be retrieved in iterparsing of
+ XML document.
+
+ .. versionadded:: 1.5.0
+
{decompression_options}
.. versionchanged:: 1.4.0 Zstandard support.
@@ -113,6 +124,7 @@ class _XMLFrameParser:
To subclass this class effectively you must override the following methods:`
* :func:`parse_data`
* :func:`_parse_nodes`
+ * :func:`_iterparse_nodes`
* :func:`_parse_doc`
* :func:`_validate_names`
* :func:`_validate_path`
@@ -135,6 +147,7 @@ def __init__(
parse_dates: ParseDatesArg | None,
encoding: str | None,
stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None,
+ iterparse: dict[str, list[str]] | None,
compression: CompressionOptions,
storage_options: StorageOptions,
) -> None:
@@ -149,6 +162,7 @@ def __init__(
self.parse_dates = parse_dates
self.encoding = encoding
self.stylesheet = stylesheet
+ self.iterparse = iterparse
self.is_style = None
self.compression = compression
self.storage_options = storage_options
@@ -178,9 +192,34 @@ def _parse_nodes(self) -> list[dict[str, str | None]]:
Notes
-----
- Namespace URIs will be removed from return node values.Also,
+ Namespace URIs will be removed from return node values. Also,
elements with missing children or attributes compared to siblings
- will have optional keys filled withi None values.
+ will have optional keys filled with None values.
+ """
+
+ raise AbstractMethodError(self)
+
+ def _iterparse_nodes(self) -> list[dict[str, str | None]]:
+ """
+ Iterparse xml nodes.
+
+ This method will read in local disk, decompressed XML files for elements
+ and underlying descendants using iterparse, a method to iterate through
+ an XML tree without holding entire XML tree in memory.
+
+ Raises
+ ------
+ TypeError
+ * If `iterparse` is not a dict or its dict value is not list-like.
+ ParserError
+ * If `path_or_buffer` is not a physical, decompressed file on disk.
+ * If no data is returned from selected items in `iterparse`.
+
+ Notes
+ -----
+ Namespace URIs will be removed from return node values. Also,
+ elements with missing children or attributes in submitted list
+ will have optional keys filled with None values.
"""
raise AbstractMethodError(self)
@@ -240,12 +279,17 @@ def parse_data(self) -> list[dict[str, str | None]]:
"To use stylesheet, you need lxml installed and selected as parser."
)
- self.xml_doc = XML(self._parse_doc(self.path_or_buffer))
+ if self.iterparse is None:
+ self.xml_doc = XML(self._parse_doc(self.path_or_buffer))
+ self._validate_path()
- self._validate_path()
self._validate_names()
- return self._parse_nodes()
+ xml_dicts: list[dict[str, str | None]] = (
+ self._parse_nodes() if self.iterparse is None else self._iterparse_nodes()
+ )
+
+ return xml_dicts
def _parse_nodes(self) -> list[dict[str, str | None]]:
elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces)
@@ -331,6 +375,77 @@ def _parse_nodes(self) -> list[dict[str, str | None]]:
return dicts
+ def _iterparse_nodes(self) -> list[dict[str, str | None]]:
+ from xml.etree.ElementTree import iterparse
+
+ dicts: list[dict[str, str | None]] = []
+ row: dict[str, str | None] | None = None
+
+ if not isinstance(self.iterparse, dict):
+ raise TypeError(
+ f"{type(self.iterparse).__name__} is not a valid type for iterparse"
+ )
+
+ row_node = next(iter(self.iterparse.keys())) if self.iterparse else ""
+ if not is_list_like(self.iterparse[row_node]):
+ raise TypeError(
+ f"{type(self.iterparse[row_node])} is not a valid type "
+ "for value in iterparse"
+ )
+
+ if (
+ not isinstance(self.path_or_buffer, str)
+ or is_url(self.path_or_buffer)
+ or is_fsspec_url(self.path_or_buffer)
+ or self.path_or_buffer.startswith(("<?xml", "<"))
+ or infer_compression(self.path_or_buffer, "infer") is not None
+ ):
+ raise ParserError(
+ "iterparse is designed for large XML files that are fully extracted on "
+ "local disk and not as compressed files or online sources."
+ )
+
+ for event, elem in iterparse(self.path_or_buffer, events=("start", "end")):
+ curr_elem = elem.tag.split("}")[1] if "}" in elem.tag else elem.tag
+
+ if event == "start":
+ if curr_elem == row_node:
+ row = {}
+
+ if row is not None:
+ if self.names:
+ for col, nm in zip(self.iterparse[row_node], self.names):
+ if curr_elem == col:
+ elem_val = elem.text.strip() if elem.text else None
+ if elem_val not in row.values() and nm not in row:
+ row[nm] = elem_val
+ if col in elem.attrib:
+ if elem.attrib[col] not in row.values() and nm not in row:
+ row[nm] = elem.attrib[col]
+ else:
+ for col in self.iterparse[row_node]:
+ if curr_elem == col:
+ row[col] = elem.text.strip() if elem.text else None
+ if col in elem.attrib:
+ row[col] = elem.attrib[col]
+
+ if event == "end":
+ if curr_elem == row_node and row is not None:
+ dicts.append(row)
+ row = None
+ elem.clear()
+
+ if dicts == []:
+ raise ParserError("No result from selected items in iterparse.")
+
+ keys = list(dict.fromkeys([k for d in dicts for k in d.keys()]))
+ dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts]
+
+ if self.names:
+ dicts = [{nm: v for nm, v in zip(self.names, d.values())} for d in dicts]
+
+ return dicts
+
def _validate_path(self) -> None:
"""
Notes
@@ -361,9 +476,14 @@ def _validate_path(self) -> None:
)
def _validate_names(self) -> None:
+ children: list[Any]
+
if self.names:
- parent = self.xml_doc.find(self.xpath, namespaces=self.namespaces)
- children = parent.findall("*") if parent else []
+ if self.iterparse:
+ children = self.iterparse[next(iter(self.iterparse))]
+ else:
+ parent = self.xml_doc.find(self.xpath, namespaces=self.namespaces)
+ children = parent.findall("*") if parent else []
if is_list_like(self.names):
if len(self.names) < len(children):
@@ -413,16 +533,22 @@ def parse_data(self) -> list[dict[str, str | None]]:
"""
from lxml.etree import XML
- self.xml_doc = XML(self._parse_doc(self.path_or_buffer))
+ if self.iterparse is None:
+ self.xml_doc = XML(self._parse_doc(self.path_or_buffer))
- if self.stylesheet is not None:
- self.xsl_doc = XML(self._parse_doc(self.stylesheet))
- self.xml_doc = XML(self._transform_doc())
+ if self.stylesheet:
+ self.xsl_doc = XML(self._parse_doc(self.stylesheet))
+ self.xml_doc = XML(self._transform_doc())
+
+ self._validate_path()
- self._validate_path()
self._validate_names()
- return self._parse_nodes()
+ xml_dicts: list[dict[str, str | None]] = (
+ self._parse_nodes() if self.iterparse is None else self._iterparse_nodes()
+ )
+
+ return xml_dicts
def _parse_nodes(self) -> list[dict[str, str | None]]:
elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces)
@@ -507,6 +633,80 @@ def _parse_nodes(self) -> list[dict[str, str | None]]:
return dicts
+ def _iterparse_nodes(self) -> list[dict[str, str | None]]:
+ from lxml.etree import iterparse
+
+ dicts: list[dict[str, str | None]] = []
+ row: dict[str, str | None] | None = None
+
+ if not isinstance(self.iterparse, dict):
+ raise TypeError(
+ f"{type(self.iterparse).__name__} is not a valid type for iterparse"
+ )
+
+ row_node = next(iter(self.iterparse.keys())) if self.iterparse else ""
+ if not is_list_like(self.iterparse[row_node]):
+ raise TypeError(
+ f"{type(self.iterparse[row_node])} is not a valid type "
+ "for value in iterparse"
+ )
+
+ if (
+ not isinstance(self.path_or_buffer, str)
+ or is_url(self.path_or_buffer)
+ or is_fsspec_url(self.path_or_buffer)
+ or self.path_or_buffer.startswith(("<?xml", "<"))
+ or infer_compression(self.path_or_buffer, "infer") is not None
+ ):
+ raise ParserError(
+ "iterparse is designed for large XML files that are fully extracted on "
+ "local disk and not as compressed files or online sources."
+ )
+
+ for event, elem in iterparse(self.path_or_buffer, events=("start", "end")):
+ curr_elem = elem.tag.split("}")[1] if "}" in elem.tag else elem.tag
+
+ if event == "start":
+ if curr_elem == row_node:
+ row = {}
+
+ if row is not None:
+ if self.names:
+ for col, nm in zip(self.iterparse[row_node], self.names):
+ if curr_elem == col:
+ elem_val = elem.text.strip() if elem.text else None
+ if elem_val not in row.values() and nm not in row:
+ row[nm] = elem_val
+ if col in elem.attrib:
+ if elem.attrib[col] not in row.values() and nm not in row:
+ row[nm] = elem.attrib[col]
+ else:
+ for col in self.iterparse[row_node]:
+ if curr_elem == col:
+ row[col] = elem.text.strip() if elem.text else None
+ if col in elem.attrib:
+ row[col] = elem.attrib[col]
+
+ if event == "end":
+ if curr_elem == row_node and row is not None:
+ dicts.append(row)
+ row = None
+
+ elem.clear()
+ while elem.getprevious() is not None:
+ del elem.getparent()[0]
+
+ if dicts == []:
+ raise ParserError("No result from selected items in iterparse.")
+
+ keys = list(dict.fromkeys([k for d in dicts for k in d.keys()]))
+ dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts]
+
+ if self.names:
+ dicts = [{nm: v for nm, v in zip(self.names, d.values())} for d in dicts]
+
+ return dicts
+
def _validate_path(self) -> None:
msg = (
@@ -528,21 +728,15 @@ def _validate_path(self) -> None:
raise ValueError(msg)
def _validate_names(self) -> None:
- """
- Validate names.
-
- This method will check if names is a list and aligns with
- length of parse nodes.
+ children: list[Any]
- Raises
- ------
- ValueError
- * If value is not a list and less then length of nodes.
- """
if self.names:
- children = self.xml_doc.xpath(
- self.xpath + "[1]/*", namespaces=self.namespaces
- )
+ if self.iterparse:
+ children = self.iterparse[next(iter(self.iterparse))]
+ else:
+ children = self.xml_doc.xpath(
+ self.xpath + "[1]/*", namespaces=self.namespaces
+ )
if is_list_like(self.names):
if len(self.names) < len(children):
@@ -704,6 +898,7 @@ def _parse(
encoding: str | None,
parser: XMLParsers,
stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None,
+ iterparse: dict[str, list[str]] | None,
compression: CompressionOptions,
storage_options: StorageOptions,
**kwargs,
@@ -741,6 +936,7 @@ def _parse(
parse_dates,
encoding,
stylesheet,
+ iterparse,
compression,
storage_options,
)
@@ -760,6 +956,7 @@ def _parse(
parse_dates,
encoding,
stylesheet,
+ iterparse,
compression,
storage_options,
)
@@ -798,6 +995,7 @@ def read_xml(
encoding: str | None = "utf-8",
parser: XMLParsers = "lxml",
stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None = None,
+ iterparse: dict[str, list[str]] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> DataFrame:
@@ -842,7 +1040,8 @@ def read_xml(
names : list-like, optional
Column names for DataFrame of parsed XML data. Use this parameter to
- rename original element names and distinguish same named elements.
+ rename original element names and distinguish same named elements or
+ attributes.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
@@ -890,6 +1089,20 @@ def read_xml(
transformation and not the original XML document. Only XSLT 1.0
scripts and not later versions is currently supported.
+ iterparse : dict, optional
+ The nodes or attributes to retrieve in iterparsing of XML document
+ as a dict with key being the name of repeating element and value being
+ list of elements or attribute names that are descendants of the repeated
+ element. Note: If this option is used, it will replace ``xpath`` parsing
+ and unlike xpath, descendants do not need to relate to each other but can
+ exist any where in document under the repeating element. This memory-
+ efficient method should be used for very large XML files (500MB, 1GB, or 5GB+).
+ For example, ::
+
+ iterparse = {{"row_element": ["child_elem", "attr", "grandchild_elem"]}}
+
+ .. versionadded:: 1.5.0
+
{decompression_options}
.. versionchanged:: 1.4.0 Zstandard support.
@@ -938,6 +1151,10 @@ def read_xml(
exceptions due to issues with XML document, ``xpath``, or other
parameters.
+ See the :ref:`read_xml documentation in the IO section of the docs
+ <io.read_xml>` for more information in using this method to parse XML
+ files to DataFrames.
+
Examples
--------
>>> xml = '''<?xml version='1.0' encoding='utf-8'?>
@@ -1022,6 +1239,7 @@ def read_xml(
encoding=encoding,
parser=parser,
stylesheet=stylesheet,
+ iterparse=iterparse,
compression=compression,
storage_options=storage_options,
)
diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
index f0fd500bb443c..0ca08e071badd 100644
--- a/pandas/tests/io/xml/test_xml.py
+++ b/pandas/tests/io/xml/test_xml.py
@@ -14,11 +14,16 @@
from pandas.compat import is_ci_environment
from pandas.compat._optional import import_optional_dependency
+from pandas.errors import (
+ EmptyDataError,
+ ParserError,
+)
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
+from pandas.io.common import get_handle
from pandas.io.xml import read_xml
"""
@@ -243,6 +248,21 @@ def parser(request):
return request.param
+def read_xml_iterparse(data, **kwargs):
+ with tm.ensure_clean() as path:
+ with open(path, "w") as f:
+ f.write(data)
+ return read_xml(path, **kwargs)
+
+
+def read_xml_iterparse_comp(comp_path, compression_only, **kwargs):
+ with get_handle(comp_path, "r", compression=compression_only) as handles:
+ with tm.ensure_clean() as path:
+ with open(path, "w") as f:
+ f.write(handles.handle.read())
+ return read_xml(path, **kwargs)
+
+
# FILE / URL
@@ -252,12 +272,24 @@ def test_parser_consistency_file(datapath):
df_file_lxml = read_xml(filename, parser="lxml")
df_file_etree = read_xml(filename, parser="etree")
+ df_iter_lxml = read_xml(
+ filename,
+ parser="lxml",
+ iterparse={"book": ["category", "title", "year", "author", "price"]},
+ )
+ df_iter_etree = read_xml(
+ filename,
+ parser="etree",
+ iterparse={"book": ["category", "title", "year", "author", "price"]},
+ )
+
tm.assert_frame_equal(df_file_lxml, df_file_etree)
+ tm.assert_frame_equal(df_file_lxml, df_iter_lxml)
+ tm.assert_frame_equal(df_iter_lxml, df_iter_etree)
@pytest.mark.network
@pytest.mark.slow
-@td.skip_if_no("lxml")
@tm.network(
url=(
"https://data.cityofchicago.org/api/views/"
@@ -265,15 +297,47 @@ def test_parser_consistency_file(datapath):
),
check_before_test=True,
)
-def test_parser_consistency_url():
+def test_parser_consistency_url(parser):
url = (
"https://data.cityofchicago.org/api/views/"
"8pix-ypme/rows.xml?accessType=DOWNLOAD"
)
- df_url_lxml = read_xml(url, xpath=".//row/row", parser="lxml")
- df_url_etree = read_xml(url, xpath=".//row/row", parser="etree")
- tm.assert_frame_equal(df_url_lxml, df_url_etree)
+ with tm.ensure_clean(filename="cta.xml") as path:
+ (read_xml(url, xpath=".//row/row", parser=parser).to_xml(path, index=False))
+
+ df_xpath = read_xml(path, parser=parser)
+ df_iter = read_xml(
+ path,
+ parser=parser,
+ iterparse={
+ "row": [
+ "_id",
+ "_uuid",
+ "_position",
+ "_address",
+ "stop_id",
+ "direction_id",
+ "stop_name",
+ "station_name",
+ "station_descriptive_name",
+ "map_id",
+ "ada",
+ "red",
+ "blue",
+ "g",
+ "brn",
+ "p",
+ "pexp",
+ "y",
+ "pnk",
+ "o",
+ "location",
+ ]
+ },
+ )
+
+ tm.assert_frame_equal(df_xpath, df_iter)
def test_file_like(datapath, parser, mode):
@@ -479,6 +543,12 @@ def test_default_namespace(parser):
parser=parser,
)
+ df_iter = read_xml_iterparse(
+ xml_default_nmsp,
+ parser=parser,
+ iterparse={"row": ["shape", "degrees", "sides"]},
+ )
+
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
@@ -488,6 +558,7 @@ def test_default_namespace(parser):
)
tm.assert_frame_equal(df_nmsp, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_prefix_namespace(parser):
@@ -497,6 +568,9 @@ def test_prefix_namespace(parser):
namespaces={"doc": "http://example.com"},
parser=parser,
)
+ df_iter = read_xml_iterparse(
+ xml_prefix_nmsp, parser=parser, iterparse={"row": ["shape", "degrees", "sides"]}
+ )
df_expected = DataFrame(
{
@@ -507,6 +581,7 @@ def test_prefix_namespace(parser):
)
tm.assert_frame_equal(df_nmsp, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
@td.skip_if_no("lxml")
@@ -591,6 +666,11 @@ def test_none_namespace_prefix(key):
def test_file_elems_and_attrs(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
+ df_iter = read_xml(
+ filename,
+ parser=parser,
+ iterparse={"book": ["category", "title", "author", "year", "price"]},
+ )
df_expected = DataFrame(
{
"category": ["cooking", "children", "web"],
@@ -602,19 +682,27 @@ def test_file_elems_and_attrs(datapath, parser):
)
tm.assert_frame_equal(df_file, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_file_only_attrs(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, attrs_only=True, parser=parser)
+ df_iter = read_xml(filename, parser=parser, iterparse={"book": ["category"]})
df_expected = DataFrame({"category": ["cooking", "children", "web"]})
tm.assert_frame_equal(df_file, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_file_only_elems(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, elems_only=True, parser=parser)
+ df_iter = read_xml(
+ filename,
+ parser=parser,
+ iterparse={"book": ["title", "author", "year", "price"]},
+ )
df_expected = DataFrame(
{
"title": ["Everyday Italian", "Harry Potter", "Learning XML"],
@@ -625,6 +713,7 @@ def test_file_only_elems(datapath, parser):
)
tm.assert_frame_equal(df_file, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_elem_and_attrs_only(datapath, parser):
@@ -661,7 +750,13 @@ def test_attribute_centric_xml():
df_lxml = read_xml(xml, xpath=".//station")
df_etree = read_xml(xml, xpath=".//station", parser="etree")
+ df_iter_lx = read_xml_iterparse(xml, iterparse={"station": ["Name", "coords"]})
+ df_iter_et = read_xml_iterparse(
+ xml, parser="etree", iterparse={"station": ["Name", "coords"]}
+ )
+
tm.assert_frame_equal(df_lxml, df_etree)
+ tm.assert_frame_equal(df_iter_lx, df_iter_et)
# NAMES
@@ -672,6 +767,12 @@ def test_names_option_output(datapath, parser):
df_file = read_xml(
filename, names=["Col1", "Col2", "Col3", "Col4", "Col5"], parser=parser
)
+ df_iter = read_xml(
+ filename,
+ parser=parser,
+ names=["Col1", "Col2", "Col3", "Col4", "Col5"],
+ iterparse={"book": ["category", "title", "author", "year", "price"]},
+ )
df_expected = DataFrame(
{
@@ -684,6 +785,42 @@ def test_names_option_output(datapath, parser):
)
tm.assert_frame_equal(df_file, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
+
+
+def test_repeat_names(parser):
+ xml = """\
+<shapes>
+ <shape type="2D">
+ <name>circle</name>
+ <type>curved</type>
+ </shape>
+ <shape type="3D">
+ <name>sphere</name>
+ <type>curved</type>
+ </shape>
+</shapes>"""
+ df_xpath = read_xml(
+ xml, xpath=".//shape", parser=parser, names=["type_dim", "shape", "type_edge"]
+ )
+
+ df_iter = read_xml_iterparse(
+ xml,
+ parser=parser,
+ iterparse={"shape": ["type", "name", "type"]},
+ names=["type_dim", "shape", "type_edge"],
+ )
+
+ df_expected = DataFrame(
+ {
+ "type_dim": ["2D", "3D"],
+ "shape": ["circle", "sphere"],
+ "type_edge": ["curved", "curved"],
+ }
+ )
+
+ tm.assert_frame_equal(df_xpath, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_names_option_wrong_length(datapath, parser):
@@ -736,10 +873,25 @@ def test_ascii_encoding(datapath, parser):
@td.skip_if_no("lxml")
def test_parser_consistency_with_encoding(datapath):
filename = datapath("io", "data", "xml", "baby_names.xml")
- df_lxml = read_xml(filename, parser="lxml", encoding="ISO-8859-1")
- df_etree = read_xml(filename, parser="etree", encoding="iso-8859-1")
+ df_xpath_lxml = read_xml(filename, parser="lxml", encoding="ISO-8859-1")
+ df_xpath_etree = read_xml(filename, parser="etree", encoding="iso-8859-1")
- tm.assert_frame_equal(df_lxml, df_etree)
+ df_iter_lxml = read_xml(
+ filename,
+ parser="lxml",
+ encoding="ISO-8859-1",
+ iterparse={"row": ["rank", "malename", "femalename"]},
+ )
+ df_iter_etree = read_xml(
+ filename,
+ parser="etree",
+ encoding="ISO-8859-1",
+ iterparse={"row": ["rank", "malename", "femalename"]},
+ )
+
+ tm.assert_frame_equal(df_xpath_lxml, df_xpath_etree)
+ tm.assert_frame_equal(df_xpath_etree, df_iter_etree)
+ tm.assert_frame_equal(df_iter_lxml, df_iter_etree)
@td.skip_if_no("lxml")
@@ -805,7 +957,22 @@ def test_stylesheet_file(datapath):
stylesheet=xsl,
)
+ df_iter = read_xml(
+ kml,
+ iterparse={
+ "Placemark": [
+ "id",
+ "name",
+ "styleUrl",
+ "extrude",
+ "altitudeMode",
+ "coordinates",
+ ]
+ },
+ )
+
tm.assert_frame_equal(df_kml, df_style)
+ tm.assert_frame_equal(df_kml, df_iter)
def test_read_xml_passing_as_positional_deprecated(datapath, parser):
@@ -1029,6 +1196,143 @@ def test_empty_stylesheet(val):
read_xml(kml, stylesheet=val)
+# ITERPARSE
+
+
+def test_string_error(parser):
+ with pytest.raises(
+ ParserError, match=("iterparse is designed for large XML files")
+ ):
+ read_xml(
+ xml_default_nmsp,
+ parser=parser,
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ )
+
+
+def test_file_like_error(datapath, parser, mode):
+ filename = datapath("io", "data", "xml", "books.xml")
+ with pytest.raises(
+ ParserError, match=("iterparse is designed for large XML files")
+ ):
+ with open(filename) as f:
+ read_xml(
+ f,
+ parser=parser,
+ iterparse={"book": ["category", "title", "year", "author", "price"]},
+ )
+
+
+@pytest.mark.network
+@tm.network(url="https://www.w3schools.com/xml/books.xml", check_before_test=True)
+def test_url_path_error(parser):
+ url = "https://www.w3schools.com/xml/books.xml"
+ with pytest.raises(
+ ParserError, match=("iterparse is designed for large XML files")
+ ):
+ read_xml(
+ url,
+ parser=parser,
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ )
+
+
+def test_compression_error(parser, compression_only):
+ with tm.ensure_clean(filename="geom_xml.zip") as path:
+ geom_df.to_xml(path, parser=parser, compression=compression_only)
+
+ with pytest.raises(
+ ParserError, match=("iterparse is designed for large XML files")
+ ):
+ read_xml(
+ path,
+ parser=parser,
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ compression=compression_only,
+ )
+
+
+def test_wrong_dict_type(datapath, parser):
+ filename = datapath("io", "data", "xml", "books.xml")
+ with pytest.raises(TypeError, match="list is not a valid type for iterparse"):
+ read_xml(
+ filename,
+ parser=parser,
+ iterparse=["category", "title", "year", "author", "price"],
+ )
+
+
+def test_wrong_dict_value(datapath, parser):
+ filename = datapath("io", "data", "xml", "books.xml")
+ with pytest.raises(
+ TypeError, match="<class 'str'> is not a valid type for value in iterparse"
+ ):
+ read_xml(filename, parser=parser, iterparse={"book": "category"})
+
+
+def test_bad_xml(datapath, parser):
+ bad_xml = """\
+<?xml version='1.0' encoding='utf-8'?>
+ <row>
+ <shape>square</shape>
+ <degrees>00360</degrees>
+ <sides>4.0</sides>
+ <date>2020-01-01</date>
+ </row>
+ <row>
+ <shape>circle</shape>
+ <degrees>00360</degrees>
+ <sides/>
+ <date>2021-01-01</date>
+ </row>
+ <row>
+ <shape>triangle</shape>
+ <degrees>00180</degrees>
+ <sides>3.0</sides>
+ <date>2022-01-01</date>
+ </row>
+"""
+ with tm.ensure_clean(filename="bad.xml") as path:
+ with open(path, "w") as f:
+ f.write(bad_xml)
+
+ with pytest.raises(
+ SyntaxError,
+ match=(
+ "Extra content at the end of the document|"
+ "junk after document element"
+ ),
+ ):
+ read_xml(
+ path,
+ parser=parser,
+ parse_dates=["date"],
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ )
+
+
+def test_no_result(datapath, parser):
+ filename = datapath("io", "data", "xml", "books.xml")
+ with pytest.raises(
+ ParserError, match="No result from selected items in iterparse."
+ ):
+ read_xml(
+ filename,
+ parser=parser,
+ iterparse={"node": ["attr1", "elem1", "elem2", "elem3"]},
+ )
+
+
+def test_empty_data(datapath, parser):
+ filename = datapath("io", "data", "xml", "books.xml")
+ with pytest.raises(EmptyDataError, match="No columns to parse from file"):
+ read_xml(
+ filename,
+ parser=parser,
+ iterparse={"book": ["attr1", "elem1", "elem2", "elem3"]},
+ )
+
+
@pytest.mark.network
@td.skip_if_no("lxml")
@tm.network(
@@ -1071,12 +1375,23 @@ def test_online_stylesheet():
def test_compression_read(parser, compression_only):
- with tm.ensure_clean() as path:
- geom_df.to_xml(path, index=False, parser=parser, compression=compression_only)
+ with tm.ensure_clean() as comp_path:
+ geom_df.to_xml(
+ comp_path, index=False, parser=parser, compression=compression_only
+ )
- xml_df = read_xml(path, parser=parser, compression=compression_only)
+ df_xpath = read_xml(comp_path, parser=parser, compression=compression_only)
+
+ df_iter = read_xml_iterparse_comp(
+ comp_path,
+ compression_only,
+ parser=parser,
+ iterparse={"row": ["shape", "degrees", "sides"]},
+ compression=compression_only,
+ )
- tm.assert_frame_equal(xml_df, geom_df)
+ tm.assert_frame_equal(df_xpath, geom_df)
+ tm.assert_frame_equal(df_iter, geom_df)
def test_wrong_compression(parser, compression, compression_only):
diff --git a/pandas/tests/io/xml/test_xml_dtypes.py b/pandas/tests/io/xml/test_xml_dtypes.py
index 801461ed4288a..6aa4ddfac7628 100644
--- a/pandas/tests/io/xml/test_xml_dtypes.py
+++ b/pandas/tests/io/xml/test_xml_dtypes.py
@@ -20,6 +20,20 @@ def parser(request):
return request.param
+@pytest.fixture(
+ params=[None, {"book": ["category", "title", "author", "year", "price"]}]
+)
+def iterparse(request):
+ return request.param
+
+
+def read_xml_iterparse(data, **kwargs):
+ with tm.ensure_clean() as path:
+ with open(path, "w") as f:
+ f.write(data)
+ return read_xml(path, **kwargs)
+
+
xml_types = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
@@ -68,6 +82,12 @@ def parser(request):
def test_dtype_single_str(parser):
df_result = read_xml(xml_types, dtype={"degrees": "str"}, parser=parser)
+ df_iter = read_xml_iterparse(
+ xml_types,
+ parser=parser,
+ dtype={"degrees": "str"},
+ iterparse={"row": ["shape", "degrees", "sides"]},
+ )
df_expected = DataFrame(
{
@@ -78,10 +98,17 @@ def test_dtype_single_str(parser):
)
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_dtypes_all_str(parser):
df_result = read_xml(xml_dates, dtype="string", parser=parser)
+ df_iter = read_xml_iterparse(
+ xml_dates,
+ parser=parser,
+ dtype="string",
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ )
df_expected = DataFrame(
{
@@ -94,6 +121,7 @@ def test_dtypes_all_str(parser):
)
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_dtypes_with_names(parser):
@@ -103,6 +131,13 @@ def test_dtypes_with_names(parser):
dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64"},
parser=parser,
)
+ df_iter = read_xml_iterparse(
+ xml_dates,
+ parser=parser,
+ names=["Col1", "Col2", "Col3", "Col4"],
+ dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64"},
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ )
df_expected = DataFrame(
{
@@ -114,10 +149,17 @@ def test_dtypes_with_names(parser):
)
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_dtype_nullable_int(parser):
df_result = read_xml(xml_types, dtype={"sides": "Int64"}, parser=parser)
+ df_iter = read_xml_iterparse(
+ xml_types,
+ parser=parser,
+ dtype={"sides": "Int64"},
+ iterparse={"row": ["shape", "degrees", "sides"]},
+ )
df_expected = DataFrame(
{
@@ -128,10 +170,17 @@ def test_dtype_nullable_int(parser):
)
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_dtype_float(parser):
df_result = read_xml(xml_types, dtype={"degrees": "float"}, parser=parser)
+ df_iter = read_xml_iterparse(
+ xml_types,
+ parser=parser,
+ dtype={"degrees": "float"},
+ iterparse={"row": ["shape", "degrees", "sides"]},
+ )
df_expected = DataFrame(
{
@@ -142,13 +191,15 @@ def test_dtype_float(parser):
)
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
-def test_wrong_dtype(parser):
+def test_wrong_dtype(datapath, parser, iterparse):
+ filename = datapath("io", "data", "xml", "books.xml")
with pytest.raises(
- ValueError, match=('Unable to parse string "square" at position 0')
+ ValueError, match=('Unable to parse string "Everyday Italian" at position 0')
):
- read_xml(xml_types, dtype={"shape": "Int64"}, parser=parser)
+ read_xml(filename, dtype={"title": "Int64"}, parser=parser, iterparse=iterparse)
def test_both_dtype_converters(parser):
@@ -167,8 +218,16 @@ def test_both_dtype_converters(parser):
converters={"degrees": str},
parser=parser,
)
+ df_iter = read_xml_iterparse(
+ xml_types,
+ dtype={"degrees": "str"},
+ converters={"degrees": str},
+ parser=parser,
+ iterparse={"row": ["shape", "degrees", "sides"]},
+ )
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
# CONVERTERS
@@ -176,6 +235,12 @@ def test_both_dtype_converters(parser):
def test_converters_str(parser):
df_result = read_xml(xml_types, converters={"degrees": str}, parser=parser)
+ df_iter = read_xml_iterparse(
+ xml_types,
+ parser=parser,
+ converters={"degrees": str},
+ iterparse={"row": ["shape", "degrees", "sides"]},
+ )
df_expected = DataFrame(
{
@@ -186,6 +251,7 @@ def test_converters_str(parser):
)
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_converters_date(parser):
@@ -193,6 +259,12 @@ def test_converters_date(parser):
df_result = read_xml(
xml_dates, converters={"date": convert_to_datetime}, parser=parser
)
+ df_iter = read_xml_iterparse(
+ xml_dates,
+ parser=parser,
+ converters={"date": convert_to_datetime},
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ )
df_expected = DataFrame(
{
@@ -204,21 +276,29 @@ def test_converters_date(parser):
)
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
-def test_wrong_converters_type(parser):
+def test_wrong_converters_type(datapath, parser, iterparse):
+ filename = datapath("io", "data", "xml", "books.xml")
with pytest.raises(TypeError, match=("Type converters must be a dict or subclass")):
- read_xml(xml_types, converters={"degrees", str}, parser=parser)
+ read_xml(filename, converters={"year", str}, parser=parser, iterparse=iterparse)
-def test_callable_func_converters(parser):
+def test_callable_func_converters(datapath, parser, iterparse):
+ filename = datapath("io", "data", "xml", "books.xml")
with pytest.raises(TypeError, match=("'float' object is not callable")):
- read_xml(xml_types, converters={"degrees": float()}, parser=parser)
+ read_xml(
+ filename, converters={"year": float()}, parser=parser, iterparse=iterparse
+ )
-def test_callable_str_converters(parser):
+def test_callable_str_converters(datapath, parser, iterparse):
+ filename = datapath("io", "data", "xml", "books.xml")
with pytest.raises(TypeError, match=("'str' object is not callable")):
- read_xml(xml_types, converters={"degrees": "float"}, parser=parser)
+ read_xml(
+ filename, converters={"year": "float"}, parser=parser, iterparse=iterparse
+ )
# PARSE DATES
@@ -226,6 +306,12 @@ def test_callable_str_converters(parser):
def test_parse_dates_column_name(parser):
df_result = read_xml(xml_dates, parse_dates=["date"], parser=parser)
+ df_iter = read_xml_iterparse(
+ xml_dates,
+ parser=parser,
+ parse_dates=["date"],
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ )
df_expected = DataFrame(
{
@@ -237,10 +323,17 @@ def test_parse_dates_column_name(parser):
)
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_parse_dates_column_index(parser):
df_result = read_xml(xml_dates, parse_dates=[3], parser=parser)
+ df_iter = read_xml_iterparse(
+ xml_dates,
+ parser=parser,
+ parse_dates=[3],
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ )
df_expected = DataFrame(
{
@@ -252,11 +345,19 @@ def test_parse_dates_column_index(parser):
)
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_parse_dates_true(parser):
df_result = read_xml(xml_dates, parse_dates=True, parser=parser)
+ df_iter = read_xml_iterparse(
+ xml_dates,
+ parser=parser,
+ parse_dates=True,
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ )
+
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
@@ -267,6 +368,7 @@ def test_parse_dates_true(parser):
)
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_parse_dates_dictionary(parser):
@@ -301,6 +403,12 @@ def test_parse_dates_dictionary(parser):
df_result = read_xml(
xml, parse_dates={"date_end": ["year", "month", "day"]}, parser=parser
)
+ df_iter = read_xml_iterparse(
+ xml,
+ parser=parser,
+ parse_dates={"date_end": ["year", "month", "day"]},
+ iterparse={"row": ["shape", "degrees", "sides", "year", "month", "day"]},
+ )
df_expected = DataFrame(
{
@@ -312,6 +420,7 @@ def test_parse_dates_dictionary(parser):
)
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
def test_day_first_parse_dates(parser):
@@ -351,11 +460,20 @@ def test_day_first_parse_dates(parser):
UserWarning, match="Parsing '31/12/2020' in DD/MM/YYYY format"
):
df_result = read_xml(xml, parse_dates=["date"], parser=parser)
+ df_iter = read_xml_iterparse(
+ xml,
+ parse_dates=["date"],
+ parser=parser,
+ iterparse={"row": ["shape", "degrees", "sides", "date"]},
+ )
+
tm.assert_frame_equal(df_result, df_expected)
+ tm.assert_frame_equal(df_iter, df_expected)
-def test_wrong_parse_dates_type(parser):
+def test_wrong_parse_dates_type(datapath, parser, iterparse):
+ filename = datapath("io", "data", "xml", "books.xml")
with pytest.raises(
TypeError, match=("Only booleans, lists, and dictionaries are accepted")
):
- read_xml(xml_dates, parse_dates={"date"}, parser=parser)
+ read_xml(filename, parse_dates={"date"}, parser=parser, iterparse=iterparse)
| - [X] closes #47343
- [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [X] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47409 | 2022-06-18T00:47:48Z | 2022-06-18T00:56:12Z | null | 2022-06-18T00:59:46Z |
PERF: Performance improvement on dataframe.update. | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 39a940169e1f3..8711d53353185 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8000,7 +8000,7 @@ def update(
if mask.all():
continue
- self[col] = expressions.where(mask, this, that)
+ self.loc[:, col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
| the setitem under the hood is doing df copy per https://github.com/pandas-dev/pandas/issues/46267
therefore, the performance is compromised, per the suggestion, using loc for assignment instead
see the detail example in the issue link for metrics.
- [X] closes #47392 (Replace xxxx with the Github issue number)
~Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature~
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
~Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.~
~Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.~
| https://api.github.com/repos/pandas-dev/pandas/pulls/47407 | 2022-06-17T15:33:49Z | 2022-06-21T18:17:37Z | null | 2022-06-21T18:17:38Z |
Revert "REF: remove JoinUnit.shape (#43651)" | diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 228d57fe196a4..8ce98f3891ff4 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -212,8 +212,6 @@ def concatenate_managers(
for placement, join_units in concat_plan:
unit = join_units[0]
blk = unit.block
- # Assertion disabled for performance
- # assert len(join_units) == len(mgrs_indexers)
if len(join_units) == 1:
values = blk.values
@@ -331,10 +329,14 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
plan : list of (BlockPlacement, JoinUnit) tuples
"""
+ # Calculate post-reindex shape , save for item axis which will be separate
+ # for each block anyway.
+ mgr_shape_list = list(mgr.shape)
+ mgr_shape = tuple(mgr_shape_list)
if mgr.is_single_block:
blk = mgr.blocks[0]
- return [(blk.mgr_locs, JoinUnit(blk))]
+ return [(blk.mgr_locs, JoinUnit(blk, mgr_shape))]
blknos = mgr.blknos
blklocs = mgr.blklocs
@@ -342,9 +344,12 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos, group=False):
- # Assertions disabled for performance; these should always hold
- # assert placements.is_slice_like
- # assert blkno != -1
+ assert placements.is_slice_like
+ assert blkno != -1
+
+ shape_list = list(mgr_shape)
+ shape_list[0] = len(placements)
+ shape = tuple(shape_list)
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
@@ -374,7 +379,8 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
# Assertions disabled for performance
# assert blk._mgr_locs.as_slice == placements.as_slice
- unit = JoinUnit(blk)
+ # assert blk.shape[0] == shape[0]
+ unit = JoinUnit(blk, shape)
plan.append((placements, unit))
@@ -382,8 +388,10 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
class JoinUnit:
- def __init__(self, block: Block) -> None:
+ def __init__(self, block: Block, shape: Shape) -> None:
+ # Passing shape explicitly is required for cases when block is None.
self.block = block
+ self.shape = shape
def __repr__(self) -> str:
return f"{type(self).__name__}({repr(self.block)})"
@@ -396,11 +404,22 @@ def is_na(self) -> bool:
return False
def get_reindexed_values(self, empty_dtype: DtypeObj) -> ArrayLike:
+ values: ArrayLike
+
if self.is_na:
- return make_na_array(empty_dtype, self.block.shape)
+ return make_na_array(empty_dtype, self.shape)
else:
- return self.block.values
+
+ if not self.block._can_consolidate:
+ # preserve these for validation in concat_compat
+ return self.block.values
+
+ # No dtype upcasting is done here, it will be performed during
+ # concatenation itself.
+ values = self.block.values
+
+ return values
def make_na_array(dtype: DtypeObj, shape: Shape) -> ArrayLike:
@@ -539,9 +558,6 @@ def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:
first = join_units[0].block
if first.dtype.kind == "V":
return False
- elif len(join_units) == 1:
- # only use this path when there is something to concatenate
- return False
return (
# exclude cases where a) ju.block is None or b) we have e.g. Int64+int64
all(type(ju.block) is type(first) for ju in join_units)
@@ -554,8 +570,13 @@ def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:
or ju.block.dtype.kind in ["b", "i", "u"]
for ju in join_units
)
- # this also precludes any blocks with dtype.kind == "V", since
- # we excluded that case for `first` above.
+ and
+ # no blocks that would get missing values (can lead to type upcasts)
+ # unless we're an extension dtype.
+ all(not ju.is_na or ju.block.is_extension for ju in join_units)
+ and
+ # only use this path when there is something to concatenate
+ len(join_units) > 1
)
@@ -577,7 +598,10 @@ def _trim_join_unit(join_unit: JoinUnit, length: int) -> JoinUnit:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
- return JoinUnit(block=extra_block)
+ extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
+ join_unit.shape = (length,) + join_unit.shape[1:]
+
+ return JoinUnit(block=extra_block, shape=extra_shape)
def _combine_concat_plans(plans):
| This reverts commit bb9a9852265915a4688f772dd062d3fcf4159a32.
This is also included in https://github.com/pandas-dev/pandas/pull/47372/, but reverting this single commit separately will make it easier to backport the other PR, since this is the only commit that was not already included in 1.4, but was only done in 1.5 (cc @simonjayhawkins)
| https://api.github.com/repos/pandas-dev/pandas/pulls/47406 | 2022-06-17T14:56:44Z | 2022-06-21T14:19:22Z | 2022-06-21T14:19:22Z | 2022-06-22T12:18:56Z |
SAS7BDAT parser: Speed up RLE/RDC decompression | diff --git a/asv_bench/benchmarks/io/sas.py b/asv_bench/benchmarks/io/sas.py
index 369b79641dbc4..411e5b6099f76 100644
--- a/asv_bench/benchmarks/io/sas.py
+++ b/asv_bench/benchmarks/io/sas.py
@@ -1,30 +1,23 @@
-import os
+from pathlib import Path
from pandas import read_sas
+ROOT = Path(__file__).parents[3] / "pandas" / "tests" / "io" / "sas" / "data"
+
class SAS:
+ def time_read_sas7bdat(self):
+ read_sas(ROOT / "test1.sas7bdat")
- params = ["sas7bdat", "xport"]
- param_names = ["format"]
+ def time_read_xpt(self):
+ read_sas(ROOT / "paxraw_d_short.xpt")
- def setup(self, format):
- # Read files that are located in 'pandas/tests/io/sas/data'
- files = {"sas7bdat": "test1.sas7bdat", "xport": "paxraw_d_short.xpt"}
- file = files[format]
- paths = [
- os.path.dirname(__file__),
- "..",
- "..",
- "..",
- "pandas",
- "tests",
- "io",
- "sas",
- "data",
- file,
- ]
- self.f = os.path.join(*paths)
+ def time_read_sas7bdat_2(self):
+ next(read_sas(ROOT / "0x00controlbyte.sas7bdat.bz2", chunksize=11000))
- def time_read_sas(self, format):
- read_sas(self.f, format=format)
+ def time_read_sas7bdat_2_chunked(self):
+ for i, _ in enumerate(
+ read_sas(ROOT / "0x00controlbyte.sas7bdat.bz2", chunksize=1000)
+ ):
+ if i == 10:
+ break
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index 273f6f84a484d..8065859844b30 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -1,13 +1,58 @@
-# cython: profile=False
-# cython: boundscheck=False, initializedcheck=False
+# cython: language_level=3, initializedcheck=False
+# cython: warn.undeclared=True, warn.maybe_uninitialized=True, warn.unused=True
from cython cimport Py_ssize_t
+from libc.stddef cimport size_t
+from libc.stdint cimport (
+ int64_t,
+ uint8_t,
+ uint16_t,
+)
+from libc.stdlib cimport (
+ calloc,
+ free,
+)
+
import numpy as np
import pandas.io.sas.sas_constants as const
-ctypedef signed long long int64_t
-ctypedef unsigned char uint8_t
-ctypedef unsigned short uint16_t
+
+cdef struct Buffer:
+ # Convenience wrapper for uint8_t data to allow fast and safe reads and writes.
+ # We use this as a replacement for np.array(..., dtype=np.uint8) because it's
+ # much slower to create NumPy arrays and we create Buffer instances many times
+ # when reading a SAS7BDAT file (roughly once per row that is being read).
+ uint8_t *data
+ size_t length
+
+
+cdef inline uint8_t buf_get(Buffer buf, size_t offset) except? 255:
+ assert offset < buf.length, "Out of bounds read"
+ return buf.data[offset]
+
+
+cdef inline bint buf_set(Buffer buf, size_t offset, uint8_t value) except 0:
+ assert offset < buf.length, "Out of bounds write"
+ buf.data[offset] = value
+ return True
+
+
+cdef inline bytes buf_as_bytes(Buffer buf, size_t offset, size_t length):
+ assert offset + length <= buf.length, "Out of bounds read"
+ return buf.data[offset:offset+length]
+
+
+cdef inline Buffer buf_new(size_t length) except *:
+ cdef uint8_t *data = <uint8_t *>calloc(length, sizeof(uint8_t))
+ if data == NULL:
+ raise MemoryError(f"Failed to allocate {length} bytes")
+ return Buffer(data, length)
+
+
+cdef inline buf_free(Buffer buf):
+ if buf.data != NULL:
+ free(buf.data)
+
cdef object np_nan = np.nan
@@ -15,180 +60,170 @@ cdef object np_nan = np.nan
# algorithm. It is partially documented here:
#
# https://cran.r-project.org/package=sas7bdat/vignettes/sas7bdat.pdf
-cdef const uint8_t[:] rle_decompress(int result_length, const uint8_t[:] inbuff) except *:
+cdef int rle_decompress(Buffer inbuff, Buffer outbuff) except? 0:
cdef:
uint8_t control_byte, x
- uint8_t[:] result = np.zeros(result_length, np.uint8)
int rpos = 0
int i, nbytes, end_of_first_byte
- Py_ssize_t ipos = 0, length = len(inbuff)
+ size_t ipos = 0
- while ipos < length:
- control_byte = inbuff[ipos] & 0xF0
- end_of_first_byte = <int>(inbuff[ipos] & 0x0F)
+ while ipos < inbuff.length:
+ control_byte = buf_get(inbuff, ipos) & 0xF0
+ end_of_first_byte = <int>(buf_get(inbuff, ipos) & 0x0F)
ipos += 1
if control_byte == 0x00:
- nbytes = <int>(inbuff[ipos]) + 64 + end_of_first_byte * 256
+ nbytes = <int>(buf_get(inbuff, ipos)) + 64 + end_of_first_byte * 256
ipos += 1
for _ in range(nbytes):
- result[rpos] = inbuff[ipos]
+ buf_set(outbuff, rpos, buf_get(inbuff, ipos))
rpos += 1
ipos += 1
elif control_byte == 0x40:
# not documented
- nbytes = (inbuff[ipos] & 0xFF) + 18 + end_of_first_byte * 256
+ nbytes = <int>(buf_get(inbuff, ipos)) + 18 + end_of_first_byte * 256
ipos += 1
for _ in range(nbytes):
- result[rpos] = inbuff[ipos]
+ buf_set(outbuff, rpos, buf_get(inbuff, ipos))
rpos += 1
ipos += 1
elif control_byte == 0x60:
- nbytes = end_of_first_byte * 256 + <int>(inbuff[ipos]) + 17
+ nbytes = end_of_first_byte * 256 + <int>(buf_get(inbuff, ipos)) + 17
ipos += 1
for _ in range(nbytes):
- result[rpos] = 0x20
+ buf_set(outbuff, rpos, 0x20)
rpos += 1
elif control_byte == 0x70:
- nbytes = end_of_first_byte * 256 + <int>(inbuff[ipos]) + 17
+ nbytes = end_of_first_byte * 256 + <int>(buf_get(inbuff, ipos)) + 17
ipos += 1
for _ in range(nbytes):
- result[rpos] = 0x00
+ buf_set(outbuff, rpos, 0x00)
rpos += 1
elif control_byte == 0x80:
nbytes = end_of_first_byte + 1
for i in range(nbytes):
- result[rpos] = inbuff[ipos + i]
+ buf_set(outbuff, rpos, buf_get(inbuff, ipos + i))
rpos += 1
ipos += nbytes
elif control_byte == 0x90:
nbytes = end_of_first_byte + 17
for i in range(nbytes):
- result[rpos] = inbuff[ipos + i]
+ buf_set(outbuff, rpos, buf_get(inbuff, ipos + i))
rpos += 1
ipos += nbytes
elif control_byte == 0xA0:
nbytes = end_of_first_byte + 33
for i in range(nbytes):
- result[rpos] = inbuff[ipos + i]
+ buf_set(outbuff, rpos, buf_get(inbuff, ipos + i))
rpos += 1
ipos += nbytes
elif control_byte == 0xB0:
nbytes = end_of_first_byte + 49
for i in range(nbytes):
- result[rpos] = inbuff[ipos + i]
+ buf_set(outbuff, rpos, buf_get(inbuff, ipos + i))
rpos += 1
ipos += nbytes
elif control_byte == 0xC0:
nbytes = end_of_first_byte + 3
- x = inbuff[ipos]
+ x = buf_get(inbuff, ipos)
ipos += 1
for _ in range(nbytes):
- result[rpos] = x
+ buf_set(outbuff, rpos, x)
rpos += 1
elif control_byte == 0xD0:
nbytes = end_of_first_byte + 2
for _ in range(nbytes):
- result[rpos] = 0x40
+ buf_set(outbuff, rpos, 0x40)
rpos += 1
elif control_byte == 0xE0:
nbytes = end_of_first_byte + 2
for _ in range(nbytes):
- result[rpos] = 0x20
+ buf_set(outbuff, rpos, 0x20)
rpos += 1
elif control_byte == 0xF0:
nbytes = end_of_first_byte + 2
for _ in range(nbytes):
- result[rpos] = 0x00
+ buf_set(outbuff, rpos, 0x00)
rpos += 1
else:
raise ValueError(f"unknown control byte: {control_byte}")
- # In py37 cython/clang sees `len(outbuff)` as size_t and not Py_ssize_t
- if <Py_ssize_t>len(result) != <Py_ssize_t>result_length:
- raise ValueError(f"RLE: {len(result)} != {result_length}")
-
- return np.asarray(result)
+ return rpos
# rdc_decompress decompresses data using the Ross Data Compression algorithm:
#
# http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
-cdef const uint8_t[:] rdc_decompress(int result_length, const uint8_t[:] inbuff) except *:
+cdef int rdc_decompress(Buffer inbuff, Buffer outbuff) except? 0:
cdef:
uint8_t cmd
uint16_t ctrl_bits = 0, ctrl_mask = 0, ofs, cnt
- int rpos = 0, k
- uint8_t[:] outbuff = np.zeros(result_length, dtype=np.uint8)
- Py_ssize_t ipos = 0, length = len(inbuff)
+ int rpos = 0, k, ii
+ size_t ipos = 0
ii = -1
- while ipos < length:
+ while ipos < inbuff.length:
ii += 1
ctrl_mask = ctrl_mask >> 1
if ctrl_mask == 0:
- ctrl_bits = ((<uint16_t>inbuff[ipos] << 8) +
- <uint16_t>inbuff[ipos + 1])
+ ctrl_bits = ((<uint16_t>buf_get(inbuff, ipos) << 8) +
+ <uint16_t>buf_get(inbuff, ipos + 1))
ipos += 2
ctrl_mask = 0x8000
if ctrl_bits & ctrl_mask == 0:
- outbuff[rpos] = inbuff[ipos]
+ buf_set(outbuff, rpos, buf_get(inbuff, ipos))
ipos += 1
rpos += 1
continue
- cmd = (inbuff[ipos] >> 4) & 0x0F
- cnt = <uint16_t>(inbuff[ipos] & 0x0F)
+ cmd = (buf_get(inbuff, ipos) >> 4) & 0x0F
+ cnt = <uint16_t>(buf_get(inbuff, ipos) & 0x0F)
ipos += 1
# short RLE
if cmd == 0:
cnt += 3
for k in range(cnt):
- outbuff[rpos + k] = inbuff[ipos]
+ buf_set(outbuff, rpos + k, buf_get(inbuff, ipos))
rpos += cnt
ipos += 1
# long RLE
elif cmd == 1:
- cnt += <uint16_t>inbuff[ipos] << 4
+ cnt += <uint16_t>buf_get(inbuff, ipos) << 4
cnt += 19
ipos += 1
for k in range(cnt):
- outbuff[rpos + k] = inbuff[ipos]
+ buf_set(outbuff, rpos + k, buf_get(inbuff, ipos))
rpos += cnt
ipos += 1
# long pattern
elif cmd == 2:
ofs = cnt + 3
- ofs += <uint16_t>inbuff[ipos] << 4
+ ofs += <uint16_t>buf_get(inbuff, ipos) << 4
ipos += 1
- cnt = <uint16_t>inbuff[ipos]
+ cnt = <uint16_t>buf_get(inbuff, ipos)
ipos += 1
cnt += 16
for k in range(cnt):
- outbuff[rpos + k] = outbuff[rpos - <int>ofs + k]
+ buf_set(outbuff, rpos + k, buf_get(outbuff, rpos - <int>ofs + k))
rpos += cnt
# short pattern
else:
ofs = cnt + 3
- ofs += <uint16_t>inbuff[ipos] << 4
+ ofs += <uint16_t>buf_get(inbuff, ipos) << 4
ipos += 1
for k in range(cmd):
- outbuff[rpos + k] = outbuff[rpos - <int>ofs + k]
+ buf_set(outbuff, rpos + k, buf_get(outbuff, rpos - <int>ofs + k))
rpos += cmd
- # In py37 cython/clang sees `len(outbuff)` as size_t and not Py_ssize_t
- if <Py_ssize_t>len(outbuff) != <Py_ssize_t>result_length:
- raise ValueError(f"RDC: {len(outbuff)} != {result_length}\n")
-
- return np.asarray(outbuff)
+ return rpos
cdef enum ColumnTypes:
@@ -215,7 +250,8 @@ cdef class Parser:
int64_t[:] column_types
uint8_t[:, :] byte_chunk
object[:, :] string_chunk
- char *cached_page
+ uint8_t *cached_page
+ int cached_page_len
int current_row_on_page_index
int current_page_block_count
int current_page_data_subheader_pointers_len
@@ -229,7 +265,7 @@ cdef class Parser:
int subheader_pointer_length
int current_page_type
bint is_little_endian
- const uint8_t[:] (*decompress)(int result_length, const uint8_t[:] inbuff) except *
+ int (*decompress)(Buffer, Buffer) except? 0
object parser
def __init__(self, object parser):
@@ -306,7 +342,8 @@ cdef class Parser:
cdef update_next_page(self):
# update data for the current page
- self.cached_page = <char *>self.parser._cached_page
+ self.cached_page = <uint8_t *>self.parser._cached_page
+ self.cached_page_len = len(self.parser._cached_page)
self.current_row_on_page_index = 0
self.current_page_type = self.parser._current_page_type
self.current_page_block_count = self.parser._current_page_block_count
@@ -387,20 +424,28 @@ cdef class Parser:
cdef:
Py_ssize_t j
- int s, k, m, jb, js, current_row
+ int s, k, m, jb, js, current_row, rpos
int64_t lngt, start, ct
- const uint8_t[:] source
+ Buffer source, decompressed_source
int64_t[:] column_types
int64_t[:] lengths
int64_t[:] offsets
uint8_t[:, :] byte_chunk
object[:, :] string_chunk
-
- source = np.frombuffer(
- self.cached_page[offset:offset + length], dtype=np.uint8)
-
- if self.decompress != NULL and (length < self.row_length):
- source = self.decompress(self.row_length, source)
+ bint compressed
+
+ assert offset + length <= self.cached_page_len, "Out of bounds read"
+ source = Buffer(&self.cached_page[offset], length)
+
+ compressed = self.decompress != NULL and length < self.row_length
+ if compressed:
+ decompressed_source = buf_new(self.row_length)
+ rpos = self.decompress(source, decompressed_source)
+ if rpos != self.row_length:
+ raise ValueError(
+ f"Expected decompressed line of length {self.row_length} bytes but decompressed {rpos} bytes"
+ )
+ source = decompressed_source
current_row = self.current_row_in_chunk_index
column_types = self.column_types
@@ -424,20 +469,23 @@ cdef class Parser:
else:
m = s
for k in range(lngt):
- byte_chunk[jb, m + k] = source[start + k]
+ byte_chunk[jb, m + k] = buf_get(source, start + k)
jb += 1
elif column_types[j] == column_type_string:
# string
# Skip trailing whitespace. This is equivalent to calling
# .rstrip(b"\x00 ") but without Python call overhead.
- while lngt > 0 and source[start+lngt-1] in b"\x00 ":
+ while lngt > 0 and buf_get(source, start + lngt - 1) in b"\x00 ":
lngt -= 1
if lngt == 0 and self.blank_missing:
string_chunk[js, current_row] = np_nan
else:
- string_chunk[js, current_row] = (&source[start])[:lngt]
+ string_chunk[js, current_row] = buf_as_bytes(source, start, lngt)
js += 1
self.current_row_on_page_index += 1
self.current_row_in_chunk_index += 1
self.current_row_in_file_index += 1
+
+ if compressed:
+ buf_free(decompressed_source)
diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py
index 2b7ecbcdf9f80..ce4d960e3a9b0 100644
--- a/pandas/tests/io/sas/test_sas7bdat.py
+++ b/pandas/tests/io/sas/test_sas7bdat.py
@@ -350,37 +350,23 @@ def test_meta2_page(datapath):
assert len(df) == 1000
-@pytest.mark.parametrize("test_file", ["test2.sas7bdat", "test3.sas7bdat"])
-def test_exception_propagation_rdc_rle_decompress(datapath, monkeypatch, test_file):
- """Errors in RLE/RDC decompression should propagate the same error."""
- orig_np_zeros = np.zeros
-
- def _patched_zeros(size, dtype):
- if isinstance(size, int):
- # np.zeros() call in {rdc,rle}_decompress
- raise Exception("Test exception")
- else:
- # Other calls to np.zeros
- return orig_np_zeros(size, dtype)
-
- monkeypatch.setattr(np, "zeros", _patched_zeros)
-
- with pytest.raises(Exception, match="^Test exception$"):
- pd.read_sas(datapath("io", "sas", "data", test_file))
-
-
-def test_exception_propagation_rle_decompress(tmp_path, datapath):
- """Illegal control byte in RLE decompressor should raise the correct ValueError."""
- with open(datapath("io", "sas", "data", "test2.sas7bdat"), "rb") as f:
- data = bytearray(f.read())
- invalid_control_byte = 0x10
- page_offset = 0x10000
- control_byte_pos = 55229
- data[page_offset + control_byte_pos] = invalid_control_byte
- tmp_file = tmp_path / "test2.sas7bdat"
- tmp_file.write_bytes(data)
- with pytest.raises(ValueError, match="unknown control byte"):
- pd.read_sas(tmp_file)
+@pytest.mark.parametrize(
+ "test_file, override_offset, override_value, expected_msg",
+ [
+ ("test2.sas7bdat", 0x10000 + 55229, 0x80 | 0x0F, "Out of bounds"),
+ ("test2.sas7bdat", 0x10000 + 55229, 0x10, "unknown control byte"),
+ ("test3.sas7bdat", 118170, 184, "Out of bounds"),
+ ],
+)
+def test_rle_rdc_exceptions(
+ datapath, test_file, override_offset, override_value, expected_msg
+):
+ """Errors in RLE/RDC decompression should propagate."""
+ with open(datapath("io", "sas", "data", test_file), "rb") as fd:
+ data = bytearray(fd.read())
+ data[override_offset] = override_value
+ with pytest.raises(Exception, match=expected_msg):
+ pd.read_sas(io.BytesIO(data), format="sas7bdat")
def test_0x40_control_byte(datapath):
| Speed up RLE/RDC decompression. Brings a 30-50% performance improvement on SAS7BDAT files using compression.
Works by avoiding calls into NumPy array creation and using a custom-built buffer instead.
Also adds a bunch of `assert` statements to avoid illegal reads/writes. These slow the code down considerably; I will try to improve on that in a future PR.
Alternatives considered:
- Fast NumPy array creation: Didn't find a way to do it.
- Using Python's `bytearray`: Much slower.
- Using `array.array`: Much slower. Cython has a fast path but it is incompatible with PyPy.
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47405 | 2022-06-17T11:22:59Z | 2022-10-03T21:03:13Z | 2022-10-03T21:03:13Z | 2022-10-13T16:59:53Z |
SAS7BDAT parser: Faster string parsing | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 7aa1c1e84aa09..e63107a8eb243 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -799,6 +799,7 @@ Performance improvements
- Performance improvement in :class:`BusinessHour` ``str`` and ``repr`` (:issue:`44764`)
- Performance improvement in datetime arrays string formatting when one of the default strftime formats ``"%Y-%m-%d %H:%M:%S"`` or ``"%Y-%m-%d %H:%M:%S.%f"`` is used. (:issue:`44764`)
- Performance improvement in :meth:`Series.to_sql` and :meth:`DataFrame.to_sql` (:class:`SQLiteTable`) when processing time arrays. (:issue:`44764`)
+- Performance improvements to :func:`read_sas` (:issue:`47403`, :issue:`47404`, :issue:`47405`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index 9ea1c31c3d5cf..d8591c0b033a6 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -424,8 +424,11 @@ cdef class Parser:
jb += 1
elif column_types[j] == column_type_string:
# string
- string_chunk[js, current_row] = np.array(source[start:(
- start + lngt)]).tobytes().rstrip(b"\x00 ")
+ # Skip trailing whitespace. This is equivalent to calling
+ # .rstrip(b"\x00 ") but without Python call overhead.
+ while lngt > 0 and source[start+lngt-1] in b"\x00 ":
+ lngt -= 1
+ string_chunk[js, current_row] = (&source[start])[:lngt]
js += 1
self.current_row_on_page_index += 1
| Speed up SAS7BDAT string reading.
Today this brings a modest 10% performance improvement. But together with the other changes I will be proposing it will be a major bottleneck.
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47404 | 2022-06-17T11:17:00Z | 2022-07-10T15:55:13Z | 2022-07-10T15:55:13Z | 2022-07-11T17:21:34Z |
SAS7BDAT parser: Fast byteswap | diff --git a/doc/source/whatsnew/v1.6.0.rst b/doc/source/whatsnew/v1.6.0.rst
index 3c7a80f096844..dce505729b3ea 100644
--- a/doc/source/whatsnew/v1.6.0.rst
+++ b/doc/source/whatsnew/v1.6.0.rst
@@ -149,7 +149,7 @@ Performance improvements
- Performance improvement in :meth:`DataFrame.join` when joining on a subset of a :class:`MultiIndex` (:issue:`48611`)
- Performance improvement for :meth:`MultiIndex.intersection` (:issue:`48604`)
- Performance improvement in ``var`` for nullable dtypes (:issue:`48379`).
-- Performance improvement to :func:`read_sas` with ``blank_missing=True`` (:issue:`48502`)
+- Performance improvements to :func:`read_sas` (:issue:`47403`, :issue:`47405`, :issue:`47656`, :issue:`48502`)
- Memory improvement in :meth:`RangeIndex.sort_values` (:issue:`48801`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/sas/_byteswap.pyi b/pandas/io/sas/_byteswap.pyi
new file mode 100644
index 0000000000000..bb0dbfc6a50b1
--- /dev/null
+++ b/pandas/io/sas/_byteswap.pyi
@@ -0,0 +1,5 @@
+def read_float_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ...
+def read_double_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ...
+def read_uint16_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...
+def read_uint32_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...
+def read_uint64_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...
diff --git a/pandas/io/sas/byteswap.pyx b/pandas/io/sas/byteswap.pyx
new file mode 100644
index 0000000000000..4620403910274
--- /dev/null
+++ b/pandas/io/sas/byteswap.pyx
@@ -0,0 +1,92 @@
+"""
+The following are faster versions of struct.unpack that avoid the overhead of Python function calls.
+
+In the SAS7BDAT parser, they may be called up to (n_rows * n_cols) times.
+"""
+from cython cimport Py_ssize_t
+from libc.stdint cimport (
+ uint16_t,
+ uint32_t,
+ uint64_t,
+)
+
+
+def read_float_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
+ assert offset + 4 < len(data)
+ cdef:
+ const char *data_ptr = data
+ float res = (<float*>(data_ptr + offset))[0]
+ if byteswap:
+ res = _byteswap_float(res)
+ return res
+
+
+def read_double_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
+ assert offset + 8 < len(data)
+ cdef:
+ const char *data_ptr = data
+ double res = (<double*>(data_ptr + offset))[0]
+ if byteswap:
+ res = _byteswap_double(res)
+ return res
+
+
+def read_uint16_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
+ assert offset + 2 < len(data)
+ cdef:
+ const char *data_ptr = data
+ uint16_t res = (<uint16_t *>(data_ptr + offset))[0]
+ if byteswap:
+ res = _byteswap2(res)
+ return res
+
+
+def read_uint32_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
+ assert offset + 4 < len(data)
+ cdef:
+ const char *data_ptr = data
+ uint32_t res = (<uint32_t *>(data_ptr + offset))[0]
+ if byteswap:
+ res = _byteswap4(res)
+ return res
+
+
+def read_uint64_with_byteswap(bytes data, Py_ssize_t offset, bint byteswap):
+ assert offset + 8 < len(data)
+ cdef:
+ const char *data_ptr = data
+ uint64_t res = (<uint64_t *>(data_ptr + offset))[0]
+ if byteswap:
+ res = _byteswap8(res)
+ return res
+
+
+# Byteswapping
+
+cdef extern from *:
+ """
+ #ifdef _MSC_VER
+ #define _byteswap2 _byteswap_ushort
+ #define _byteswap4 _byteswap_ulong
+ #define _byteswap8 _byteswap_uint64
+ #else
+ #define _byteswap2 __builtin_bswap16
+ #define _byteswap4 __builtin_bswap32
+ #define _byteswap8 __builtin_bswap64
+ #endif
+ """
+ uint16_t _byteswap2(uint16_t)
+ uint32_t _byteswap4(uint32_t)
+ uint64_t _byteswap8(uint64_t)
+
+
+cdef inline float _byteswap_float(float num):
+ cdef uint32_t *intptr = <uint32_t *>&num
+ intptr[0] = _byteswap4(intptr[0])
+ return num
+
+
+cdef inline double _byteswap_double(double num):
+ cdef uint64_t *intptr = <uint64_t *>&num
+ intptr[0] = _byteswap8(intptr[0])
+ return num
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 9f16e0def0882..a60c1eb025218 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -20,7 +20,7 @@
datetime,
timedelta,
)
-import struct
+import sys
from typing import cast
import numpy as np
@@ -42,6 +42,13 @@
)
from pandas.io.common import get_handle
+from pandas.io.sas._byteswap import (
+ read_double_with_byteswap,
+ read_float_with_byteswap,
+ read_uint16_with_byteswap,
+ read_uint32_with_byteswap,
+ read_uint64_with_byteswap,
+)
from pandas.io.sas._sas import (
Parser,
get_subheader_index,
@@ -263,8 +270,10 @@ def _get_properties(self) -> None:
buf = self._read_bytes(const.endianness_offset, const.endianness_length)
if buf == b"\x01":
self.byte_order = "<"
+ self.need_byteswap = sys.byteorder == "big"
else:
self.byte_order = ">"
+ self.need_byteswap = sys.byteorder == "little"
# Get encoding information
buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
@@ -286,7 +295,7 @@ def _get_properties(self) -> None:
)
self.date_modified = epoch + pd.to_timedelta(x, unit="s")
- self.header_length = self._read_int(
+ self.header_length = self._read_uint(
const.header_size_offset + align1, const.header_size_length
)
@@ -298,7 +307,7 @@ def _get_properties(self) -> None:
if len(self._cached_page) != self.header_length: # type: ignore[arg-type]
raise ValueError("The SAS7BDAT file appears to be truncated.")
- self._page_length = self._read_int(
+ self._page_length = self._read_uint(
const.page_size_offset + align1, const.page_size_length
)
@@ -311,37 +320,46 @@ def __next__(self) -> DataFrame:
# Read a single float of the given width (4 or 8).
def _read_float(self, offset: int, width: int):
- if width not in (4, 8):
+ assert self._cached_page is not None
+ if width == 4:
+ return read_float_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 8:
+ return read_double_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ else:
self.close()
raise ValueError("invalid float width")
- buf = self._read_bytes(offset, width)
- fd = "f" if width == 4 else "d"
- return struct.unpack(self.byte_order + fd, buf)[0]
- # Read a single signed integer of the given width (1, 2, 4 or 8).
- def _read_int(self, offset: int, width: int) -> int:
- if width not in (1, 2, 4, 8):
+ # Read a single unsigned integer of the given width (1, 2, 4 or 8).
+ def _read_uint(self, offset: int, width: int) -> int:
+ assert self._cached_page is not None
+ if width == 1:
+ return self._read_bytes(offset, 1)[0]
+ elif width == 2:
+ return read_uint16_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 4:
+ return read_uint32_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 8:
+ return read_uint64_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ else:
self.close()
raise ValueError("invalid int width")
- buf = self._read_bytes(offset, width)
- it = {1: "b", 2: "h", 4: "l", 8: "q"}[width]
- iv = struct.unpack(self.byte_order + it, buf)[0]
- return iv
def _read_bytes(self, offset: int, length: int):
- if self._cached_page is None:
- self._path_or_buf.seek(offset)
- buf = self._path_or_buf.read(length)
- if len(buf) < length:
- self.close()
- msg = f"Unable to read {length:d} bytes from file position {offset:d}."
- raise ValueError(msg)
- return buf
- else:
- if offset + length > len(self._cached_page):
- self.close()
- raise ValueError("The cached page is too small.")
- return self._cached_page[offset : offset + length]
+ assert self._cached_page is not None
+ if offset + length > len(self._cached_page):
+ self.close()
+ raise ValueError("The cached page is too small.")
+ return self._cached_page[offset : offset + length]
def _read_and_convert_header_text(self, offset: int, length: int) -> str | bytes:
return self._convert_header_text(
@@ -375,12 +393,12 @@ def _read_page_header(self) -> None:
bit_offset = self._page_bit_offset
tx = const.page_type_offset + bit_offset
self._current_page_type = (
- self._read_int(tx, const.page_type_length) & const.page_type_mask2
+ self._read_uint(tx, const.page_type_length) & const.page_type_mask2
)
tx = const.block_count_offset + bit_offset
- self._current_page_block_count = self._read_int(tx, const.block_count_length)
+ self._current_page_block_count = self._read_uint(tx, const.block_count_length)
tx = const.subheader_count_offset + bit_offset
- self._current_page_subheaders_count = self._read_int(
+ self._current_page_subheaders_count = self._read_uint(
tx, const.subheader_count_length
)
@@ -391,16 +409,16 @@ def _process_page_metadata(self) -> None:
offset = const.subheader_pointers_offset + bit_offset
total_offset = offset + self._subheader_pointer_length * i
- subheader_offset = self._read_int(total_offset, self._int_length)
+ subheader_offset = self._read_uint(total_offset, self._int_length)
total_offset += self._int_length
- subheader_length = self._read_int(total_offset, self._int_length)
+ subheader_length = self._read_uint(total_offset, self._int_length)
total_offset += self._int_length
- subheader_compression = self._read_int(total_offset, 1)
+ subheader_compression = self._read_uint(total_offset, 1)
total_offset += 1
- subheader_type = self._read_int(total_offset, 1)
+ subheader_type = self._read_uint(total_offset, 1)
if (
subheader_length == 0
@@ -442,29 +460,29 @@ def _process_rowsize_subheader(self, offset: int, length: int) -> None:
lcs_offset += 354
lcp_offset += 378
- self.row_length = self._read_int(
+ self.row_length = self._read_uint(
offset + const.row_length_offset_multiplier * int_len,
int_len,
)
- self.row_count = self._read_int(
+ self.row_count = self._read_uint(
offset + const.row_count_offset_multiplier * int_len,
int_len,
)
- self.col_count_p1 = self._read_int(
+ self.col_count_p1 = self._read_uint(
offset + const.col_count_p1_multiplier * int_len, int_len
)
- self.col_count_p2 = self._read_int(
+ self.col_count_p2 = self._read_uint(
offset + const.col_count_p2_multiplier * int_len, int_len
)
mx = const.row_count_on_mix_page_offset_multiplier * int_len
- self._mix_page_row_count = self._read_int(offset + mx, int_len)
- self._lcs = self._read_int(lcs_offset, 2)
- self._lcp = self._read_int(lcp_offset, 2)
+ self._mix_page_row_count = self._read_uint(offset + mx, int_len)
+ self._lcs = self._read_uint(lcs_offset, 2)
+ self._lcp = self._read_uint(lcp_offset, 2)
def _process_columnsize_subheader(self, offset: int, length: int) -> None:
int_len = self._int_length
offset += int_len
- self.column_count = self._read_int(offset, int_len)
+ self.column_count = self._read_uint(offset, int_len)
if self.col_count_p1 + self.col_count_p2 != self.column_count:
print(
f"Warning: column count mismatch ({self.col_count_p1} + "
@@ -478,7 +496,7 @@ def _process_subheader_counts(self, offset: int, length: int) -> None:
def _process_columntext_subheader(self, offset: int, length: int) -> None:
offset += self._int_length
- text_block_size = self._read_int(offset, const.text_block_size_length)
+ text_block_size = self._read_uint(offset, const.text_block_size_length)
buf = self._read_bytes(offset, text_block_size)
cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
@@ -542,13 +560,13 @@ def _process_columnname_subheader(self, offset: int, length: int) -> None:
+ const.column_name_length_offset
)
- idx = self._read_int(
+ idx = self._read_uint(
text_subheader, const.column_name_text_subheader_length
)
- col_offset = self._read_int(
+ col_offset = self._read_uint(
col_name_offset, const.column_name_offset_length
)
- col_len = self._read_int(col_name_length, const.column_name_length_length)
+ col_len = self._read_uint(col_name_length, const.column_name_length_length)
name_raw = self.column_names_raw[idx]
cname = name_raw[col_offset : col_offset + col_len]
@@ -571,13 +589,13 @@ def _process_columnattributes_subheader(self, offset: int, length: int) -> None:
offset + 2 * int_len + const.column_type_offset + i * (int_len + 8)
)
- x = self._read_int(col_data_offset, int_len)
+ x = self._read_uint(col_data_offset, int_len)
self._column_data_offsets.append(x)
- x = self._read_int(col_data_len, const.column_data_length_length)
+ x = self._read_uint(col_data_len, const.column_data_length_length)
self._column_data_lengths.append(x)
- x = self._read_int(col_types, const.column_type_length)
+ x = self._read_uint(col_types, const.column_type_length)
self._column_types.append(b"d" if x == 1 else b"s")
def _process_columnlist_subheader(self, offset: int, length: int) -> None:
@@ -597,23 +615,25 @@ def _process_format_subheader(self, offset: int, length: int) -> None:
col_label_offset = offset + const.column_label_offset_offset + 3 * int_len
col_label_len = offset + const.column_label_length_offset + 3 * int_len
- x = self._read_int(
+ x = self._read_uint(
text_subheader_format, const.column_format_text_subheader_index_length
)
format_idx = min(x, len(self.column_names_raw) - 1)
- format_start = self._read_int(
+ format_start = self._read_uint(
col_format_offset, const.column_format_offset_length
)
- format_len = self._read_int(col_format_len, const.column_format_length_length)
+ format_len = self._read_uint(col_format_len, const.column_format_length_length)
- label_idx = self._read_int(
+ label_idx = self._read_uint(
text_subheader_label, const.column_label_text_subheader_index_length
)
label_idx = min(label_idx, len(self.column_names_raw) - 1)
- label_start = self._read_int(col_label_offset, const.column_label_offset_length)
- label_len = self._read_int(col_label_len, const.column_label_length_length)
+ label_start = self._read_uint(
+ col_label_offset, const.column_label_offset_length
+ )
+ label_len = self._read_uint(col_label_len, const.column_label_length_length)
label_names = self.column_names_raw[label_idx]
column_label = self._convert_header_text(
diff --git a/pandas/tests/io/sas/test_byteswap.py b/pandas/tests/io/sas/test_byteswap.py
new file mode 100644
index 0000000000000..2c88907df3b1d
--- /dev/null
+++ b/pandas/tests/io/sas/test_byteswap.py
@@ -0,0 +1,54 @@
+from hypothesis import (
+ assume,
+ example,
+ given,
+ strategies as st,
+)
+import numpy as np
+import pytest
+
+import pandas._testing as tm
+
+from pandas.io.sas._byteswap import (
+ read_double_with_byteswap,
+ read_float_with_byteswap,
+ read_uint16_with_byteswap,
+ read_uint32_with_byteswap,
+ read_uint64_with_byteswap,
+)
+
+
+@given(read_offset=st.integers(0, 11), number=st.integers(min_value=0))
+@example(number=2**16, read_offset=0)
+@example(number=2**32, read_offset=0)
+@example(number=2**64, read_offset=0)
+@pytest.mark.parametrize("int_type", [np.uint16, np.uint32, np.uint64])
+@pytest.mark.parametrize("should_byteswap", [True, False])
+def test_int_byteswap(read_offset, number, int_type, should_byteswap):
+ assume(number < 2 ** (8 * int_type(0).itemsize))
+ _test(number, int_type, read_offset, should_byteswap)
+
+
+@given(read_offset=st.integers(0, 11), number=st.floats())
+@pytest.mark.parametrize("float_type", [np.float32, np.float64])
+@pytest.mark.parametrize("should_byteswap", [True, False])
+def test_float_byteswap(read_offset, number, float_type, should_byteswap):
+ _test(number, float_type, read_offset, should_byteswap)
+
+
+def _test(number, number_type, read_offset, should_byteswap):
+ number = number_type(number)
+ data = np.random.default_rng().integers(0, 256, size=20, dtype="uint8")
+ data[read_offset : read_offset + number.itemsize] = number[None].view("uint8")
+ swap_func = {
+ np.float32: read_float_with_byteswap,
+ np.float64: read_double_with_byteswap,
+ np.uint16: read_uint16_with_byteswap,
+ np.uint32: read_uint32_with_byteswap,
+ np.uint64: read_uint64_with_byteswap,
+ }[type(number)]
+ output_number = number_type(swap_func(bytes(data), read_offset, should_byteswap))
+ if should_byteswap:
+ tm.assert_equal(output_number, number.byteswap())
+ else:
+ tm.assert_equal(output_number, number)
diff --git a/setup.py b/setup.py
index a6691ae6f1047..0e489c4c9b017 100755
--- a/setup.py
+++ b/setup.py
@@ -226,6 +226,7 @@ class CheckSDist(sdist_class):
"pandas/_libs/window/indexers.pyx",
"pandas/_libs/writers.pyx",
"pandas/io/sas/sas.pyx",
+ "pandas/io/sas/byteswap.pyx",
]
_cpp_pyxfiles = [
@@ -571,6 +572,7 @@ def srcpath(name=None, suffix=".pyx", subdir="src"):
"_libs.window.indexers": {"pyxfile": "_libs/window/indexers"},
"_libs.writers": {"pyxfile": "_libs/writers"},
"io.sas._sas": {"pyxfile": "io/sas/sas"},
+ "io.sas._byteswap": {"pyxfile": "io/sas/byteswap"},
}
extensions = []
| Speed up SAS7BDAT int/float reading.
This is order of magnitude faster than using `struct.unpack(fmt, data)` or `precompiled_unpacker = struct.Struct(fmt).unpack; ...; precompiled_unpacker(data)`.
Unfortunately Python does not expose a low-level interface to `struct` or a byteswapping interface. The byteswap implementation in this change is from `pyreadstat`.
Today this brings a modest 10-20% performance improvement. But together with the other changes I will be proposing it will be a major bottleneck.
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47403 | 2022-06-17T11:15:19Z | 2022-10-05T16:02:59Z | 2022-10-05T16:02:59Z | 2022-10-13T16:59:52Z |
BUG: read_excel raising uncontrolled IndexError when header references non-existing rows | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 76f6e864a174f..fa95ad06bd7ca 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -863,6 +863,7 @@ I/O
- Bug in :func:`read_csv` not respecting a specified converter to index columns in all cases (:issue:`40589`)
- Bug in :func:`read_parquet` when ``engine="pyarrow"`` which caused partial write to disk when column of unsupported datatype was passed (:issue:`44914`)
- Bug in :func:`DataFrame.to_excel` and :class:`ExcelWriter` would raise when writing an empty DataFrame to a ``.ods`` file (:issue:`45793`)
+- Bug in :func:`read_excel` raising uncontrolled ``IndexError`` when ``header`` references non-existing rows (:issue:`43143`)
- Bug in :func:`read_html` where elements surrounding ``<br>`` were joined without a space between them (:issue:`29528`)
- Bug in :func:`read_csv` when data is longer than header leading to issues with callables in ``usecols`` expecting strings (:issue:`46997`)
- Bug in Parquet roundtrip for Interval dtype with ``datetime64[ns]`` subtype (:issue:`45881`)
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index d20f347e54d6b..24b881bda4805 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -774,6 +774,12 @@ def parse(
assert isinstance(skiprows, int)
row += skiprows
+ if row > len(data) - 1:
+ raise ValueError(
+ f"header index {row} exceeds maximum index "
+ f"{len(data) - 1} of data.",
+ )
+
data[row], control_row = fill_mi_header(data[row], control_row)
if index_col is not None:
diff --git a/pandas/tests/io/data/excel/df_header_oob.xlsx b/pandas/tests/io/data/excel/df_header_oob.xlsx
new file mode 100644
index 0000000000000..1e26091cd2ace
Binary files /dev/null and b/pandas/tests/io/data/excel/df_header_oob.xlsx differ
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index f6c60b11cc8ff..4ca34bec0a7d9 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -1556,6 +1556,12 @@ def test_excel_read_binary_via_read_excel(self, read_ext, engine):
expected = pd.read_excel("test1" + read_ext, engine=engine)
tm.assert_frame_equal(result, expected)
+ def test_read_excel_header_index_out_of_range(self, engine):
+ # GH#43143
+ with open("df_header_oob.xlsx", "rb") as f:
+ with pytest.raises(ValueError, match="exceeds maximum"):
+ pd.read_excel(f, header=[0, 1])
+
@pytest.mark.parametrize("filename", ["df_empty.xlsx", "df_equals.xlsx"])
def test_header_with_index_col(self, filename):
# GH 33476
| - [x] closes #43143 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47399 | 2022-06-17T09:43:33Z | 2022-06-21T21:57:20Z | 2022-06-21T21:57:19Z | 2022-06-21T21:58:16Z |
BUG: read_csv may interpret second row as index names even if index_col is False | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 76f6e864a174f..2346a86ad21f8 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -861,6 +861,7 @@ I/O
- Bug in :func:`read_csv` not recognizing line break for ``on_bad_lines="warn"`` for ``engine="c"`` (:issue:`41710`)
- Bug in :meth:`DataFrame.to_csv` not respecting ``float_format`` for ``Float64`` dtype (:issue:`45991`)
- Bug in :func:`read_csv` not respecting a specified converter to index columns in all cases (:issue:`40589`)
+- Bug in :func:`read_csv` interpreting second row as :class:`Index` names even when ``index_col=False`` (:issue:`46569`)
- Bug in :func:`read_parquet` when ``engine="pyarrow"`` which caused partial write to disk when column of unsupported datatype was passed (:issue:`44914`)
- Bug in :func:`DataFrame.to_excel` and :class:`ExcelWriter` would raise when writing an empty DataFrame to a ``.ods`` file (:issue:`45793`)
- Bug in :func:`read_html` where elements surrounding ``<br>`` were joined without a space between them (:issue:`29528`)
diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py
index 37b2ce4c4148b..2bf66923ddd60 100644
--- a/pandas/io/parsers/python_parser.py
+++ b/pandas/io/parsers/python_parser.py
@@ -933,7 +933,11 @@ def _get_index_name(
implicit_first_cols = len(line) - self.num_original_columns
# Case 0
- if next_line is not None and self.header is not None:
+ if (
+ next_line is not None
+ and self.header is not None
+ and index_col is not False
+ ):
if len(next_line) == len(line) + self.num_original_columns:
# column and index names on diff rows
self.index_col = list(range(len(line)))
diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py
index abe6c831dd4e4..0717078a83a46 100644
--- a/pandas/tests/io/parser/test_python_parser_only.py
+++ b/pandas/tests/io/parser/test_python_parser_only.py
@@ -466,6 +466,17 @@ def test_index_col_false_and_header_none(python_parser_only):
0.5,0.03
0.1,0.2,0.3,2
"""
- result = parser.read_csv(StringIO(data), sep=",", header=None, index_col=False)
+ with tm.assert_produces_warning(ParserWarning, match="Length of header"):
+ result = parser.read_csv(StringIO(data), sep=",", header=None, index_col=False)
expected = DataFrame({0: [0.5, 0.1], 1: [0.03, 0.2]})
tm.assert_frame_equal(result, expected)
+
+
+def test_header_int_do_not_infer_multiindex_names_on_different_line(python_parser_only):
+ # GH#46569
+ parser = python_parser_only
+ data = StringIO("a\na,b\nc,d,e\nf,g,h")
+ with tm.assert_produces_warning(ParserWarning, match="Length of header"):
+ result = parser.read_csv(data, engine="python", index_col=False)
+ expected = DataFrame({"a": ["a", "c", "f"]})
+ tm.assert_frame_equal(result, expected)
| - [x] closes #46569 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
I think index_col should take precedence over guessing | https://api.github.com/repos/pandas-dev/pandas/pulls/47397 | 2022-06-17T08:38:15Z | 2022-06-21T18:45:42Z | 2022-06-21T18:45:41Z | 2022-06-21T19:50:19Z |
REF: do masking in checked_add_with_arr | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 888e943488953..db76f5c51752f 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1017,10 +1017,10 @@ def rank(
def checked_add_with_arr(
arr: npt.NDArray[np.int64],
- b,
+ b: int | npt.NDArray[np.int64],
arr_mask: npt.NDArray[np.bool_] | None = None,
b_mask: npt.NDArray[np.bool_] | None = None,
-) -> np.ndarray:
+) -> npt.NDArray[np.int64]:
"""
Perform array addition that checks for underflow and overflow.
@@ -1098,7 +1098,12 @@ def checked_add_with_arr(
if to_raise:
raise OverflowError("Overflow in int64 addition")
- return arr + b
+
+ result = arr + b
+ if arr_mask is not None or b2_mask is not None:
+ np.putmask(result, ~not_nan, iNaT)
+
+ return result
# --------------- #
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 1dfb070e29c30..122b61e2e351d 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1104,8 +1104,12 @@ def _add_datetimelike_scalar(self, other):
return DatetimeArray(result)
i8 = self.asi8
- result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan)
- result = self._maybe_mask_results(result)
+ # Incompatible types in assignment (expression has type "ndarray[Any,
+ # dtype[signedinteger[_64Bit]]]", variable has type
+ # "ndarray[Any, dtype[datetime64]]")
+ result = checked_add_with_arr( # type: ignore[assignment]
+ i8, other.value, arr_mask=self._isnan
+ )
dtype = DatetimeTZDtype(tz=other.tz) if other.tz else DT64NS_DTYPE
return DatetimeArray(result, dtype=dtype, freq=self.freq)
@@ -1146,7 +1150,6 @@ def _sub_datetimelike_scalar(self, other: datetime | np.datetime64):
i8 = self.asi8
result = checked_add_with_arr(i8, -other.value, arr_mask=self._isnan)
- result = self._maybe_mask_results(result)
return result.view("timedelta64[ns]")
@final
@@ -1168,14 +1171,13 @@ def _sub_datetime_arraylike(self, other):
self_i8 = self.asi8
other_i8 = other.asi8
- arr_mask = self._isnan | other._isnan
- new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask)
- if self._hasna or other._hasna:
- np.putmask(new_values, arr_mask, iNaT)
+ new_values = checked_add_with_arr(
+ self_i8, -other_i8, arr_mask=self._isnan, b_mask=other._isnan
+ )
return new_values.view("timedelta64[ns]")
@final
- def _sub_period(self, other: Period):
+ def _sub_period(self, other: Period) -> npt.NDArray[np.object_]:
if not is_period_dtype(self.dtype):
raise TypeError(f"cannot subtract Period from a {type(self).__name__}")
@@ -1183,8 +1185,8 @@ def _sub_period(self, other: Period):
# of DateOffsets. Null entries are filled with pd.NaT
self._check_compatible_with(other)
asi8 = self.asi8
- new_data = asi8 - other.ordinal
- new_data = np.array([self.freq.base * x for x in new_data])
+ new_i8_data = asi8 - other.ordinal # TODO: checked_add_with_arr
+ new_data = np.array([self.freq.base * x for x in new_i8_data])
if self._hasna:
new_data[self._isnan] = NaT
@@ -1192,7 +1194,7 @@ def _sub_period(self, other: Period):
return new_data
@final
- def _add_period(self, other: Period):
+ def _add_period(self, other: Period) -> PeriodArray:
if not is_timedelta64_dtype(self.dtype):
raise TypeError(f"cannot add Period to a {type(self).__name__}")
@@ -1225,8 +1227,6 @@ def _add_timedeltalike_scalar(self, other):
inc = delta_to_nanoseconds(other, reso=self._reso) # type: ignore[attr-defined]
new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan)
- new_values = new_values.view("i8")
- new_values = self._maybe_mask_results(new_values)
new_values = new_values.view(self._ndarray.dtype)
new_freq = None
@@ -1262,10 +1262,6 @@ def _add_timedelta_arraylike(
new_values = checked_add_with_arr(
self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan
)
- if self._hasna or other._hasna:
- mask = self._isnan | other._isnan
- np.putmask(new_values, mask, iNaT)
-
return type(self)(new_values, dtype=self.dtype)
@final
@@ -1309,11 +1305,11 @@ def _sub_period_array(self, other: PeriodArray) -> npt.NDArray[np.object_]:
self = cast("PeriodArray", self)
self._require_matching_freq(other)
- new_values = checked_add_with_arr(
+ new_i8_values = checked_add_with_arr(
self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan
)
- new_values = np.array([self.freq.base * x for x in new_values])
+ new_values = np.array([self.freq.base * x for x in new_i8_values])
if self._hasna or other._hasna:
mask = self._isnan | other._isnan
new_values[mask] = NaT
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index b6d21cd9dac54..cab9a8a565145 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -733,8 +733,6 @@ def _addsub_int_array_or_scalar(
if op is operator.sub:
other = -other
res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan)
- res_values = res_values.view("i8")
- np.putmask(res_values, self._isnan, iNaT)
return type(self)(res_values, freq=self.freq)
def _add_offset(self, other: BaseOffset):
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47396 | 2022-06-17T02:33:50Z | 2022-06-21T19:05:14Z | 2022-06-21T19:05:14Z | 2022-06-21T20:50:56Z |
implement abbrev_to_npy_unit | diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 6cbc06830471e..fb77e2b5f3a0c 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -31,7 +31,10 @@ from cpython.datetime cimport (
import_datetime()
from pandas._libs.tslibs.base cimport ABCTimestamp
-from pandas._libs.tslibs.dtypes cimport periods_per_second
+from pandas._libs.tslibs.dtypes cimport (
+ abbrev_to_npy_unit,
+ periods_per_second,
+)
from pandas._libs.tslibs.np_datetime cimport (
NPY_DATETIMEUNIT,
NPY_FR_ns,
@@ -139,35 +142,36 @@ cpdef inline (int64_t, int) precision_from_unit(str unit):
cdef:
int64_t m
int p
+ NPY_DATETIMEUNIT reso = abbrev_to_npy_unit(unit)
- if unit == "Y":
+ if reso == NPY_DATETIMEUNIT.NPY_FR_Y:
m = 1_000_000_000 * 31556952
p = 9
- elif unit == "M":
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_M:
m = 1_000_000_000 * 2629746
p = 9
- elif unit == "W":
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_W:
m = 1_000_000_000 * 3600 * 24 * 7
p = 9
- elif unit == "D" or unit == "d":
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_D:
m = 1_000_000_000 * 3600 * 24
p = 9
- elif unit == "h":
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_h:
m = 1_000_000_000 * 3600
p = 9
- elif unit == "m":
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_m:
m = 1_000_000_000 * 60
p = 9
- elif unit == "s":
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_s:
m = 1_000_000_000
p = 9
- elif unit == "ms":
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_ms:
m = 1_000_000
p = 6
- elif unit == "us":
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_us:
m = 1000
p = 3
- elif unit == "ns" or unit is None:
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_ns or reso == NPY_DATETIMEUNIT.NPY_FR_GENERIC:
m = 1
p = 0
else:
diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd
index e16a389bc5459..dc2a1b186edcf 100644
--- a/pandas/_libs/tslibs/dtypes.pxd
+++ b/pandas/_libs/tslibs/dtypes.pxd
@@ -4,6 +4,7 @@ from pandas._libs.tslibs.np_datetime cimport NPY_DATETIMEUNIT
cdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit)
+cdef NPY_DATETIMEUNIT abbrev_to_npy_unit(str abbrev)
cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) nogil
cpdef int64_t periods_per_day(NPY_DATETIMEUNIT reso=*) except? -1
cdef int64_t periods_per_second(NPY_DATETIMEUNIT reso) except? -1
diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index f843f6ccdfc58..a340fe477e982 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -313,6 +313,39 @@ cdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit):
raise NotImplementedError(unit)
+cdef NPY_DATETIMEUNIT abbrev_to_npy_unit(str abbrev):
+ if abbrev == "Y":
+ return NPY_DATETIMEUNIT.NPY_FR_Y
+ elif abbrev == "M":
+ return NPY_DATETIMEUNIT.NPY_FR_M
+ elif abbrev == "W":
+ return NPY_DATETIMEUNIT.NPY_FR_W
+ elif abbrev == "D" or abbrev == "d":
+ return NPY_DATETIMEUNIT.NPY_FR_D
+ elif abbrev == "h":
+ return NPY_DATETIMEUNIT.NPY_FR_h
+ elif abbrev == "m":
+ return NPY_DATETIMEUNIT.NPY_FR_m
+ elif abbrev == "s":
+ return NPY_DATETIMEUNIT.NPY_FR_s
+ elif abbrev == "ms":
+ return NPY_DATETIMEUNIT.NPY_FR_ms
+ elif abbrev == "us":
+ return NPY_DATETIMEUNIT.NPY_FR_us
+ elif abbrev == "ns":
+ return NPY_DATETIMEUNIT.NPY_FR_ns
+ elif abbrev == "ps":
+ return NPY_DATETIMEUNIT.NPY_FR_ps
+ elif abbrev == "fs":
+ return NPY_DATETIMEUNIT.NPY_FR_fs
+ elif abbrev == "as":
+ return NPY_DATETIMEUNIT.NPY_FR_as
+ elif abbrev is None:
+ return NPY_DATETIMEUNIT.NPY_FR_GENERIC
+ else:
+ raise ValueError(f"Unrecognized unit {abbrev}")
+
+
cdef NPY_DATETIMEUNIT freq_group_code_to_npy_unit(int freq) nogil:
"""
Convert the freq to the corresponding NPY_DATETIMEUNIT to pass
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47395 | 2022-06-17T02:17:47Z | 2022-06-21T19:02:14Z | 2022-06-21T19:02:14Z | 2022-06-21T20:52:25Z |
ENH: consistent add/sub behavior for mixed resolutions | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index d75fa5c91a3df..5fd3e33808800 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -791,8 +791,19 @@ def _binary_op_method_timedeltalike(op, name):
# e.g. if original other was timedelta64('NaT')
return NaT
- if self._reso != other._reso:
- raise NotImplementedError
+ # We allow silent casting to the lower resolution if and only
+ # if it is lossless.
+ try:
+ if self._reso < other._reso:
+ other = (<_Timedelta>other)._as_reso(self._reso, round_ok=False)
+ elif self._reso > other._reso:
+ self = (<_Timedelta>self)._as_reso(other._reso, round_ok=False)
+ except ValueError as err:
+ raise ValueError(
+ "Timedelta addition/subtraction with mismatched resolutions is not "
+ "allowed when casting to the lower resolution would require "
+ "lossy rounding."
+ ) from err
res = op(self.value, other.value)
if res == NPY_NAT:
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 7c503132fb9fc..aedecc33ceee9 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -103,6 +103,7 @@ from pandas._libs.tslibs.offsets cimport (
to_offset,
)
from pandas._libs.tslibs.timedeltas cimport (
+ _Timedelta,
delta_to_nanoseconds,
ensure_td64ns,
is_any_td_scalar,
@@ -384,11 +385,36 @@ cdef class _Timestamp(ABCTimestamp):
# TODO: no tests get here
other = ensure_td64ns(other)
- # TODO: what to do with mismatched resos?
- # TODO: disallow round_ok
- nanos = delta_to_nanoseconds(
- other, reso=self._reso, round_ok=True
- )
+ if isinstance(other, _Timedelta):
+ # TODO: share this with __sub__, Timedelta.__add__
+ # We allow silent casting to the lower resolution if and only
+ # if it is lossless. See also Timestamp.__sub__
+ # and Timedelta.__add__
+ try:
+ if self._reso < other._reso:
+ other = (<_Timedelta>other)._as_reso(self._reso, round_ok=False)
+ elif self._reso > other._reso:
+ self = (<_Timestamp>self)._as_reso(other._reso, round_ok=False)
+ except ValueError as err:
+ raise ValueError(
+ "Timestamp addition with mismatched resolutions is not "
+ "allowed when casting to the lower resolution would require "
+ "lossy rounding."
+ ) from err
+
+ try:
+ nanos = delta_to_nanoseconds(
+ other, reso=self._reso, round_ok=False
+ )
+ except OutOfBoundsTimedelta:
+ raise
+ except ValueError as err:
+ raise ValueError(
+ "Addition between Timestamp and Timedelta with mismatched "
+ "resolutions is not allowed when casting to the lower "
+ "resolution would require lossy rounding."
+ ) from err
+
try:
new_value = self.value + nanos
except OverflowError:
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 96a60af58dec2..f9cc1c6878068 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -231,6 +231,41 @@ def test_floordiv_numeric(self, td):
assert res.value == td.value // 2
assert res._reso == td._reso
+ def test_addsub_mismatched_reso(self, td):
+ other = Timedelta(days=1) # can losslessly convert to other resos
+
+ result = td + other
+ assert result._reso == td._reso
+ assert result.days == td.days + 1
+
+ result = other + td
+ assert result._reso == td._reso
+ assert result.days == td.days + 1
+
+ result = td - other
+ assert result._reso == td._reso
+ assert result.days == td.days - 1
+
+ result = other - td
+ assert result._reso == td._reso
+ assert result.days == 1 - td.days
+
+ other2 = Timedelta(500) # can't cast losslessly
+
+ msg = (
+ "Timedelta addition/subtraction with mismatched resolutions is "
+ "not allowed when casting to the lower resolution would require "
+ "lossy rounding"
+ )
+ with pytest.raises(ValueError, match=msg):
+ td + other2
+ with pytest.raises(ValueError, match=msg):
+ other2 + td
+ with pytest.raises(ValueError, match=msg):
+ td - other2
+ with pytest.raises(ValueError, match=msg):
+ other2 - td
+
class TestTimedeltaUnaryOps:
def test_invert(self):
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index a02268956651c..353c99688c139 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -966,6 +966,51 @@ def test_sub_datetimelike_mismatched_reso(self, ts_tz):
with pytest.raises(ValueError, match=msg):
other - ts2
+ def test_sub_timedeltalike_mismatched_reso(self, ts_tz):
+ # case with non-lossy rounding
+ ts = ts_tz
+
+ # choose a unit for `other` that doesn't match ts_tz's;
+ # this construction ensures we get cases with other._reso < ts._reso
+ # and cases with other._reso > ts._reso
+ unit = {
+ NpyDatetimeUnit.NPY_FR_us.value: "ms",
+ NpyDatetimeUnit.NPY_FR_ms.value: "s",
+ NpyDatetimeUnit.NPY_FR_s.value: "us",
+ }[ts._reso]
+ other = Timedelta(0)._as_unit(unit)
+ assert other._reso != ts._reso
+
+ result = ts + other
+ assert isinstance(result, Timestamp)
+ assert result == ts
+ assert result._reso == min(ts._reso, other._reso)
+
+ result = other + ts
+ assert isinstance(result, Timestamp)
+ assert result == ts
+ assert result._reso == min(ts._reso, other._reso)
+
+ msg = "Timestamp addition with mismatched resolutions"
+ if ts._reso < other._reso:
+ # Case where rounding is lossy
+ other2 = other + Timedelta._from_value_and_reso(1, other._reso)
+ with pytest.raises(ValueError, match=msg):
+ ts + other2
+ with pytest.raises(ValueError, match=msg):
+ other2 + ts
+ else:
+ ts2 = ts + Timedelta._from_value_and_reso(1, ts._reso)
+ with pytest.raises(ValueError, match=msg):
+ ts2 + other
+ with pytest.raises(ValueError, match=msg):
+ other + ts2
+
+ msg = "Addition between Timestamp and Timedelta with mismatched resolutions"
+ with pytest.raises(ValueError, match=msg):
+ # With a mismatched td64 as opposed to Timedelta
+ ts + np.timedelta64(1, "ns")
+
class TestAsUnit:
def test_as_unit(self):
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47394 | 2022-06-16T22:37:14Z | 2022-06-23T21:30:50Z | 2022-06-23T21:30:50Z | 2022-06-23T22:10:17Z |
CI/TST: Don't require length for construct_1d_arraylike_from_scalar cast to float64 | diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 8d26284a5ce45..4b63d492ec1dd 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -556,7 +556,10 @@ def sanitize_array(
if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
# possibility of nan -> garbage
try:
- subarr = _try_cast(data, dtype, copy, True)
+ # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int
+ # casting aligning with IntCastingNaNError below
+ with np.errstate(invalid="ignore"):
+ subarr = _try_cast(data, dtype, copy, True)
except IntCastingNaNError:
warnings.warn(
"In a future version, passing float-dtype values containing NaN "
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index ed3f9ee525c9e..3f16632353a9d 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1709,7 +1709,9 @@ def construct_1d_arraylike_from_scalar(
value = _maybe_unbox_datetimelike_tz_deprecation(value, dtype)
subarr = np.empty(length, dtype=dtype)
- subarr.fill(value)
+ if length:
+ # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes
+ subarr.fill(value)
return subarr
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 4227d43c459d0..6ce5ffac9de52 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -1200,23 +1200,27 @@ def _maybe_coerce_merge_keys(self) -> None:
# check whether ints and floats
elif is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype):
- if not (lk == lk.astype(rk.dtype))[~np.isnan(lk)].all():
- warnings.warn(
- "You are merging on int and float "
- "columns where the float values "
- "are not equal to their int representation.",
- UserWarning,
- )
+ # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int
+ with np.errstate(invalid="ignore"):
+ if not (lk == lk.astype(rk.dtype))[~np.isnan(lk)].all():
+ warnings.warn(
+ "You are merging on int and float "
+ "columns where the float values "
+ "are not equal to their int representation.",
+ UserWarning,
+ )
continue
elif is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype):
- if not (rk == rk.astype(lk.dtype))[~np.isnan(rk)].all():
- warnings.warn(
- "You are merging on int and float "
- "columns where the float values "
- "are not equal to their int representation.",
- UserWarning,
- )
+ # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int
+ with np.errstate(invalid="ignore"):
+ if not (rk == rk.astype(lk.dtype))[~np.isnan(rk)].all():
+ warnings.warn(
+ "You are merging on int and float "
+ "columns where the float values "
+ "are not equal to their int representation.",
+ UserWarning,
+ )
continue
# let's infer and see if we are ok
| - [x] closes #47391 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47393 | 2022-06-16T21:54:07Z | 2022-06-22T12:07:12Z | 2022-06-22T12:07:12Z | 2022-06-22T16:47:37Z |
Consistent handling of 0-dim in Timedelta arithmetic methods | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 1df5468869df5..d75fa5c91a3df 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -765,8 +765,12 @@ def _binary_op_method_timedeltalike(op, name):
# defined by Timestamp methods.
elif is_array(other):
- # nd-array like
- if other.dtype.kind in ['m', 'M']:
+ if other.ndim == 0:
+ # see also: item_from_zerodim
+ item = cnp.PyArray_ToScalar(cnp.PyArray_DATA(other), other)
+ return f(self, item)
+
+ elif other.dtype.kind in ['m', 'M']:
return op(self.to_timedelta64(), other)
elif other.dtype.kind == 'O':
return np.array([op(self, x) for x in other])
@@ -943,14 +947,18 @@ cdef _timedelta_from_value_and_reso(int64_t value, NPY_DATETIMEUNIT reso):
td_base = _Timedelta.__new__(Timedelta, milliseconds=int(value))
elif reso == NPY_DATETIMEUNIT.NPY_FR_s:
td_base = _Timedelta.__new__(Timedelta, seconds=int(value))
- elif reso == NPY_DATETIMEUNIT.NPY_FR_m:
- td_base = _Timedelta.__new__(Timedelta, minutes=int(value))
- elif reso == NPY_DATETIMEUNIT.NPY_FR_h:
- td_base = _Timedelta.__new__(Timedelta, hours=int(value))
- elif reso == NPY_DATETIMEUNIT.NPY_FR_D:
- td_base = _Timedelta.__new__(Timedelta, days=int(value))
+ # Other resolutions are disabled but could potentially be implemented here:
+ # elif reso == NPY_DATETIMEUNIT.NPY_FR_m:
+ # td_base = _Timedelta.__new__(Timedelta, minutes=int(value))
+ # elif reso == NPY_DATETIMEUNIT.NPY_FR_h:
+ # td_base = _Timedelta.__new__(Timedelta, hours=int(value))
+ # elif reso == NPY_DATETIMEUNIT.NPY_FR_D:
+ # td_base = _Timedelta.__new__(Timedelta, days=int(value))
else:
- raise NotImplementedError(reso)
+ raise NotImplementedError(
+ "Only resolutions 's', 'ms', 'us', 'ns' are supported."
+ )
+
td_base.value = value
td_base._is_populated = 0
@@ -1006,7 +1014,6 @@ cdef class _Timedelta(timedelta):
def __richcmp__(_Timedelta self, object other, int op):
cdef:
_Timedelta ots
- int ndim
if isinstance(other, _Timedelta):
ots = other
@@ -1018,7 +1025,6 @@ cdef class _Timedelta(timedelta):
return op == Py_NE
elif util.is_array(other):
- # TODO: watch out for zero-dim
if other.dtype.kind == "m":
return PyObject_RichCompare(self.asm8, other, op)
elif other.dtype.kind == "O":
@@ -1728,7 +1734,10 @@ class Timedelta(_Timedelta):
)
elif is_array(other):
- # ndarray-like
+ if other.ndim == 0:
+ # see also: item_from_zerodim
+ item = cnp.PyArray_ToScalar(cnp.PyArray_DATA(other), other)
+ return self.__mul__(item)
return other * self.to_timedelta64()
return NotImplemented
@@ -1736,6 +1745,9 @@ class Timedelta(_Timedelta):
__rmul__ = __mul__
def __truediv__(self, other):
+ cdef:
+ int64_t new_value
+
if _should_cast_to_timedelta(other):
# We interpret NaT as timedelta64("NaT")
other = Timedelta(other)
@@ -1758,6 +1770,10 @@ class Timedelta(_Timedelta):
)
elif is_array(other):
+ if other.ndim == 0:
+ # see also: item_from_zerodim
+ item = cnp.PyArray_ToScalar(cnp.PyArray_DATA(other), other)
+ return self.__truediv__(item)
return self.to_timedelta64() / other
return NotImplemented
@@ -1777,9 +1793,17 @@ class Timedelta(_Timedelta):
return float(other.value) / self.value
elif is_array(other):
- if other.dtype.kind == "O":
+ if other.ndim == 0:
+ # see also: item_from_zerodim
+ item = cnp.PyArray_ToScalar(cnp.PyArray_DATA(other), other)
+ return self.__rtruediv__(item)
+ elif other.dtype.kind == "O":
# GH#31869
return np.array([x / self for x in other])
+
+ # TODO: if other.dtype.kind == "m" and other.dtype != self.asm8.dtype
+ # then should disallow for consistency with scalar behavior; requires
+ # deprecation cycle. (or changing scalar behavior)
return other / self.to_timedelta64()
return NotImplemented
@@ -1806,6 +1830,11 @@ class Timedelta(_Timedelta):
return type(self)._from_value_and_reso(self.value // other, self._reso)
elif is_array(other):
+ if other.ndim == 0:
+ # see also: item_from_zerodim
+ item = cnp.PyArray_ToScalar(cnp.PyArray_DATA(other), other)
+ return self.__floordiv__(item)
+
if other.dtype.kind == 'm':
# also timedelta-like
if self._reso != NPY_FR_ns:
@@ -1838,6 +1867,11 @@ class Timedelta(_Timedelta):
return other.value // self.value
elif is_array(other):
+ if other.ndim == 0:
+ # see also: item_from_zerodim
+ item = cnp.PyArray_ToScalar(cnp.PyArray_DATA(other), other)
+ return self.__rfloordiv__(item)
+
if other.dtype.kind == 'm':
# also timedelta-like
if self._reso != NPY_FR_ns:
@@ -1923,23 +1957,17 @@ cdef _broadcast_floordiv_td64(
result : varies based on `other`
"""
# assumes other.dtype.kind == 'm', i.e. other is timedelta-like
+ # assumes other.ndim != 0
# We need to watch out for np.timedelta64('NaT').
mask = other.view('i8') == NPY_NAT
- if other.ndim == 0:
- if mask:
- return np.nan
-
- return operation(value, other.astype('m8[ns]', copy=False).astype('i8'))
-
- else:
- res = operation(value, other.astype('m8[ns]', copy=False).astype('i8'))
+ res = operation(value, other.astype('m8[ns]', copy=False).astype('i8'))
- if mask.any():
- res = res.astype('f8')
- res[mask] = np.nan
- return res
+ if mask.any():
+ res = res.astype('f8')
+ res[mask] = np.nan
+ return res
# resolution in ns
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 8a2810825fc1d..cf36e75127d17 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -215,6 +215,11 @@ cdef class _Timestamp(ABCTimestamp):
if value == NPY_NAT:
return NaT
+ if reso < NPY_DATETIMEUNIT.NPY_FR_s or reso > NPY_DATETIMEUNIT.NPY_FR_ns:
+ raise NotImplementedError(
+ "Only resolutions 's', 'ms', 'us', 'ns' are supported."
+ )
+
obj.value = value
pandas_datetime_to_datetimestruct(value, reso, &obj.dts)
maybe_localize_tso(obj, tz, reso)
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index b618f12e9f6c9..154a6289dfc00 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -14,6 +14,7 @@
TimedeltaIndex,
Timestamp,
notna,
+ offsets,
timedelta_range,
to_timedelta,
)
@@ -346,3 +347,14 @@ def test_contains_nonunique(self):
):
idx = TimedeltaIndex(vals)
assert idx[0] in idx
+
+ def test_contains(self):
+ # Checking for any NaT-like objects
+ # GH#13603
+ td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
+ for v in [NaT, None, float("nan"), np.nan]:
+ assert not (v in td)
+
+ td = to_timedelta([NaT])
+ for v in [NaT, None, float("nan"), np.nan]:
+ assert v in td
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 614245ec7a93e..f3b84388b0f70 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -318,6 +318,26 @@ def test_td_add_sub_dt64_ndarray(self):
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
+ def test_td_add_sub_ndarray_0d(self):
+ td = Timedelta("1 day")
+ other = np.array(td.asm8)
+
+ result = td + other
+ assert isinstance(result, Timedelta)
+ assert result == 2 * td
+
+ result = other + td
+ assert isinstance(result, Timedelta)
+ assert result == 2 * td
+
+ result = other - td
+ assert isinstance(result, Timedelta)
+ assert result == 0 * td
+
+ result = td - other
+ assert isinstance(result, Timedelta)
+ assert result == 0 * td
+
class TestTimedeltaMultiplicationDivision:
"""
@@ -395,6 +415,20 @@ def test_td_mul_numeric_ndarray(self):
result = other * td
tm.assert_numpy_array_equal(result, expected)
+ def test_td_mul_numeric_ndarray_0d(self):
+ td = Timedelta("1 day")
+ other = np.array(2)
+ assert other.ndim == 0
+ expected = Timedelta("2 days")
+
+ res = td * other
+ assert type(res) is Timedelta
+ assert res == expected
+
+ res = other * td
+ assert type(res) is Timedelta
+ assert res == expected
+
def test_td_mul_td64_ndarray_invalid(self):
td = Timedelta("1 day")
other = np.array([Timedelta("2 Days").to_timedelta64()])
@@ -484,6 +518,14 @@ def test_td_div_td64_ndarray(self):
result = other / td
tm.assert_numpy_array_equal(result, expected * 4)
+ def test_td_div_ndarray_0d(self):
+ td = Timedelta("1 day")
+
+ other = np.array(1)
+ res = td / other
+ assert isinstance(res, Timedelta)
+ assert res == td
+
# ---------------------------------------------------------------
# Timedelta.__rdiv__
@@ -539,6 +581,13 @@ def test_td_rdiv_ndarray(self):
with pytest.raises(TypeError, match=msg):
arr / td
+ def test_td_rdiv_ndarray_0d(self):
+ td = Timedelta(10, unit="d")
+
+ arr = np.array(td.asm8)
+
+ assert arr / td == 1
+
# ---------------------------------------------------------------
# Timedelta.__floordiv__
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 99b3bbf0186bb..96a60af58dec2 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -84,15 +84,15 @@ def test_as_unit_rounding(self):
def test_as_unit_non_nano(self):
# case where we are going neither to nor from nano
- td = Timedelta(days=1)._as_unit("D")
+ td = Timedelta(days=1)._as_unit("ms")
assert td.days == 1
- assert td.value == 1
+ assert td.value == 86_400_000
assert td.components.days == 1
assert td._d == 1
assert td.total_seconds() == 86400
- res = td._as_unit("h")
- assert res.value == 24
+ res = td._as_unit("us")
+ assert res.value == 86_400_000_000
assert res.components.days == 1
assert res.components.hours == 0
assert res._d == 1
@@ -677,17 +677,6 @@ def test_round_non_nano(self, unit):
assert res == Timedelta("1 days 02:35:00")
assert res._reso == td._reso
- def test_contains(self):
- # Checking for any NaT-like objects
- # GH 13603
- td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
- for v in [NaT, None, float("nan"), np.nan]:
- assert not (v in td)
-
- td = to_timedelta([NaT])
- for v in [NaT, None, float("nan"), np.nan]:
- assert v in td
-
def test_identity(self):
td = Timedelta(10, unit="d")
diff --git a/pandas/tests/tslibs/test_timedeltas.py b/pandas/tests/tslibs/test_timedeltas.py
index 661bb113e9549..bb1efe38ea7c4 100644
--- a/pandas/tests/tslibs/test_timedeltas.py
+++ b/pandas/tests/tslibs/test_timedeltas.py
@@ -127,5 +127,6 @@ def test_ints_to_pytimedelta_unsupported(unit):
with pytest.raises(NotImplementedError, match=r"\d{1,2}"):
ints_to_pytimedelta(arr, box=False)
- with pytest.raises(NotImplementedError, match=r"\d{1,2}"):
+ msg = "Only resolutions 's', 'ms', 'us', 'ns' are supported"
+ with pytest.raises(NotImplementedError, match=msg):
ints_to_pytimedelta(arr, box=True)
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47390 | 2022-06-16T14:43:20Z | 2022-06-21T18:56:54Z | 2022-06-21T18:56:54Z | 2022-06-21T20:52:07Z |
DOC: update tutorials.rst to include website for searchable Pandas recipes | diff --git a/doc/source/getting_started/tutorials.rst b/doc/source/getting_started/tutorials.rst
index 8febc3adb9666..bff50bb1e4c2d 100644
--- a/doc/source/getting_started/tutorials.rst
+++ b/doc/source/getting_started/tutorials.rst
@@ -118,3 +118,4 @@ Various tutorials
* `Pandas and Python: Top 10, by Manish Amde <https://manishamde.github.io/blog/2013/03/07/pandas-and-python-top-10/>`_
* `Pandas DataFrames Tutorial, by Karlijn Willems <https://www.datacamp.com/community/tutorials/pandas-tutorial-dataframe-python>`_
* `A concise tutorial with real life examples <https://tutswiki.com/pandas-cookbook/chapter1/>`_
+* `430+ Searchable Pandas recipes by Isshin Inada <https://skytowner.com/explore/pandas_recipes_reference>`_
| - [x] closes #xxxx (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47389 | 2022-06-16T14:34:02Z | 2022-06-30T18:16:45Z | 2022-06-30T18:16:45Z | 2022-06-30T18:16:52Z |
Backport PR #47347 on branch 1.4.x (REGR: Regression in to_csv for ea dtype categorical) | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index 05ace0509e0b7..a4d81533df23d 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :meth:`DataFrame.replace` when the replacement value was explicitly ``None`` when passed in a dictionary to ``to_replace`` also casting other columns to object dtype even when there were no values to replace (:issue:`46634`)
+- Fixed regression in :meth:`DataFrame.to_csv` raising error when :class:`DataFrame` contains extension dtype categorical column (:issue:`46297`, :issue:`46812`)
- Fixed regression in representation of ``dtypes`` attribute of :class:`MultiIndex` (:issue:`46900`)
- Fixed regression when setting values with :meth:`DataFrame.loc` updating :class:`RangeIndex` when index was set as new column and column was updated afterwards (:issue:`47128`)
- Fixed regression in :meth:`DataFrame.nsmallest` led to wrong results when ``np.nan`` in the sorting column (:issue:`46589`)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 941b1648a9778..c5654db653de2 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -2160,7 +2160,7 @@ def to_native_types(
**kwargs,
) -> np.ndarray:
"""convert to our native types format"""
- if isinstance(values, Categorical):
+ if isinstance(values, Categorical) and values.categories.dtype.kind in "Mm":
# GH#40754 Convert categorical datetimes to datetime array
values = take_nd(
values.categories._values,
diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py
index 8a857c033a2de..c7c3c41a07a1e 100644
--- a/pandas/tests/frame/methods/test_to_csv.py
+++ b/pandas/tests/frame/methods/test_to_csv.py
@@ -1333,3 +1333,32 @@ def test_to_csv_na_quoting(self):
)
expected = '""\n""\n'
assert result == expected
+
+ def test_to_csv_categorical_and_ea(self):
+ # GH#46812
+ df = DataFrame({"a": "x", "b": [1, pd.NA]})
+ df["b"] = df["b"].astype("Int16")
+ df["b"] = df["b"].astype("category")
+ result = df.to_csv()
+ expected_rows = [",a,b", "0,x,1", "1,x,"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert result == expected
+
+ def test_to_csv_categorical_and_interval(self):
+ # GH#46297
+ df = DataFrame(
+ {
+ "a": [
+ pd.Interval(
+ Timestamp("2020-01-01"),
+ Timestamp("2020-01-02"),
+ closed="both",
+ )
+ ]
+ }
+ )
+ df["a"] = df["a"].astype("category")
+ result = df.to_csv()
+ expected_rows = [",a", '0,"[2020-01-01, 2020-01-02]"']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert result == expected
| Backport PR #47347: REGR: Regression in to_csv for ea dtype categorical | https://api.github.com/repos/pandas-dev/pandas/pulls/47388 | 2022-06-16T13:45:54Z | 2022-06-17T07:52:28Z | 2022-06-17T07:52:28Z | 2022-06-17T07:52:29Z |
TST,WARN: read_csv raises warning at wrong stacklevel, but _assert_raised_with_correct_stacklevel doesn't fail | diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst
index b462f0c6a8ffe..ef9537200bccd 100644
--- a/doc/source/whatsnew/v1.4.4.rst
+++ b/doc/source/whatsnew/v1.4.4.rst
@@ -23,7 +23,7 @@ Fixed regressions
Bug fixes
~~~~~~~~~
--
+- The :class:`errors.FutureWarning` raised when passing arguments (other than ``filepath_or_buffer``) as positional in :func:`read_csv` is now raised at the correct stacklevel (:issue:`47385`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index 867cdf0ee7636..e811ace78f1f5 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -829,9 +829,7 @@ def read_csv(
...
-@deprecate_nonkeyword_arguments(
- version=None, allowed_args=["filepath_or_buffer"], stacklevel=3
-)
+@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"])
@Appender(
_doc_read_csv_and_table.format(
func_name="read_csv",
diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py
index 115a2976ce618..7f60fd60c91b0 100644
--- a/pandas/tests/io/parser/common/test_common_basic.py
+++ b/pandas/tests/io/parser/common/test_common_basic.py
@@ -806,8 +806,7 @@ def test_read_csv_posargs_deprecation(all_parsers):
"In a future version of pandas all arguments of read_csv "
"except for the argument 'filepath_or_buffer' will be keyword-only"
)
- with tm.assert_produces_warning(FutureWarning, match=msg):
- parser.read_csv(f, " ")
+ parser.read_csv_check_warnings(FutureWarning, msg, f, " ")
@pytest.mark.parametrize("delimiter", [",", "\t"])
| - [x] closes #47385 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47387 | 2022-06-16T12:42:56Z | 2022-06-26T19:08:42Z | 2022-06-26T19:08:42Z | 2022-07-02T16:51:20Z |
Use CMake for Build System | diff --git a/.circleci/setup_env.sh b/.circleci/setup_env.sh
index c03a7ff4be8b3..c88250d6f48d2 100755
--- a/.circleci/setup_env.sh
+++ b/.circleci/setup_env.sh
@@ -98,9 +98,10 @@ if [ "$(conda list -f qt --json)" != [] ]; then
fi
echo "Build extensions"
-python setup.py build_ext -q -j3
+cmake .
+cmake --build . --config Release --parallel
echo "Install pandas"
-python -m pip install --no-build-isolation --no-use-pep517 -e .
+python -m pip install --no-build-isolation -e .
echo "done"
diff --git a/.github/actions/build_pandas/action.yml b/.github/actions/build_pandas/action.yml
index 23bb988ef4d73..4d2ff17b4541b 100644
--- a/.github/actions/build_pandas/action.yml
+++ b/.github/actions/build_pandas/action.yml
@@ -12,8 +12,9 @@ runs:
- name: Build Pandas
run: |
- python setup.py build_ext -j $N_JOBS
- python -m pip install -e . --no-build-isolation --no-use-pep517 --no-index
+ cmake .
+ cmake --build . --config Release --parallel
+ python -m pip install -e . --no-build-isolation --no-index
shell: bash -el {0}
env:
# Cannot use parallel compilation on Windows, see https://github.com/pandas-dev/pandas/issues/30873
diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml
index 67e99b4486a12..25eac102ca599 100644
--- a/.github/workflows/32-bit-linux.yml
+++ b/.github/workflows/32-bit-linux.yml
@@ -38,10 +38,11 @@ jobs:
/opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \
. ~/virtualenvs/pandas-dev/bin/activate && \
python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \
- pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
- python setup.py build_ext -q -j1 && \
- python -m pip install --no-build-isolation --no-use-pep517 -e . && \
- python -m pip list && \
+ pip install cmake cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
+ cmake . && \
+ cmake --build . --parallel && \
+ python -m pip install --no-build-isolation -e . && \
+ python -m pip list && \
export PANDAS_CI=1 && \
pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml"
diff --git a/.gitignore b/.gitignore
index 07b1f056d511b..b0782dc31b09b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,6 +37,10 @@
*.so
.build_cache_dir
MANIFEST
+Makefile
+CMakeCache.txt
+CMakeFiles/
+cmake_install.cmake
# Python files #
################
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000000000..fcf450e557e39
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,29 @@
+cmake_minimum_required(VERSION 3.18)
+
+set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9")
+set(CMAKE_C_STANDARD 99)
+set(CMAKE_C_STANDARD_REQUIRED True)
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED True)
+
+project(pandas)
+
+if(WIN32)
+ if(NOT CMAKE_BUILD_TYPE)
+ set(CMAKE_BUILD_TYPE
+ Release
+ CACHE STRING "Build type" FORCE)
+ endif()
+ find_package(Python3 REQUIRED COMPONENTS Interpreter Development NumPy)
+ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE "")
+ link_directories(${Python3_LIBRARY_DIRS})
+else()
+ # we only choose Development.Module to support virtual environments where
+ # libpython may not be available see
+ # https://github.com/pypa/manylinux/issues/484
+ find_package(Python3 REQUIRED COMPONENTS Interpreter Development.Module NumPy)
+endif()
+
+add_compile_definitions(NPY_NO_DEPRECATED_API=0)
+add_subdirectory("pandas/_libs")
+add_subdirectory("pandas/io/sas")
diff --git a/Dockerfile b/Dockerfile
index 650ba14271092..8fd0bc0b927f2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -49,5 +49,6 @@ RUN . /opt/conda/etc/profile.d/conda.sh \
&& conda activate base \
&& cd "$pandas_home" \
&& export \
- && python setup.py build_ext -j 4 \
+ && cmake . \
+ && cmake --build . --parallel \
&& python -m pip install --no-build-isolation -e .
diff --git a/MANIFEST.in b/MANIFEST.in
index d2b1b8cb887bc..cd13a469043b2 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,5 +1,6 @@
include RELEASE.md
include versioneer.py
+include CMakeLists.txt
graft doc
prune doc/build
@@ -7,6 +8,7 @@ prune doc/build
graft LICENSES
graft pandas
+graft build_support
global-exclude *.bz2
global-exclude *.csv
@@ -28,7 +30,6 @@ global-exclude *.odt
global-exclude *.orc
global-exclude *.sas7bdat
global-exclude *.sav
-global-exclude *.so
global-exclude *.xls
global-exclude *.xlsb
global-exclude *.xlsm
@@ -42,6 +43,9 @@ global-exclude *~
global-exclude .DS_Store
global-exclude .git*
global-exclude \#*
+recursive-exclude **/CMakeFiles *
+global-exclude *.cmake
+global-exclude CMakeCache.txt
global-exclude *.c
global-exclude *.cpp
diff --git a/Makefile b/Makefile
deleted file mode 100644
index c0aa685ed47ac..0000000000000
--- a/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-.PHONY : develop build clean clean_pyc doc lint-diff black test-scripts
-
-all: develop
-
-clean:
- -python setup.py clean
-
-clean_pyc:
- -find . -name '*.py[co]' -exec rm {} \;
-
-build: clean_pyc
- python setup.py build_ext
-
-lint-diff:
- git diff upstream/main --name-only -- "*.py" | xargs flake8
-
-black:
- black .
-
-develop: build
- python -m pip install --no-build-isolation -e .
-
-doc:
- -rm -rf doc/build doc/source/generated
- cd doc; \
- python make.py clean; \
- python make.py html
-
-test-scripts:
- pytest scripts
diff --git a/build_support/build_backend.py b/build_support/build_backend.py
new file mode 100644
index 0000000000000..90fe28fb9b33a
--- /dev/null
+++ b/build_support/build_backend.py
@@ -0,0 +1,21 @@
+import pathlib
+import subprocess
+
+from setuptools import build_meta as _orig
+
+prepare_metadata_for_build_wheel = _orig.prepare_metadata_for_build_wheel
+build_sdist = _orig.build_sdist
+get_requires_for_build_wheel = _orig.get_requires_for_build_wheel
+get_requires_for_build_sdist = _orig.get_requires_for_build_sdist
+
+
+def build_wheel(wheel_directory, config_settings=None, metadata_directory=None):
+ filedir = pathlib.Path(__file__).resolve().parent.parent
+ subprocess.run(["cmake", "."], cwd=filedir, check=True)
+ subprocess.run(
+ ["cmake", "--build", ".", "--config", "Release", "--parallel"],
+ cwd=filedir,
+ check=True,
+ )
+
+ return _orig.build_wheel(wheel_directory, config_settings, metadata_directory)
diff --git a/doc/source/development/contributing_environment.rst b/doc/source/development/contributing_environment.rst
index c881770aa7584..32c9ea82f8ddf 100644
--- a/doc/source/development/contributing_environment.rst
+++ b/doc/source/development/contributing_environment.rst
@@ -42,7 +42,8 @@ Run Container::
If you bind your local repo for the first time, you have to build the C extensions afterwards.
Run the following command inside the container::
- python setup.py build_ext -j 4
+ cmake .
+ cmake --build . --config Release --parallel
You need to rebuild the C extensions anytime the Cython code in ``pandas/_libs`` changes.
This most frequently occurs when changing or merging branches.
@@ -169,7 +170,8 @@ We'll now kick off a three-step process:
source activate pandas-dev
# Build and install pandas
- python setup.py build_ext -j 4
+ cmake .
+ cmake --build . --config Release --parallel
python -m pip install -e . --no-build-isolation --no-use-pep517
At this point you should be able to import pandas from your locally built version::
@@ -216,7 +218,8 @@ You also need to have ``setuptools`` 51.0.0 or later to build pandas.
python -m pip install -r requirements-dev.txt
# Build and install pandas
- python setup.py build_ext -j 4
+ cmake .
+ cmake --build . --config Release --parallel
python -m pip install -e . --no-build-isolation --no-use-pep517
**Unix**/**macOS with pyenv**
@@ -240,7 +243,8 @@ Consult the docs for setting up pyenv `here <https://github.com/pyenv/pyenv>`__.
python -m pip install -r requirements-dev.txt
# Build and install pandas
- python setup.py build_ext -j 4
+ cmake .
+ cmake --build . --config Release --parallel
python -m pip install -e . --no-build-isolation --no-use-pep517
**Windows**
@@ -266,5 +270,6 @@ should already exist.
python -m pip install -r requirements-dev.txt
# Build and install pandas
- python setup.py build_ext -j 4
+ cmake .
+ cmake --build . --config Release --parallel
python -m pip install -e . --no-build-isolation --no-use-pep517
diff --git a/doc/source/development/debugging_extensions.rst b/doc/source/development/debugging_extensions.rst
index 7ba2091e18853..b2865f97e2613 100644
--- a/doc/source/development/debugging_extensions.rst
+++ b/doc/source/development/debugging_extensions.rst
@@ -8,11 +8,12 @@ Debugging C extensions
Pandas uses select C extensions for high performance IO operations. In case you need to debug segfaults or general issues with those extensions, the following steps may be helpful.
-First, be sure to compile the extensions with the appropriate flags to generate debug symbols and remove optimizations. This can be achieved as follows:
+First, be sure to compile the extensions with the appropriate flags to generate debug symbols and remove optimizations. This can be achieved on Unix-like systems as follows:
.. code-block:: sh
- python setup.py build_ext --inplace -j4 --with-debugging-symbols
+ cmake . -DCMAKE_BUILD_TYPE=Debug
+ cmake --build . --parallel
Using a debugger
================
diff --git a/environment.yml b/environment.yml
index f1472f453b935..5dc0dcc477dae 100644
--- a/environment.yml
+++ b/environment.yml
@@ -79,6 +79,7 @@ dependencies:
- asv
# The compiler packages are meta-packages and install the correct compiler (activation) packages on the respective platforms.
+ - cmake>=3.18.0
- c-compiler
- cxx-compiler
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 5016bde000c3b..229aa77efdefe 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -28,7 +28,8 @@
raise ImportError(
f"C extension: {_module} not built. If you want to import "
"pandas from the source directory, you may need to run "
- "'python setup.py build_ext --force' to build the C extensions first."
+ "'cmake . && cmake --build . --config Release --parallel' "
+ "to build the C extensions first."
) from _err
else:
del _tslib, _lib, _hashtable
diff --git a/pandas/_libs/CMakeLists.txt b/pandas/_libs/CMakeLists.txt
new file mode 100644
index 0000000000000..3aa1e7a225476
--- /dev/null
+++ b/pandas/_libs/CMakeLists.txt
@@ -0,0 +1,142 @@
+add_custom_command(
+ OUTPUT algos_common_helper.pxi
+ algos_take_helper.pxi
+ hashtable_class_helper.pxi
+ hashtable_func_helper.pxi
+ index_class_helper.pxi
+ intervaltree.pxi
+ khash_for_primitive_helper.pxi
+ sparse_op_helper.pxi
+ COMMAND ${Python3_EXECUTABLE} generate_templates.py)
+
+add_custom_command(
+ OUTPUT algos.c
+ COMMAND ${Python3_EXECUTABLE} -m cython -3 algos.pyx
+ DEPENDS algos_common_helper.pxi algos_take_helper.pxi)
+python3_add_library(algos MODULE WITH_SOABI algos.c)
+target_include_directories(
+ algos PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS} "src/klib")
+
+# There is a khash header file in src/klib and a cython generated one in _libs.
+# Depending on the build timing one of the other could get picked up, though
+# unclear why we need both? If we stick to the non-generated version we can
+# remove any DEPENDS khash.h
+add_custom_command(
+ OUTPUT khash.h
+ COMMAND ${Python3_EXECUTABLE} -m cython -3 khash.pxd
+  DEPENDS khash_for_primitive_helper.pxi)
+
+add_custom_command(
+ OUTPUT hashtable.c
+ COMMAND ${Python3_EXECUTABLE} -m cython -3 hashtable.pyx
+ DEPENDS hashtable_class_helper.pxi hashtable_func_helper.pxi)
+
+python3_add_library(hashtable MODULE WITH_SOABI hashtable.c)
+target_include_directories(
+ hashtable PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS}
+    "src/klib")
+
+add_custom_command(
+ OUTPUT index.c
+ COMMAND ${Python3_EXECUTABLE} -m cython -3 index.pyx
+ DEPENDS index_class_helper.pxi)
+python3_add_library(index MODULE WITH_SOABI index.c)
+target_include_directories(
+ index PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS} "src/klib"
+ "./tslibs")
+
+add_custom_command(
+ OUTPUT interval.c
+ COMMAND ${Python3_EXECUTABLE} -m cython -3 interval.pyx
+ DEPENDS intervaltree.pxi)
+python3_add_library(interval MODULE WITH_SOABI interval.c)
+target_include_directories(
+ interval PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS}
+ "src/klib" "./tslibs")
+
+add_custom_command(
+ OUTPUT sparse.c
+ COMMAND ${Python3_EXECUTABLE} -m cython -3 sparse.pyx
+ DEPENDS sparse_op_helper.pxi)
+python3_add_library(sparse MODULE WITH_SOABI sparse.c)
+target_include_directories(
+ sparse PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS}
+ "src/klib")
+
+set(BASIC_LIBRARIES
+ arrays
+ groupby
+ hashing
+ indexing
+ internals
+ reduction
+ ops
+ ops_dispatch
+ properties
+ reshape
+ testing
+ writers)
+foreach(LIB ${BASIC_LIBRARIES})
+ add_custom_command(OUTPUT ${LIB}.c COMMAND ${Python3_EXECUTABLE} -m cython -3
+ ${LIB}.pyx)
+ python3_add_library(${LIB} MODULE WITH_SOABI ${LIB}.c)
+ target_include_directories(${LIB} PUBLIC ${Python3_INCLUDE_DIRS}
+ ${Python3_NumPy_INCLUDE_DIRS})
+endforeach()
+
+add_subdirectory("tslibs")
+
+add_custom_command(OUTPUT tslib.c COMMAND ${Python3_EXECUTABLE} -m cython -3
+ tslib.pyx)
+python3_add_library(tslib MODULE WITH_SOABI tslib.c
+ tslibs/src/datetime/np_datetime.c)
+target_include_directories(
+ tslib PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS} "./tslibs")
+
+add_custom_command(OUTPUT missing.c COMMAND ${Python3_EXECUTABLE} -m cython -3
+ missing.pyx)
+python3_add_library(missing MODULE WITH_SOABI missing.c)
+target_include_directories(
+ missing PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS}
+ "./tslibs")
+
+add_custom_command(OUTPUT lib.c COMMAND ${Python3_EXECUTABLE} -m cython -3
+ lib.pyx)
+python3_add_library(lib MODULE WITH_SOABI lib.c src/parser/tokenizer.c)
+target_include_directories(
+ lib PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS} "src/klib"
+ "./tslibs")
+
+add_custom_command(
+ OUTPUT join.c
+ COMMAND ${Python3_EXECUTABLE} -m cython -3 join.pyx
+ DEPENDS khash_for_primitive_helper.pxi)
+python3_add_library(join MODULE WITH_SOABI join.c)
+target_include_directories(join PUBLIC ${Python3_INCLUDE_DIRS}
+ ${Python3_NumPy_INCLUDE_DIRS} "src/klib")
+
+add_custom_command(OUTPUT parsers.c COMMAND ${Python3_EXECUTABLE} -m cython -3
+ parsers.pyx)
+python3_add_library(parsers MODULE WITH_SOABI parsers.c src/parser/tokenizer.c
+ src/parser/io.c)
+target_include_directories(
+ parsers PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS}
+ "src/klib" "src")
+
+python3_add_library(
+ ujson
+ MODULE
+ WITH_SOABI
+ src/ujson/python/ujson.c
+ src/ujson/python/objToJSON.c
+ src/ujson/python/date_conversions.c
+ src/ujson/python/JSONtoObj.c
+ src/ujson/lib/ultrajsonenc.c
+ src/ujson/lib/ultrajsondec.c
+ tslibs/src/datetime/np_datetime.c
+ tslibs/src/datetime/np_datetime_strings.c)
+target_include_directories(
+ ujson PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS}
+  src/ujson/python src/ujson/lib tslibs/src/datetime)
+
+add_subdirectory("window")
diff --git a/pandas/_libs/generate_templates.py b/pandas/_libs/generate_templates.py
new file mode 100644
index 0000000000000..013568a2812b8
--- /dev/null
+++ b/pandas/_libs/generate_templates.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from Cython import Tempita
+
+if __name__ == "__main__":
+ for template in (
+ "algos_common_helper.pxi.in",
+ "algos_take_helper.pxi.in",
+ "hashtable_class_helper.pxi.in",
+ "hashtable_func_helper.pxi.in",
+ "index_class_helper.pxi.in",
+ "intervaltree.pxi.in",
+ "khash_for_primitive_helper.pxi.in",
+ "sparse_op_helper.pxi.in",
+ ):
+ pyxcontent = Tempita.sub(open(template).read())
+ with open(template.replace(".in", ""), "w") as outfile:
+ outfile.write(pyxcontent)
diff --git a/pandas/_libs/src/ujson/python/ujson.c b/pandas/_libs/src/ujson/python/ujson.c
index 5d4a5693c0ff6..c9d5a91da2e86 100644
--- a/pandas/_libs/src/ujson/python/ujson.c
+++ b/pandas/_libs/src/ujson/python/ujson.c
@@ -74,7 +74,7 @@ static PyModuleDef moduledef = {
};
-PyMODINIT_FUNC PyInit_json(void) {
+PyMODINIT_FUNC PyInit_ujson(void) {
import_array()
initObjToJSON(); // TODO(username): clean up, maybe via tp_free?
return PyModuleDef_Init(&moduledef);
diff --git a/pandas/_libs/tslibs/CMakeLists.txt b/pandas/_libs/tslibs/CMakeLists.txt
new file mode 100644
index 0000000000000..0f8ebdeb8991c
--- /dev/null
+++ b/pandas/_libs/tslibs/CMakeLists.txt
@@ -0,0 +1,43 @@
+set(BASIC_LIBRARIES base ccalendar dtypes nattype strptime timezones)
+foreach(LIB ${BASIC_LIBRARIES})
+ add_custom_command(OUTPUT ${LIB}.c COMMAND ${Python3_EXECUTABLE} -m cython -3
+ ${LIB}.pyx)
+ python3_add_library(${LIB} MODULE WITH_SOABI ${LIB}.c)
+ target_include_directories(${LIB} PUBLIC ${Python3_INCLUDE_DIRS}
+ ${Python3_NumPy_INCLUDE_DIRS})
+endforeach()
+
+set(NP_DATETIME_REQUIRING
+ conversion
+ fields
+ offsets
+ period
+ timedeltas
+ timestamps
+ tzconversion
+ vectorized)
+foreach(LIB ${NP_DATETIME_REQUIRING})
+ add_custom_command(OUTPUT ${LIB}.c COMMAND ${Python3_EXECUTABLE} -m cython -3
+ ${LIB}.pyx)
+ python3_add_library(${LIB} MODULE WITH_SOABI ${LIB}.c
+ src/datetime/np_datetime.c)
+ target_include_directories(
+ ${LIB} PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS}
+ "src/datetime")
+endforeach()
+
+add_custom_command(OUTPUT np_datetime.c COMMAND ${Python3_EXECUTABLE} -m cython
+ -3 np_datetime.pyx)
+python3_add_library(
+ np_datetime MODULE WITH_SOABI np_datetime.c src/datetime/np_datetime.c
+ src/datetime/np_datetime_strings.c)
+target_include_directories(np_datetime PUBLIC ${Python3_INCLUDE_DIRS}
+ ${Python3_NumPy_INCLUDE_DIRS})
+
+add_custom_command(OUTPUT parsing.c COMMAND ${Python3_EXECUTABLE} -m cython -3
+ parsing.pyx)
+python3_add_library(parsing MODULE WITH_SOABI parsing.c
+ "../src/parser/tokenizer.c")
+target_include_directories(
+ parsing PUBLIC ${Python3_INCLUDE_DIRS} ${Python3_NumPy_INCLUDE_DIRS}
+ "../src/klib")
diff --git a/pandas/_libs/json.pyi b/pandas/_libs/ujson.pyi
similarity index 100%
rename from pandas/_libs/json.pyi
rename to pandas/_libs/ujson.pyi
diff --git a/pandas/_libs/window/CMakeLists.txt b/pandas/_libs/window/CMakeLists.txt
new file mode 100644
index 0000000000000..862abf42fd89f
--- /dev/null
+++ b/pandas/_libs/window/CMakeLists.txt
@@ -0,0 +1,12 @@
+add_custom_command(OUTPUT indexers.c COMMAND ${Python3_EXECUTABLE} -m cython -3
+ indexers.pyx)
+python3_add_library(indexers MODULE WITH_SOABI indexers.c)
+target_include_directories(indexers PUBLIC ${Python3_INCLUDE_DIRS}
+ ${Python3_NumPy_INCLUDE_DIRS})
+
+add_custom_command(
+ OUTPUT aggregations.cpp COMMAND ${Python3_EXECUTABLE} -m cython -3 --cplus
+ aggregations.pyx)
+python3_add_library(aggregations MODULE WITH_SOABI aggregations.cpp)
+target_include_directories(aggregations PUBLIC ${Python3_INCLUDE_DIRS}
+ ${Python3_NumPy_INCLUDE_DIRS})
diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py
index 185e93591cfe0..694a39ff36d1d 100644
--- a/pandas/io/excel/_odswriter.py
+++ b/pandas/io/excel/_odswriter.py
@@ -10,7 +10,7 @@
cast,
)
-import pandas._libs.json as json
+import pandas._libs.ujson as json
from pandas._typing import (
FilePath,
StorageOptions,
diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py
index a3edccd3a5779..4d299c717e23f 100644
--- a/pandas/io/excel/_xlsxwriter.py
+++ b/pandas/io/excel/_xlsxwriter.py
@@ -2,7 +2,7 @@
from typing import Any
-import pandas._libs.json as json
+import pandas._libs.ujson as json
from pandas._typing import (
FilePath,
StorageOptions,
diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py
index 234d9e72de10d..538b5f2a2c5e3 100644
--- a/pandas/io/excel/_xlwt.py
+++ b/pandas/io/excel/_xlwt.py
@@ -7,7 +7,7 @@
cast,
)
-import pandas._libs.json as json
+import pandas._libs.ujson as json
from pandas._typing import (
FilePath,
StorageOptions,
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 02a0b27f82ef8..6d59ca267af7c 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -21,8 +21,8 @@
import numpy as np
-import pandas._libs.json as json
from pandas._libs.tslibs import iNaT
+import pandas._libs.ujson as json
from pandas._typing import (
CompressionOptions,
DtypeArg,
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py
index b7a8b5cc82f7a..57a3d5cff3cd7 100644
--- a/pandas/io/json/_table_schema.py
+++ b/pandas/io/json/_table_schema.py
@@ -12,7 +12,7 @@
)
import warnings
-import pandas._libs.json as json
+import pandas._libs.ujson as json
from pandas._typing import (
DtypeObj,
JSONSerializable,
diff --git a/pandas/io/sas/CMakeLists.txt b/pandas/io/sas/CMakeLists.txt
new file mode 100644
index 0000000000000..a8742edc7a939
--- /dev/null
+++ b/pandas/io/sas/CMakeLists.txt
@@ -0,0 +1,5 @@
+add_custom_command(OUTPUT _sas.c COMMAND ${Python3_EXECUTABLE} -m cython -3
+ _sas.pyx)
+python3_add_library(_sas MODULE WITH_SOABI _sas.c)
+target_include_directories(_sas PUBLIC ${Python3_INCLUDE_DIRS}
+ ${Python3_NumPy_INCLUDE_DIRS})
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/_sas.pyx
similarity index 100%
rename from pandas/io/sas/sas.pyx
rename to pandas/io/sas/_sas.pyx
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index b9f568fc9577b..61b9c1d851c0d 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -5,7 +5,6 @@
import pytest
from pandas._libs import lib
-from pandas.compat import IS64
from pandas.errors import (
PerformanceWarning,
SpecificationError,
@@ -2572,7 +2571,6 @@ def test_groupby_series_with_tuple_name():
tm.assert_series_equal(result, expected)
-@pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system")
@pytest.mark.parametrize(
"func, values", [("sum", [97.0, 98.0]), ("mean", [24.25, 24.5])]
)
@@ -2585,7 +2583,6 @@ def test_groupby_numerical_stability_sum_mean(func, values):
tm.assert_frame_equal(result, expected)
-@pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system")
def test_groupby_numerical_stability_cumsum():
# GH#38934
data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index ae13d8d5fb180..ff43933be1232 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -12,7 +12,7 @@
import pytest
import pytz
-import pandas._libs.json as ujson
+import pandas._libs.ujson as ujson
from pandas.compat import (
IS64,
is_platform_windows,
diff --git a/pandas/tests/io/parser/common/test_float.py b/pandas/tests/io/parser/common/test_float.py
index 2ca98de914f9e..e245b2ca019ee 100644
--- a/pandas/tests/io/parser/common/test_float.py
+++ b/pandas/tests/io/parser/common/test_float.py
@@ -7,7 +7,10 @@
import numpy as np
import pytest
-from pandas.compat import is_platform_linux
+from pandas.compat import (
+ is_platform_linux,
+ is_platform_mac,
+)
from pandas import DataFrame
import pandas._testing as tm
@@ -53,8 +56,8 @@ def test_too_many_exponent_digits(all_parsers_all_precisions, exp, request):
data = f"data\n10E{exp}"
result = parser.read_csv(StringIO(data), float_precision=precision)
if precision == "round_trip":
- if exp == 999999999999999999 and is_platform_linux():
- mark = pytest.mark.xfail(reason="GH38794, on Linux gives object result")
+ if exp == 999999999999999999 and (is_platform_linux() or is_platform_mac()):
+ mark = pytest.mark.xfail(reason="GH38794, on Unix gives object result")
request.node.add_marker(mark)
value = np.inf if exp > 0 else 0.0
diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py
index 9a81790ca3bb0..6d92225f9458d 100644
--- a/pandas/tests/io/parser/test_c_parser_only.py
+++ b/pandas/tests/io/parser/test_c_parser_only.py
@@ -17,10 +17,7 @@
import numpy as np
import pytest
-from pandas.compat import (
- IS64,
- is_ci_environment,
-)
+from pandas.compat import is_ci_environment
from pandas.errors import ParserError
import pandas.util._test_decorators as td
@@ -678,10 +675,7 @@ def test_float_precision_options(c_parser_only):
df3 = parser.read_csv(StringIO(s), float_precision="legacy")
- if IS64:
- assert not df.iloc[0, 0] == df3.iloc[0, 0]
- else:
- assert df.iloc[0, 0] == df3.iloc[0, 0]
+ assert not df.iloc[0, 0] == df3.iloc[0, 0]
msg = "Unrecognized float_precision option: junk"
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index c9ec2985488be..b229d1b8db51e 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -7,6 +7,7 @@
import pytest
from pandas.compat import (
+ IS64,
is_platform_arm,
is_platform_mac,
)
@@ -1191,7 +1192,9 @@ def test_rolling_sem(frame_or_series):
tm.assert_series_equal(result, expected)
-@pytest.mark.xfail(is_platform_arm() and not is_platform_mac(), reason="GH 38921")
+@pytest.mark.xfail(
+ (is_platform_arm() and not is_platform_mac()) or not IS64, reason="GH 38921"
+)
@pytest.mark.parametrize(
("func", "third_value", "values"),
[
diff --git a/pyproject.toml b/pyproject.toml
index 67c56123a847c..81aa906bdd2bd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,14 +2,15 @@
# Minimum requirements for the build system to execute.
# See https://github.com/scipy/scipy/pull/12940 for the AIX issue.
requires = [
+ "cmake>=3.18.0",
"setuptools>=51.0.0",
"wheel",
"Cython>=0.29.32,<3", # Note: sync with setup.py, environment.yml and asv.conf.json
"oldest-supported-numpy>=0.10"
]
-# uncomment to enable pep517 after versioneer problem is fixed.
-# https://github.com/python-versioneer/python-versioneer/issues/193
-# build-backend = "setuptools.build_meta"
+
+build-backend = "build_backend"
+backend-path = ["build_support"]
[tool.black]
target-version = ['py38', 'py39']
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 60dd738e43ba3..3a59ebc79e315 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -62,6 +62,7 @@ torch
moto
flask
asv
+cmake>=3.18.0
black==22.3.0
cpplint
flake8==5.0.4
diff --git a/setup.py b/setup.py
old mode 100755
new mode 100644
index 12e8aa36c3794..79e4fea9fdf36
--- a/setup.py
+++ b/setup.py
@@ -5,662 +5,21 @@
(https://github.com/zeromq/pyzmq) which have been permitted for use under the
BSD license. Parts are from lxml (https://github.com/lxml/lxml)
"""
-
-import argparse
-import multiprocessing
import os
-from os.path import join as pjoin
-import platform
-import shutil
import sys
-from sysconfig import get_config_vars
-import numpy
-from pkg_resources import parse_version
-from setuptools import (
- Command,
- Extension,
- setup,
-)
-from setuptools.command.build_ext import build_ext as _build_ext
+from setuptools import setup
+# uncomment to enable pep517 after versioneer problem is fixed.
+# https://github.com/python-versioneer/python-versioneer/issues/193
+sys.path.insert(0, os.path.dirname(__file__))
import versioneer
cmdclass = versioneer.get_cmdclass()
-def is_platform_windows():
- return sys.platform == "win32" or sys.platform == "cygwin"
-
-
-def is_platform_mac():
- return sys.platform == "darwin"
-
-
-# note: sync with pyproject.toml, environment.yml and asv.conf.json
-min_cython_ver = "0.29.32"
-
-try:
- from Cython import (
- Tempita,
- __version__ as _CYTHON_VERSION,
- )
- from Cython.Build import cythonize
-
- _CYTHON_INSTALLED = parse_version(_CYTHON_VERSION) >= parse_version(min_cython_ver)
-except ImportError:
- _CYTHON_VERSION = None
- _CYTHON_INSTALLED = False
- cythonize = lambda x, *args, **kwargs: x # dummy func
-
-
-_pxi_dep_template = {
- "algos": ["_libs/algos_common_helper.pxi.in", "_libs/algos_take_helper.pxi.in"],
- "hashtable": [
- "_libs/hashtable_class_helper.pxi.in",
- "_libs/hashtable_func_helper.pxi.in",
- "_libs/khash_for_primitive_helper.pxi.in",
- ],
- "index": ["_libs/index_class_helper.pxi.in"],
- "sparse": ["_libs/sparse_op_helper.pxi.in"],
- "interval": ["_libs/intervaltree.pxi.in"],
-}
-
-_pxifiles = []
-_pxi_dep = {}
-for module, files in _pxi_dep_template.items():
- pxi_files = [pjoin("pandas", x) for x in files]
- _pxifiles.extend(pxi_files)
- _pxi_dep[module] = pxi_files
-
-
-class build_ext(_build_ext):
- @classmethod
- def render_templates(cls, pxifiles):
- for pxifile in pxifiles:
- # build pxifiles first, template extension must be .pxi.in
- assert pxifile.endswith(".pxi.in")
- outfile = pxifile[:-3]
-
- if (
- os.path.exists(outfile)
- and os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime
- ):
- # if .pxi.in is not updated, no need to output .pxi
- continue
-
- with open(pxifile) as f:
- tmpl = f.read()
- pyxcontent = Tempita.sub(tmpl)
-
- with open(outfile, "w") as f:
- f.write(pyxcontent)
-
- def build_extensions(self):
- # if building from c files, don't need to
- # generate template output
- if _CYTHON_INSTALLED:
- self.render_templates(_pxifiles)
-
- super().build_extensions()
-
-
-class CleanCommand(Command):
- """Custom command to clean the .so and .pyc files."""
-
- user_options = [("all", "a", "")]
-
- def initialize_options(self):
- self.all = True
- self._clean_me = []
- self._clean_trees = []
-
- base = pjoin("pandas", "_libs", "src")
- tsbase = pjoin("pandas", "_libs", "tslibs", "src")
- dt = pjoin(tsbase, "datetime")
- util = pjoin("pandas", "util")
- parser = pjoin(base, "parser")
- ujson_python = pjoin(base, "ujson", "python")
- ujson_lib = pjoin(base, "ujson", "lib")
- self._clean_exclude = [
- pjoin(dt, "np_datetime.c"),
- pjoin(dt, "np_datetime_strings.c"),
- pjoin(parser, "tokenizer.c"),
- pjoin(parser, "io.c"),
- pjoin(ujson_python, "ujson.c"),
- pjoin(ujson_python, "objToJSON.c"),
- pjoin(ujson_python, "JSONtoObj.c"),
- pjoin(ujson_python, "date_conversions.c"),
- pjoin(ujson_lib, "ultrajsonenc.c"),
- pjoin(ujson_lib, "ultrajsondec.c"),
- pjoin(util, "move.c"),
- ]
-
- for root, dirs, files in os.walk("pandas"):
- for f in files:
- filepath = pjoin(root, f)
- if filepath in self._clean_exclude:
- continue
-
- if os.path.splitext(f)[-1] in (
- ".pyc",
- ".so",
- ".o",
- ".pyo",
- ".pyd",
- ".c",
- ".cpp",
- ".orig",
- ):
- self._clean_me.append(filepath)
- for d in dirs:
- if d == "__pycache__":
- self._clean_trees.append(pjoin(root, d))
-
- # clean the generated pxi files
- for pxifile in _pxifiles:
- pxifile = pxifile.replace(".pxi.in", ".pxi")
- self._clean_me.append(pxifile)
-
- for d in ("build", "dist"):
- if os.path.exists(d):
- self._clean_trees.append(d)
-
- def finalize_options(self):
- pass
-
- def run(self):
- for clean_me in self._clean_me:
- try:
- os.unlink(clean_me)
- except OSError:
- pass
- for clean_tree in self._clean_trees:
- try:
- shutil.rmtree(clean_tree)
- except OSError:
- pass
-
-
-# we need to inherit from the versioneer
-# class as it encodes the version info
-sdist_class = cmdclass["sdist"]
-
-
-class CheckSDist(sdist_class):
- """Custom sdist that ensures Cython has compiled all pyx files to c."""
-
- _pyxfiles = [
- "pandas/_libs/arrays.pyx",
- "pandas/_libs/lib.pyx",
- "pandas/_libs/hashtable.pyx",
- "pandas/_libs/tslib.pyx",
- "pandas/_libs/index.pyx",
- "pandas/_libs/internals.pyx",
- "pandas/_libs/algos.pyx",
- "pandas/_libs/join.pyx",
- "pandas/_libs/indexing.pyx",
- "pandas/_libs/interval.pyx",
- "pandas/_libs/hashing.pyx",
- "pandas/_libs/missing.pyx",
- "pandas/_libs/reduction.pyx",
- "pandas/_libs/testing.pyx",
- "pandas/_libs/sparse.pyx",
- "pandas/_libs/ops.pyx",
- "pandas/_libs/parsers.pyx",
- "pandas/_libs/tslibs/base.pyx",
- "pandas/_libs/tslibs/ccalendar.pyx",
- "pandas/_libs/tslibs/dtypes.pyx",
- "pandas/_libs/tslibs/period.pyx",
- "pandas/_libs/tslibs/strptime.pyx",
- "pandas/_libs/tslibs/np_datetime.pyx",
- "pandas/_libs/tslibs/timedeltas.pyx",
- "pandas/_libs/tslibs/timestamps.pyx",
- "pandas/_libs/tslibs/timezones.pyx",
- "pandas/_libs/tslibs/conversion.pyx",
- "pandas/_libs/tslibs/fields.pyx",
- "pandas/_libs/tslibs/offsets.pyx",
- "pandas/_libs/tslibs/parsing.pyx",
- "pandas/_libs/tslibs/tzconversion.pyx",
- "pandas/_libs/tslibs/vectorized.pyx",
- "pandas/_libs/window/indexers.pyx",
- "pandas/_libs/writers.pyx",
- "pandas/io/sas/sas.pyx",
- ]
-
- _cpp_pyxfiles = [
- "pandas/_libs/window/aggregations.pyx",
- ]
-
- def initialize_options(self):
- sdist_class.initialize_options(self)
-
- def run(self):
- if "cython" in cmdclass:
- self.run_command("cython")
- else:
- # If we are not running cython then
- # compile the extensions correctly
- pyx_files = [(self._pyxfiles, "c"), (self._cpp_pyxfiles, "cpp")]
-
- for pyxfiles, extension in pyx_files:
- for pyxfile in pyxfiles:
- sourcefile = pyxfile[:-3] + extension
- msg = (
- f"{extension}-source file '{sourcefile}' not found.\n"
- "Run 'setup.py cython' before sdist."
- )
- assert os.path.isfile(sourcefile), msg
- sdist_class.run(self)
-
-
-class CheckingBuildExt(build_ext):
- """
- Subclass build_ext to get clearer report if Cython is necessary.
- """
-
- def check_cython_extensions(self, extensions):
- for ext in extensions:
- for src in ext.sources:
- if not os.path.exists(src):
- print(f"{ext.name}: -> [{ext.sources}]")
- raise Exception(
- f"""Cython-generated file '{src}' not found.
- Cython is required to compile pandas from a development branch.
- Please install Cython or download a release package of pandas.
- """
- )
-
- def build_extensions(self):
- self.check_cython_extensions(self.extensions)
- build_ext.build_extensions(self)
-
-
-class CythonCommand(build_ext):
- """
- Custom command subclassed from Cython.Distutils.build_ext
- to compile pyx->c, and stop there. All this does is override the
- C-compile method build_extension() with a no-op.
- """
-
- def build_extension(self, ext):
- pass
-
-
-class DummyBuildSrc(Command):
- """numpy's build_src command interferes with Cython's build_ext."""
-
- user_options = []
-
- def initialize_options(self):
- self.py_modules_dict = {}
-
- def finalize_options(self):
- pass
-
- def run(self):
- pass
-
-
-cmdclass["clean"] = CleanCommand
-cmdclass["build_ext"] = CheckingBuildExt
-
-if _CYTHON_INSTALLED:
- suffix = ".pyx"
- cmdclass["cython"] = CythonCommand
-else:
- suffix = ".c"
- cmdclass["build_src"] = DummyBuildSrc
-
-# ----------------------------------------------------------------------
-# Preparation of compiler arguments
-
-debugging_symbols_requested = "--with-debugging-symbols" in sys.argv
-if debugging_symbols_requested:
- sys.argv.remove("--with-debugging-symbols")
-
-
-if sys.byteorder == "big":
- endian_macro = [("__BIG_ENDIAN__", "1")]
-else:
- endian_macro = [("__LITTLE_ENDIAN__", "1")]
-
-
-extra_compile_args = []
-extra_link_args = []
-if is_platform_windows():
- if debugging_symbols_requested:
- extra_compile_args.append("/Z7")
- extra_link_args.append("/DEBUG")
-else:
- # PANDAS_CI=1 is set in CI
- if os.environ.get("PANDAS_CI", "0") == "1":
- extra_compile_args.append("-Werror")
- if debugging_symbols_requested:
- extra_compile_args.append("-g")
- extra_compile_args.append("-UNDEBUG")
- extra_compile_args.append("-O0")
-
-# Build for at least macOS 10.9 when compiling on a 10.9 system or above,
-# overriding CPython distuitls behaviour which is to target the version that
-# python was built for. This may be overridden by setting
-# MACOSX_DEPLOYMENT_TARGET before calling setup.py
-if is_platform_mac():
- if "MACOSX_DEPLOYMENT_TARGET" not in os.environ:
- current_system = platform.mac_ver()[0]
- python_target = get_config_vars().get(
- "MACOSX_DEPLOYMENT_TARGET", current_system
- )
- target_macos_version = "10.9"
- parsed_macos_version = parse_version(target_macos_version)
- if (
- parse_version(str(python_target)) < parsed_macos_version
- and parse_version(current_system) >= parsed_macos_version
- ):
- os.environ["MACOSX_DEPLOYMENT_TARGET"] = target_macos_version
-
- if sys.version_info[:2] == (3, 8): # GH 33239
- extra_compile_args.append("-Wno-error=deprecated-declarations")
-
- # https://github.com/pandas-dev/pandas/issues/35559
- extra_compile_args.append("-Wno-error=unreachable-code")
-
-# enable coverage by building cython files by setting the environment variable
-# "PANDAS_CYTHON_COVERAGE" (with a Truthy value) or by running build_ext
-# with `--with-cython-coverage`enabled
-linetrace = os.environ.get("PANDAS_CYTHON_COVERAGE", False)
-if "--with-cython-coverage" in sys.argv:
- linetrace = True
- sys.argv.remove("--with-cython-coverage")
-
-# Note: if not using `cythonize`, coverage can be enabled by
-# pinning `ext.cython_directives = directives` to each ext in extensions.
-# github.com/cython/cython/wiki/enhancements-compilerdirectives#in-setuppy
-directives = {"linetrace": False, "language_level": 3}
-macros = []
-if linetrace:
- # https://pypkg.com/pypi/pytest-cython/f/tests/example-project/setup.py
- directives["linetrace"] = True
- macros = [("CYTHON_TRACE", "1"), ("CYTHON_TRACE_NOGIL", "1")]
-
-# silence build warnings about deprecated API usage
-# we can't do anything about these warnings because they stem from
-# cython+numpy version mismatches.
-macros.append(("NPY_NO_DEPRECATED_API", "0"))
-
-
-# ----------------------------------------------------------------------
-# Specification of Dependencies
-
-# TODO(cython#4518): Need to check to see if e.g. `linetrace` has changed and
-# possibly re-compile.
-def maybe_cythonize(extensions, *args, **kwargs):
- """
- Render tempita templates before calling cythonize. This is skipped for
-
- * clean
- * sdist
- """
- if "clean" in sys.argv or "sdist" in sys.argv:
- # See https://github.com/cython/cython/issues/1495
- return extensions
-
- elif not _CYTHON_INSTALLED:
- # GH#28836 raise a helfpul error message
- if _CYTHON_VERSION:
- raise RuntimeError(
- f"Cannot cythonize with old Cython version ({_CYTHON_VERSION} "
- f"installed, needs {min_cython_ver})"
- )
- raise RuntimeError("Cannot cythonize without Cython installed.")
-
- # reuse any parallel arguments provided for compilation to cythonize
- parser = argparse.ArgumentParser()
- parser.add_argument("--parallel", "-j", type=int, default=1)
- parsed, _ = parser.parse_known_args()
-
- kwargs["nthreads"] = parsed.parallel
- build_ext.render_templates(_pxifiles)
- return cythonize(extensions, *args, **kwargs)
-
-
-def srcpath(name=None, suffix=".pyx", subdir="src"):
- return pjoin("pandas", subdir, name + suffix)
-
-
-lib_depends = ["pandas/_libs/src/parse_helper.h"]
-
-klib_include = ["pandas/_libs/src/klib"]
-
-tseries_depends = [
- "pandas/_libs/tslibs/src/datetime/np_datetime.h",
- "pandas/_libs/tslibs/src/datetime/np_datetime_strings.h",
-]
-
-ext_data = {
- "_libs.algos": {
- "pyxfile": "_libs/algos",
- "include": klib_include,
- "depends": _pxi_dep["algos"],
- },
- "_libs.arrays": {"pyxfile": "_libs/arrays"},
- "_libs.groupby": {"pyxfile": "_libs/groupby"},
- "_libs.hashing": {"pyxfile": "_libs/hashing", "depends": []},
- "_libs.hashtable": {
- "pyxfile": "_libs/hashtable",
- "include": klib_include,
- "depends": (
- ["pandas/_libs/src/klib/khash_python.h", "pandas/_libs/src/klib/khash.h"]
- + _pxi_dep["hashtable"]
- ),
- },
- "_libs.index": {
- "pyxfile": "_libs/index",
- "include": klib_include,
- "depends": _pxi_dep["index"],
- },
- "_libs.indexing": {"pyxfile": "_libs/indexing"},
- "_libs.internals": {"pyxfile": "_libs/internals"},
- "_libs.interval": {
- "pyxfile": "_libs/interval",
- "include": klib_include,
- "depends": _pxi_dep["interval"],
- },
- "_libs.join": {"pyxfile": "_libs/join", "include": klib_include},
- "_libs.lib": {
- "pyxfile": "_libs/lib",
- "depends": lib_depends + tseries_depends,
- "include": klib_include, # due to tokenizer import
- "sources": ["pandas/_libs/src/parser/tokenizer.c"],
- },
- "_libs.missing": {"pyxfile": "_libs/missing", "depends": tseries_depends},
- "_libs.parsers": {
- "pyxfile": "_libs/parsers",
- "include": klib_include + ["pandas/_libs/src"],
- "depends": [
- "pandas/_libs/src/parser/tokenizer.h",
- "pandas/_libs/src/parser/io.h",
- ],
- "sources": [
- "pandas/_libs/src/parser/tokenizer.c",
- "pandas/_libs/src/parser/io.c",
- ],
- },
- "_libs.reduction": {"pyxfile": "_libs/reduction"},
- "_libs.ops": {"pyxfile": "_libs/ops"},
- "_libs.ops_dispatch": {"pyxfile": "_libs/ops_dispatch"},
- "_libs.properties": {"pyxfile": "_libs/properties"},
- "_libs.reshape": {"pyxfile": "_libs/reshape", "depends": []},
- "_libs.sparse": {"pyxfile": "_libs/sparse", "depends": _pxi_dep["sparse"]},
- "_libs.tslib": {
- "pyxfile": "_libs/tslib",
- "depends": tseries_depends,
- "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
- },
- "_libs.tslibs.base": {"pyxfile": "_libs/tslibs/base"},
- "_libs.tslibs.ccalendar": {"pyxfile": "_libs/tslibs/ccalendar"},
- "_libs.tslibs.dtypes": {"pyxfile": "_libs/tslibs/dtypes"},
- "_libs.tslibs.conversion": {
- "pyxfile": "_libs/tslibs/conversion",
- "depends": tseries_depends,
- "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
- },
- "_libs.tslibs.fields": {
- "pyxfile": "_libs/tslibs/fields",
- "depends": tseries_depends,
- "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
- },
- "_libs.tslibs.nattype": {"pyxfile": "_libs/tslibs/nattype"},
- "_libs.tslibs.np_datetime": {
- "pyxfile": "_libs/tslibs/np_datetime",
- "depends": tseries_depends,
- "sources": [
- "pandas/_libs/tslibs/src/datetime/np_datetime.c",
- "pandas/_libs/tslibs/src/datetime/np_datetime_strings.c",
- ],
- },
- "_libs.tslibs.offsets": {
- "pyxfile": "_libs/tslibs/offsets",
- "depends": tseries_depends,
- "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
- },
- "_libs.tslibs.parsing": {
- "pyxfile": "_libs/tslibs/parsing",
- "include": klib_include,
- "depends": ["pandas/_libs/src/parser/tokenizer.h"],
- "sources": ["pandas/_libs/src/parser/tokenizer.c"],
- },
- "_libs.tslibs.period": {
- "pyxfile": "_libs/tslibs/period",
- "depends": tseries_depends,
- "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
- },
- "_libs.tslibs.strptime": {
- "pyxfile": "_libs/tslibs/strptime",
- "depends": tseries_depends,
- },
- "_libs.tslibs.timedeltas": {
- "pyxfile": "_libs/tslibs/timedeltas",
- "depends": tseries_depends,
- "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
- },
- "_libs.tslibs.timestamps": {
- "pyxfile": "_libs/tslibs/timestamps",
- "depends": tseries_depends,
- "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
- },
- "_libs.tslibs.timezones": {"pyxfile": "_libs/tslibs/timezones"},
- "_libs.tslibs.tzconversion": {
- "pyxfile": "_libs/tslibs/tzconversion",
- "depends": tseries_depends,
- "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
- },
- "_libs.tslibs.vectorized": {
- "pyxfile": "_libs/tslibs/vectorized",
- "depends": tseries_depends,
- "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
- },
- "_libs.testing": {"pyxfile": "_libs/testing"},
- "_libs.window.aggregations": {
- "pyxfile": "_libs/window/aggregations",
- "language": "c++",
- "suffix": ".cpp",
- "depends": ["pandas/_libs/src/skiplist.h"],
- },
- "_libs.window.indexers": {"pyxfile": "_libs/window/indexers"},
- "_libs.writers": {"pyxfile": "_libs/writers"},
- "io.sas._sas": {"pyxfile": "io/sas/sas"},
-}
-
-extensions = []
-
-for name, data in ext_data.items():
- source_suffix = suffix if suffix == ".pyx" else data.get("suffix", ".c")
-
- sources = [srcpath(data["pyxfile"], suffix=source_suffix, subdir="")]
-
- sources.extend(data.get("sources", []))
-
- include = data.get("include", [])
- include.append(numpy.get_include())
-
- undef_macros = []
-
- if (
- sys.platform == "zos"
- and data.get("language") == "c++"
- and os.path.basename(os.environ.get("CXX", "/bin/xlc++")) in ("xlc", "xlc++")
- ):
- data.get("macros", macros).append(("__s390__", "1"))
- extra_compile_args.append("-qlanglvl=extended0x:nolibext")
- undef_macros.append("_POSIX_THREADS")
-
- obj = Extension(
- f"pandas.{name}",
- sources=sources,
- depends=data.get("depends", []),
- include_dirs=include,
- language=data.get("language", "c"),
- define_macros=data.get("macros", macros),
- extra_compile_args=extra_compile_args,
- extra_link_args=extra_link_args,
- undef_macros=undef_macros,
- )
-
- extensions.append(obj)
-
-# ----------------------------------------------------------------------
-# ujson
-
-if suffix == ".pyx":
- # undo dumb setuptools bug clobbering .pyx sources back to .c
- for ext in extensions:
- if ext.sources[0].endswith((".c", ".cpp")):
- root, _ = os.path.splitext(ext.sources[0])
- ext.sources[0] = root + suffix
-
-ujson_ext = Extension(
- "pandas._libs.json",
- depends=[
- "pandas/_libs/src/ujson/lib/ultrajson.h",
- "pandas/_libs/src/ujson/python/date_conversions.h",
- ],
- sources=(
- [
- "pandas/_libs/src/ujson/python/ujson.c",
- "pandas/_libs/src/ujson/python/objToJSON.c",
- "pandas/_libs/src/ujson/python/date_conversions.c",
- "pandas/_libs/src/ujson/python/JSONtoObj.c",
- "pandas/_libs/src/ujson/lib/ultrajsonenc.c",
- "pandas/_libs/src/ujson/lib/ultrajsondec.c",
- ]
- + [
- "pandas/_libs/tslibs/src/datetime/np_datetime.c",
- "pandas/_libs/tslibs/src/datetime/np_datetime_strings.c",
- ]
- ),
- include_dirs=[
- "pandas/_libs/src/ujson/python",
- "pandas/_libs/src/ujson/lib",
- "pandas/_libs/src/datetime",
- numpy.get_include(),
- ],
- extra_compile_args=(["-D_GNU_SOURCE"] + extra_compile_args),
- extra_link_args=extra_link_args,
- define_macros=macros,
-)
-
-
-extensions.append(ujson_ext)
-
-# ----------------------------------------------------------------------
-
-
if __name__ == "__main__":
- # Freeze to support parallel compilation when using spawn instead of fork
- multiprocessing.freeze_support()
setup(
version=versioneer.get_version(),
- ext_modules=maybe_cythonize(extensions, compiler_directives=directives),
cmdclass=cmdclass,
)
| Proof of concept. I think we can greatly simplify our building system using this. @mroeschke @lithomas1 @Dr-Irv
This is in an intermediate state, but you can do:
```sh
python setup.py
cmake .
make -j$(nproc)
```
`make clean` will clean up build artifacts, which right now excludes the generated Cython files but should include them in the future.
Have the following TODOs:
[ ]: The CMAKE_SHARED_LIBRARY_SUFFIX is hard-coded to assume Py38 on linux
[ ]: Get rid of `python setup.py` and have CMake perform cythonization
[ ]: Update CI, documentation, etc...
| https://api.github.com/repos/pandas-dev/pandas/pulls/47380 | 2022-06-16T06:07:22Z | 2022-08-22T20:34:26Z | null | 2022-08-22T20:34:26Z |
TYP: ndim is consistently a property | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 5f060542526d3..c6d0625de00fa 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -193,12 +193,15 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
_typ = "datetimearray"
- _scalar_type = Timestamp
_internal_fill_value = np.datetime64("NaT", "ns")
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_any_dtype
_infer_matches = ("datetime", "datetime64", "date")
+ @property
+ def _scalar_type(self) -> type[Timestamp]:
+ return Timestamp
+
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 1015a54826ac8..e58032c48f8d3 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -8,6 +8,7 @@
import textwrap
from typing import (
TYPE_CHECKING,
+ Literal,
Sequence,
TypeVar,
Union,
@@ -204,10 +205,13 @@
}
)
class IntervalArray(IntervalMixin, ExtensionArray):
- ndim = 1
can_hold_na = True
_na_value = _fill_value = np.nan
+ @property
+ def ndim(self) -> Literal[1]:
+ return 1
+
# To make mypy recognize the fields
_left: np.ndarray
_right: np.ndarray
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 291810c5db2f9..7e6ea07c154cd 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -167,12 +167,15 @@ class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin):
# array priority higher than numpy scalars
__array_priority__ = 1000
_typ = "periodarray" # ABCPeriodArray
- _scalar_type = Period
_internal_fill_value = np.int64(iNaT)
_recognized_scalars = (Period,)
_is_recognized_dtype = is_period_dtype
_infer_matches = ("period",)
+ @property
+ def _scalar_type(self) -> type[Period]:
+ return Period
+
# Names others delegate to us
_other_ops: list[str] = []
_bool_ops: list[str] = ["is_leap_year"]
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index e08518a54fe6b..68f0d73b8556c 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -119,12 +119,15 @@ class TimedeltaArray(dtl.TimelikeOps):
"""
_typ = "timedeltaarray"
- _scalar_type = Timedelta
_internal_fill_value = np.timedelta64("NaT", "ns")
_recognized_scalars = (timedelta, np.timedelta64, Tick)
_is_recognized_dtype = is_timedelta64_dtype
_infer_matches = ("timedelta", "timedelta64")
+ @property
+ def _scalar_type(self) -> type[Timedelta]:
+ return Timedelta
+
__array_priority__ = 1000
# define my properties & methods for delegation
_other_ops: list[str] = []
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 7541eff9a11d4..3d18194d14bec 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -318,7 +318,7 @@ def __len__(self) -> int:
raise AbstractMethodError(self)
@property
- def ndim(self) -> int:
+ def ndim(self) -> Literal[1]:
"""
Number of dimensions of the underlying data, by definition 1.
"""
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py
index ee6c183898079..7d2e4129461a7 100644
--- a/pandas/core/internals/array_manager.py
+++ b/pandas/core/internals/array_manager.py
@@ -8,6 +8,7 @@
Any,
Callable,
Hashable,
+ Literal,
TypeVar,
)
@@ -704,7 +705,9 @@ def _equal_values(self, other) -> bool:
class ArrayManager(BaseArrayManager):
- ndim = 2
+ @property
+ def ndim(self) -> Literal[2]:
+ return 2
def __init__(
self,
@@ -1191,7 +1194,9 @@ class SingleArrayManager(BaseArrayManager, SingleDataManager):
arrays: list[np.ndarray | ExtensionArray]
_axes: list[Index]
- ndim = 1
+ @property
+ def ndim(self) -> Literal[1]:
+ return 1
def __init__(
self,
diff --git a/pandas/core/internals/base.py b/pandas/core/internals/base.py
index d8d1b6a34526c..ddc4495318568 100644
--- a/pandas/core/internals/base.py
+++ b/pandas/core/internals/base.py
@@ -5,6 +5,7 @@
from __future__ import annotations
from typing import (
+ Literal,
TypeVar,
final,
)
@@ -155,7 +156,9 @@ def _consolidate_inplace(self) -> None:
class SingleDataManager(DataManager):
- ndim = 1
+ @property
+ def ndim(self) -> Literal[1]:
+ return 1
@final
@property
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 7cccc9833de6b..435992f7d5cff 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -5,6 +5,7 @@
Any,
Callable,
Hashable,
+ Literal,
Sequence,
TypeVar,
cast,
@@ -142,7 +143,10 @@ class BaseBlockManager(DataManager):
blocks: tuple[Block, ...]
axes: list[Index]
- ndim: int
+ @property
+ def ndim(self) -> int:
+ raise NotImplementedError
+
_known_consolidated: bool
_is_consolidated: bool
@@ -1678,7 +1682,10 @@ def _consolidate_inplace(self) -> None:
class SingleBlockManager(BaseBlockManager, SingleDataManager):
"""manage a single block with"""
- ndim = 1
+ @property
+ def ndim(self) -> Literal[1]:
+ return 1
+
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 10881495c27b3..ea895e5656ccb 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -564,7 +564,7 @@ def test_shift_fill_int_deprecated(self):
expected = arr.copy()
if self.array_cls is PeriodArray:
- fill_val = PeriodArray._scalar_type._from_ordinal(1, freq=arr.freq)
+ fill_val = arr._scalar_type._from_ordinal(1, freq=arr.freq)
else:
fill_val = arr._scalar_type(1)
expected[0] = fill_val
diff --git a/pyright_reportGeneralTypeIssues.json b/pyright_reportGeneralTypeIssues.json
index a8ef1a77aaced..541ae4b198166 100644
--- a/pyright_reportGeneralTypeIssues.json
+++ b/pyright_reportGeneralTypeIssues.json
@@ -61,7 +61,6 @@
"pandas/core/indexing.py",
"pandas/core/internals/api.py",
"pandas/core/internals/array_manager.py",
- "pandas/core/internals/base.py",
"pandas/core/internals/blocks.py",
"pandas/core/internals/concat.py",
"pandas/core/internals/construction.py",
| and `_scalar_type` | https://api.github.com/repos/pandas-dev/pandas/pulls/47378 | 2022-06-16T01:50:39Z | 2022-06-25T17:08:50Z | 2022-06-25T17:08:50Z | 2022-09-21T15:29:49Z |
ENH/TST: Add BaseInterfaceTests tests for ArrowExtensionArray | diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py
index 1f35013075751..b2a8ec6bf62e8 100644
--- a/pandas/core/arrays/arrow/array.py
+++ b/pandas/core/arrays/arrow/array.py
@@ -31,6 +31,7 @@
)
from pandas.core.dtypes.missing import isna
+from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.base import ExtensionArray
from pandas.core.indexers import (
check_array_indexer,
@@ -45,13 +46,22 @@
from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning
from pandas.core.arrays.arrow.dtype import ArrowDtype
+ ARROW_CMP_FUNCS = {
+ "eq": pc.equal,
+ "ne": pc.not_equal,
+ "lt": pc.less,
+ "gt": pc.greater,
+ "le": pc.less_equal,
+ "ge": pc.greater_equal,
+ }
+
if TYPE_CHECKING:
from pandas import Series
ArrowExtensionArrayT = TypeVar("ArrowExtensionArrayT", bound="ArrowExtensionArray")
-class ArrowExtensionArray(ExtensionArray):
+class ArrowExtensionArray(OpsMixin, ExtensionArray):
"""
Base class for ExtensionArray backed by Arrow ChunkedArray.
"""
@@ -179,6 +189,34 @@ def __arrow_array__(self, type=None):
"""Convert myself to a pyarrow ChunkedArray."""
return self._data
+ def _cmp_method(self, other, op):
+ from pandas.arrays import BooleanArray
+
+ pc_func = ARROW_CMP_FUNCS[op.__name__]
+ if isinstance(other, ArrowExtensionArray):
+ result = pc_func(self._data, other._data)
+ elif isinstance(other, (np.ndarray, list)):
+ result = pc_func(self._data, other)
+ elif is_scalar(other):
+ try:
+ result = pc_func(self._data, pa.scalar(other))
+ except (pa.lib.ArrowNotImplementedError, pa.lib.ArrowInvalid):
+ mask = isna(self) | isna(other)
+ valid = ~mask
+ result = np.zeros(len(self), dtype="bool")
+ result[valid] = op(np.array(self)[valid], other)
+ return BooleanArray(result, mask)
+ else:
+ return NotImplementedError(
+ f"{op.__name__} not implemented for {type(other)}"
+ )
+
+ if pa_version_under2p0:
+ result = result.to_pandas().values
+ else:
+ result = result.to_numpy()
+ return BooleanArray._from_sequence(result)
+
def equals(self, other) -> bool:
if not isinstance(other, ArrowExtensionArray):
return False
@@ -581,7 +619,7 @@ def _replace_with_indices(
# fast path for a contiguous set of indices
arrays = [
chunk[:start],
- pa.array(value, type=chunk.type),
+ pa.array(value, type=chunk.type, from_pandas=True),
chunk[stop + 1 :],
]
arrays = [arr for arr in arrays if len(arr)]
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index a07f748fa0c8c..c4d1a35315d7d 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -34,7 +34,6 @@
)
from pandas.core.dtypes.missing import isna
-from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.arrow import ArrowExtensionArray
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
@@ -51,15 +50,6 @@
from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning
- ARROW_CMP_FUNCS = {
- "eq": pc.equal,
- "ne": pc.not_equal,
- "lt": pc.less,
- "gt": pc.greater,
- "le": pc.less_equal,
- "ge": pc.greater_equal,
- }
-
ArrowStringScalarOrNAT = Union[str, libmissing.NAType]
@@ -74,9 +64,7 @@ def _chk_pyarrow_available() -> None:
# fallback for the ones that pyarrow doesn't yet support
-class ArrowStringArray(
- OpsMixin, ArrowExtensionArray, BaseStringArray, ObjectStringArrayMixin
-):
+class ArrowStringArray(ArrowExtensionArray, BaseStringArray, ObjectStringArrayMixin):
"""
Extension array for string data in a ``pyarrow.ChunkedArray``.
@@ -190,32 +178,6 @@ def to_numpy(
result[mask] = na_value
return result
- def _cmp_method(self, other, op):
- from pandas.arrays import BooleanArray
-
- pc_func = ARROW_CMP_FUNCS[op.__name__]
- if isinstance(other, ArrowStringArray):
- result = pc_func(self._data, other._data)
- elif isinstance(other, (np.ndarray, list)):
- result = pc_func(self._data, other)
- elif is_scalar(other):
- try:
- result = pc_func(self._data, pa.scalar(other))
- except (pa.lib.ArrowNotImplementedError, pa.lib.ArrowInvalid):
- mask = isna(self) | isna(other)
- valid = ~mask
- result = np.zeros(len(self), dtype="bool")
- result[valid] = op(np.array(self)[valid], other)
- return BooleanArray(result, mask)
- else:
- return NotImplemented
-
- if pa_version_under2p0:
- result = result.to_pandas().values
- else:
- result = result.to_numpy()
- return BooleanArray._from_sequence(result)
-
def insert(self, loc: int, item):
if not isinstance(item, str) and item is not libmissing.NA:
raise TypeError("Scalar must be NA or str")
diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py
index 22595c4e461d7..26b94ebe5a8da 100644
--- a/pandas/tests/extension/arrow/arrays.py
+++ b/pandas/tests/extension/arrow/arrays.py
@@ -23,7 +23,6 @@
take,
)
from pandas.api.types import is_scalar
-from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.arrow import ArrowExtensionArray as _ArrowExtensionArray
from pandas.core.construction import extract_array
@@ -72,7 +71,7 @@ def construct_array_type(cls) -> type_t[ArrowStringArray]:
return ArrowStringArray
-class ArrowExtensionArray(OpsMixin, _ArrowExtensionArray):
+class ArrowExtensionArray(_ArrowExtensionArray):
_data: pa.ChunkedArray
@classmethod
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 95cb7045ac68d..9eeaf39959f29 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -93,6 +93,18 @@ def data_missing(data):
return type(data)._from_sequence([None, data[0]])
+@pytest.fixture(params=["data", "data_missing"])
+def all_data(request, data, data_missing):
+ """Parametrized fixture returning 'data' or 'data_missing' integer arrays.
+
+ Used to test dtype conversion with and without missing values.
+ """
+ if request.param == "data":
+ return data
+ elif request.param == "data_missing":
+ return data_missing
+
+
@pytest.fixture
def na_value():
"""The scalar missing value for this type. Default 'None'"""
@@ -271,6 +283,36 @@ class TestBaseIndex(base.BaseIndexTests):
pass
+class TestBaseInterface(base.BaseInterfaceTests):
+ def test_contains(self, data, data_missing, request):
+ tz = getattr(data.dtype.pyarrow_dtype, "tz", None)
+ unit = getattr(data.dtype.pyarrow_dtype, "unit", None)
+ if pa_version_under2p0 and tz not in (None, "UTC") and unit == "us":
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(
+ f"Not supported by pyarrow < 2.0 "
+ f"with timestamp type {tz} and {unit}"
+ )
+ )
+ )
+ super().test_contains(data, data_missing)
+
+ @pytest.mark.xfail(reason="pyarrow.ChunkedArray does not support views.")
+ def test_view(self, data):
+ super().test_view(data)
+
+
+class TestBaseMissing(base.BaseMissingTests):
+ pass
+
+
+class TestBaseSetitemTests(base.BaseSetitemTests):
+ @pytest.mark.xfail(reason="GH 45419: pyarrow.ChunkedArray does not support views")
+ def test_setitem_preserves_views(self, data):
+ super().test_setitem_preserves_views(data)
+
+
def test_arrowdtype_construct_from_string_type_with_unsupported_parameters():
with pytest.raises(NotImplementedError, match="Passing pyarrow type"):
ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]")
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47377 | 2022-06-16T01:09:18Z | 2022-06-22T01:22:47Z | 2022-06-22T01:22:47Z | 2022-06-22T16:57:15Z |
PERF: pandas read_excel perf optimisations | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index d20f347e54d6b..68bca2842df9b 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -565,7 +565,13 @@ def get_sheet_by_index(self, index: int):
pass
@abc.abstractmethod
- def get_sheet_data(self, sheet, convert_float: bool, rows: int | None = None):
+ def get_sheet_data(
+ self,
+ sheet,
+ convert_float: bool,
+ rows: int | None = None,
+ offset: int | None = None,
+ ):
pass
def raise_if_bad_sheet_by_index(self, index: int) -> None:
@@ -740,7 +746,16 @@ def parse(
sheet = self.get_sheet_by_index(asheetname)
file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows)
- data = self.get_sheet_data(sheet, convert_float, file_rows_needed)
+ file_offset_needed = None
+ if header is None:
+ if is_integer(skiprows):
+ file_offset_needed = skiprows
+ elif is_list_like(skiprows):
+ file_offset_needed = skiprows[0]
+
+ data = self.get_sheet_data(
+ sheet, convert_float, file_rows_needed, file_offset_needed
+ )
if hasattr(sheet, "close"):
# pyxlsb opens two TemporaryFiles
sheet.close()
@@ -817,6 +832,7 @@ def parse(
# GH 12292 : error when read one empty column from excel file
try:
+ skiprows = None if is_integer(skiprows) and header is None else skiprows
parser = TextParser(
data,
names=names,
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 075590f3535fe..292093663715d 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -90,7 +90,11 @@ def get_sheet_by_name(self, name: str):
raise ValueError(f"sheet {name} not found")
def get_sheet_data(
- self, sheet, convert_float: bool, file_rows_needed: int | None = None
+ self,
+ sheet,
+ convert_float: bool,
+ file_rows_needed: int | None = None,
+ file_offset_needed: int | None = None,
) -> list[list[Scalar | NaTType]]:
"""
Parse an ODF Table into a list of lists
@@ -111,7 +115,17 @@ def get_sheet_data(
table: list[list[Scalar | NaTType]] = []
- for sheet_row in sheet_rows:
+ loop_on = sheet_rows
+ if file_rows_needed:
+ loop_on = sheet_rows[: file_rows_needed + 1]
+ if file_offset_needed:
+ loop_on = sheet_rows[
+ file_offset_needed : file_offset_needed + file_rows_needed + 1
+ ]
+ elif file_offset_needed:
+ loop_on = sheet_rows[file_offset_needed:]
+
+ for sheet_row in loop_on:
sheet_cells = [
x
for x in sheet_row.childNodes
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index 87cc07d3fd21d..177651843d25a 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -595,36 +595,63 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar:
return cell.value
def get_sheet_data(
- self, sheet, convert_float: bool, file_rows_needed: int | None = None
+ self,
+ sheet,
+ convert_float: bool,
+ file_rows_needed: int | None = None,
+ file_offset_needed: int | None = None,
) -> list[list[Scalar]]:
if self.book.read_only:
sheet.reset_dimensions()
- data: list[list[Scalar]] = []
- last_row_with_data = -1
- for row_number, row in enumerate(sheet.rows):
- converted_row = [self._convert_cell(cell, convert_float) for cell in row]
- while converted_row and converted_row[-1] == "":
- # trim trailing empty elements
- converted_row.pop()
- if converted_row:
- last_row_with_data = row_number
- data.append(converted_row)
- if file_rows_needed is not None and len(data) >= file_rows_needed:
- break
-
- # Trim trailing empty rows
- data = data[: last_row_with_data + 1]
-
- if len(data) > 0:
- # extend rows to max width
- max_width = max(len(data_row) for data_row in data)
- if min(len(data_row) for data_row in data) < max_width:
- empty_cell: list[Scalar] = [""]
- data = [
- data_row + (max_width - len(data_row)) * empty_cell
- for data_row in data
+ def _loop_rows(iterator):
+ data: list[list[Scalar]] = []
+ last_row_with_data = -1
+ for row_number, row in iterator:
+ converted_row = [
+ self._convert_cell(cell, convert_float) for cell in row
]
+ while converted_row and converted_row[-1] == "":
+ # trim trailing empty elements
+ converted_row.pop()
+ if converted_row:
+ last_row_with_data = row_number
+ data.append(converted_row)
+ if file_rows_needed is not None and len(data) >= file_rows_needed:
+ break
+
+ # Trim trailing empty rows
+ data = data[: last_row_with_data + 1]
+
+ if data:
+ # extend rows to max width
+ max_width = max(len(data_row) for data_row in data)
+ if min(len(data_row) for data_row in data) < max_width:
+ empty_cell: list[Scalar] = [""]
+ data = [
+ data_row + (max_width - len(data_row)) * empty_cell
+ for data_row in data
+ ]
+
+ return data
+
+ data: list[list[Scalar]] = []
+ loop_on = sheet.rows
+ if file_rows_needed or file_offset_needed:
+ min_row = max_row = None
+ # +1 are here because this is 1-based indexing
+ if file_rows_needed:
+ max_row = file_rows_needed + 1
+ if file_offset_needed:
+ max_row += file_offset_needed
+
+ if file_offset_needed:
+ min_row = file_offset_needed + 1
+
+ # Then we return the generator
+ loop_on = list(sheet.iter_rows(min_row=min_row, max_row=max_row))
+
+ data = _loop_rows(enumerate(loop_on))
return data
diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py
index 5d40ccdf2f8f3..109c01f330188 100644
--- a/pandas/io/excel/_pyxlsb.py
+++ b/pandas/io/excel/_pyxlsb.py
@@ -84,21 +84,33 @@ def get_sheet_data(
sheet,
convert_float: bool,
file_rows_needed: int | None = None,
+ file_offset_needed: int | None = None,
) -> list[list[Scalar]]:
data: list[list[Scalar]] = []
- prevous_row_number = -1
+ loop_on = sheet.rows(sparse=True)
+ previous_row_number = -1
+
+ if file_rows_needed:
+ loop_on = loop_on[: file_rows_needed + 1]
+ if file_offset_needed:
+ loop_on = loop_on[
+ file_offset_needed : file_offset_needed + file_rows_needed + 1
+ ]
+ elif file_offset_needed:
+ loop_on = loop_on[file_offset_needed:]
+
# When sparse=True the rows can have different lengths and empty rows are
# not returned. The cells are namedtuples of row, col, value (r, c, v).
- for row in sheet.rows(sparse=True):
+ for row in loop_on:
row_number = row[0].r
converted_row = [self._convert_cell(cell, convert_float) for cell in row]
while converted_row and converted_row[-1] == "":
# trim trailing empty elements
converted_row.pop()
if converted_row:
- data.extend([[]] * (row_number - prevous_row_number - 1))
+ data.extend([[]] * (row_number - previous_row_number - 1))
data.append(converted_row)
- prevous_row_number = row_number
+ previous_row_number = row_number
if file_rows_needed is not None and len(data) >= file_rows_needed:
break
if data:
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index 0bf3ac6134cf6..07fd5ce0e2e4b 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -62,7 +62,11 @@ def get_sheet_by_index(self, index):
return self.book.sheet_by_index(index)
def get_sheet_data(
- self, sheet, convert_float: bool, file_rows_needed: int | None = None
+ self,
+ sheet,
+ convert_float: bool,
+ file_rows_needed: int | None = None,
+ file_offset_needed: int | None = None,
) -> list[list[Scalar]]:
from xlrd import (
XL_CELL_BOOLEAN,
@@ -115,13 +119,26 @@ def _parse_cell(cell_contents, cell_typ):
data = []
nrows = sheet.nrows
- if file_rows_needed is not None:
- nrows = min(nrows, file_rows_needed)
- for i in range(nrows):
- row = [
- _parse_cell(value, typ)
- for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
- ]
- data.append(row)
+ nrows_range = range(nrows)
+ if file_rows_needed:
+ nrows_range = range(min(nrows, file_rows_needed))
+ if file_offset_needed:
+ nrows_range = range(
+ file_offset_needed, file_offset_needed + file_rows_needed
+ )
+ elif file_offset_needed:
+ nrows_range = range(file_offset_needed, nrows)
+
+ data = []
+
+ for i in nrows_range:
+ try:
+ row = [
+ _parse_cell(value, typ)
+ for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
+ ]
+ data.append(row)
+ except IndexError:
+ break
return data
| #### WHAT
Attempts on perf optimisations for pandas read_excel
by descending the skiprows (as an integer) to get_sheet_data depending on the engine
openpyxl, xlrd, pyxlsb or odf...
STATUS : Working in progress...
- [ ] closes #47290 (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
#### BENCHMARKS
##### BENCH SPECS
OS: Ubuntu 20.04.4 LTS x86_64
Host: 20TH0010FR ThinkPad P1 Gen 3
Kernel: 5.14.0-1038-oem
CPU: Intel i7-10750H (12) @ 5.000GHz
GPU: NVIDIA Quadro T1000 Mobile
GPU: Intel UHD Graphics
Memory: 47849MiB
Python 3.8.6 - [GCC 9.4.0]
##### BENCH SCRIPT
```python
import pandas as pd
from timeit import default_timer
def bench_mark_func():
print(f">>> {pd.__version__}")
for ext in ["xls", "xlsx", "xlsb", "ods"]:
print(f"\n[{ext}] no nrows, nor skiprows :")
start = default_timer()
for i in range(100):
pd.read_excel(f"./fixtures/benchmark_5000.{ext}")
print(f"[{ext}] done in {default_timer() - start}")
print("*" * 30)
print(f"\n[{ext}] with nrows and skiprows (reading top lines):")
start = default_timer()
for i in range(100):
pd.read_excel(
f"./fixtures/benchmark_5000.{ext}", nrows=50 * i, skiprows=100 + i
)
print(f"[{ext}] done in {default_timer() - start}")
print("*" * 30)
print(f"\n[{ext}] with nrows and skiprows (reading middle lines):")
start = default_timer()
for i in range(100):
pd.read_excel(
f"./fixtures/benchmark_5000.{ext}", nrows=50 * i, skiprows=2000 + i
)
print(f"[{ext}] done in {default_timer() - start}")
print("*" * 30)
print(f"\n[{ext}] with nrows and skiprows (reading bottom lines):")
start = default_timer()
for i in range(100):
pd.read_excel(
f"./fixtures/benchmark_5000.{ext}", nrows=50 * i, skiprows=4000 + i
)
print(f"[{ext}] done in {default_timer() - start}")
print("*" * 30)
print("==" * 30)
if __name__ == "__main__":
bench_mark_func()
```
Fixtures are available here : [fixtures](https://github.com/Sanix-Darker/pandas/tree/toucan-pandas-excel-preview/fixtures)
##### BENCH REPORTS
##### xls format - - -
| | main branch | perf branch | diff (second)
| --------------- |------------ |------------- |--------------
| no nrows, nor skiprows |9.355613674968481 |9.48072951193899 |__-0.1251158369705081__
| nrows and skiprows (top lines) |8.50429745996371 |8.566174849052913 |__-0.06187738908920437__
| nrows and skiprows (middle lines) |9.003354059066623 |9.093110702931881 |__-0.0897566438652575__
| nrows and skiprows (bottom lines) |9.281007815967314 |9.35020213900134 |__-0.06919432303402573__
| | | __AVERAGE__ |__-0.08648604823974892__
##### xlsx format + + +
| | main branch | perf branch | diff (second)
| --------------- |------------ |------------- |--------------
| no nrows, nor skiprows |47.444979660911486 |47.119721286930144 |__+0.3252583739813417__
| nrows and skiprows (top lines) |25.616206042002887 |25.10350460803602 |__+0.5127014339668676__
| nrows and skiprows (middle lines) |39.18539171805605 |38.66645851393696 |__+0.5189332041190937__
| nrows and skiprows (bottom lines) |46.56151840998791 |45.87342134909704 |__+0.6880970608908683__
| | | __AVERAGE__ |__+0.5112475182395428__
##### xlsb format - - -
| | main branch | perf branch | diff (second)
| --------------- |------------ |------------- |--------------
| no nrows, nor skiprows |9.378919824026525 |9.404310946003534 |__-0.02539112197700888__
| nrows and skiprows (top lines) |8.567789324908517 |8.607287417980842 |__-0.03949809307232499__
| nrows and skiprows (middle lines) |9.089394484995864 |9.141500656027347 |__-0.05210617103148252__
| nrows and skiprows (bottom lines) |9.342884571058676 |9.409172061015852 |__-0.06628748995717615__
| | | __AVERAGE__ |__-0.045820719009498134__
##### ods format - - -
| | main branch | perf branch | diff (second)
| --------------- |------------ |------------- |--------------
| no nrows, nor skiprows |9.38043470599223 |9.435379554051906 |__-0.05494484805967659__
| nrows and skiprows (top lines) |8.553725612931885 |8.641130893025547 |__-0.08740528009366244__
| nrows and skiprows (middle lines) |8.839795237989165 |8.97894280392211 |__-0.13914756593294442__
| nrows and skiprows (bottom lines) |9.031556493951939 |9.168779775965959 |__-0.1372232820140198__
| | | __AVERAGE__ |__-0.10468024402507581__
#### NOTE
Where openpyxl is beeing optimized, other readers's engine are not... but with 'realy low differences'.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47376 | 2022-06-15T19:55:11Z | 2022-07-21T17:40:54Z | null | 2022-07-21T17:40:55Z |
DOC: move enhancements in 1.5 release notes | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 76f6e864a174f..6cd44b39a18ad 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -147,6 +147,85 @@ If the compression method cannot be inferred, use the ``compression`` argument:
(``mode`` being one of ``tarfile.open``'s modes: https://docs.python.org/3/library/tarfile.html#tarfile.open)
+.. _whatsnew_150.enhancements.read_xml_dtypes:
+
+read_xml now supports ``dtype``, ``converters``, and ``parse_dates``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Similar to other IO methods, :func:`pandas.read_xml` now supports assigning specific dtypes to columns,
+apply converter methods, and parse dates (:issue:`43567`).
+
+.. ipython:: python
+
+ xml_dates = """<?xml version='1.0' encoding='utf-8'?>
+ <data>
+ <row>
+ <shape>square</shape>
+ <degrees>00360</degrees>
+ <sides>4.0</sides>
+ <date>2020-01-01</date>
+ </row>
+ <row>
+ <shape>circle</shape>
+ <degrees>00360</degrees>
+ <sides/>
+ <date>2021-01-01</date>
+ </row>
+ <row>
+ <shape>triangle</shape>
+ <degrees>00180</degrees>
+ <sides>3.0</sides>
+ <date>2022-01-01</date>
+ </row>
+ </data>"""
+
+ df = pd.read_xml(
+ xml_dates,
+ dtype={'sides': 'Int64'},
+ converters={'degrees': str},
+ parse_dates=['date']
+ )
+ df
+ df.dtypes
+
+
+.. _whatsnew_150.enhancements.read_xml_iterparse:
+
+read_xml now supports large XML using ``iterparse``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+For very large XML files that can range in hundreds of megabytes to gigabytes, :func:`pandas.read_xml`
+now supports parsing such sizeable files using `lxml's iterparse`_ and `etree's iterparse`_
+which are memory-efficient methods to iterate through XML trees and extract specific elements
+and attributes without holding entire tree in memory (:issue:`45442`).
+
+.. code-block:: ipython
+
+ In [1]: df = pd.read_xml(
+ ... "/path/to/downloaded/enwikisource-latest-pages-articles.xml",
+ ... iterparse = {"page": ["title", "ns", "id"]})
+ ... )
+ df
+ Out[2]:
+ title ns id
+ 0 Gettysburg Address 0 21450
+ 1 Main Page 0 42950
+ 2 Declaration by United Nations 0 8435
+ 3 Constitution of the United States of America 0 8435
+ 4 Declaration of Independence (Israel) 0 17858
+ ... ... ... ...
+ 3578760 Page:Black cat 1897 07 v2 n10.pdf/17 104 219649
+ 3578761 Page:Black cat 1897 07 v2 n10.pdf/43 104 219649
+ 3578762 Page:Black cat 1897 07 v2 n10.pdf/44 104 219649
+ 3578763 The History of Tom Jones, a Foundling/Book IX 0 12084291
+ 3578764 Page:Shakespeare of Stratford (1926) Yale.djvu/91 104 21450
+
+ [3578765 rows x 3 columns]
+
+
+.. _`lxml's iterparse`: https://lxml.de/3.2/parsing.html#iterparse-and-iterwalk
+.. _`etree's iterparse`: https://docs.python.org/3/library/xml.etree.elementtree.html#xml.etree.ElementTree.iterparse
+
.. _whatsnew_150.enhancements.other:
Other enhancements
@@ -294,83 +373,10 @@ upon serialization. (Related issue :issue:`12997`)
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. _whatsnew_150.api_breaking.read_xml_dtypes:
-
-read_xml now supports ``dtype``, ``converters``, and ``parse_dates``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Similar to other IO methods, :func:`pandas.read_xml` now supports assigning specific dtypes to columns,
-apply converter methods, and parse dates (:issue:`43567`).
-
-.. ipython:: python
-
- xml_dates = """<?xml version='1.0' encoding='utf-8'?>
- <data>
- <row>
- <shape>square</shape>
- <degrees>00360</degrees>
- <sides>4.0</sides>
- <date>2020-01-01</date>
- </row>
- <row>
- <shape>circle</shape>
- <degrees>00360</degrees>
- <sides/>
- <date>2021-01-01</date>
- </row>
- <row>
- <shape>triangle</shape>
- <degrees>00180</degrees>
- <sides>3.0</sides>
- <date>2022-01-01</date>
- </row>
- </data>"""
+.. _whatsnew_150.api_breaking.api_breaking1:
- df = pd.read_xml(
- xml_dates,
- dtype={'sides': 'Int64'},
- converters={'degrees': str},
- parse_dates=['date']
- )
- df
- df.dtypes
-
-.. _whatsnew_150.read_xml_iterparse:
-
-read_xml now supports large XML using ``iterparse``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-For very large XML files that can range in hundreds of megabytes to gigabytes, :func:`pandas.read_xml`
-now supports parsing such sizeable files using `lxml's iterparse`_ and `etree's iterparse`_
-which are memory-efficient methods to iterate through XML trees and extract specific elements
-and attributes without holding entire tree in memory (:issue:`#45442`).
-
-.. code-block:: ipython
-
- In [1]: df = pd.read_xml(
- ... "/path/to/downloaded/enwikisource-latest-pages-articles.xml",
- ... iterparse = {"page": ["title", "ns", "id"]})
- ... )
- df
- Out[2]:
- title ns id
- 0 Gettysburg Address 0 21450
- 1 Main Page 0 42950
- 2 Declaration by United Nations 0 8435
- 3 Constitution of the United States of America 0 8435
- 4 Declaration of Independence (Israel) 0 17858
- ... ... ... ...
- 3578760 Page:Black cat 1897 07 v2 n10.pdf/17 104 219649
- 3578761 Page:Black cat 1897 07 v2 n10.pdf/43 104 219649
- 3578762 Page:Black cat 1897 07 v2 n10.pdf/44 104 219649
- 3578763 The History of Tom Jones, a Foundling/Book IX 0 12084291
- 3578764 Page:Shakespeare of Stratford (1926) Yale.djvu/91 104 21450
-
- [3578765 rows x 3 columns]
-
-
-.. _`lxml's iterparse`: https://lxml.de/3.2/parsing.html#iterparse-and-iterwalk
-.. _`etree's iterparse`: https://docs.python.org/3/library/xml.etree.elementtree.html#xml.etree.ElementTree.iterparse
+api_breaking_change1
+^^^^^^^^^^^^^^^^^^^^
.. _whatsnew_150.api_breaking.api_breaking2:
| @ParfaitG can you confirm these are enhancements and not api breaking changes. | https://api.github.com/repos/pandas-dev/pandas/pulls/47375 | 2022-06-15T19:17:39Z | 2022-06-23T21:37:06Z | 2022-06-23T21:37:06Z | 2022-06-24T09:39:34Z |
ENH: DTA/DTI __repr__ support non-nano | diff --git a/pandas/_libs/tslib.pyi b/pandas/_libs/tslib.pyi
index 4b02235ac9925..2212f8db8ea1e 100644
--- a/pandas/_libs/tslib.pyi
+++ b/pandas/_libs/tslib.pyi
@@ -9,6 +9,7 @@ def format_array_from_datetime(
tz: tzinfo | None = ...,
format: str | None = ...,
na_rep: object = ...,
+ reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.object_]: ...
def array_with_unit_to_datetime(
values: np.ndarray,
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index e6bbf52ab1272..f94314297dc62 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -28,11 +28,12 @@ import pytz
from pandas._libs.tslibs.np_datetime cimport (
NPY_DATETIMEUNIT,
+ NPY_FR_ns,
check_dts_bounds,
- dt64_to_dtstruct,
dtstruct_to_dt64,
get_datetime64_value,
npy_datetimestruct,
+ pandas_datetime_to_datetimestruct,
pydate_to_dt64,
pydatetime_to_dt64,
string_to_dts,
@@ -107,7 +108,8 @@ def format_array_from_datetime(
ndarray[int64_t] values,
tzinfo tz=None,
str format=None,
- object na_rep=None
+ object na_rep=None,
+ NPY_DATETIMEUNIT reso=NPY_FR_ns,
) -> np.ndarray:
"""
return a np object array of the string formatted values
@@ -120,6 +122,7 @@ def format_array_from_datetime(
a strftime capable string
na_rep : optional, default is None
a nat format
+ reso : NPY_DATETIMEUNIT, default NPY_FR_ns
Returns
-------
@@ -141,7 +144,7 @@ def format_array_from_datetime(
# a format based on precision
basic_format = format is None and tz is None
if basic_format:
- reso_obj = get_resolution(values)
+ reso_obj = get_resolution(values, reso=reso)
show_ns = reso_obj == Resolution.RESO_NS
show_us = reso_obj == Resolution.RESO_US
show_ms = reso_obj == Resolution.RESO_MS
@@ -153,7 +156,7 @@ def format_array_from_datetime(
result[i] = na_rep
elif basic_format:
- dt64_to_dtstruct(val, &dts)
+ pandas_datetime_to_datetimestruct(val, reso, &dts)
res = (f'{dts.year}-{dts.month:02d}-{dts.day:02d} '
f'{dts.hour:02d}:{dts.min:02d}:{dts.sec:02d}')
@@ -169,7 +172,7 @@ def format_array_from_datetime(
else:
- ts = Timestamp(val, tz=tz)
+ ts = Timestamp._from_value_and_reso(val, reso=reso, tz=tz)
if format is None:
result[i] = str(ts)
else:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 18133d7cf25ea..6ecb89b02afe3 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -696,7 +696,7 @@ def _format_native_types(
fmt = get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(
- self.asi8, tz=self.tz, format=fmt, na_rep=na_rep
+ self.asi8, tz=self.tz, format=fmt, na_rep=na_rep, reso=self._reso
)
# -----------------------------------------------------------------
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 045e74c1b6083..cf5e35f6ddcd1 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -42,7 +42,9 @@
NaT,
Timedelta,
Timestamp,
+ get_unit_from_dtype,
iNaT,
+ periods_per_day,
)
from pandas._libs.tslibs.nattype import NaTType
from pandas._typing import (
@@ -1738,16 +1740,21 @@ def is_dates_only(values: np.ndarray | DatetimeArray | Index | DatetimeIndex) ->
if not isinstance(values, Index):
values = values.ravel()
- values = DatetimeIndex(values)
+ if not isinstance(values, (DatetimeArray, DatetimeIndex)):
+ values = DatetimeIndex(values)
+
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
- one_day_nanos = 86400 * 10**9
- even_days = (
- np.logical_and(consider_values, values_int % int(one_day_nanos) != 0).sum() == 0
- )
+ # error: Argument 1 to "py_get_unit_from_dtype" has incompatible type
+ # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]"
+ reso = get_unit_from_dtype(values.dtype) # type: ignore[arg-type]
+ ppd = periods_per_day(reso)
+
+ # TODO: can we reuse is_date_array_normalized? would need a skipna kwd
+ even_days = np.logical_and(consider_values, values_int % ppd != 0).sum() == 0
if even_days:
return True
return False
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index d82e865b069aa..6c6a8b269aee8 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -155,6 +155,20 @@ def test_time_date(self, dta_dti, meth):
expected = getattr(dti, meth)
tm.assert_numpy_array_equal(result, expected)
+ def test_format_native_types(self, unit, reso, dtype, dta_dti):
+ # In this case we should get the same formatted values with our nano
+ # version dti._data as we do with the non-nano dta
+ dta, dti = dta_dti
+
+ res = dta._format_native_types()
+ exp = dti._data._format_native_types()
+ tm.assert_numpy_array_equal(res, exp)
+
+ def test_repr(self, dta_dti, unit):
+ dta, dti = dta_dti
+
+ assert repr(dta) == repr(dti._data).replace("[ns", f"[{unit}")
+
class TestDatetimeArrayComparisons:
# TODO: merge this into tests/arithmetic/test_datetime64 once it is
diff --git a/setup.py b/setup.py
index 27e6d8cb10025..70adbd3c083af 100755
--- a/setup.py
+++ b/setup.py
@@ -492,7 +492,11 @@ def srcpath(name=None, suffix=".pyx", subdir="src"):
"_libs.properties": {"pyxfile": "_libs/properties"},
"_libs.reshape": {"pyxfile": "_libs/reshape", "depends": []},
"_libs.sparse": {"pyxfile": "_libs/sparse", "depends": _pxi_dep["sparse"]},
- "_libs.tslib": {"pyxfile": "_libs/tslib", "depends": tseries_depends},
+ "_libs.tslib": {
+ "pyxfile": "_libs/tslib",
+ "depends": tseries_depends,
+ "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
+ },
"_libs.tslibs.base": {"pyxfile": "_libs/tslibs/base"},
"_libs.tslibs.ccalendar": {"pyxfile": "_libs/tslibs/ccalendar"},
"_libs.tslibs.dtypes": {"pyxfile": "_libs/tslibs/dtypes"},
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47374 | 2022-06-15T19:09:32Z | 2022-06-16T01:16:31Z | 2022-06-16T01:16:31Z | 2022-06-16T01:18:48Z |
ENH: Timedelta division support non-nano | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 028371633a2c1..dfd64dd50f213 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1742,13 +1742,21 @@ class Timedelta(_Timedelta):
other = Timedelta(other)
if other is NaT:
return np.nan
+ if other._reso != self._reso:
+ raise ValueError(
+ "division between Timedeltas with mismatched resolutions "
+ "are not supported. Explicitly cast to matching resolutions "
+ "before dividing."
+ )
return self.value / float(other.value)
elif is_integer_object(other) or is_float_object(other):
# integers or floats
- if self._reso != NPY_FR_ns:
- raise NotImplementedError
- return Timedelta(self.value / other, unit='ns')
+ if util.is_nan(other):
+ return NaT
+ return Timedelta._from_value_and_reso(
+ <int64_t>(self.value / other), self._reso
+ )
elif is_array(other):
return self.to_timedelta64() / other
@@ -1761,8 +1769,12 @@ class Timedelta(_Timedelta):
other = Timedelta(other)
if other is NaT:
return np.nan
- if self._reso != NPY_FR_ns:
- raise NotImplementedError
+ if self._reso != other._reso:
+ raise ValueError(
+ "division between Timedeltas with mismatched resolutions "
+ "are not supported. Explicitly cast to matching resolutions "
+ "before dividing."
+ )
return float(other.value) / self.value
elif is_array(other):
@@ -1781,14 +1793,18 @@ class Timedelta(_Timedelta):
other = Timedelta(other)
if other is NaT:
return np.nan
- if self._reso != NPY_FR_ns:
- raise NotImplementedError
+ if self._reso != other._reso:
+ raise ValueError(
+ "floordivision between Timedeltas with mismatched resolutions "
+ "are not supported. Explicitly cast to matching resolutions "
+ "before dividing."
+ )
return self.value // other.value
elif is_integer_object(other) or is_float_object(other):
- if self._reso != NPY_FR_ns:
- raise NotImplementedError
- return Timedelta(self.value // other, unit='ns')
+ if util.is_nan(other):
+ return NaT
+ return type(self)._from_value_and_reso(self.value // other, self._reso)
elif is_array(other):
if other.dtype.kind == 'm':
@@ -1798,9 +1814,7 @@ class Timedelta(_Timedelta):
return _broadcast_floordiv_td64(self.value, other, _floordiv)
elif other.dtype.kind in ['i', 'u', 'f']:
if other.ndim == 0:
- if self._reso != NPY_FR_ns:
- raise NotImplementedError
- return Timedelta(self.value // other)
+ return self // other.item()
else:
return self.to_timedelta64() // other
@@ -1816,8 +1830,12 @@ class Timedelta(_Timedelta):
other = Timedelta(other)
if other is NaT:
return np.nan
- if self._reso != NPY_FR_ns:
- raise NotImplementedError
+ if self._reso != other._reso:
+ raise ValueError(
+ "floordivision between Timedeltas with mismatched resolutions "
+ "are not supported. Explicitly cast to matching resolutions "
+ "before dividing."
+ )
return other.value // self.value
elif is_array(other):
@@ -1914,10 +1932,10 @@ cdef _broadcast_floordiv_td64(
if mask:
return np.nan
- return operation(value, other.astype('m8[ns]').astype('i8'))
+ return operation(value, other.astype('m8[ns]', copy=False).astype('i8'))
else:
- res = operation(value, other.astype('m8[ns]').astype('i8'))
+ res = operation(value, other.astype('m8[ns]', copy=False).astype('i8'))
if mask.any():
res = res.astype('f8')
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 5ae6ed9f13ece..00072e6724a6b 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -173,6 +173,64 @@ def test_to_timedelta64(self, td, unit):
elif unit == 9:
assert res.dtype == "m8[us]"
+ def test_truediv_timedeltalike(self, td):
+ assert td / td == 1
+ assert (2.5 * td) / td == 2.5
+
+ other = Timedelta(td.value)
+ msg = "with mismatched resolutions are not supported"
+ with pytest.raises(ValueError, match=msg):
+ td / other
+
+ with pytest.raises(ValueError, match=msg):
+ # __rtruediv__
+ other.to_pytimedelta() / td
+
+ def test_truediv_numeric(self, td):
+ assert td / np.nan is NaT
+
+ res = td / 2
+ assert res.value == td.value / 2
+ assert res._reso == td._reso
+
+ res = td / 2.0
+ assert res.value == td.value / 2
+ assert res._reso == td._reso
+
+ def test_floordiv_timedeltalike(self, td):
+ assert td // td == 1
+ assert (2.5 * td) // td == 2
+
+ other = Timedelta(td.value)
+ msg = "with mismatched resolutions are not supported"
+ with pytest.raises(ValueError, match=msg):
+ td // other
+
+ with pytest.raises(ValueError, match=msg):
+ # __rfloordiv__
+ other.to_pytimedelta() // td
+
+ def test_floordiv_numeric(self, td):
+ assert td // np.nan is NaT
+
+ res = td // 2
+ assert res.value == td.value // 2
+ assert res._reso == td._reso
+
+ res = td // 2.0
+ assert res.value == td.value // 2
+ assert res._reso == td._reso
+
+ assert td // np.array(np.nan) is NaT
+
+ res = td // np.array(2)
+ assert res.value == td.value // 2
+ assert res._reso == td._reso
+
+ res = td // np.array(2.0)
+ assert res.value == td.value // 2
+ assert res._reso == td._reso
+
class TestTimedeltaUnaryOps:
def test_invert(self):
| For now settled on disallowing division with mismatched resos. Will want to revisit that decision as part of a larger API discussion. | https://api.github.com/repos/pandas-dev/pandas/pulls/47373 | 2022-06-15T19:00:17Z | 2022-06-15T22:15:31Z | 2022-06-15T22:15:31Z | 2022-06-15T22:49:23Z |
REGR: revert behaviour change for concat with empty/all-NaN data | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 52aa9312d4c14..697070e50a40a 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -271,6 +271,9 @@ the given ``dayfirst`` value when the value is a delimited date string (e.g.
Ignoring dtypes in concat with empty or all-NA columns
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. note::
+ This behaviour change has been reverted in pandas 1.4.3.
+
When using :func:`concat` to concatenate two or more :class:`DataFrame` objects,
if one of the DataFrames was empty or had all-NA values, its dtype was
*sometimes* ignored when finding the concatenated dtype. These are now
@@ -301,9 +304,15 @@ object, the ``np.nan`` is retained.
*New behavior*:
-.. ipython:: python
+.. code-block:: ipython
+
+ In [4]: res
+ Out[4]:
+ bar
+ 0 2013-01-01 00:00:00
+ 1 NaN
+
- res
.. _whatsnew_140.notable_bug_fixes.value_counts_and_mode_do_not_coerce_to_nan:
diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index de15a318ed547..2550a12ebbb9d 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -10,6 +10,17 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
+.. _whatsnew_143.concat:
+
+Behaviour of ``concat`` with empty or all-NA DataFrame columns
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The behaviour change in version 1.4.0 to stop ignoring the data type
+of empty or all-NA columns with float or object dtype in :func:`concat`
+(:ref:`whatsnew_140.notable_bug_fixes.concat_with_empty_or_all_na`) has been
+reverted (:issue:`45637`).
+
+
.. _whatsnew_143.regressions:
Fixed regressions
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 4316109da1cbb..37b42ad66c027 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -18,6 +18,7 @@
import pandas._libs.missing as libmissing
from pandas._libs.tslibs import (
NaT,
+ Period,
iNaT,
)
@@ -739,3 +740,40 @@ def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool:
# fallback, default to allowing NaN, None, NA, NaT
return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal))
+
+
+def isna_all(arr: ArrayLike) -> bool:
+ """
+ Optimized equivalent to isna(arr).all()
+ """
+ total_len = len(arr)
+
+ # Usually it's enough to check but a small fraction of values to see if
+ # a block is NOT null, chunks should help in such cases.
+ # parameters 1000 and 40 were chosen arbitrarily
+ chunk_len = max(total_len // 40, 1000)
+
+ dtype = arr.dtype
+ if dtype.kind == "f":
+ checker = nan_checker
+
+ elif dtype.kind in ["m", "M"] or dtype.type is Period:
+ # error: Incompatible types in assignment (expression has type
+ # "Callable[[Any], Any]", variable has type "ufunc")
+ checker = lambda x: np.asarray(x.view("i8")) == iNaT # type: ignore[assignment]
+
+ else:
+ # error: Incompatible types in assignment (expression has type "Callable[[Any],
+ # Any]", variable has type "ufunc")
+ checker = lambda x: _isna_array( # type: ignore[assignment]
+ x, inf_as_na=INF_AS_NA
+ )
+
+ return all(
+ # error: Argument 1 to "__call__" of "ufunc" has incompatible type
+ # "Union[ExtensionArray, Any]"; expected "Union[Union[int, float, complex, str,
+ # bytes, generic], Sequence[Union[int, float, complex, str, bytes, generic]],
+ # Sequence[Sequence[Any]], _SupportsArray]"
+ checker(arr[i : i + chunk_len]).all() # type: ignore[arg-type]
+ for i in range(0, total_len, chunk_len)
+ )
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 8ce98f3891ff4..4a352d614e1d9 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import copy
import itertools
from typing import (
TYPE_CHECKING,
@@ -13,6 +14,7 @@
NaT,
internals as libinternals,
)
+from pandas._libs.missing import NA
from pandas._typing import (
ArrayLike,
DtypeObj,
@@ -29,17 +31,26 @@
is_1d_only_ea_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
+ is_scalar,
+ needs_i8_conversion,
)
from pandas.core.dtypes.concat import (
cast_to_common_type,
concat_compat,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
+from pandas.core.dtypes.missing import (
+ is_valid_na_for_dtype,
+ isna,
+ isna_all,
+)
+import pandas.core.algorithms as algos
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
+from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.internals.array_manager import (
ArrayManager,
@@ -191,29 +202,19 @@ def concatenate_managers(
if isinstance(mgrs_indexers[0][0], ArrayManager):
return _concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy)
- # Assertions disabled for performance
- # for tup in mgrs_indexers:
- # # caller is responsible for ensuring this
- # indexers = tup[1]
- # assert concat_axis not in indexers
-
- if concat_axis == 0:
- return _concat_managers_axis0(mgrs_indexers, axes, copy)
-
mgrs_indexers = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers)
- # Assertion disabled for performance
- # assert all(not x[1] for x in mgrs_indexers)
-
- concat_plans = [_get_mgr_concatenation_plan(mgr) for mgr, _ in mgrs_indexers]
- concat_plan = _combine_concat_plans(concat_plans)
+ concat_plans = [
+ _get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
+ ]
+ concat_plan = _combine_concat_plans(concat_plans, concat_axis)
blocks = []
for placement, join_units in concat_plan:
unit = join_units[0]
blk = unit.block
- if len(join_units) == 1:
+ if len(join_units) == 1 and not join_units[0].indexers:
values = blk.values
if copy:
values = values.copy()
@@ -237,7 +238,7 @@ def concatenate_managers(
fastpath = blk.values.dtype == values.dtype
else:
- values = _concatenate_join_units(join_units, copy=copy)
+ values = _concatenate_join_units(join_units, concat_axis, copy=copy)
fastpath = False
if fastpath:
@@ -250,42 +251,6 @@ def concatenate_managers(
return BlockManager(tuple(blocks), axes)
-def _concat_managers_axis0(
- mgrs_indexers, axes: list[Index], copy: bool
-) -> BlockManager:
- """
- concat_managers specialized to concat_axis=0, with reindexing already
- having been done in _maybe_reindex_columns_na_proxy.
- """
- had_reindexers = {
- i: len(mgrs_indexers[i][1]) > 0 for i in range(len(mgrs_indexers))
- }
- mgrs_indexers = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers)
-
- mgrs = [x[0] for x in mgrs_indexers]
-
- offset = 0
- blocks = []
- for i, mgr in enumerate(mgrs):
- # If we already reindexed, then we definitely don't need another copy
- made_copy = had_reindexers[i]
-
- for blk in mgr.blocks:
- if made_copy:
- nb = blk.copy(deep=False)
- elif copy:
- nb = blk.copy()
- else:
- # by slicing instead of copy(deep=False), we get a new array
- # object, see test_concat_copy
- nb = blk.getitem_block(slice(None))
- nb._mgr_locs = nb._mgr_locs.add(offset)
- blocks.append(nb)
-
- offset += len(mgr.items)
- return BlockManager(tuple(blocks), axes)
-
-
def _maybe_reindex_columns_na_proxy(
axes: list[Index], mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]]
) -> list[tuple[BlockManager, dict[int, np.ndarray]]]:
@@ -296,33 +261,36 @@ def _maybe_reindex_columns_na_proxy(
Columns added in this reindexing have dtype=np.void, indicating they
should be ignored when choosing a column's final dtype.
"""
- new_mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]] = []
-
+ new_mgrs_indexers = []
for mgr, indexers in mgrs_indexers:
- # For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this
- # is a cheap reindexing.
- for i, indexer in indexers.items():
- mgr = mgr.reindex_indexer(
- axes[i],
- indexers[i],
- axis=i,
+ # We only reindex for axis=0 (i.e. columns), as this can be done cheaply
+ if 0 in indexers:
+ new_mgr = mgr.reindex_indexer(
+ axes[0],
+ indexers[0],
+ axis=0,
copy=False,
- only_slice=True, # only relevant for i==0
+ only_slice=True,
allow_dups=True,
- use_na_proxy=True, # only relevant for i==0
+ use_na_proxy=True,
)
- new_mgrs_indexers.append((mgr, {}))
+ new_indexers = indexers.copy()
+ del new_indexers[0]
+ new_mgrs_indexers.append((new_mgr, new_indexers))
+ else:
+ new_mgrs_indexers.append((mgr, indexers))
return new_mgrs_indexers
-def _get_mgr_concatenation_plan(mgr: BlockManager):
+def _get_mgr_concatenation_plan(mgr: BlockManager, indexers: dict[int, np.ndarray]):
"""
- Construct concatenation plan for given block manager.
+ Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
+ indexers : dict of {axis: indexer}
Returns
-------
@@ -332,11 +300,15 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
# Calculate post-reindex shape , save for item axis which will be separate
# for each block anyway.
mgr_shape_list = list(mgr.shape)
+ for ax, indexer in indexers.items():
+ mgr_shape_list[ax] = len(indexer)
mgr_shape = tuple(mgr_shape_list)
+ assert 0 not in indexers
+
if mgr.is_single_block:
blk = mgr.blocks[0]
- return [(blk.mgr_locs, JoinUnit(blk, mgr_shape))]
+ return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
blknos = mgr.blknos
blklocs = mgr.blklocs
@@ -347,6 +319,8 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
assert placements.is_slice_like
assert blkno != -1
+ join_unit_indexers = indexers.copy()
+
shape_list = list(mgr_shape)
shape_list[0] = len(placements)
shape = tuple(shape_list)
@@ -371,16 +345,13 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
)
)
- if not unit_no_ax0_reindexing:
- # create block from subset of columns
- # Note: Blocks with only 1 column will always have unit_no_ax0_reindexing,
- # so we will never get here with ExtensionBlock.
- blk = blk.getitem_block(ax0_blk_indexer)
+ # Omit indexer if no item reindexing is required.
+ if unit_no_ax0_reindexing:
+ join_unit_indexers.pop(0, None)
+ else:
+ join_unit_indexers[0] = ax0_blk_indexer
- # Assertions disabled for performance
- # assert blk._mgr_locs.as_slice == placements.as_slice
- # assert blk.shape[0] == shape[0]
- unit = JoinUnit(blk, shape)
+ unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
@@ -388,82 +359,192 @@ def _get_mgr_concatenation_plan(mgr: BlockManager):
class JoinUnit:
- def __init__(self, block: Block, shape: Shape) -> None:
+ def __init__(self, block: Block, shape: Shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
+ # Note: block is None implies indexers is None, but not vice-versa
+ if indexers is None:
+ indexers = {}
self.block = block
+ self.indexers = indexers
self.shape = shape
def __repr__(self) -> str:
- return f"{type(self).__name__}({repr(self.block)})"
+ return f"{type(self).__name__}({repr(self.block)}, {self.indexers})"
+
+ @cache_readonly
+ def needs_filling(self) -> bool:
+ for indexer in self.indexers.values():
+ # FIXME: cache results of indexer == -1 checks.
+ if (indexer == -1).any():
+ return True
+
+ return False
+
+ @cache_readonly
+ def dtype(self):
+ blk = self.block
+ if blk.values.dtype.kind == "V":
+ raise AssertionError("Block is None, no dtype")
+
+ if not self.needs_filling:
+ return blk.dtype
+ return ensure_dtype_can_hold_na(blk.dtype)
+
+ def _is_valid_na_for(self, dtype: DtypeObj) -> bool:
+ """
+ Check that we are all-NA of a type/dtype that is compatible with this dtype.
+ Augments `self.is_na` with an additional check of the type of NA values.
+ """
+ if not self.is_na:
+ return False
+ if self.block.dtype.kind == "V":
+ return True
+
+ if self.dtype == object:
+ values = self.block.values
+ return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))
+
+ na_value = self.block.fill_value
+ if na_value is NaT and not is_dtype_equal(self.dtype, dtype):
+ # e.g. we are dt64 and other is td64
+ # fill_values match but we should not cast self.block.values to dtype
+ # TODO: this will need updating if we ever have non-nano dt64/td64
+ return False
+
+ if na_value is NA and needs_i8_conversion(dtype):
+ # FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat
+ # e.g. self.dtype == "Int64" and dtype is td64, we dont want
+ # to consider these as matching
+ return False
+
+ # TODO: better to use can_hold_element?
+ return is_valid_na_for_dtype(na_value, dtype)
@cache_readonly
def is_na(self) -> bool:
blk = self.block
if blk.dtype.kind == "V":
return True
- return False
-
- def get_reindexed_values(self, empty_dtype: DtypeObj) -> ArrayLike:
- values: ArrayLike
- if self.is_na:
- return make_na_array(empty_dtype, self.shape)
+ if not blk._can_hold_na:
+ return False
+ values = blk.values
+ if values.size == 0:
+ return True
+ if isinstance(values.dtype, SparseDtype):
+ return False
+
+ if values.ndim == 1:
+ # TODO(EA2D): no need for special case with 2D EAs
+ val = values[0]
+ if not is_scalar(val) or not isna(val):
+ # ideally isna_all would do this short-circuiting
+ return False
+ return isna_all(values)
else:
+ val = values[0][0]
+ if not is_scalar(val) or not isna(val):
+ # ideally isna_all would do this short-circuiting
+ return False
+ return all(isna_all(row) for row in values)
+
+ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
+ values: ArrayLike
- if not self.block._can_consolidate:
+ if upcasted_na is None and self.block.dtype.kind != "V":
+ # No upcasting is necessary
+ fill_value = self.block.fill_value
+ values = self.block.get_values()
+ else:
+ fill_value = upcasted_na
+
+ if self._is_valid_na_for(empty_dtype):
+ # note: always holds when self.block.dtype.kind == "V"
+ blk_dtype = self.block.dtype
+
+ if blk_dtype == np.dtype("object"):
+ # we want to avoid filling with np.nan if we are
+ # using None; we already know that we are all
+ # nulls
+ values = self.block.values.ravel(order="K")
+ if len(values) and values[0] is None:
+ fill_value = None
+
+ if is_datetime64tz_dtype(empty_dtype):
+ i8values = np.full(self.shape, fill_value.value)
+ return DatetimeArray(i8values, dtype=empty_dtype)
+
+ elif is_1d_only_ea_dtype(empty_dtype):
+ empty_dtype = cast(ExtensionDtype, empty_dtype)
+ cls = empty_dtype.construct_array_type()
+
+ missing_arr = cls._from_sequence([], dtype=empty_dtype)
+ ncols, nrows = self.shape
+ assert ncols == 1, ncols
+ empty_arr = -1 * np.ones((nrows,), dtype=np.intp)
+ return missing_arr.take(
+ empty_arr, allow_fill=True, fill_value=fill_value
+ )
+ elif isinstance(empty_dtype, ExtensionDtype):
+ # TODO: no tests get here, a handful would if we disabled
+ # the dt64tz special-case above (which is faster)
+ cls = empty_dtype.construct_array_type()
+ missing_arr = cls._empty(shape=self.shape, dtype=empty_dtype)
+ missing_arr[:] = fill_value
+ return missing_arr
+ else:
+ # NB: we should never get here with empty_dtype integer or bool;
+ # if we did, the missing_arr.fill would cast to gibberish
+ missing_arr = np.empty(self.shape, dtype=empty_dtype)
+ missing_arr.fill(fill_value)
+ return missing_arr
+
+ if (not self.indexers) and (not self.block._can_consolidate):
# preserve these for validation in concat_compat
return self.block.values
- # No dtype upcasting is done here, it will be performed during
- # concatenation itself.
- values = self.block.values
+ if self.block.is_bool:
+ # External code requested filling/upcasting, bool values must
+ # be upcasted to object to avoid being upcasted to numeric.
+ values = self.block.astype(np.dtype("object")).values
+ else:
+ # No dtype upcasting is done here, it will be performed during
+ # concatenation itself.
+ values = self.block.values
- return values
+ if not self.indexers:
+ # If there's no indexing to be done, we want to signal outside
+ # code that this array must be copied explicitly. This is done
+ # by returning a view and checking `retval.base`.
+ values = values.view()
+ else:
+ for ax, indexer in self.indexers.items():
+ values = algos.take_nd(values, indexer, axis=ax)
-def make_na_array(dtype: DtypeObj, shape: Shape) -> ArrayLike:
- """
- Construct an np.ndarray or ExtensionArray of the given dtype and shape
- holding all-NA values.
- """
- if is_datetime64tz_dtype(dtype):
- # NaT here is analogous to dtype.na_value below
- i8values = np.full(shape, NaT.value)
- return DatetimeArray(i8values, dtype=dtype)
-
- elif is_1d_only_ea_dtype(dtype):
- dtype = cast(ExtensionDtype, dtype)
- cls = dtype.construct_array_type()
-
- missing_arr = cls._from_sequence([], dtype=dtype)
- nrows = shape[-1]
- taker = -1 * np.ones((nrows,), dtype=np.intp)
- return missing_arr.take(taker, allow_fill=True, fill_value=dtype.na_value)
- elif isinstance(dtype, ExtensionDtype):
- # TODO: no tests get here, a handful would if we disabled
- # the dt64tz special-case above (which is faster)
- cls = dtype.construct_array_type()
- missing_arr = cls._empty(shape=shape, dtype=dtype)
- missing_arr[:] = dtype.na_value
- return missing_arr
- else:
- # NB: we should never get here with dtype integer or bool;
- # if we did, the missing_arr.fill would cast to gibberish
- missing_arr = np.empty(shape, dtype=dtype)
- fill_value = _dtype_to_na_value(dtype)
- missing_arr.fill(fill_value)
- return missing_arr
+ return values
-def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike:
+def _concatenate_join_units(
+ join_units: list[JoinUnit], concat_axis: int, copy: bool
+) -> ArrayLike:
"""
- Concatenate values from several join units along axis=1.
+ Concatenate values from several join units along selected axis.
"""
+ if concat_axis == 0 and len(join_units) > 1:
+ # Concatenating join units along ax0 is handled in _merge_blocks.
+ raise AssertionError("Concatenating join units along axis0")
empty_dtype = _get_empty_dtype(join_units)
- to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype) for ju in join_units]
+ has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)
+ upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)
+
+ to_concat = [
+ ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)
+ for ju in join_units
+ ]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
@@ -495,12 +576,12 @@ def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike
concat_values = ensure_block_shape(concat_values, 2)
else:
- concat_values = concat_compat(to_concat, axis=1)
+ concat_values = concat_compat(to_concat, axis=concat_axis)
return concat_values
-def _dtype_to_na_value(dtype: DtypeObj):
+def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):
"""
Find the NA value to go with this dtype.
"""
@@ -514,6 +595,9 @@ def _dtype_to_na_value(dtype: DtypeObj):
# different from missing.na_value_for_dtype
return None
elif dtype.kind in ["i", "u"]:
+ if not has_none_blocks:
+ # different from missing.na_value_for_dtype
+ return None
return np.nan
elif dtype.kind == "O":
return np.nan
@@ -538,12 +622,14 @@ def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj:
empty_dtype = join_units[0].block.dtype
return empty_dtype
- needs_can_hold_na = any(unit.is_na for unit in join_units)
+ has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)
- dtypes = [unit.block.dtype for unit in join_units if not unit.is_na]
+ dtypes = [unit.dtype for unit in join_units if not unit.is_na]
+ if not len(dtypes):
+ dtypes = [unit.dtype for unit in join_units if unit.block.dtype.kind != "V"]
dtype = find_common_type(dtypes)
- if needs_can_hold_na:
+ if has_none_blocks:
dtype = ensure_dtype_can_hold_na(dtype)
return dtype
@@ -575,6 +661,9 @@ def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:
# unless we're an extension dtype.
all(not ju.is_na or ju.block.is_extension for ju in join_units)
and
+ # no blocks with indexers (as then the dimensions do not fit)
+ all(not ju.indexers for ju in join_units)
+ and
# only use this path when there is something to concatenate
len(join_units) > 1
)
@@ -594,17 +683,28 @@ def _trim_join_unit(join_unit: JoinUnit, length: int) -> JoinUnit:
Extra items that didn't fit are returned as a separate block.
"""
+ if 0 not in join_unit.indexers:
+ extra_indexers = join_unit.indexers
+
+ if join_unit.block is None:
+ extra_block = None
+ else:
+ extra_block = join_unit.block.getitem_block(slice(length, None))
+ join_unit.block = join_unit.block.getitem_block(slice(length))
+ else:
+ extra_block = join_unit.block
- extra_block = join_unit.block.getitem_block(slice(length, None))
- join_unit.block = join_unit.block.getitem_block(slice(length))
+ extra_indexers = copy.copy(join_unit.indexers)
+ extra_indexers[0] = extra_indexers[0][length:]
+ join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
- return JoinUnit(block=extra_block, shape=extra_shape)
+ return JoinUnit(block=extra_block, indexers=extra_indexers, shape=extra_shape)
-def _combine_concat_plans(plans):
+def _combine_concat_plans(plans, concat_axis: int):
"""
Combine multiple concatenation plans into one.
@@ -614,6 +714,18 @@ def _combine_concat_plans(plans):
for p in plans[0]:
yield p[0], [p[1]]
+ elif concat_axis == 0:
+ offset = 0
+ for plan in plans:
+ last_plc = None
+
+ for plc, unit in plan:
+ yield plc.add(offset), [unit]
+ last_plc = plc
+
+ if last_plc is not None:
+ offset += last_plc.as_slice.stop
+
else:
# singleton list so we can modify it as a side-effect within _next_or_none
num_ended = [0]
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 9e016e0101ef6..04fa3c11a6c40 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -357,6 +357,20 @@ def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
self.assert_frame_equal(result, expected)
+ def test_setitem_with_expansion_row(self, data, na_value):
+ df = pd.DataFrame({"data": data[:1]})
+
+ df.loc[1, "data"] = data[1]
+ expected = pd.DataFrame({"data": data[:2]})
+ self.assert_frame_equal(df, expected)
+
+ # https://github.com/pandas-dev/pandas/issues/47284
+ df.loc[2, "data"] = na_value
+ expected = pd.DataFrame(
+ {"data": pd.Series([data[0], data[1], na_value], dtype=data.dtype)}
+ )
+ self.assert_frame_equal(df, expected)
+
def test_setitem_series(self, data, full_indexer):
# https://github.com/pandas-dev/pandas/issues/32395
ser = pd.Series(data, name="data")
diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py
index d1c9c379759b5..f07ffee20a55f 100644
--- a/pandas/tests/frame/methods/test_append.py
+++ b/pandas/tests/frame/methods/test_append.py
@@ -159,7 +159,7 @@ def test_append_empty_dataframe(self):
expected = df1.copy()
tm.assert_frame_equal(result, expected)
- def test_append_dtypes(self):
+ def test_append_dtypes(self, using_array_manager):
# GH 5754
# row appends of different dtypes (so need to do by-item)
@@ -183,7 +183,10 @@ def test_append_dtypes(self):
expected = DataFrame(
{"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")}
)
- expected = expected.astype(object)
+ if using_array_manager:
+ # TODO(ArrayManager) decide on exact casting rules in concat
+ # With ArrayManager, all-NaN float is not ignored
+ expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
@@ -192,7 +195,9 @@ def test_append_dtypes(self):
expected = DataFrame(
{"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")}
)
- expected = expected.astype(object)
+ if using_array_manager:
+ # With ArrayManager, all-NaN float is not ignored
+ expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": np.nan}, index=range(1))
@@ -201,7 +206,9 @@ def test_append_dtypes(self):
expected = DataFrame(
{"bar": Series([np.nan, Timestamp("20130101")], dtype="M8[ns]")}
)
- expected = expected.astype(object)
+ if using_array_manager:
+ # With ArrayManager, all-NaN float is not ignored
+ expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py
index 17c797fc36159..4ba231523af14 100644
--- a/pandas/tests/reshape/concat/test_concat.py
+++ b/pandas/tests/reshape/concat/test_concat.py
@@ -15,6 +15,7 @@
InvalidIndexError,
PerformanceWarning,
)
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
@@ -755,3 +756,50 @@ def test_concat_retain_attrs(data):
df2.attrs = {1: 1}
df = concat([df1, df2])
assert df.attrs[1] == 1
+
+
+@td.skip_array_manager_invalid_test
+@pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"])
+@pytest.mark.parametrize("empty_dtype", [None, "float64", "object"])
+def test_concat_ignore_emtpy_object_float(empty_dtype, df_dtype):
+ # https://github.com/pandas-dev/pandas/issues/45637
+ df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype)
+ empty = DataFrame(columns=["foo", "bar"], dtype=empty_dtype)
+ result = concat([empty, df])
+ expected = df
+ if df_dtype == "int64":
+ # TODO what exact behaviour do we want for integer eventually?
+ if empty_dtype == "float64":
+ expected = df.astype("float64")
+ else:
+ expected = df.astype("object")
+ tm.assert_frame_equal(result, expected)
+
+
+@td.skip_array_manager_invalid_test
+@pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"])
+@pytest.mark.parametrize("empty_dtype", [None, "float64", "object"])
+def test_concat_ignore_all_na_object_float(empty_dtype, df_dtype):
+ df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype)
+ empty = DataFrame({"foo": [np.nan], "bar": [np.nan]}, dtype=empty_dtype)
+ result = concat([empty, df], ignore_index=True)
+
+ if df_dtype == "int64":
+ # TODO what exact behaviour do we want for integer eventually?
+ if empty_dtype == "object":
+ df_dtype = "object"
+ else:
+ df_dtype = "float64"
+ expected = DataFrame({"foo": [None, 1, 2], "bar": [None, 1, 2]}, dtype=df_dtype)
+ tm.assert_frame_equal(result, expected)
+
+
+@td.skip_array_manager_invalid_test
+def test_concat_ignore_empty_from_reindex():
+ # https://github.com/pandas-dev/pandas/pull/43507#issuecomment-920375856
+ df1 = DataFrame({"a": [1], "b": [pd.Timestamp("2012-01-01")]})
+ df2 = DataFrame({"a": [2]})
+
+ result = concat([df1, df2.reindex(columns=df1.columns)], ignore_index=True)
+ expected = df1 = DataFrame({"a": [1, 2], "b": [pd.Timestamp("2012-01-01"), pd.NaT]})
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index ccdfc3cd23790..116fb298df61d 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -682,7 +682,7 @@ def _constructor(self):
assert isinstance(result, NotADataFrame)
- def test_join_append_timedeltas(self):
+ def test_join_append_timedeltas(self, using_array_manager):
# timedelta64 issues with join/merge
# GH 5695
@@ -696,9 +696,11 @@ def test_join_append_timedeltas(self):
{
"d": [datetime(2013, 11, 5, 5, 56), datetime(2013, 11, 5, 5, 56)],
"t": [timedelta(0, 22500), timedelta(0, 22500)],
- },
- dtype=object,
+ }
)
+ if using_array_manager:
+ # TODO(ArrayManager) decide on exact casting rules in concat
+ expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
def test_join_append_timedeltas2(self):
| Closes https://github.com/pandas-dev/pandas/issues/45637
Closes https://github.com/pandas-dev/pandas/issues/47284
Initially I tried it with a more selective revert of only the changes in #43577 and #43507, but then I ran into some other failures in existing tests. So in the end tried it with reverting all subsequent clean-up PRs as well. I assume quite some of those changes could be re-applied after this, but for now just ensuring the tests are passing. | https://api.github.com/repos/pandas-dev/pandas/pulls/47372 | 2022-06-15T18:56:28Z | 2022-06-22T20:41:30Z | 2022-06-22T20:41:29Z | 2022-12-29T19:14:46Z |
PERF: Improve Styler `to_excel` Performance | diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
index a2d989e787e0f..a88c4374b7030 100644
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -47,6 +47,25 @@ def time_write_excel(self, engine):
writer.save()
+class WriteExcelStyled:
+ params = ["openpyxl", "xlsxwriter"]
+ param_names = ["engine"]
+
+ def setup(self, engine):
+ self.df = _generate_dataframe()
+
+ def time_write_excel_style(self, engine):
+ bio = BytesIO()
+ bio.seek(0)
+ writer = ExcelWriter(bio, engine=engine)
+ df_style = self.df.style
+ df_style.applymap(lambda x: "border: red 1px solid;")
+ df_style.applymap(lambda x: "color: blue")
+ df_style.applymap(lambda x: "border-color: green black", subset=["float1"])
+ df_style.to_excel(writer, sheet_name="Sheet1")
+ writer.save()
+
+
class ReadExcel:
params = ["xlrd", "openpyxl", "odf"]
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 6cc0190f00e31..a4fa31f7fc368 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -752,6 +752,7 @@ Performance improvements
- Performance improvement in :func:`factorize` (:issue:`46109`)
- Performance improvement in :class:`DataFrame` and :class:`Series` constructors for extension dtype scalars (:issue:`45854`)
- Performance improvement in :func:`read_excel` when ``nrows`` argument provided (:issue:`32727`)
+- Performance improvement in :meth:`.Styler.to_excel` when applying repeated CSS formats (:issue:`47371`)
- Performance improvement in :meth:`MultiIndex.is_monotonic_increasing` (:issue:`47458`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py
index a6d2645590dde..92dafffc9c3de 100644
--- a/pandas/io/formats/css.py
+++ b/pandas/io/formats/css.py
@@ -7,6 +7,7 @@
from typing import (
Callable,
Generator,
+ Iterable,
Iterator,
)
import warnings
@@ -188,9 +189,24 @@ class CSSResolver:
SIDES = ("top", "right", "bottom", "left")
+ CSS_EXPANSIONS = {
+ **{
+ "-".join(["border", prop] if prop else ["border"]): _border_expander(prop)
+ for prop in ["", "top", "right", "bottom", "left"]
+ },
+ **{
+ "-".join(["border", prop]): _side_expander("border-{:s}-" + prop)
+ for prop in ["color", "style", "width"]
+ },
+ **{
+ "margin": _side_expander("margin-{:s}"),
+ "padding": _side_expander("padding-{:s}"),
+ },
+ }
+
def __call__(
self,
- declarations_str: str,
+ declarations: str | Iterable[tuple[str, str]],
inherited: dict[str, str] | None = None,
) -> dict[str, str]:
"""
@@ -198,8 +214,10 @@ def __call__(
Parameters
----------
- declarations_str : str
- A list of CSS declarations
+ declarations_str : str | Iterable[tuple[str, str]]
+ A CSS string or set of CSS declaration tuples
+ e.g. "font-weight: bold; background: blue" or
+ {("font-weight", "bold"), ("background", "blue")}
inherited : dict, optional
Atomic properties indicating the inherited style context in which
declarations_str is to be resolved. ``inherited`` should already
@@ -230,7 +248,9 @@ def __call__(
('font-size', '24pt'),
('font-weight', 'bold')]
"""
- props = dict(self.atomize(self.parse(declarations_str)))
+ if isinstance(declarations, str):
+ declarations = self.parse(declarations)
+ props = dict(self.atomize(declarations))
if inherited is None:
inherited = {}
@@ -347,28 +367,15 @@ def _error():
size_fmt = f"{val:f}pt"
return size_fmt
- def atomize(self, declarations) -> Generator[tuple[str, str], None, None]:
+ def atomize(self, declarations: Iterable) -> Generator[tuple[str, str], None, None]:
for prop, value in declarations:
- attr = "expand_" + prop.replace("-", "_")
- try:
- expand = getattr(self, attr)
- except AttributeError:
- yield prop, value
+ prop = prop.lower()
+ value = value.lower()
+ if prop in self.CSS_EXPANSIONS:
+ expand = self.CSS_EXPANSIONS[prop]
+ yield from expand(self, prop, value)
else:
- for prop, value in expand(prop, value):
- yield prop, value
-
- expand_border = _border_expander()
- expand_border_top = _border_expander("top")
- expand_border_right = _border_expander("right")
- expand_border_bottom = _border_expander("bottom")
- expand_border_left = _border_expander("left")
-
- expand_border_color = _side_expander("border-{:s}-color")
- expand_border_style = _side_expander("border-{:s}-style")
- expand_border_width = _side_expander("border-{:s}-width")
- expand_margin = _side_expander("margin-{:s}")
- expand_padding = _side_expander("padding-{:s}")
+ yield prop, value
def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]:
"""
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 8478b72d97a5e..811b079c3c693 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -3,7 +3,10 @@
"""
from __future__ import annotations
-from functools import reduce
+from functools import (
+ lru_cache,
+ reduce,
+)
import itertools
import re
from typing import (
@@ -85,10 +88,13 @@ def __init__(
**kwargs,
) -> None:
if css_styles and css_converter:
- css = ";".join(
- [a + ":" + str(v) for (a, v) in css_styles[css_row, css_col]]
- )
- style = css_converter(css)
+ # Use dict to get only one (case-insensitive) declaration per property
+ declaration_dict = {
+ prop.lower(): val for prop, val in css_styles[css_row, css_col]
+ }
+ # Convert to frozenset for order-invariant caching
+ unique_declarations = frozenset(declaration_dict.items())
+ style = css_converter(unique_declarations)
return super().__init__(row=row, col=col, val=val, style=style, **kwargs)
@@ -166,15 +172,19 @@ def __init__(self, inherited: str | None = None) -> None:
compute_css = CSSResolver()
- def __call__(self, declarations_str: str) -> dict[str, dict[str, str]]:
+ @lru_cache(maxsize=None)
+ def __call__(
+ self, declarations: str | frozenset[tuple[str, str]]
+ ) -> dict[str, dict[str, str]]:
"""
Convert CSS declarations to ExcelWriter style.
Parameters
----------
- declarations_str : str
- List of CSS declarations.
- e.g. "font-weight: bold; background: blue"
+ declarations : str | frozenset[tuple[str, str]]
+ CSS string or set of CSS declaration tuples.
+ e.g. "font-weight: bold; background: blue" or
+ {("font-weight", "bold"), ("background", "blue")}
Returns
-------
@@ -182,8 +192,7 @@ def __call__(self, declarations_str: str) -> dict[str, dict[str, str]]:
A style as interpreted by ExcelWriter when found in
ExcelCell.style.
"""
- # TODO: memoize?
- properties = self.compute_css(declarations_str, self.inherited)
+ properties = self.compute_css(declarations, self.inherited)
return self.build_xlstyle(properties)
def build_xlstyle(self, props: Mapping[str, str]) -> dict[str, dict[str, str]]:
diff --git a/pandas/tests/io/formats/test_to_excel.py b/pandas/tests/io/formats/test_to_excel.py
index b95a5b4365f43..b98fd74643207 100644
--- a/pandas/tests/io/formats/test_to_excel.py
+++ b/pandas/tests/io/formats/test_to_excel.py
@@ -11,7 +11,10 @@
import pandas._testing as tm
from pandas.io.formats.css import CSSWarning
-from pandas.io.formats.excel import CSSToExcelConverter
+from pandas.io.formats.excel import (
+ CssExcelCell,
+ CSSToExcelConverter,
+)
@pytest.mark.parametrize(
@@ -340,3 +343,89 @@ def test_css_named_colors_from_mpl_present():
pd_colors = CSSToExcelConverter.NAMED_COLORS
for name, color in mpl_colors.items():
assert name in pd_colors and pd_colors[name] == color[1:]
+
+
+@pytest.mark.parametrize(
+ "styles,expected",
+ [
+ ([("color", "green"), ("color", "red")], "color: red;"),
+ ([("font-weight", "bold"), ("font-weight", "normal")], "font-weight: normal;"),
+ ([("text-align", "center"), ("TEXT-ALIGN", "right")], "text-align: right;"),
+ ],
+)
+def test_css_excel_cell_precedence(styles, expected):
+ """It applies favors latter declarations over former declarations"""
+ # See GH 47371
+ converter = CSSToExcelConverter()
+ converter.__call__.cache_clear()
+ css_styles = {(0, 0): styles}
+ cell = CssExcelCell(
+ row=0,
+ col=0,
+ val="",
+ style=None,
+ css_styles=css_styles,
+ css_row=0,
+ css_col=0,
+ css_converter=converter,
+ )
+ converter.__call__.cache_clear()
+
+ assert cell.style == converter(expected)
+
+
+@pytest.mark.parametrize(
+ "styles,cache_hits,cache_misses",
+ [
+ ([[("color", "green"), ("color", "red"), ("color", "green")]], 0, 1),
+ (
+ [
+ [("font-weight", "bold")],
+ [("font-weight", "normal"), ("font-weight", "bold")],
+ ],
+ 1,
+ 1,
+ ),
+ ([[("text-align", "center")], [("TEXT-ALIGN", "center")]], 1, 1),
+ (
+ [
+ [("font-weight", "bold"), ("text-align", "center")],
+ [("font-weight", "bold"), ("text-align", "left")],
+ ],
+ 0,
+ 2,
+ ),
+ (
+ [
+ [("font-weight", "bold"), ("text-align", "center")],
+ [("font-weight", "bold"), ("text-align", "left")],
+ [("font-weight", "bold"), ("text-align", "center")],
+ ],
+ 1,
+ 2,
+ ),
+ ],
+)
+def test_css_excel_cell_cache(styles, cache_hits, cache_misses):
+ """It caches unique cell styles"""
+ # See GH 47371
+ converter = CSSToExcelConverter()
+ converter.__call__.cache_clear()
+
+ css_styles = {(0, i): _style for i, _style in enumerate(styles)}
+ for css_row, css_col in css_styles:
+ CssExcelCell(
+ row=0,
+ col=0,
+ val="",
+ style=None,
+ css_styles=css_styles,
+ css_row=css_row,
+ css_col=css_col,
+ css_converter=converter,
+ )
+ cache_info = converter.__call__.cache_info()
+ converter.__call__.cache_clear()
+
+ assert cache_info.hits == cache_hits
+ assert cache_info.misses == cache_misses
| - [x] closes #47352
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47371 | 2022-06-15T18:36:43Z | 2022-06-29T17:09:20Z | 2022-06-29T17:09:19Z | 2022-06-29T17:09:29Z |
Backport PR #47349 on branch 1.4.x (REGR: MultiIndex.dtypes has regular Index instead of MultiIndex index) | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index bfc9422711690..1ee1677b5ffe4 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :meth:`DataFrame.replace` when the replacement value was explicitly ``None`` when passed in a dictionary to ``to_replace`` also casting other columns to object dtype even when there were no values to replace (:issue:`46634`)
+- Fixed regression in representation of ``dtypes`` attribute of :class:`MultiIndex` (:issue:`46900`)
- Fixed regression when setting values with :meth:`DataFrame.loc` updating :class:`RangeIndex` when index was set as new column and column was updated afterwards (:issue:`47128`)
- Fixed regression in :meth:`DataFrame.nsmallest` led to wrong results when ``np.nan`` in the sorting column (:issue:`46589`)
- Fixed regression in :func:`read_fwf` raising ``ValueError`` when ``widths`` was specified with ``usecols`` (:issue:`46580`)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 1168325378e92..68db372ff4e51 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -741,7 +741,7 @@ def dtypes(self) -> Series:
from pandas import Series
names = com.fill_missing_names([level.name for level in self.levels])
- return Series([level.dtype for level in self.levels], index=names)
+ return Series([level.dtype for level in self.levels], index=Index(names))
def __len__(self) -> int:
return len(self.codes[0])
diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py
index 63b0bd235e57c..7fad59fc6654c 100644
--- a/pandas/tests/indexes/multi/test_constructors.py
+++ b/pandas/tests/indexes/multi/test_constructors.py
@@ -827,3 +827,13 @@ def test_multiindex_inference_consistency():
mi = MultiIndex.from_tuples([(x,) for x in arr])
lev = mi.levels[0]
assert lev.dtype == object
+
+
+def test_dtype_representation():
+ # GH#46900
+ pmidx = MultiIndex.from_arrays([[1], ["a"]], names=[("a", "b"), ("c", "d")])
+ result = pmidx.dtypes
+ expected = Series(
+ ["int64", "object"], index=MultiIndex.from_tuples([("a", "b"), ("c", "d")])
+ )
+ tm.assert_series_equal(result, expected)
| Backport PR #47349: REGR: MultiIndex.dtypes has regular Index instead of MultiIndex index | https://api.github.com/repos/pandas-dev/pandas/pulls/47369 | 2022-06-15T13:20:51Z | 2022-06-15T15:23:19Z | 2022-06-15T15:23:19Z | 2022-06-15T15:23:19Z |
Revert inclusive default change of IntervalDtype | diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst
index fed0d2c5f7827..0d8444841fcae 100644
--- a/doc/source/reference/arrays.rst
+++ b/doc/source/reference/arrays.rst
@@ -304,6 +304,7 @@ Properties
:toctree: api/
Interval.inclusive
+ Interval.closed
Interval.closed_left
Interval.closed_right
Interval.is_empty
diff --git a/pandas/_libs/interval.pyi b/pandas/_libs/interval.pyi
index d177e597478d9..1d504498fc055 100644
--- a/pandas/_libs/interval.pyi
+++ b/pandas/_libs/interval.pyi
@@ -66,6 +66,8 @@ class Interval(IntervalMixin, Generic[_OrderableT]):
def right(self: Interval[_OrderableT]) -> _OrderableT: ...
@property
def inclusive(self) -> IntervalClosedType: ...
+ @property
+ def closed(self) -> IntervalClosedType: ...
mid: _MidDescriptor
length: _LengthDescriptor
def __init__(
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 178836ff1548b..79b3c0d056735 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -9,6 +9,8 @@ from cpython.datetime cimport (
import_datetime,
)
+from pandas.util._exceptions import find_stack_level
+
import_datetime()
cimport cython
@@ -229,7 +231,7 @@ def _warning_interval(inclusive: str | None = None, closed: None | lib.NoDefault
stacklevel=2,
)
if closed is None:
- inclusive = "both"
+ inclusive = "right"
elif closed in ("both", "neither", "left", "right"):
inclusive = closed
else:
@@ -364,7 +366,7 @@ cdef class Interval(IntervalMixin):
inclusive, closed = _warning_interval(inclusive, closed)
if inclusive is None:
- inclusive = "both"
+ inclusive = "right"
if inclusive not in VALID_CLOSED:
raise ValueError(f"invalid option for 'inclusive': {inclusive}")
@@ -379,6 +381,21 @@ cdef class Interval(IntervalMixin):
self.right = right
self.inclusive = inclusive
+ @property
+ def closed(self):
+ """
+ Whether the interval is closed on the left-side, right-side, both or
+ neither.
+
+ .. deprecated:: 1.5.0
+ """
+ warnings.warn(
+ "Attribute `closed` is deprecated in favor of `inclusive`.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self.inclusive
+
def _validate_endpoint(self, endpoint):
# GH 23013
if not (is_integer_object(endpoint) or is_float_object(endpoint) or
diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in
index 9842332bae7ef..1a6106173e58e 100644
--- a/pandas/_libs/intervaltree.pxi.in
+++ b/pandas/_libs/intervaltree.pxi.in
@@ -69,7 +69,7 @@ cdef class IntervalTree(IntervalMixin):
inclusive, closed = _warning_interval(inclusive, closed)
if inclusive is None:
- inclusive = "both"
+ inclusive = "right"
if inclusive not in ['left', 'right', 'both', 'neither']:
raise ValueError("invalid option for 'inclusive': %s" % inclusive)
diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py
index e0f242e2ced5d..e4bb7dc94cb8d 100644
--- a/pandas/core/arrays/arrow/_arrow_utils.py
+++ b/pandas/core/arrays/arrow/_arrow_utils.py
@@ -6,9 +6,8 @@
import numpy as np
import pyarrow
-from pandas._libs import lib
-from pandas._libs.interval import _warning_interval
from pandas.errors import PerformanceWarning
+from pandas.util._decorators import deprecate_kwarg
from pandas.util._exceptions import find_stack_level
from pandas.core.arrays.interval import VALID_CLOSED
@@ -105,15 +104,10 @@ def to_pandas_dtype(self):
class ArrowIntervalType(pyarrow.ExtensionType):
- def __init__(
- self,
- subtype,
- inclusive: str | None = None,
- closed: None | lib.NoDefault = lib.no_default,
- ) -> None:
+ @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
+ def __init__(self, subtype, inclusive: str) -> None:
# attributes need to be set first before calling
# super init (as that calls serialize)
- inclusive, closed = _warning_interval(inclusive, closed)
assert inclusive in VALID_CLOSED
self._closed = inclusive
if not isinstance(subtype, pyarrow.DataType):
@@ -131,6 +125,15 @@ def subtype(self):
def inclusive(self):
return self._closed
+ @property
+ def closed(self):
+ warnings.warn(
+ "Attribute `closed` is deprecated in favor of `inclusive`.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self._closed
+
def __arrow_ext_serialize__(self):
metadata = {"subtype": str(self.subtype), "inclusive": self.inclusive}
return json.dumps(metadata).encode()
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index e58032c48f8d3..56aae3039f7d6 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -15,6 +15,7 @@
cast,
overload,
)
+import warnings
import numpy as np
@@ -25,7 +26,6 @@
VALID_CLOSED,
Interval,
IntervalMixin,
- _warning_interval,
intervals_to_interval_bounds,
)
from pandas._libs.missing import NA
@@ -43,8 +43,10 @@
from pandas.errors import IntCastingNaNError
from pandas.util._decorators import (
Appender,
+ deprecate_kwarg,
deprecate_nonkeyword_arguments,
)
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import LossySetitemError
from pandas.core.dtypes.common import (
@@ -220,16 +222,15 @@ def ndim(self) -> Literal[1]:
# ---------------------------------------------------------------------
# Constructors
+ @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
def __new__(
cls: type[IntervalArrayT],
data,
inclusive: str | None = None,
- closed: None | lib.NoDefault = lib.no_default,
dtype: Dtype | None = None,
copy: bool = False,
verify_integrity: bool = True,
):
- inclusive, closed = _warning_interval(inclusive, closed)
data = extract_array(data, extract_numpy=True)
@@ -267,24 +268,22 @@ def __new__(
)
@classmethod
+ @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
def _simple_new(
cls: type[IntervalArrayT],
left,
right,
inclusive=None,
- closed: None | lib.NoDefault = lib.no_default,
copy: bool = False,
dtype: Dtype | None = None,
verify_integrity: bool = True,
) -> IntervalArrayT:
result = IntervalMixin.__new__(cls)
- inclusive, closed = _warning_interval(inclusive, closed)
-
if inclusive is None and isinstance(dtype, IntervalDtype):
inclusive = dtype.inclusive
- inclusive = inclusive or "both"
+ inclusive = inclusive or "right"
left = ensure_index(left, copy=copy)
right = ensure_index(right, copy=copy)
@@ -424,13 +423,17 @@ def _from_factorized(
),
}
)
+ @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
def from_breaks(
cls: type[IntervalArrayT],
breaks,
- inclusive="both",
+ inclusive: IntervalClosedType | None = None,
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalArrayT:
+ if inclusive is None:
+ inclusive = "right"
+
breaks = _maybe_convert_platform_interval(breaks)
return cls.from_arrays(
@@ -501,14 +504,19 @@ def from_breaks(
),
}
)
+ @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
def from_arrays(
cls: type[IntervalArrayT],
left,
right,
- inclusive="both",
+ inclusive: IntervalClosedType | None = None,
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalArrayT:
+
+ if inclusive is None:
+ inclusive = "right"
+
left = _maybe_convert_platform_interval(left)
right = _maybe_convert_platform_interval(right)
@@ -570,13 +578,17 @@ def from_arrays(
),
}
)
+ @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
def from_tuples(
cls: type[IntervalArrayT],
data,
- inclusive="both",
+ inclusive=None,
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalArrayT:
+ if inclusive is None:
+ inclusive = "right"
+
if len(data):
left, right = [], []
else:
@@ -1355,6 +1367,19 @@ def inclusive(self) -> IntervalClosedType:
"""
return self.dtype.inclusive
+ @property
+ def closed(self) -> IntervalClosedType:
+ """
+ Whether the intervals are closed on the left-side, right-side, both or
+ neither.
+ """
+ warnings.warn(
+ "Attribute `closed` is deprecated in favor of `inclusive`.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self.dtype.inclusive
+
_interval_shared_docs["set_closed"] = textwrap.dedent(
"""
Return an %(klass)s identical to the current one, but closed on the
@@ -1395,6 +1420,7 @@ def inclusive(self) -> IntervalClosedType:
),
}
)
+ @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
def set_closed(
self: IntervalArrayT, inclusive: IntervalClosedType
) -> IntervalArrayT:
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 32594854f49ae..20fecbb0095c5 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -10,6 +10,7 @@
MutableMapping,
cast,
)
+import warnings
import numpy as np
import pytz
@@ -40,6 +41,7 @@
npt,
type_t,
)
+from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.base import (
ExtensionDtype,
@@ -1176,6 +1178,15 @@ def _can_hold_na(self) -> bool:
def inclusive(self):
return self._closed
+ @property
+ def closed(self):
+ warnings.warn(
+ "Attribute `closed` is deprecated in favor of `inclusive`.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self._closed
+
@property
def subtype(self):
"""
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 11e2da47c5738..5f48be921f7c6 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -11,6 +11,7 @@
Hashable,
Literal,
)
+import warnings
import numpy as np
@@ -19,7 +20,6 @@
Interval,
IntervalMixin,
IntervalTree,
- _warning_interval,
)
from pandas._libs.tslibs import (
BaseOffset,
@@ -37,8 +37,12 @@
from pandas.util._decorators import (
Appender,
cache_readonly,
+ deprecate_kwarg,
+)
+from pandas.util._exceptions import (
+ find_stack_level,
+ rewrite_exception,
)
-from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.cast import (
find_common_type,
@@ -209,19 +213,17 @@ class IntervalIndex(ExtensionIndex):
# --------------------------------------------------------------------
# Constructors
+ @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
def __new__(
cls,
data,
inclusive=None,
- closed: None | lib.NoDefault = lib.no_default,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
verify_integrity: bool = True,
) -> IntervalIndex:
- inclusive, closed = _warning_interval(inclusive, closed)
-
name = maybe_extract_name(name, data, cls)
with rewrite_exception("IntervalArray", cls.__name__):
@@ -235,6 +237,15 @@ def __new__(
return cls._simple_new(array, name)
+ @property
+ def closed(self):
+ warnings.warn(
+ "Attribute `closed` is deprecated in favor of `inclusive`.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self.inclusive
+
@classmethod
@Appender(
_interval_shared_docs["from_breaks"]
@@ -251,19 +262,18 @@ def __new__(
),
}
)
+ @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
def from_breaks(
cls,
breaks,
inclusive=None,
- closed: None | lib.NoDefault = lib.no_default,
name: Hashable = None,
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalIndex:
- inclusive, closed = _warning_interval(inclusive, closed)
if inclusive is None:
- inclusive = "both"
+ inclusive = "right"
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(
@@ -287,20 +297,19 @@ def from_breaks(
),
}
)
+ @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
def from_arrays(
cls,
left,
right,
inclusive=None,
- closed: None | lib.NoDefault = lib.no_default,
name: Hashable = None,
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalIndex:
- inclusive, closed = _warning_interval(inclusive, closed)
if inclusive is None:
- inclusive = "both"
+ inclusive = "right"
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_arrays(
@@ -324,19 +333,18 @@ def from_arrays(
),
}
)
+ @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
def from_tuples(
cls,
data,
inclusive=None,
- closed: None | lib.NoDefault = lib.no_default,
name: Hashable = None,
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalIndex:
- inclusive, closed = _warning_interval(inclusive, closed)
if inclusive is None:
- inclusive = "both"
+ inclusive = "right"
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(
@@ -974,13 +982,13 @@ def _is_type_compatible(a, b) -> bool:
)
+@deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive")
def interval_range(
start=None,
end=None,
periods=None,
freq=None,
name: Hashable = None,
- closed: IntervalClosedType | lib.NoDefault = lib.no_default,
inclusive: IntervalClosedType | None = None,
) -> IntervalIndex:
"""
@@ -1000,6 +1008,10 @@ def interval_range(
for numeric and 'D' for datetime-like.
name : str, default None
Name of the resulting IntervalIndex.
+ inclusive : {"both", "neither", "left", "right"}, default "both"
+ Include boundaries; Whether to set each bound as closed or open.
+
+ .. versionadded:: 1.5.0
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
@@ -1007,10 +1019,6 @@ def interval_range(
.. deprecated:: 1.5.0
Argument `closed` has been deprecated to standardize boundary inputs.
Use `inclusive` instead, to set each bound as closed or open.
- inclusive : {"both", "neither", "left", "right"}, default "both"
- Include boundaries; Whether to set each bound as closed or open.
-
- .. versionadded:: 1.5.0
Returns
-------
@@ -1077,9 +1085,8 @@ def interval_range(
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
dtype='interval[int64, both]')
"""
- inclusive, closed = _warning_interval(inclusive, closed)
if inclusive is None:
- inclusive = "both"
+ inclusive = "right"
start = maybe_box_datetimelike(start)
end = maybe_box_datetimelike(end)
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index 44eafc72b1f5f..7ca86408a7f59 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -414,15 +414,15 @@ def test_interval_error_and_warning():
def test_interval_array_error_and_warning():
# GH 40245
- msg = (
- "Deprecated argument `closed` cannot "
- "be passed if argument `inclusive` is not None"
- )
- with pytest.raises(ValueError, match=msg):
- IntervalArray([Interval(0, 1), Interval(1, 5)], closed="both", inclusive="both")
-
- msg = "Argument `closed` is deprecated in favor of `inclusive`"
- with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
+ msg = "Can only specify 'closed' or 'inclusive', not both."
+ with pytest.raises(TypeError, match=msg):
+ with tm.assert_produces_warning(FutureWarning):
+ IntervalArray(
+ [Interval(0, 1), Interval(1, 5)], closed="both", inclusive="both"
+ )
+
+ msg = "the 'closed'' keyword is deprecated, use 'inclusive' instead."
+ with tm.assert_produces_warning(FutureWarning, match=msg):
IntervalArray([Interval(0, 1), Interval(1, 5)], closed="both")
@@ -433,15 +433,13 @@ def test_arrow_interval_type_error_and_warning():
from pandas.core.arrays.arrow._arrow_utils import ArrowIntervalType
- msg = (
- "Deprecated argument `closed` cannot "
- "be passed if argument `inclusive` is not None"
- )
- with pytest.raises(ValueError, match=msg):
- ArrowIntervalType(pa.int64(), closed="both", inclusive="both")
+ msg = "Can only specify 'closed' or 'inclusive', not both."
+ with pytest.raises(TypeError, match=msg):
+ with tm.assert_produces_warning(FutureWarning):
+ ArrowIntervalType(pa.int64(), closed="both", inclusive="both")
- msg = "Argument `closed` is deprecated in favor of `inclusive`"
- with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
+ msg = "the 'closed'' keyword is deprecated, use 'inclusive' instead."
+ with tm.assert_produces_warning(FutureWarning, match=msg):
ArrowIntervalType(pa.int64(), closed="both")
@@ -460,3 +458,47 @@ def test_interval_index_subtype(timezone, inclusive_endpoints_fixture):
dates[:-1], dates[1:], inclusive=inclusive_endpoints_fixture
)
tm.assert_index_equal(result, expected)
+
+
+def test_from_tuples_deprecation():
+ # GH#40245
+ with tm.assert_produces_warning(FutureWarning):
+ IntervalArray.from_tuples([(0, 1), (1, 2)], closed="right")
+
+
+def test_from_tuples_deprecation_error():
+ # GH#40245
+ msg = "Can only specify 'closed' or 'inclusive', not both."
+ with pytest.raises(TypeError, match=msg):
+ with tm.assert_produces_warning(FutureWarning):
+ IntervalArray.from_tuples(
+ [(0, 1), (1, 2)], closed="right", inclusive="right"
+ )
+
+
+def test_from_breaks_deprecation():
+ # GH#40245
+ with tm.assert_produces_warning(FutureWarning):
+ IntervalArray.from_breaks([0, 1, 2, 3], closed="right")
+
+
+def test_from_arrays_deprecation():
+ # GH#40245
+ with tm.assert_produces_warning(FutureWarning):
+ IntervalArray.from_arrays([0, 1, 2], [1, 2, 3], closed="right")
+
+
+def test_set_closed_deprecated_closed():
+ # GH#40245
+ array = IntervalArray.from_breaks(range(10))
+ with tm.assert_produces_warning(FutureWarning):
+ array.set_closed(closed="both")
+
+
+def test_set_closed_both_provided_deprecation():
+ # GH#40245
+ array = IntervalArray.from_breaks(range(10))
+ msg = "Can only specify 'closed' or 'inclusive', not both."
+ with pytest.raises(TypeError, match=msg):
+ with tm.assert_produces_warning(FutureWarning):
+ array.set_closed(inclusive="both", closed="both")
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py
index b57bcf7abc1e1..1966f344356a3 100644
--- a/pandas/tests/indexes/interval/test_constructors.py
+++ b/pandas/tests/indexes/interval/test_constructors.py
@@ -61,6 +61,16 @@ def test_constructor(self, constructor, breaks, closed, name):
tm.assert_index_equal(result.left, Index(breaks[:-1]))
tm.assert_index_equal(result.right, Index(breaks[1:]))
+ def test_constructor_inclusive_default(self, constructor, name):
+ result_kwargs = self.get_kwargs_from_breaks([3, 14, 15, 92, 653])
+ inclusive_in = result_kwargs.pop("inclusive", None)
+ result = constructor(name=name, **result_kwargs)
+
+ if inclusive_in is not None:
+ result_kwargs["inclusive"] = "right"
+ expected = constructor(name=name, **result_kwargs)
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize(
"breaks, subtype",
[
@@ -78,7 +88,7 @@ def test_constructor_dtype(self, constructor, breaks, subtype):
expected = constructor(**expected_kwargs)
result_kwargs = self.get_kwargs_from_breaks(breaks)
- iv_dtype = IntervalDtype(subtype, "both")
+ iv_dtype = IntervalDtype(subtype, "right")
for dtype in (iv_dtype, str(iv_dtype)):
result = constructor(dtype=dtype, **result_kwargs)
tm.assert_index_equal(result, expected)
@@ -219,7 +229,7 @@ class TestFromArrays(ConstructorTests):
def constructor(self):
return IntervalIndex.from_arrays
- def get_kwargs_from_breaks(self, breaks, inclusive="both"):
+ def get_kwargs_from_breaks(self, breaks, inclusive="right"):
"""
converts intervals in breaks format to a dictionary of kwargs to
specific to the format expected by IntervalIndex.from_arrays
@@ -268,7 +278,7 @@ class TestFromBreaks(ConstructorTests):
def constructor(self):
return IntervalIndex.from_breaks
- def get_kwargs_from_breaks(self, breaks, inclusive="both"):
+ def get_kwargs_from_breaks(self, breaks, inclusive="right"):
"""
converts intervals in breaks format to a dictionary of kwargs to
specific to the format expected by IntervalIndex.from_breaks
@@ -306,7 +316,7 @@ class TestFromTuples(ConstructorTests):
def constructor(self):
return IntervalIndex.from_tuples
- def get_kwargs_from_breaks(self, breaks, inclusive="both"):
+ def get_kwargs_from_breaks(self, breaks, inclusive="right"):
"""
converts intervals in breaks format to a dictionary of kwargs to
specific to the format expected by IntervalIndex.from_tuples
@@ -356,7 +366,7 @@ class TestClassConstructors(ConstructorTests):
def constructor(self, request):
return request.param
- def get_kwargs_from_breaks(self, breaks, inclusive="both"):
+ def get_kwargs_from_breaks(self, breaks, inclusive="right"):
"""
converts intervals in breaks format to a dictionary of kwargs to
specific to the format expected by the IntervalIndex/Index constructors
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 4e33c3abd3252..90497780311de 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -897,35 +897,31 @@ def test_is_all_dates(self):
def test_interval_index_error_and_warning(self):
# GH 40245
- msg = (
- "Deprecated argument `closed` cannot "
- "be passed if argument `inclusive` is not None"
- )
- with pytest.raises(ValueError, match=msg):
- IntervalIndex.from_breaks(range(11), closed="both", inclusive="both")
+ msg = "Can only specify 'closed' or 'inclusive', not both."
+ msg_warn = "the 'closed'' keyword is deprecated, use 'inclusive' instead."
+ with pytest.raises(TypeError, match=msg):
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
+ IntervalIndex.from_breaks(range(11), closed="both", inclusive="both")
- with pytest.raises(ValueError, match=msg):
- IntervalIndex.from_arrays([0, 1], [1, 2], closed="both", inclusive="both")
+ with pytest.raises(TypeError, match=msg):
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
+ IntervalIndex.from_arrays(
+ [0, 1], [1, 2], closed="both", inclusive="both"
+ )
- with pytest.raises(ValueError, match=msg):
- IntervalIndex.from_tuples(
- [(0, 1), (0.5, 1.5)], closed="both", inclusive="both"
- )
+ with pytest.raises(TypeError, match=msg):
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
+ IntervalIndex.from_tuples(
+ [(0, 1), (0.5, 1.5)], closed="both", inclusive="both"
+ )
- msg = "Argument `closed` is deprecated in favor of `inclusive`"
- with tm.assert_produces_warning(
- FutureWarning, match=msg, check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
IntervalIndex.from_breaks(range(11), closed="both")
- with tm.assert_produces_warning(
- FutureWarning, match=msg, check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
IntervalIndex.from_arrays([0, 1], [1, 2], closed="both")
- with tm.assert_produces_warning(
- FutureWarning, match=msg, check_stacklevel=False
- ):
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed="both")
@@ -955,3 +951,9 @@ def test_searchsorted_invalid_argument(arg):
msg = "'<' not supported between instances of 'pandas._libs.interval.Interval' and "
with pytest.raises(TypeError, match=msg):
values.searchsorted(arg)
+
+
+def test_interval_range_deprecated_closed():
+ # GH#40245
+ with tm.assert_produces_warning(FutureWarning):
+ interval_range(start=0, end=5, closed="right")
diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py
index 255470cf4683e..3bde2f51178dc 100644
--- a/pandas/tests/indexes/interval/test_interval_range.py
+++ b/pandas/tests/indexes/interval/test_interval_range.py
@@ -360,13 +360,13 @@ def test_errors(self):
def test_interval_range_error_and_warning(self):
# GH 40245
- msg = (
- "Deprecated argument `closed` cannot "
- "be passed if argument `inclusive` is not None"
- )
- with pytest.raises(ValueError, match=msg):
- interval_range(end=5, periods=4, closed="both", inclusive="both")
+ msg = "Can only specify 'closed' or 'inclusive', not both."
+ msg_warn = "the 'closed'' keyword is deprecated, use 'inclusive' instead."
+
+ with pytest.raises(TypeError, match=msg):
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
+ interval_range(end=5, periods=4, closed="both", inclusive="both")
- msg = "Argument `closed` is deprecated in favor of `inclusive`"
- with tm.assert_produces_warning(FutureWarning, match=msg):
+ msg = "the 'closed'' keyword is deprecated, use 'inclusive' instead."
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
interval_range(end=5, periods=4, closed="right")
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index 4d042579701eb..ab8e64be648d4 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -607,7 +607,7 @@ def test_bar_plt_xaxis_intervalrange(self):
expected = [Text(0, 0, "([0, 1],)"), Text(1, 0, "([1, 2],)")]
s = Series(
[1, 2],
- index=[interval_range(0, 2)],
+ index=[interval_range(0, 2, inclusive="both")],
)
_check_plot_works(s.plot.bar)
assert all(
| - [x] xref #47365 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
This adresses the changed default, will add the attribute back in as a follow up
| https://api.github.com/repos/pandas-dev/pandas/pulls/47367 | 2022-06-15T12:27:22Z | 2022-07-06T12:06:58Z | 2022-07-06T12:06:58Z | 2022-07-06T12:41:46Z |
Backport PR #47325 on branch 1.4.x (REGR: Avoid regression warning with ea dtype and assert_index_equal order False) | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index ca8b8ca15ec47..ce53d2b1dd04c 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -24,6 +24,7 @@ Fixed regressions
- Fixed regression in :func:`read_csv` with ``index_col=False`` identifying first row as index names when ``header=None`` (:issue:`46955`)
- Fixed regression in :meth:`.DataFrameGroupBy.agg` when used with list-likes or dict-likes and ``axis=1`` that would give incorrect results; now raises ``NotImplementedError`` (:issue:`46995`)
- Fixed regression in :meth:`DataFrame.resample` and :meth:`DataFrame.rolling` when used with list-likes or dict-likes and ``axis=1`` that would raise an unintuitive error message; now raises ``NotImplementedError`` (:issue:`46904`)
+- Fixed regression in :func:`assert_index_equal` when ``check_order=False`` and :class:`Index` has extension or object dtype (:issue:`47207`)
- Fixed regression in :func:`read_excel` returning ints as floats on certain input sheets (:issue:`46988`)
- Fixed regression in :meth:`DataFrame.shift` when ``axis`` is ``columns`` and ``fill_value`` is absent, ``freq`` is ignored (:issue:`47039`)
diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index 4fa9c1aabe716..031a1a48760e6 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -374,8 +374,8 @@ def _get_ilevel_values(index, level):
# If order doesn't matter then sort the index entries
if not check_order:
- left = Index(safe_sort(left))
- right = Index(safe_sort(right))
+ left = Index(safe_sort(left), dtype=left.dtype)
+ right = Index(safe_sort(right), dtype=right.dtype)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
diff --git a/pandas/tests/util/test_assert_index_equal.py b/pandas/tests/util/test_assert_index_equal.py
index 8211b52fed650..e3461e62b4eda 100644
--- a/pandas/tests/util/test_assert_index_equal.py
+++ b/pandas/tests/util/test_assert_index_equal.py
@@ -242,3 +242,17 @@ def test_assert_index_equal_mixed_dtype():
# GH#39168
idx = Index(["foo", "bar", 42])
tm.assert_index_equal(idx, idx, check_order=False)
+
+
+def test_assert_index_equal_ea_dtype_order_false(any_numeric_ea_dtype):
+ # GH#47207
+ idx1 = Index([1, 3], dtype=any_numeric_ea_dtype)
+ idx2 = Index([3, 1], dtype=any_numeric_ea_dtype)
+ tm.assert_index_equal(idx1, idx2, check_order=False)
+
+
+def test_assert_index_equal_object_ints_order_false():
+ # GH#47207
+ idx1 = Index([1, 3], dtype="object")
+ idx2 = Index([3, 1], dtype="object")
+ tm.assert_index_equal(idx1, idx2, check_order=False)
| Backport PR #47325: REGR: Avoid regression warning with ea dtype and assert_index_equal order False | https://api.github.com/repos/pandas-dev/pandas/pulls/47366 | 2022-06-15T11:48:18Z | 2022-06-15T13:22:31Z | 2022-06-15T13:22:30Z | 2022-06-15T13:22:31Z |
Backport PR #47326 on branch 1.4.x (REGR: Fix nan comparison for same Index object) | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index ca8b8ca15ec47..bfc9422711690 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -20,6 +20,7 @@ Fixed regressions
- Fixed regression in :func:`read_fwf` raising ``ValueError`` when ``widths`` was specified with ``usecols`` (:issue:`46580`)
- Fixed regression in :func:`concat` not sorting columns for mixed column names (:issue:`47127`)
- Fixed regression in :meth:`.Groupby.transform` and :meth:`.Groupby.agg` failing with ``engine="numba"`` when the index was a :class:`MultiIndex` (:issue:`46867`)
+- Fixed regression in ``NaN`` comparison for :class:`Index` operations where the same object was compared (:issue:`47105`)
- Fixed regression is :meth:`.Styler.to_latex` and :meth:`.Styler.to_html` where ``buf`` failed in combination with ``encoding`` (:issue:`47053`)
- Fixed regression in :func:`read_csv` with ``index_col=False`` identifying first row as index names when ``header=None`` (:issue:`46955`)
- Fixed regression in :meth:`.DataFrameGroupBy.agg` when used with list-likes or dict-likes and ``axis=1`` that would give incorrect results; now raises ``NotImplementedError`` (:issue:`46995`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2abd649d00b78..7175b85e966d7 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -6660,7 +6660,7 @@ def _cmp_method(self, other, op):
# TODO: should set MultiIndex._can_hold_na = False?
arr[self.isna()] = False
return arr
- elif op in {operator.ne, operator.lt, operator.gt}:
+ elif op is operator.ne:
arr = np.zeros(len(self), dtype=bool)
if self._can_hold_na and not isinstance(self, ABCMultiIndex):
arr[self.isna()] = True
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 222f1fc3e7648..7b851d329c405 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -2,6 +2,7 @@
from datetime import datetime
from io import StringIO
import math
+import operator
import re
import numpy as np
@@ -1587,3 +1588,16 @@ def test_get_attributes_dict_deprecated():
with tm.assert_produces_warning(DeprecationWarning):
attrs = idx._get_attributes_dict()
assert attrs == {"name": None}
+
+
+@pytest.mark.parametrize("op", [operator.lt, operator.gt])
+def test_nan_comparison_same_object(op):
+ # GH#47105
+ idx = Index([np.nan])
+ expected = np.array([False])
+
+ result = op(idx, idx)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = op(idx, idx.copy())
+ tm.assert_numpy_array_equal(result, expected)
| Backport PR #47326: REGR: Fix nan comparison for same Index object | https://api.github.com/repos/pandas-dev/pandas/pulls/47364 | 2022-06-15T11:41:48Z | 2022-06-15T13:15:50Z | 2022-06-15T13:15:50Z | 2022-06-15T13:15:51Z |
BUG: DataFrame.loc not aligning dict when setting to a column | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index a76b682f135db..d8b068c67cdcd 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -837,6 +837,7 @@ Indexing
- Bug in :meth:`loc.__setitem__` treating ``range`` keys as positional instead of label-based (:issue:`45479`)
- Bug in :meth:`Series.__setitem__` when setting ``boolean`` dtype values containing ``NA`` incorrectly raising instead of casting to ``boolean`` dtype (:issue:`45462`)
- Bug in :meth:`Series.__setitem__` where setting :attr:`NA` into a numeric-dtype :class:`Series` would incorrectly upcast to object-dtype rather than treating the value as ``np.nan`` (:issue:`44199`)
+- Bug in :meth:`DataFrame.loc` when setting values to a column and right hand side is a dictionary (:issue:`47216`)
- Bug in :meth:`Series.__setitem__` with ``datetime64[ns]`` dtype, an all-``False`` boolean mask, and an incompatible value incorrectly casting to ``object`` instead of retaining ``datetime64[ns]`` dtype (:issue:`45967`)
- Bug in :meth:`Index.__getitem__` raising ``ValueError`` when indexer is from boolean dtype with ``NA`` (:issue:`45806`)
- Bug in :meth:`Series.mask` with ``inplace=True`` or setting values with a boolean mask with small integer dtypes incorrectly raising (:issue:`45750`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b4a278185b01b..5942dd3c32e6d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4709,6 +4709,8 @@ def _sanitize_column(self, value) -> ArrayLike:
# We should never get here with DataFrame value
if isinstance(value, Series):
return _reindex_for_setitem(value, self.index)
+ elif isinstance(value, dict):
+ return _reindex_for_setitem(Series(value), self.index)
if is_list_like(value):
com.require_length_match(value, self.index)
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index a59d7d4f3bd45..d30121f8f6271 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -709,6 +709,18 @@ def test_setitem_npmatrix_2d(self):
tm.assert_frame_equal(df, expected)
+ @pytest.mark.parametrize("vals", [{}, {"d": "a"}])
+ def test_setitem_aligning_dict_with_index(self, vals):
+ # GH#47216
+ df = DataFrame({"a": [1, 2], "b": [3, 4], **vals})
+ df.loc[:, "a"] = {1: 100, 0: 200}
+ df.loc[:, "c"] = {0: 5, 1: 6}
+ df.loc[:, "e"] = {1: 5}
+ expected = DataFrame(
+ {"a": [200, 100], "b": [3, 4], **vals, "c": [5, 6], "e": [np.nan, 5]}
+ )
+ tm.assert_frame_equal(df, expected)
+
class TestSetitemTZAwareValues:
@pytest.fixture
| - [x] closes #47216 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47361 | 2022-06-15T08:00:52Z | 2022-06-30T20:09:10Z | 2022-06-30T20:09:10Z | 2022-07-01T07:22:52Z |
CI: Add ccache | diff --git a/.circleci/setup_env.sh b/.circleci/setup_env.sh
index 52a8cab1cd2de..233a455f411bb 100755
--- a/.circleci/setup_env.sh
+++ b/.circleci/setup_env.sh
@@ -54,11 +54,7 @@ if pip list | grep -q ^pandas; then
pip uninstall -y pandas || true
fi
-echo "Build extensions"
-# GH 47305: Parallel build can causes flaky ImportError from pandas/_libs/tslibs
-python setup.py build_ext -q -j1
-
echo "Install pandas"
-python -m pip install --no-build-isolation --no-use-pep517 -e .
+python -m pip install --no-build-isolation -ve .
echo "done"
diff --git a/.github/actions/build-pandas/action.yml b/.github/actions/build-pandas/action.yml
new file mode 100644
index 0000000000000..7558a0fc0a549
--- /dev/null
+++ b/.github/actions/build-pandas/action.yml
@@ -0,0 +1,22 @@
+# TODO: merge setup-ccache, setup-conda, build-pandas into a single action?
+name: Build pandas
+description: Rebuilds the C extensions and installs pandas
+runs:
+ using: composite
+ steps:
+ - name: Set up Ccache
+ uses: ./.github/actions/setup-ccache
+
+ - name: Build Pandas
+ if : ${{ runner.os != 'Windows' }}
+ run: |
+ python -m pip install -ve . --no-build-isolation
+ shell: bash -el {0}
+
+ - name: Build Pandas (Windows)
+ if: ${{ runner.os == 'Windows' }}
+ run: |
+ call micromamba activate test
+ call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
+ python -m pip install -ve . --no-build-isolation
+ shell: cmd /C call {0}
diff --git a/.github/actions/build_pandas/action.yml b/.github/actions/build_pandas/action.yml
deleted file mode 100644
index 23bb988ef4d73..0000000000000
--- a/.github/actions/build_pandas/action.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: Build pandas
-description: Rebuilds the C extensions and installs pandas
-runs:
- using: composite
- steps:
-
- - name: Environment Detail
- run: |
- micromamba info
- micromamba list
- shell: bash -el {0}
-
- - name: Build Pandas
- run: |
- python setup.py build_ext -j $N_JOBS
- python -m pip install -e . --no-build-isolation --no-use-pep517 --no-index
- shell: bash -el {0}
- env:
- # Cannot use parallel compilation on Windows, see https://github.com/pandas-dev/pandas/issues/30873
- # GH 47305: Parallel build causes flaky ImportError: /home/runner/work/pandas/pandas/pandas/_libs/tslibs/timestamps.cpython-38-x86_64-linux-gnu.so: undefined symbol: pandas_datetime_to_datetimestruct
- N_JOBS: 1
- #N_JOBS: ${{ runner.os == 'Windows' && 1 || 2 }}
diff --git a/.github/actions/setup-ccache/action.yml b/.github/actions/setup-ccache/action.yml
new file mode 100644
index 0000000000000..9f48ca4a221a7
--- /dev/null
+++ b/.github/actions/setup-ccache/action.yml
@@ -0,0 +1,33 @@
+name: Setup sccache
+runs:
+ using: composite
+ steps:
+ - name: Make cache key
+ id: cache-key
+ run: |
+ key="${{ runner.os }}--${{ runner.arch }}--${{ github.workflow }}"
+ # Date: Daily invalidation of all ccaches as an extra safety measure.
+ key="$key--$(/bin/date -u '+%Y%m%d')"
+ # Python version: Separate caches for each Python version. This reduces the number of cache misses.
+ key="$key--$(python -V)"
+ # Cache version: Bump this number to manually invalidate the cache.
+ key="$key--0"
+
+ echo "cache-key=$key" >> $GITHUB_OUTPUT
+ shell: bash
+
+ # On Windows, for some reason the default temporary directory provided to sccache
+ # may become read-only at some point. Work around by having a private tempdir.
+ - name: Fix Windows temporary directory
+ id: mktemp
+ run: echo "tmpdir=$(cygpath -w $(mktemp -d))" >> $GITHUB_OUTPUT
+ shell: bash
+ if: ${{ runner.os == 'Windows' }}
+
+ - name: Setup sccache
+ uses: hendrikmuhs/ccache-action@v1.2
+ with:
+ variant: sccache
+ key: ${{ steps.cache-key.outputs.cache-key }}
+ env:
+ TMP: "${{ steps.mktemp.outputs.tmpdir }}"
diff --git a/.github/actions/setup-conda/action.yml b/.github/actions/setup-conda/action.yml
index 002d0020c2df1..97fa230898658 100644
--- a/.github/actions/setup-conda/action.yml
+++ b/.github/actions/setup-conda/action.yml
@@ -3,12 +3,6 @@ inputs:
environment-file:
description: Conda environment file to use.
default: environment.yml
- environment-name:
- description: Name to use for the Conda environment
- default: test
- extra-specs:
- description: Extra packages to install
- required: false
pyarrow-version:
description: If set, overrides the PyArrow version in the Conda environment to the given string.
required: false
@@ -19,7 +13,9 @@ runs:
run: |
grep -q ' - pyarrow' ${{ inputs.environment-file }}
sed -i"" -e "s/ - pyarrow/ - pyarrow=${{ inputs.pyarrow-version }}/" ${{ inputs.environment-file }}
+ echo ::group::Patched environment.yml contents
cat ${{ inputs.environment-file }}
+ echo ::endgroup::
shell: bash
if: ${{ inputs.pyarrow-version }}
@@ -27,8 +23,7 @@ runs:
uses: mamba-org/provision-with-micromamba@v12
with:
environment-file: ${{ inputs.environment-file }}
- environment-name: ${{ inputs.environment-name }}
- extra-specs: ${{ inputs.extra-specs }}
+ environment-name: test
channels: conda-forge
channel-priority: ${{ runner.os == 'macOS' && 'flexible' || 'strict' }}
condarc-file: ci/condarc.yml
diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml
index 438d2c7b4174e..87d40821ad2e5 100644
--- a/.github/workflows/32-bit-linux.yml
+++ b/.github/workflows/32-bit-linux.yml
@@ -38,13 +38,14 @@ jobs:
/opt/python/cp38-cp38/bin/python -m venv ~/virtualenvs/pandas-dev && \
. ~/virtualenvs/pandas-dev/bin/activate && \
python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \
+ pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
+ pip install "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master" && \
+ pip install "git+https://github.com/mesonbuild/meson-python.git@main" && \
python -m pip install versioneer[toml] && \
- python -m pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \
- python setup.py build_ext -q -j1 && \
- python -m pip install --no-build-isolation --no-use-pep517 -e . && \
- python -m pip list && \
export PANDAS_CI=1 && \
- pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml"
+ python -m pip install --no-build-isolation -ve . && \
+ python -m pip list && \
+ pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml --import-mode=importlib"
- name: Publish test results for Python 3.8-32 bit full Linux
uses: actions/upload-artifact@v3
diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index 280b6ed601f08..38547807e2f5c 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -62,7 +62,7 @@ jobs:
- name: Build Pandas
id: build
- uses: ./.github/actions/build_pandas
+ uses: ./.github/actions/build-pandas
# The following checks are independent of each other and should still be run if one fails
- name: Check for no warnings when building single-page docs
@@ -125,7 +125,7 @@ jobs:
- name: Build Pandas
id: build
- uses: ./.github/actions/build_pandas
+ uses: ./.github/actions/build-pandas
- name: Run ASV benchmarks
run: |
diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml
index 7a9f491228a83..1d2dc862ea297 100644
--- a/.github/workflows/docbuild-and-upload.yml
+++ b/.github/workflows/docbuild-and-upload.yml
@@ -44,7 +44,7 @@ jobs:
uses: ./.github/actions/setup-conda
- name: Build Pandas
- uses: ./.github/actions/build_pandas
+ uses: ./.github/actions/build-pandas
- name: Set up maintainers cache
uses: actions/cache@v3
diff --git a/.github/workflows/macos-windows.yml b/.github/workflows/macos-windows.yml
index d762e20db196a..11320486f2001 100644
--- a/.github/workflows/macos-windows.yml
+++ b/.github/workflows/macos-windows.yml
@@ -56,7 +56,7 @@ jobs:
pyarrow-version: ${{ matrix.os == 'macos-latest' && '9' || '' }}
- name: Build Pandas
- uses: ./.github/actions/build_pandas
+ uses: ./.github/actions/build-pandas
- name: Test
uses: ./.github/actions/run-tests
diff --git a/.github/workflows/package-checks.yml b/.github/workflows/package-checks.yml
index eb065c6e2e87d..4897c68527277 100644
--- a/.github/workflows/package-checks.yml
+++ b/.github/workflows/package-checks.yml
@@ -42,11 +42,15 @@ jobs:
- name: Install required dependencies
run: |
- python -m pip install --upgrade pip setuptools wheel python-dateutil pytz numpy cython
+ # TODO: Remove when we fully migrate to meson
+ # since the PEP 517 build will pull build dependencies automatically
+ python -m pip install --upgrade pip wheel python-dateutil pytz numpy cython
+ python -m pip install "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ python -m pip install git+https://github.com/mesonbuild/meson-python.git@main
python -m pip install versioneer[toml]
shell: bash -el {0}
- name: Pip install with extra
run: |
- python -m pip install -e .[${{ matrix.extra }}] --no-build-isolation
+ python -m pip install .[${{ matrix.extra }}] -v --no-build-isolation
shell: bash -el {0}
diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml
index 220c1e464742e..73b7dfc8dae2c 100644
--- a/.github/workflows/python-dev.yml
+++ b/.github/workflows/python-dev.yml
@@ -75,15 +75,30 @@ jobs:
python -m pip install --upgrade pip setuptools wheel
python -m pip install -i https://pypi.anaconda.org/scipy-wheels-nightly/simple numpy
python -m pip install git+https://github.com/nedbat/coveragepy.git
+ python -m pip install python-dateutil pytz cython
+ # TODO: update when upstream releases fixes
+ python -m pip install "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ python -m pip install "git+https://github.com/mesonbuild/meson-python.git@main"
+ python -m pip install hypothesis==6.52.1 pytest>=6.2.5 pytest-xdist pytest-cov pytest-asyncio>=0.17
python -m pip install versioneer[toml]
- python -m pip install python-dateutil pytz cython hypothesis==6.52.1 pytest>=6.2.5 pytest-xdist pytest-cov pytest-asyncio>=0.17
python -m pip list
- # GH 47305: Parallel build can cause flaky ImportError from pandas/_libs/tslibs
+ # Sigh, someone (numpy?) is depending on mingw, which pandas doesn't compile with.
+ # Also, meson doesn't detect visual c++ unless cl.exe is in path.
+ # TODO: File a bug with meson-python about this.
- name: Build Pandas
+ if : ${{ runner.os != 'Windows' }}
run: |
- python setup.py build_ext -q -j1
- python -m pip install -e . --no-build-isolation --no-use-pep517 --no-index
+ python3 -m pip install -ve . --no-build-isolation
+ shell: bash -el {0}
+
+ - name: Build Pandas (Windows)
+ if: ${{ runner.os == 'Windows' }}
+ run: |
+ call micromamba activate test
+ call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
+ python -m pip install -ve . --no-build-isolation
+ shell: cmd /C call {0}
- name: Build Version
run: |
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml
index d11b614e2b2c0..70788ec766435 100644
--- a/.github/workflows/sdist.yml
+++ b/.github/workflows/sdist.yml
@@ -46,16 +46,13 @@ jobs:
- name: Install dependencies
run: |
- python -m pip install --upgrade pip setuptools wheel
+ python -m pip install --upgrade pip setuptools wheel build
python -m pip install versioneer[toml]
- # GH 39416
- pip install numpy
-
- name: Build pandas sdist
run: |
pip list
- python setup.py sdist --formats=gztar
+ python -m build --sdist
- name: Upload sdist artifact
uses: actions/upload-artifact@v3
@@ -63,13 +60,15 @@ jobs:
name: ${{matrix.python-version}}-sdist.gz
path: dist/*.gz
- - name: Set up Conda
- uses: ./.github/actions/setup-conda
+ - name: Set up empty Conda environment
+ uses: mamba-org/provision-with-micromamba@v12
with:
environment-file: false
- environment-name: pandas-sdist
+ environment-name: sdist
extra-specs: |
python =${{ matrix.python-version }}
+ channels: conda-forge
+ condarc-file: ci/condarc.yml
- name: Install pandas from sdist
run: |
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 9c93725ea15ec..dad19c8270cc6 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -27,58 +27,60 @@ jobs:
timeout-minutes: 180
strategy:
matrix:
- env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml]
+ #env_file: [actions-38.yaml, actions-39.yaml, actions-310.yaml]
+ env_file: [actions-38.yaml]
pattern: ["not single_cpu", "single_cpu"]
- pyarrow_version: ["7", "8", "9", "10"]
- include:
- - name: "Downstream Compat"
- env_file: actions-38-downstream_compat.yaml
- pattern: "not slow and not network and not single_cpu"
- pytest_target: "pandas/tests/test_downstream.py"
- - name: "Minimum Versions"
- env_file: actions-38-minimum_versions.yaml
- pattern: "not slow and not network and not single_cpu"
- error_on_warnings: "0"
- - name: "Locale: it_IT"
- env_file: actions-38.yaml
- pattern: "not slow and not network and not single_cpu"
- extra_apt: "language-pack-it"
- # Use the utf8 version as the default, it has no bad side-effect.
- lang: "it_IT.utf8"
- lc_all: "it_IT.utf8"
- # Also install it_IT (its encoding is ISO8859-1) but do not activate it.
- # It will be temporarily activated during tests with locale.setlocale
- extra_loc: "it_IT"
- - name: "Locale: zh_CN"
- env_file: actions-38.yaml
- pattern: "not slow and not network and not single_cpu"
- extra_apt: "language-pack-zh-hans"
- # Use the utf8 version as the default, it has no bad side-effect.
- lang: "zh_CN.utf8"
- lc_all: "zh_CN.utf8"
- # Also install zh_CN (its encoding is gb2312) but do not activate it.
- # It will be temporarily activated during tests with locale.setlocale
- extra_loc: "zh_CN"
- - name: "Copy-on-Write"
- env_file: actions-310.yaml
- pattern: "not slow and not network and not single_cpu"
- pandas_copy_on_write: "1"
- error_on_warnings: "0"
- - name: "Data Manager"
- env_file: actions-38.yaml
- pattern: "not slow and not network and not single_cpu"
- pandas_data_manager: "array"
- error_on_warnings: "0"
- - name: "Pypy"
- env_file: actions-pypy-38.yaml
- pattern: "not slow and not network and not single_cpu"
- test_args: "--max-worker-restart 0"
- error_on_warnings: "0"
- - name: "Numpy Dev"
- env_file: actions-310-numpydev.yaml
- pattern: "not slow and not network and not single_cpu"
- test_args: "-W error::DeprecationWarning:numpy -W error::FutureWarning:numpy"
- error_on_warnings: "0"
+ #pyarrow_version: ["7", "8", "9", "10"]
+ pyarrow_version: ["10"]
+ #include:
+ # - name: "Downstream Compat"
+ # env_file: actions-38-downstream_compat.yaml
+ # pattern: "not slow and not network and not single_cpu"
+ # pytest_target: "pandas/tests/test_downstream.py"
+ # - name: "Minimum Versions"
+ # env_file: actions-38-minimum_versions.yaml
+ # pattern: "not slow and not network and not single_cpu"
+ # error_on_warnings: "0"
+ # - name: "Locale: it_IT"
+ # env_file: actions-38.yaml
+ # pattern: "not slow and not network and not single_cpu"
+ # extra_apt: "language-pack-it"
+ # # Use the utf8 version as the default, it has no bad side-effect.
+ # lang: "it_IT.utf8"
+ # lc_all: "it_IT.utf8"
+ # # Also install it_IT (its encoding is ISO8859-1) but do not activate it.
+ # # It will be temporarily activated during tests with locale.setlocale
+ # extra_loc: "it_IT"
+ # - name: "Locale: zh_CN"
+ # env_file: actions-38.yaml
+ # pattern: "not slow and not network and not single_cpu"
+ # extra_apt: "language-pack-zh-hans"
+ # # Use the utf8 version as the default, it has no bad side-effect.
+ # lang: "zh_CN.utf8"
+ # lc_all: "zh_CN.utf8"
+ # # Also install zh_CN (its encoding is gb2312) but do not activate it.
+ # # It will be temporarily activated during tests with locale.setlocale
+ # extra_loc: "zh_CN"
+ # - name: "Copy-on-Write"
+ # env_file: actions-310.yaml
+ # pattern: "not slow and not network and not single_cpu"
+ # pandas_copy_on_write: "1"
+ # error_on_warnings: "0"
+ # - name: "Data Manager"
+ # env_file: actions-38.yaml
+ # pattern: "not slow and not network and not single_cpu"
+ # pandas_data_manager: "array"
+ # error_on_warnings: "0"
+ # - name: "Pypy"
+ # env_file: actions-pypy-38.yaml
+ # pattern: "not slow and not network and not single_cpu"
+ # test_args: "--max-worker-restart 0"
+ # error_on_warnings: "0"
+ # - name: "Numpy Dev"
+ # env_file: actions-310-numpydev.yaml
+ # pattern: "not slow and not network and not single_cpu"
+ # test_args: "-W error::DeprecationWarning:numpy -W error::FutureWarning:numpy"
+ # error_on_warnings: "0"
exclude:
- env_file: actions-38.yaml
pyarrow_version: "7"
@@ -173,7 +175,7 @@ jobs:
pyarrow-version: ${{ matrix.pyarrow_version }}
- name: Build Pandas
- uses: ./.github/actions/build_pandas
+ uses: ./.github/actions/build-pandas
- name: Test
uses: ./.github/actions/run-tests
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index 0e347b166e425..8b845400fdc1e 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -51,9 +51,14 @@ jobs:
# https://github.com/github/feedback/discussions/7835#discussioncomment-1769026
buildplat:
- [ubuntu-20.04, manylinux_x86_64]
- - [macos-11, macosx_*]
+ # TODO: Consider re-enabling macos wheels, once meson-python makes it easy to
+ # cross compile
+ #- [macos-11, macosx_*]
+ - [macos-11, macosx_x86_64]
- [windows-2019, win_amd64]
- - [windows-2019, win32]
+ # Turn off for now
+ # TODO: Re-enable after mesonbuild/meson-python#167 goes in
+ #- [windows-2019, win32]
# TODO: support PyPy?
python: [["cp38", "3.8"], ["cp39", "3.9"], ["cp310", "3.10"], ["cp311", "3.11"]]# "pp38", "pp39"]
env:
@@ -70,6 +75,16 @@ jobs:
# https://github.com/actions/checkout/issues/338
fetch-depth: 0
+ - name: Remove other compilers
+ if: ${{ runner.os == 'Windows' }}
+ run: |
+ # TODO: This is a bug in meson, where it will look for other compilers
+ # if it can't find cl.exe in path, before trying harder to find MSVC
+ # Remove once meson patches this.
+ choco uninstall mingw -y
+ choco uninstall strawberryperl -y
+ choco uninstall llvm -y
+
- name: Build wheels
uses: pypa/cibuildwheel@v2.9.0
env:
@@ -158,8 +173,12 @@ jobs:
- name: Build sdist
run: |
- pip install build
- python -m build --sdist
+ # TODO: Remove once meson-python releases 0.11.0, also remove
+ # no-isolation from build flag
+ pip install "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ pip install git+https://github.com/mesonbuild/meson-python.git@main
+ pip install build Cython oldest-supported-numpy versioneer[toml]
+ python -m build --sdist --no-isolation
- name: Test the sdist
shell: bash -el {0}
run: |
@@ -173,6 +192,7 @@ jobs:
python -c "import pandas; print(pandas.__version__);
pandas.test(extra_args=['-m not clipboard and not single_cpu', '--skip-slow', '--skip-network', '--skip-db', '-n=2']);
pandas.test(extra_args=['-m not clipboard and single_cpu', '--skip-slow', '--skip-network', '--skip-db'])"
+
- uses: actions/upload-artifact@v3
with:
name: sdist
diff --git a/.gitignore b/.gitignore
index 07b1f056d511b..7c060793560a4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -36,6 +36,7 @@
*.py[ocd]
*.so
.build_cache_dir
+.mesonpy-native-file.ini
MANIFEST
# Python files #
@@ -72,6 +73,8 @@ coverage_html_report
__pycache__
# pytest-monkeytype
monkeytype.sqlite3
+# meson editable install folder
+.mesonpy
# OS generated files #
diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json
index 6e548bf9d9e8a..cef5675db6c7e 100644
--- a/asv_bench/asv.conf.json
+++ b/asv_bench/asv.conf.json
@@ -41,7 +41,11 @@
// pip (with all the conda available packages installed first,
// followed by the pip installed packages).
"matrix": {
- "numpy": ["1.23.5"], // https://github.com/pandas-dev/pandas/pull/50356
+ // TODO: Remove pip deps, once no-isolation is turned off
+ // pending new meson/meson-python releases
+ "pip+oldest-supported-numpy": [],
+ "pip+versioneer[toml]": [],
+ "pip+meson[ninja]": [],
"Cython": ["0.29.32"],
"matplotlib": [],
"sqlalchemy": [],
@@ -56,6 +60,9 @@
"xlrd": [],
"odfpy": [],
"jinja2": [],
+ "meson": [],
+ "meson-python": [],
+ "python-build": [],
},
"conda_channels": ["conda-forge"],
// Combinations of libraries/python versions can be excluded/included
@@ -125,7 +132,5 @@
"regression_thresholds": {
},
"build_command":
- ["python -m pip install versioneer[toml]",
- "python setup.py build -j4",
- "PIP_NO_BUILD_ISOLATION=false python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}"],
+ ["python -m build -Cbuilddir=builddir --wheel --no-isolation --outdir {build_cache_dir} {build_dir}"]
}
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 3c1362b1ac83e..eb2c0be724613 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -67,11 +67,11 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
MSG='Doctests' ; echo $MSG
# Ignore test_*.py files or else the unit tests will run
- python -m pytest --doctest-modules --ignore-glob="**/test_*.py" pandas
+ python -c 'import pandas as pd; pd.test(extra_args=["--doctest-modules", "--ignore-glob=**/test_*.py"])'
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Cython Doctests' ; echo $MSG
- python -m pytest --doctest-cython pandas/_libs
+ python -c 'import pandas as pd; pd.test(extra_args=["--doctest-cython", "--ignore-glob=**/test_*.py"])'
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/ci/deps/actions-310-numpydev.yaml b/ci/deps/actions-310-numpydev.yaml
index 863c231b18c4f..1aa401de34234 100644
--- a/ci/deps/actions-310-numpydev.yaml
+++ b/ci/deps/actions-310-numpydev.yaml
@@ -20,6 +20,8 @@ dependencies:
- pip
- pip:
- "cython"
+ - "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ - "git+https://github.com/mesonbuild/meson-python.git@main"
- "--extra-index-url https://pypi.anaconda.org/scipy-wheels-nightly/simple"
- "--pre"
- "numpy<1.24"
diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml
index 79457cd503876..83f07430748b2 100644
--- a/ci/deps/actions-310.yaml
+++ b/ci/deps/actions-310.yaml
@@ -55,3 +55,6 @@ dependencies:
- xlrd
- xlsxwriter
- zstandard
+ - pip:
+ - "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ - "git+https://github.com/mesonbuild/meson-python.git@main"
diff --git a/ci/deps/actions-38-downstream_compat.yaml b/ci/deps/actions-38-downstream_compat.yaml
index 6955baa282274..3b40cab733e66 100644
--- a/ci/deps/actions-38-downstream_compat.yaml
+++ b/ci/deps/actions-38-downstream_compat.yaml
@@ -69,3 +69,6 @@ dependencies:
- pandas-gbq
- pyyaml
- py
+ - pip:
+ - "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ - "git+https://github.com/mesonbuild/meson-python.git@main"
diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml
index de7e793c46d19..945e5894e79b6 100644
--- a/ci/deps/actions-38-minimum_versions.yaml
+++ b/ci/deps/actions-38-minimum_versions.yaml
@@ -58,6 +58,7 @@ dependencies:
- xlrd=2.0.1
- xlsxwriter=1.4.3
- zstandard=0.15.2
-
- pip:
- - pyqt5==5.15.1
+ - "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ - "git+https://github.com/mesonbuild/meson-python.git@main"
+ - pyqt5==5.15.1
diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml
index 004ef93606457..c71b57403e11a 100644
--- a/ci/deps/actions-38.yaml
+++ b/ci/deps/actions-38.yaml
@@ -54,3 +54,6 @@ dependencies:
- xlrd
- xlsxwriter
- zstandard
+ - pip:
+ - "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ - "git+https://github.com/mesonbuild/meson-python.git@main"
diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml
index ec7ffebde964f..09c371fdd5d0a 100644
--- a/ci/deps/actions-39.yaml
+++ b/ci/deps/actions-39.yaml
@@ -55,3 +55,6 @@ dependencies:
- xlrd
- xlsxwriter
- zstandard
+ - pip:
+ - "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ - "git+https://github.com/mesonbuild/meson-python.git@main"
diff --git a/ci/deps/actions-pypy-38.yaml b/ci/deps/actions-pypy-38.yaml
index 054129c4198a1..dc7aec697f3ea 100644
--- a/ci/deps/actions-pypy-38.yaml
+++ b/ci/deps/actions-pypy-38.yaml
@@ -22,3 +22,6 @@ dependencies:
- numpy<1.24
- python-dateutil
- pytz
+ - pip:
+ - "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ - "git+https://github.com/mesonbuild/meson-python.git@main"
diff --git a/ci/deps/circle-38-arm64.yaml b/ci/deps/circle-38-arm64.yaml
index b4171710564bf..8b94b4e5e2ae1 100644
--- a/ci/deps/circle-38-arm64.yaml
+++ b/ci/deps/circle-38-arm64.yaml
@@ -55,3 +55,6 @@ dependencies:
- xlrd
- xlsxwriter
- zstandard
+ - pip:
+ - "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ - "git+https://github.com/mesonbuild/meson-python.git@main"
diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index a48d6c1ad6580..dade69a2431cc 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -13,7 +13,7 @@ if [[ "not network" == *"$PATTERN"* ]]; then
fi
if [[ "$COVERAGE" == "true" ]]; then
- COVERAGE="-s --cov=pandas --cov-report=xml --cov-append"
+ COVERAGE="-s --cov=pandas --cov-report=xml --cov-append --cov-config=setup.cfg"
else
COVERAGE="" # We need to reset this for COVERAGE="false" case
fi
@@ -24,7 +24,7 @@ if [[ $(uname) == "Linux" && -z $DISPLAY ]]; then
XVFB="xvfb-run "
fi
-PYTEST_CMD="${XVFB}pytest -r fEs -n $PYTEST_WORKERS --dist=loadfile $TEST_ARGS $COVERAGE $PYTEST_TARGET"
+PYTEST_CMD="${XVFB}pytest -r fEs -n $PYTEST_WORKERS --dist=loadfile --import-mode=importlib $TEST_ARGS $COVERAGE $PYTEST_TARGET"
if [[ "$PATTERN" ]]; then
PYTEST_CMD="$PYTEST_CMD -m \"$PATTERN\""
@@ -43,7 +43,7 @@ sh -c "$PYTEST_CMD"
if [[ "$PANDAS_DATA_MANAGER" != "array" && "$PYTEST_TARGET" == "pandas" ]]; then
# The ArrayManager tests should have already been run by PYTEST_CMD if PANDAS_DATA_MANAGER was already set to array
# If we're targeting specific files, e.g. test_downstream.py, don't run.
- PYTEST_AM_CMD="PANDAS_DATA_MANAGER=array pytest -n $PYTEST_WORKERS --dist=loadfile $TEST_ARGS $COVERAGE pandas"
+ PYTEST_AM_CMD="PANDAS_DATA_MANAGER=array pytest -n $PYTEST_WORKERS --dist=loadfile --import-mode=importlib $TEST_ARGS $COVERAGE pandas"
if [[ "$PATTERN" ]]; then
PYTEST_AM_CMD="$PYTEST_AM_CMD -m \"$PATTERN and arraymanager\""
diff --git a/doc/source/development/contributing_environment.rst b/doc/source/development/contributing_environment.rst
index 942edd863a19a..ad437d0dc37bf 100644
--- a/doc/source/development/contributing_environment.rst
+++ b/doc/source/development/contributing_environment.rst
@@ -191,11 +191,51 @@ See https://www.jetbrains.com/help/pycharm/docker.html for details.
Step 3: build and install pandas
--------------------------------
-You can now run::
+There are currently two supported ways of building pandas, pip/meson and setuptools(setup.py).
+Historically, pandas has only supported using setuptools to build pandas. However, this method
+requires a lot of convoluted code in setup.py and also has many issues in compiling pandas in parallel
+due to limitations in setuptools.
+
+The newer build system, invokes the meson backend through pip (via a `PEP 517 <https://peps.python.org/pep-0517/>`_ build).
+It automatically uses all available cores on your CPU, and also avoids the need for manual rebuilds by
+rebuilding automatically whenever pandas is imported(with an editable install).
+
+For these reasons, you should compile pandas with meson.
+Because the meson build system is newer, you may find bugs/minor issues as it matures. You can report these bugs
+`here <https://github.com/pandas-dev/pandas/issues/49683>`_.
+
+To compile pandas with meson, run::
# Build and install pandas
- python setup.py build_ext -j 4
- python -m pip install -e . --no-build-isolation --no-use-pep517
+ python -m pip install -ve . --no-build-isolation
+
+** Build options **
+
+It is possible to pass options from the pip frontend to the meson backend if you would like to configure your
+install. Occasionally, you'll want to use this to adjust the build directory, and/or toggle debug/optimization levels.
+
+You can pass a build directory to pandas by appending ``--config-settings builddir="your builddir here"`` to your pip command.
+This option allows you to configure where meson stores your built C extensions, and allows for fast rebuilds.
+
+Sometimes, it might be useful to compile pandas with debugging symbols, when debugging C extensions.
+Appending ``--config-settings setup-args="-Ddebug=true"`` will do the trick.
+
+With pip, it is possible to chain together multiple config settings (for example specifying both a build directory
+and building with debug symbols would look like
+``--config-settings builddir="your builddir here" --config-settings setup-args="-Ddebug=true"``.
+
+**Compiling pandas with setup.py**
+
+.. note::
+ This method of compiling pandas will be deprecated and removed very soon, as the meson backend matures.
+
+To compile pandas with setuptools, run::
+
+ python setup.py develop
+
+.. note::
+ You will also need to repeat this step each time the C extensions change,
+ for example if you modified any file in ``pandas/_libs`` or if you did a fetch and merge from ``upstream/main``.
At this point you should be able to import pandas from your locally built version::
@@ -204,9 +244,22 @@ At this point you should be able to import pandas from your locally built versio
>>> print(pandas.__version__) # note: the exact output may differ
2.0.0.dev0+880.g2b9e661fbb.dirty
-This will create the new environment, and not touch any of your existing environments,
-nor any existing Python installation.
+When building pandas with meson, importing pandas will automatically trigger a rebuild, even when C/Cython files are modified.
+By default, no output will be produced by this rebuild (the import will just take longer). If you would like to see meson's
+output when importing pandas, you can set the environment variable ``MESONPY_EDTIABLE_VERBOSE``. For example, this would be::
-.. note::
- You will need to repeat this step each time the C extensions change, for example
- if you modified any file in ``pandas/_libs`` or if you did a fetch and merge from ``upstream/main``.
+ # On Linux/macOS
+ MESONPY_EDITABLE_VERBOSE=1 python
+
+ # Windows
+ set MESONPY_EDITABLE_VERBOSE=1 # Only need to set this once per session
+ python
+
+If you would like to see this verbose output every time, you can set the ``editable-verbose`` config setting to ``true`` like so::
+
+ python -m pip install -ve . --config-settings editable-verbose=true
+
+.. tip::
+ If you ever find yourself wondering whether setuptools or meson was used to build your pandas,
+ you can check the value of ``pandas._built_with_meson``, which will be true if meson was used
+ to compile pandas.
diff --git a/doc/source/development/debugging_extensions.rst b/doc/source/development/debugging_extensions.rst
index 32cb8f4c4d8cd..9f090b2b37ad2 100644
--- a/doc/source/development/debugging_extensions.rst
+++ b/doc/source/development/debugging_extensions.rst
@@ -12,8 +12,12 @@ First, be sure to compile the extensions with the appropriate flags to generate
.. code-block:: sh
+ # If you're compiling pandas with setuptools, this would be
python setup.py build_ext --inplace -j4 --with-debugging-symbols
+ # If using meson, this would be
+ pip install -ve . --no-build-isolation --config-settings setup-args="-Ddebug=true"
+
Using a debugger
================
diff --git a/environment.yml b/environment.yml
index 96753f0f1c9b3..17b5c720b224f 100644
--- a/environment.yml
+++ b/environment.yml
@@ -70,9 +70,10 @@ dependencies:
# benchmarks
- asv>=0.5.1
- # The compiler packages are meta-packages and install the correct compiler (activation) packages on the respective platforms.
+ ## The compiler packages are meta-packages and install the correct compiler (activation) packages on the respective platforms.
- c-compiler
- cxx-compiler
+ - sccache
# code checks
- black=22.10.0
@@ -118,3 +119,5 @@ dependencies:
- pip:
- sphinx-toggleprompt
+ - "meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master"
+ - "git+https://github.com/mesonbuild/meson-python.git@main"
diff --git a/generate_pxi.py b/generate_pxi.py
new file mode 100644
index 0000000000000..3462b97aefcbf
--- /dev/null
+++ b/generate_pxi.py
@@ -0,0 +1,33 @@
+import argparse
+import os
+
+from Cython import Tempita
+
+
+def process_tempita(pxifile, outfile):
+ with open(pxifile) as f:
+ tmpl = f.read()
+ pyxcontent = Tempita.sub(tmpl)
+
+ with open(outfile, "w") as f:
+ f.write(pyxcontent)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("infile", type=str, help="Path to the input file")
+ parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory")
+ args = parser.parse_args()
+
+ if not args.infile.endswith(".in"):
+ raise ValueError(f"Unexpected extension: {args.infile}")
+
+ outdir_abs = os.path.join(os.getcwd(), args.outdir)
+ outfile = os.path.join(
+ outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0]
+ )
+
+ process_tempita(args.infile, outfile)
+
+
+main()
diff --git a/scripts/generate_version.py b/generate_version.py
similarity index 91%
rename from scripts/generate_version.py
rename to generate_version.py
index fbc78ab12429a..c8d7f75c1977e 100644
--- a/scripts/generate_version.py
+++ b/generate_version.py
@@ -1,3 +1,4 @@
+# Note: This file has to live next to setup.py or versioneer will not work
import argparse
import os
@@ -6,7 +7,6 @@
def write_version_info(path):
if os.environ.get("MESON_DIST_ROOT"):
- # raise ValueError("dist root is", os.environ.get("MESON_DIST_ROOT"))
path = os.path.join(os.environ.get("MESON_DIST_ROOT"), path)
with open(path, "w") as file:
file.write(f'__version__="{versioneer.get_version()}"\n')
diff --git a/meson.build b/meson.build
new file mode 100644
index 0000000000000..b97996c55ae93
--- /dev/null
+++ b/meson.build
@@ -0,0 +1,52 @@
+# This file is adapted from https://github.com/scipy/scipy/blob/main/meson.build
+project(
+ 'pandas',
+ 'c', 'cpp', 'cython',
+ version: '2.0.0.dev0',
+ license: 'BSD-3',
+ meson_version: '>=0.64',
+ default_options: [
+ # TODO: investigate, does meson try to compile against debug Python
+ # when buildtype = debug, this seems to be causing problems on CI
+ # where provided Python is not compiled in debug mode
+ 'buildtype=release',
+ # TODO: Reactivate werror, some warnings on Windows
+ #'werror=true',
+ 'c_std=c99'
+ ]
+)
+
+add_project_arguments('-DNPY_NO_DEPRECATED_API=0', language : 'c')
+add_project_arguments('-DNPY_NO_DEPRECATED_API=0', language : 'cpp')
+
+# This is a cython bug
+# TODO: Remove once cython/cython#4804 addressed
+add_project_arguments('-DNDEBUG', language : 'c')
+add_project_arguments('-DNDEBUG', language : 'cpp')
+
+py_mod = import('python')
+fs = import('fs')
+py = py_mod.find_installation('python')
+py_dep = py.dependency()
+tempita = files('generate_pxi.py')
+versioneer = files('generate_version.py')
+
+if fs.exists('_version_meson.py')
+ py.install_sources('_version_meson.py', pure: false, subdir: 'pandas')
+else
+ custom_target('write_version_file',
+ output: '_version_meson.py',
+ command: [
+ py, versioneer, '-o', '@OUTPUT@'
+ ],
+ build_by_default: true,
+ build_always_stale: true,
+ install: true,
+ install_dir: py.get_install_dir(pure: false) / 'pandas'
+ )
+ meson.add_dist_script(py, versioneer, '-o', '_version_meson.py')
+endif
+
+# Needed by pandas.test() when it looks for the pytest ini options
+py.install_sources('pyproject.toml', pure: false, subdir: 'pandas')
+subdir('pandas')
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 1a549c09d22f7..8138e0199ed66 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -176,12 +176,21 @@
from pandas.util._tester import test
# use the closest tagged version if possible
-from pandas._version import get_versions
+_built_with_meson = False
+try:
+ from pandas._version_meson import ( # pyright: ignore [reportMissingImports]
+ __version__,
+ __git_version__,
+ )
+
+ _built_with_meson = True
+except ImportError:
+ from pandas._version import get_versions
-v = get_versions()
-__version__ = v.get("closest-tag", v["version"])
-__git_version__ = v.get("full-revisionid")
-del get_versions, v
+ v = get_versions()
+ __version__ = v.get("closest-tag", v["version"])
+ __git_version__ = v.get("full-revisionid")
+ del get_versions, v
# module level doc-string
diff --git a/pandas/_libs/meson.build b/pandas/_libs/meson.build
new file mode 100644
index 0000000000000..75d507a398b93
--- /dev/null
+++ b/pandas/_libs/meson.build
@@ -0,0 +1,160 @@
+_algos_take_helper = custom_target('algos_take_helper_pxi',
+ output: 'algos_take_helper.pxi',
+ input: 'algos_take_helper.pxi.in',
+ command: [
+ py, tempita, '@INPUT@', '-o', '@OUTDIR@'
+ ]
+)
+_algos_common_helper = custom_target('algos_common_helper_pxi',
+ output: 'algos_common_helper.pxi',
+ input: 'algos_common_helper.pxi.in',
+ command: [
+ py, tempita, '@INPUT@', '-o', '@OUTDIR@'
+ ]
+)
+_khash_primitive_helper = custom_target('khash_primitive_helper_pxi',
+ output: 'khash_for_primitive_helper.pxi',
+ input: 'khash_for_primitive_helper.pxi.in',
+ command: [
+ py, tempita, '@INPUT@', '-o', '@OUTDIR@'
+ ]
+)
+_hashtable_class_helper = custom_target('hashtable_class_helper_pxi',
+ output: 'hashtable_class_helper.pxi',
+ input: 'hashtable_class_helper.pxi.in',
+ command: [
+ py, tempita, '@INPUT@', '-o', '@OUTDIR@'
+ ]
+)
+_hashtable_func_helper = custom_target('hashtable_func_helper_pxi',
+ output: 'hashtable_func_helper.pxi',
+ input: 'hashtable_func_helper.pxi.in',
+ command: [
+ py, tempita, '@INPUT@', '-o', '@OUTDIR@'
+ ]
+)
+_index_class_helper = custom_target('index_class_helper_pxi',
+ output: 'index_class_helper.pxi',
+ input: 'index_class_helper.pxi.in',
+ command: [
+ py, tempita, '@INPUT@', '-o', '@OUTDIR@'
+ ]
+)
+_sparse_op_helper = custom_target('sparse_op_helper_pxi',
+ output: 'sparse_op_helper.pxi',
+ input: 'sparse_op_helper.pxi.in',
+ command: [
+ py, tempita, '@INPUT@', '-o', '@OUTDIR@'
+ ]
+)
+_intervaltree_helper = custom_target('intervaltree_helper_pxi',
+ output: 'intervaltree.pxi',
+ input: 'intervaltree.pxi.in',
+ command: [
+ py, tempita, '@INPUT@', '-o', '@OUTDIR@'
+ ]
+)
+_khash_primitive_helper_dep = declare_dependency(sources: _khash_primitive_helper)
+# TODO: can this be removed, I wish meson copied .pyx source to the build dir automatically
+# The reason we can't build the pyx files inplace and copy to build dir is because
+# the generated pxi files cannot be written to the source directory.
+# (Meson only supports out of tree builds)
+cython_sources_list = [
+ # List of cython sources e.g. .pyx, .pxd & __init__.py
+ # Does NOT include .pxi.in
+ '__init__.py',
+ 'algos.pxd',
+ 'algos.pyx',
+ 'arrays.pxd',
+ 'dtypes.pxd',
+ 'hashtable.pxd',
+ 'hashtable.pyx',
+ 'index.pyx',
+ 'indexing.pyx',
+ 'internals.pyx',
+ 'interval.pyx',
+ 'join.pyx',
+ 'khash.pxd',
+ 'lib.pxd',
+ 'missing.pxd',
+ 'parsers.pyx',
+ 'sparse.pyx',
+ 'testing.pyx',
+ 'tslib.pyx',
+ 'util.pxd',
+]
+cython_sources = {}
+cython_sources_tgts = []
+
+foreach source: cython_sources_list
+ source_pyx = fs.copyfile(source)
+ cython_sources += {source: source_pyx}
+ cython_sources_tgts += source_pyx
+endforeach
+
+subdir('tslibs')
+
+libs_sources = {
+ # Dict of extension name -> dict of {sources, include_dirs, and deps}
+ # numpy include dir is implicitly included
+ 'algos': {'sources': [cython_sources['algos.pyx'], _algos_common_helper, _algos_take_helper, _khash_primitive_helper],
+ 'include_dirs': klib_include},
+ 'arrays': {'sources': ['arrays.pyx']},
+ 'groupby': {'sources': ['groupby.pyx']},
+ 'hashing': {'sources': ['hashing.pyx']},
+ 'hashtable': {'sources': [cython_sources['hashtable.pyx'], _khash_primitive_helper, _hashtable_class_helper, _hashtable_func_helper],
+ 'include_dirs': klib_include},
+ 'index': {'sources': [cython_sources['index.pyx'], _index_class_helper],
+ 'include_dirs': [klib_include, 'tslibs']},
+ 'indexing': {'sources': ['indexing.pyx']},
+ 'internals': {'sources': ['internals.pyx']},
+ 'interval': {'sources': [cython_sources['interval.pyx'], _intervaltree_helper],
+ 'include_dirs': [klib_include, 'tslibs']},
+ 'join': {'sources': [cython_sources['join.pyx'], _khash_primitive_helper],
+ 'include_dirs': klib_include,
+ 'deps': _khash_primitive_helper_dep},
+ 'lib': {'sources': ['lib.pyx', 'src/parser/tokenizer.c'],
+ 'include_dirs': [klib_include, inc_datetime]},
+ 'missing': {'sources': ['missing.pyx'],
+ 'include_dirs': [inc_datetime]},
+ 'parsers': {'sources': [cython_sources['parsers.pyx'], 'src/parser/tokenizer.c', 'src/parser/io.c'],
+ 'include_dirs': [klib_include, 'src'],
+ 'deps': _khash_primitive_helper_dep},
+ 'json': {'sources': ['src/ujson/python/ujson.c',
+ 'src/ujson/python/objToJSON.c',
+ 'src/ujson/python/date_conversions.c',
+ 'src/ujson/python/JSONtoObj.c',
+ 'src/ujson/lib/ultrajsonenc.c',
+ 'src/ujson/lib/ultrajsondec.c',
+ 'tslibs/src/datetime/np_datetime.c',
+ 'tslibs/src/datetime/np_datetime_strings.c'],
+ 'include_dirs': [inc_datetime, 'src/ujson/lib', 'src/ujson/python']},
+ 'reduction': {'sources': ['reduction.pyx']},
+ 'ops': {'sources': ['ops.pyx']},
+ 'ops_dispatch': {'sources': ['ops_dispatch.pyx']},
+ 'properties': {'sources': ['properties.pyx']},
+ 'reshape': {'sources': ['reshape.pyx']},
+ 'sparse': {'sources': [cython_sources['sparse.pyx'], _sparse_op_helper]},
+ 'tslib': {'sources': ['tslib.pyx', 'tslibs/src/datetime/np_datetime.c'],
+ 'include_dirs': inc_datetime},
+ 'testing': {'sources': ['testing.pyx']},
+ 'writers': {'sources': ['writers.pyx']}
+}
+
+
+foreach ext_name, ext_dict : libs_sources
+ py.extension_module(
+ ext_name,
+ ext_dict.get('sources'),
+ include_directories: [inc_np] + ext_dict.get('include_dirs', ''),
+ dependencies: ext_dict.get('deps', ''),
+ subdir: 'pandas/_libs',
+ install: true
+ )
+endforeach
+
+py.install_sources('__init__.py',
+ pure: false,
+ subdir: 'pandas/_libs')
+
+subdir('window')
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
index 42f84619ddbe5..2cabbe3ff07da 100644
--- a/pandas/_libs/tslibs/__init__.py
+++ b/pandas/_libs/tslibs/__init__.py
@@ -35,7 +35,7 @@
"get_supported_reso",
]
-from pandas._libs.tslibs import dtypes
+from pandas._libs.tslibs import dtypes # pylint: disable=import-self
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.dtypes import (
Resolution,
diff --git a/pandas/_libs/tslibs/meson.build b/pandas/_libs/tslibs/meson.build
new file mode 100644
index 0000000000000..a787beb3dd68c
--- /dev/null
+++ b/pandas/_libs/tslibs/meson.build
@@ -0,0 +1,70 @@
+tslibs_pxd_sources_list = [
+ # List of cython sources e.g. .pyx, .pxd & __init__.py
+ # Does NOT include .pxi.in
+ '__init__.py',
+ 'base.pxd',
+ 'ccalendar.pxd',
+ 'conversion.pxd',
+ 'dtypes.pxd',
+ 'nattype.pxd',
+ 'np_datetime.pxd',
+ 'offsets.pxd',
+ 'parsing.pxd',
+ 'period.pxd',
+ 'timedeltas.pxd',
+ 'timestamps.pxd',
+ 'timezones.pxd',
+ 'tzconversion.pxd',
+ 'util.pxd',
+]
+
+foreach source: tslibs_pxd_sources_list
+ source_pxd = fs.copyfile(source)
+endforeach
+
+tslibs_sources = {
+ # Dict of extension name -> dict of {sources, include_dirs, and deps}
+ # numpy include dir is implicitly included
+ 'base': {'sources': ['base.pyx']},
+ 'ccalendar': {'sources': ['ccalendar.pyx']},
+ 'dtypes': {'sources': ['dtypes.pyx']},
+ 'conversion': {'sources': ['conversion.pyx', 'src/datetime/np_datetime.c'],
+ 'include_dirs': inc_datetime},
+ 'fields': {'sources': ['fields.pyx', 'src/datetime/np_datetime.c']},
+ 'nattype': {'sources': ['nattype.pyx']},
+ 'np_datetime': {'sources': ['np_datetime.pyx', 'src/datetime/np_datetime.c', 'src/datetime/np_datetime_strings.c'],
+ 'include_dirs': inc_datetime},
+ 'offsets': {'sources': ['offsets.pyx', 'src/datetime/np_datetime.c'],
+ 'include_dirs': inc_datetime},
+ 'parsing': {'sources': ['parsing.pyx', '../src/parser/tokenizer.c'],
+ 'include_dirs': klib_include},
+ 'period': {'sources': ['period.pyx', 'src/datetime/np_datetime.c'],
+ 'include_dirs': inc_datetime},
+ 'strptime': {'sources': ['strptime.pyx', 'src/datetime/np_datetime.c'],
+ 'include_dirs': inc_datetime},
+ 'timedeltas': {'sources': ['timedeltas.pyx', 'src/datetime/np_datetime.c'],
+ 'include_dirs': inc_datetime},
+ 'timestamps': {'sources': ['timestamps.pyx', 'src/datetime/np_datetime.c'],
+ 'include_dirs': inc_datetime},
+ 'timezones': {'sources': ['timezones.pyx', 'src/datetime/np_datetime.c'],
+ 'include_dirs': inc_datetime},
+ 'tzconversion': {'sources': ['tzconversion.pyx', 'src/datetime/np_datetime.c'],
+ 'include_dirs': inc_datetime},
+ 'vectorized': {'sources': ['vectorized.pyx', 'src/datetime/np_datetime.c'],
+ 'include_dirs': inc_datetime}
+}
+
+foreach ext_name, ext_dict : tslibs_sources
+ py.extension_module(
+ ext_name,
+ ext_dict.get('sources'),
+ include_directories: [inc_np] + ext_dict.get('include_dirs', ''),
+ dependencies: ext_dict.get('deps', ''),
+ subdir: 'pandas/_libs/tslibs',
+ install: true
+ )
+endforeach
+
+py.install_sources('__init__.py',
+ pure: false,
+ subdir: 'pandas/_libs/tslibs')
diff --git a/pandas/_libs/window/meson.build b/pandas/_libs/window/meson.build
new file mode 100644
index 0000000000000..7d7c34a57c6a6
--- /dev/null
+++ b/pandas/_libs/window/meson.build
@@ -0,0 +1,18 @@
+py.extension_module(
+ 'aggregations',
+ ['aggregations.pyx'],
+ include_directories: [inc_np, '../src'],
+ dependencies: [py_dep],
+ subdir: 'pandas/_libs/window',
+ override_options : ['cython_language=cpp'],
+ install: true
+)
+
+py.extension_module(
+ 'indexers',
+ ['indexers.pyx'],
+ include_directories: [inc_np],
+ dependencies: [py_dep],
+ subdir: 'pandas/_libs/window',
+ install: true
+)
diff --git a/pandas/io/meson.build b/pandas/io/meson.build
new file mode 100644
index 0000000000000..cad41c71d0f91
--- /dev/null
+++ b/pandas/io/meson.build
@@ -0,0 +1,36 @@
+subdirs_list = [
+ # exclude sas, since it contains extension modules
+ # and has its own meson.build
+ 'clipboard',
+ 'excel',
+ 'formats',
+ 'json',
+ 'parsers'
+]
+foreach subdir: subdirs_list
+ install_subdir(subdir, install_dir: py.get_install_dir(pure: false) / 'pandas/io')
+endforeach
+top_level_py_list = [
+ '__init__.py',
+ '_util.py',
+ 'api.py',
+ 'clipboards.py',
+ 'common.py',
+ 'feather_format.py',
+ 'gbq.py',
+ 'html.py',
+ 'orc.py',
+ 'parquet.py',
+ 'pickle.py',
+ 'pytables.py',
+ 'spss.py',
+ 'sql.py',
+ 'stata.py',
+ 'xml.py'
+]
+foreach file: top_level_py_list
+ py.install_sources(file,
+ pure: false,
+ subdir: 'pandas/io')
+endforeach
+subdir('sas')
diff --git a/pandas/io/sas/meson.build b/pandas/io/sas/meson.build
new file mode 100644
index 0000000000000..172db6334734f
--- /dev/null
+++ b/pandas/io/sas/meson.build
@@ -0,0 +1,34 @@
+py.extension_module(
+ '_sas',
+ ['sas.pyx'],
+ include_directories: [inc_np],
+ dependencies: [py_dep],
+ # The file is named sas.pyx but we want the
+ # extension module to be named _sas
+ cython_args: ['--module-name=pandas.io.sas._sas'],
+ subdir: 'pandas/io/sas',
+ install: true
+)
+py.extension_module(
+ '_byteswap',
+ ['byteswap.pyx'],
+ include_directories: [inc_np],
+ dependencies: [py_dep],
+ # The file is named byteswap.pyx but we want the
+ # extension module to be named _byteswap
+ cython_args: ['--module-name=pandas.io.sas._byteswap'],
+ subdir: 'pandas/io/sas',
+ install: true
+)
+top_level_py_list = [
+ '__init__.py',
+ 'sas7bdat.py',
+ 'sas_constants.py',
+ 'sas_xport.py',
+ 'sasreader.py'
+]
+foreach file: top_level_py_list
+ py.install_sources(file,
+ pure: false,
+ subdir: 'pandas/io/sas')
+endforeach
diff --git a/pandas/meson.build b/pandas/meson.build
new file mode 100644
index 0000000000000..8ffa524570815
--- /dev/null
+++ b/pandas/meson.build
@@ -0,0 +1,46 @@
+incdir_numpy = run_command(py,
+ [
+ '-c',
+ 'import os; os.chdir(".."); import numpy; print(numpy.get_include())'
+ ],
+ check: true
+).stdout().strip()
+
+inc_np = include_directories(incdir_numpy)
+klib_include = include_directories('_libs/src/klib')
+inc_datetime = include_directories('_libs/tslibs')
+
+fs.copyfile('__init__.py')
+
+subdir('_libs')
+subdir('io')
+
+subdirs_list = [
+ '_config',
+ '_libs',
+ '_testing',
+ 'api',
+ 'arrays',
+ 'compat',
+ 'core',
+ 'errors',
+ 'plotting',
+ 'tests',
+ 'tseries',
+ 'util'
+]
+foreach subdir: subdirs_list
+ install_subdir(subdir, install_dir: py.get_install_dir(pure: false) / 'pandas')
+endforeach
+top_level_py_list = [
+ '__init__.py',
+ '_typing.py',
+ '_version.py',
+ 'conftest.py',
+ 'testing.py'
+]
+foreach file: top_level_py_list
+ py.install_sources(file,
+ pure: false,
+ subdir: 'pandas')
+endforeach
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index e448e1bce9146..b20a94e7f8944 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -26,7 +26,7 @@ def check(self, namespace, expected, ignored=None):
class TestPDApi(Base):
# these are optionally imported based on testing
# & need to be ignored
- ignored = ["tests", "locale", "conftest"]
+ ignored = ["tests", "locale", "conftest", "_version_meson"]
# top-level sub-packages
public_lib = [
@@ -40,7 +40,7 @@ class TestPDApi(Base):
"io",
"tseries",
]
- private_lib = ["compat", "core", "pandas", "util"]
+ private_lib = ["compat", "core", "pandas", "util", "_built_with_meson"]
# misc
misc = ["IndexSlice", "NaT", "NA"]
@@ -183,8 +183,9 @@ class TestPDApi(Base):
"_is_numpy_dev",
"_testing",
"_typing",
- "_version",
]
+ if not pd._built_with_meson:
+ private_modules.append("_version")
def test_api(self):
diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py
index 315b3003f716b..5c68534c9ff76 100644
--- a/pandas/tests/window/test_pairwise.py
+++ b/pandas/tests/window/test_pairwise.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+from pandas.compat import IS64
+
from pandas import (
DataFrame,
Index,
@@ -294,7 +296,13 @@ def test_no_pairwise_with_self(self, pairwise_frames, pairwise_target_frame, f):
lambda x, y: x.expanding().cov(y, pairwise=True),
lambda x, y: x.expanding().corr(y, pairwise=True),
lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
- lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
+ # TODO: We're missing a flag somewhere in meson
+ pytest.param(
+ lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
+ marks=pytest.mark.xfail(
+ not IS64, reason="Precision issues on 32 bit", strict=False
+ ),
+ ),
lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
lambda x, y: x.ewm(com=3).corr(y, pairwise=True),
],
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 60049d0ac633a..25491c90101e7 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -7,6 +7,7 @@
import pytest
from pandas.compat import (
+ IS64,
is_platform_arm,
is_platform_mac,
is_platform_power,
@@ -1172,7 +1173,7 @@ def test_rolling_sem(frame_or_series):
@pytest.mark.xfail(
- (is_platform_arm() and not is_platform_mac()) or is_platform_power(),
+ (is_platform_arm() and not is_platform_mac()) or is_platform_power() or not IS64,
reason="GH 38921",
)
@pytest.mark.parametrize(
@@ -1691,7 +1692,11 @@ def test_rolling_quantile_interpolation_options(quantile, interpolation, data):
if np.isnan(q1):
assert np.isnan(q2)
else:
- assert q1 == q2
+ if not IS64:
+ # Less precision on 32-bit
+ assert np.allclose([q1], [q2], rtol=1e-07, atol=0)
+ else:
+ assert q1 == q2
def test_invalid_quantile_value():
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 91d518d1ab496..7a1984a8bac54 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -21,10 +21,17 @@ def _get_commit_hash() -> str | None:
Use vendored versioneer code to get git hash, which handles
git worktree correctly.
"""
- from pandas._version import get_versions
+ try:
+ from pandas._version_meson import ( # pyright: ignore [reportMissingImports]
+ __git_version__,
+ )
- versions = get_versions()
- return versions["full-revisionid"]
+ return __git_version__
+ except ImportError:
+ from pandas._version import get_versions
+
+ versions = get_versions()
+ return versions["full-revisionid"]
def _get_sys_info() -> dict[str, JSONSerializable]:
diff --git a/pyproject.toml b/pyproject.toml
index 385c1beb08121..63cba8707d47a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,13 +2,15 @@
# Minimum requirements for the build system to execute.
# See https://github.com/scipy/scipy/pull/12940 for the AIX issue.
requires = [
- "setuptools>=61.0.0",
+ "meson-python",
+ "meson[ninja]",
"wheel",
"Cython>=0.29.32,<3", # Note: sync with setup.py, environment.yml and asv.conf.json
"oldest-supported-numpy>=2022.8.16",
"versioneer[toml]"
]
-# build-backend = "setuptools.build_meta"
+
+build-backend = "mesonpy"
[project]
name = 'pandas'
@@ -147,8 +149,8 @@ test-requires = "hypothesis==6.52.1 pytest>=6.2.5 pytest-xdist pytest-asyncio>=0
test-command = "python {project}/ci/test_wheels.py"
[tool.cibuildwheel.macos]
-archs = "x86_64 universal2"
-test-skip = "*_arm64 *_universal2:arm64"
+archs = "x86_64 arm64"
+test-skip = "*_arm64"
[tool.cibuildwheel.windows]
repair-wheel-command = "python ci/fix_wheels.py {wheel} {dest_dir}"
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 975783a83d1f6..8eb10d1f81949 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -87,4 +87,5 @@ feedparser
pyyaml
requests
sphinx-toggleprompt
-setuptools>=61.0.0
+meson[ninja] @ git+https://github.com/mesonbuild/meson.git@master
+git+https://github.com/mesonbuild/meson-python.git@main
diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py
index 8190104428724..3a914ff610941 100755
--- a/scripts/generate_pip_deps_from_conda.py
+++ b/scripts/generate_pip_deps_from_conda.py
@@ -20,7 +20,7 @@
import toml
import yaml
-EXCLUDE = {"python", "c-compiler", "cxx-compiler"}
+EXCLUDE = {"python", "c-compiler", "cxx-compiler", "sccache"}
REMAP_VERSION = {"tzdata": "2022.1"}
RENAME = {
"pytables": "tables",
diff --git a/scripts/validate_min_versions_in_sync.py b/scripts/validate_min_versions_in_sync.py
index 2186e7c8ff9ef..fc97beb7afbc2 100755
--- a/scripts/validate_min_versions_in_sync.py
+++ b/scripts/validate_min_versions_in_sync.py
@@ -69,11 +69,15 @@ def get_versions_from_ci(content: list[str]) -> tuple[dict[str, str], dict[str,
elif "- pip:" in line:
continue
elif seen_required and line.strip():
- if "==" in line:
- package, version = line.strip().split("==")
-
- else:
- package, version = line.strip().split("=")
+ try:
+ if "==" in line:
+ package, version = line.strip().split("==")
+
+ else:
+ package, version = line.strip().split("=")
+ except ValueError:
+ # pip dependencies, just skip
+ continue
package = package[2:]
if package in EXCLUDE_DEPS:
continue
diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py
index 036ddd40ae137..4ec40751e9d10 100755
--- a/scripts/validate_unwanted_patterns.py
+++ b/scripts/validate_unwanted_patterns.py
@@ -51,6 +51,7 @@
"_testing",
"_test_decorators",
"__version__", # check np.__version__ in compat.numpy.function
+ "__git_version__",
"_arrow_dtype_mapping",
}
| Add Sccache https://github.com/mozilla/sccache to the GHA CI. Saves around 5 minutes per run.
Unfortunately it requires some hacks, namely changing a temporary Windows directory and patching distutils.
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47360 | 2022-06-15T07:27:45Z | 2023-05-15T20:53:53Z | null | 2023-05-15T20:53:54Z |
TYP: base_parser and readers | diff --git a/pandas/io/common.py b/pandas/io/common.py
index 5aecc55bb363a..d692f26ab0576 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -626,6 +626,21 @@ def get_handle(
...
+@overload
+def get_handle(
+ path_or_buf: FilePath | BaseBuffer,
+ mode: str,
+ *,
+ encoding: str | None = ...,
+ compression: CompressionOptions = ...,
+ memory_map: bool = ...,
+ is_text: bool = ...,
+ errors: str | None = ...,
+ storage_options: StorageOptions = ...,
+) -> IOHandles[str] | IOHandles[bytes]:
+ ...
+
+
@doc(compression_options=_shared_docs["compression_options"] % "path_or_buf")
def get_handle(
path_or_buf: FilePath | BaseBuffer,
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py
index 185ef8b59b587..cdd24a1194a45 100644
--- a/pandas/io/parsers/base_parser.py
+++ b/pandas/io/parsers/base_parser.py
@@ -32,6 +32,7 @@
from pandas._typing import (
ArrayLike,
DtypeArg,
+ Scalar,
)
from pandas.errors import (
ParserError,
@@ -89,7 +90,7 @@ def __init__(self, kwds) -> None:
self.index_col = kwds.get("index_col", None)
self.unnamed_cols: set = set()
- self.index_names: list | None = None
+ self.index_names: Sequence[Hashable] | None = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
@@ -365,7 +366,7 @@ def _maybe_make_multi_index_columns(
@final
def _make_index(
- self, data, alldata, columns, indexnamerow=False
+ self, data, alldata, columns, indexnamerow: list[Scalar] | None = None
) -> tuple[Index | None, Sequence[Hashable] | MultiIndex]:
index: Index | None
if not is_index_col(self.index_col) or not self.index_col:
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index 56df5493027c5..867cdf0ee7636 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -636,7 +636,7 @@ def read_csv(
comment: str | None = ...,
encoding: str | None = ...,
encoding_errors: str | None = ...,
- dialect=...,
+ dialect: str | csv.Dialect | None = ...,
error_bad_lines: bool | None = ...,
warn_bad_lines: bool | None = ...,
on_bad_lines=...,
@@ -696,7 +696,7 @@ def read_csv(
comment: str | None = ...,
encoding: str | None = ...,
encoding_errors: str | None = ...,
- dialect=...,
+ dialect: str | csv.Dialect | None = ...,
error_bad_lines: bool | None = ...,
warn_bad_lines: bool | None = ...,
on_bad_lines=...,
@@ -756,7 +756,7 @@ def read_csv(
comment: str | None = ...,
encoding: str | None = ...,
encoding_errors: str | None = ...,
- dialect=...,
+ dialect: str | csv.Dialect | None = ...,
error_bad_lines: bool | None = ...,
warn_bad_lines: bool | None = ...,
on_bad_lines=...,
@@ -816,7 +816,7 @@ def read_csv(
comment: str | None = ...,
encoding: str | None = ...,
encoding_errors: str | None = ...,
- dialect=...,
+ dialect: str | csv.Dialect | None = ...,
error_bad_lines: bool | None = ...,
warn_bad_lines: bool | None = ...,
on_bad_lines=...,
@@ -891,7 +891,7 @@ def read_csv(
comment: str | None = None,
encoding: str | None = None,
encoding_errors: str | None = "strict",
- dialect=None,
+ dialect: str | csv.Dialect | None = None,
# Error Handling
error_bad_lines: bool | None = None,
warn_bad_lines: bool | None = None,
@@ -975,7 +975,7 @@ def read_table(
comment: str | None = ...,
encoding: str | None = ...,
encoding_errors: str | None = ...,
- dialect=...,
+ dialect: str | csv.Dialect | None = ...,
error_bad_lines: bool | None = ...,
warn_bad_lines: bool | None = ...,
on_bad_lines=...,
@@ -1035,7 +1035,7 @@ def read_table(
comment: str | None = ...,
encoding: str | None = ...,
encoding_errors: str | None = ...,
- dialect=...,
+ dialect: str | csv.Dialect | None = ...,
error_bad_lines: bool | None = ...,
warn_bad_lines: bool | None = ...,
on_bad_lines=...,
@@ -1095,7 +1095,7 @@ def read_table(
comment: str | None = ...,
encoding: str | None = ...,
encoding_errors: str | None = ...,
- dialect=...,
+ dialect: str | csv.Dialect | None = ...,
error_bad_lines: bool | None = ...,
warn_bad_lines: bool | None = ...,
on_bad_lines=...,
@@ -1155,7 +1155,7 @@ def read_table(
comment: str | None = ...,
encoding: str | None = ...,
encoding_errors: str | None = ...,
- dialect=...,
+ dialect: str | csv.Dialect | None = ...,
error_bad_lines: bool | None = ...,
warn_bad_lines: bool | None = ...,
on_bad_lines=...,
@@ -1230,7 +1230,7 @@ def read_table(
comment: str | None = None,
encoding: str | None = None,
encoding_errors: str | None = "strict",
- dialect=None,
+ dialect: str | csv.Dialect | None = None,
# Error Handling
error_bad_lines: bool | None = None,
warn_bad_lines: bool | None = None,
@@ -1702,10 +1702,7 @@ def _make_engine(
if engine == "pyarrow":
is_text = False
mode = "rb"
- # error: No overload variant of "get_handle" matches argument types
- # "Union[str, PathLike[str], ReadCsvBuffer[bytes], ReadCsvBuffer[str]]"
- # , "str", "bool", "Any", "Any", "Any", "Any", "Any"
- self.handles = get_handle( # type: ignore[call-overload]
+ self.handles = get_handle(
f,
mode,
encoding=self.options.get("encoding", None),
@@ -1925,7 +1922,7 @@ def _stringify_na_values(na_values):
def _refine_defaults_read(
- dialect: str | csv.Dialect,
+ dialect: str | csv.Dialect | None,
delimiter: str | None | lib.NoDefault,
delim_whitespace: bool,
engine: CSVEngine | None,
diff --git a/pyright_reportGeneralTypeIssues.json b/pyright_reportGeneralTypeIssues.json
index bcbaa5c90f845..f6f9bed1af065 100644
--- a/pyright_reportGeneralTypeIssues.json
+++ b/pyright_reportGeneralTypeIssues.json
@@ -104,8 +104,6 @@
"pandas/io/parsers/arrow_parser_wrapper.py",
"pandas/io/parsers/base_parser.py",
"pandas/io/parsers/c_parser_wrapper.py",
- "pandas/io/parsers/python_parser.py",
- "pandas/io/parsers/readers.py",
"pandas/io/pytables.py",
"pandas/io/sas/sas7bdat.py",
"pandas/io/sas/sasreader.py",
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/47359 | 2022-06-15T01:17:36Z | 2022-06-15T19:36:48Z | 2022-06-15T19:36:48Z | 2022-09-21T15:29:51Z |
ENH/TST: Add TestBaseDtype tests for ArrowExtensionArray | diff --git a/pandas/core/arrays/arrow/dtype.py b/pandas/core/arrays/arrow/dtype.py
index af5b51a39b9c3..346c4e8d19379 100644
--- a/pandas/core/arrays/arrow/dtype.py
+++ b/pandas/core/arrays/arrow/dtype.py
@@ -44,7 +44,7 @@ def name(self) -> str: # type: ignore[override]
"""
A string identifying the data type.
"""
- return str(self.pyarrow_dtype)
+ return f"{str(self.pyarrow_dtype)}[{self.storage}]"
@cache_readonly
def numpy_dtype(self) -> np.dtype:
@@ -92,10 +92,11 @@ def construct_from_string(cls, string: str):
f"'construct_from_string' expects a string, got {type(string)}"
)
if not string.endswith("[pyarrow]"):
- raise TypeError(f"string {string} must end with '[pyarrow]'")
+ raise TypeError(f"'{string}' must end with '[pyarrow]'")
base_type = string.split("[pyarrow]")[0]
- pa_dtype = getattr(pa, base_type, None)
- if pa_dtype is None:
+ try:
+ pa_dtype = pa.type_for_alias(base_type)
+ except ValueError as err:
has_parameters = re.search(r"\[.*\]", base_type)
if has_parameters:
raise NotImplementedError(
@@ -103,9 +104,9 @@ def construct_from_string(cls, string: str):
f"({has_parameters.group()}) in the string is not supported. "
"Please construct an ArrowDtype object with a pyarrow_dtype "
"instance with specific parameters."
- )
- raise TypeError(f"'{base_type}' is not a valid pyarrow data type.")
- return cls(pa_dtype())
+ ) from err
+ raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") from err
+ return cls(pa_dtype)
@property
def _is_numeric(self) -> bool:
diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py
index 03616267c3f86..95cb7045ac68d 100644
--- a/pandas/tests/extension/test_arrow.py
+++ b/pandas/tests/extension/test_arrow.py
@@ -34,7 +34,7 @@
from pandas.core.arrays.arrow.dtype import ArrowDtype # isort:skip
-@pytest.fixture(params=tm.ALL_PYARROW_DTYPES)
+@pytest.fixture(params=tm.ALL_PYARROW_DTYPES, ids=str)
def dtype(request):
return ArrowDtype(pyarrow_dtype=request.param)
@@ -104,14 +104,23 @@ class TestBaseCasting(base.BaseCastingTests):
class TestConstructors(base.BaseConstructorsTests):
- @pytest.mark.xfail(
- reason=(
- "str(dtype) constructs "
- "e.g. in64[pyarrow] like int64 (numpy) "
- "due to StorageExtensionDtype.__str__"
- )
- )
- def test_from_dtype(self, data):
+ def test_from_dtype(self, data, request):
+ pa_dtype = data.dtype.pyarrow_dtype
+ if pa.types.is_timestamp(pa_dtype) and pa_dtype.tz:
+ if pa_version_under2p0:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=f"timestamp data with tz={pa_dtype.tz} "
+ "converted to integer when pyarrow < 2.0",
+ )
+ )
+ else:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=NotImplementedError,
+ reason=f"pyarrow.type_for_alias cannot infer {pa_dtype}",
+ )
+ )
super().test_from_dtype(data)
@@ -197,10 +206,71 @@ def test_loc_iloc_frame_single_dtype(self, request, using_array_manager, data):
super().test_loc_iloc_frame_single_dtype(data)
+class TestBaseDtype(base.BaseDtypeTests):
+ def test_construct_from_string_own_name(self, dtype, request):
+ pa_dtype = dtype.pyarrow_dtype
+ if pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=NotImplementedError,
+ reason=f"pyarrow.type_for_alias cannot infer {pa_dtype}",
+ )
+ )
+ super().test_construct_from_string_own_name(dtype)
+
+ def test_is_dtype_from_name(self, dtype, request):
+ pa_dtype = dtype.pyarrow_dtype
+ if pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=NotImplementedError,
+ reason=f"pyarrow.type_for_alias cannot infer {pa_dtype}",
+ )
+ )
+ super().test_is_dtype_from_name(dtype)
+
+ def test_construct_from_string(self, dtype, request):
+ pa_dtype = dtype.pyarrow_dtype
+ if pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ raises=NotImplementedError,
+ reason=f"pyarrow.type_for_alias cannot infer {pa_dtype}",
+ )
+ )
+ super().test_construct_from_string(dtype)
+
+ def test_construct_from_string_another_type_raises(self, dtype):
+ msg = r"'another_type' must end with '\[pyarrow\]'"
+ with pytest.raises(TypeError, match=msg):
+ type(dtype).construct_from_string("another_type")
+
+ def test_get_common_dtype(self, dtype, request):
+ pa_dtype = dtype.pyarrow_dtype
+ if (
+ pa.types.is_date(pa_dtype)
+ or pa.types.is_time(pa_dtype)
+ or (
+ pa.types.is_timestamp(pa_dtype)
+ and (pa_dtype.unit != "ns" or pa_dtype.tz is not None)
+ )
+ or (pa.types.is_duration(pa_dtype) and pa_dtype.unit != "ns")
+ ):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(
+ f"{pa_dtype} does not have associated numpy "
+ f"dtype findable by find_common_type"
+ )
+ )
+ )
+ super().test_get_common_dtype(dtype)
+
+
class TestBaseIndex(base.BaseIndexTests):
pass
-def test_arrowdtype_construct_from_string_type_with_parameters():
+def test_arrowdtype_construct_from_string_type_with_unsupported_parameters():
with pytest.raises(NotImplementedError, match="Passing pyarrow type"):
- ArrowDtype.construct_from_string("timestamp[s][pyarrow]")
+ ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]")
| - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47358 | 2022-06-15T00:33:29Z | 2022-06-17T23:25:09Z | 2022-06-17T23:25:09Z | 2022-06-18T00:21:34Z |
ENH: Move IndexingError to error/__init__.py per GH27656 | diff --git a/doc/source/reference/testing.rst b/doc/source/reference/testing.rst
index 9d9fb91821ab1..2c419e8df9517 100644
--- a/doc/source/reference/testing.rst
+++ b/doc/source/reference/testing.rst
@@ -30,6 +30,7 @@ Exceptions and warnings
errors.DtypeWarning
errors.DuplicateLabelError
errors.EmptyDataError
+ errors.IndexingError
errors.InvalidIndexError
errors.IntCastingNaNError
errors.MergeError
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 681139fb51272..3a6f6f480837f 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -174,7 +174,7 @@ Other enhancements
- A :class:`errors.PerformanceWarning` is now thrown when using ``string[pyarrow]`` dtype with methods that don't dispatch to ``pyarrow.compute`` methods (:issue:`42613`)
- Added ``numeric_only`` argument to :meth:`Resampler.sum`, :meth:`Resampler.prod`, :meth:`Resampler.min`, :meth:`Resampler.max`, :meth:`Resampler.first`, and :meth:`Resampler.last` (:issue:`46442`)
- ``times`` argument in :class:`.ExponentialMovingWindow` now accepts ``np.timedelta64`` (:issue:`47003`)
-- :class:`DataError`, :class:`SpecificationError`, :class:`SettingWithCopyError`, :class:`SettingWithCopyWarning`, :class:`NumExprClobberingError`, :class:`UndefinedVariableError` are now exposed in ``pandas.errors`` (:issue:`27656`)
+- :class:`DataError`, :class:`SpecificationError`, :class:`SettingWithCopyError`, :class:`SettingWithCopyWarning`, :class:`NumExprClobberingError`, :class:`UndefinedVariableError`, and :class:`IndexingError` are now exposed in ``pandas.errors`` (:issue:`27656`)
- Added ``check_like`` argument to :func:`testing.assert_series_equal` (:issue:`47247`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 20ac0fedc28d1..5a97e1d5a04ea 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -16,6 +16,7 @@
from pandas._libs.lib import item_from_zerodim
from pandas.errors import (
AbstractMethodError,
+ IndexingError,
InvalidIndexError,
)
from pandas.util._decorators import doc
@@ -121,10 +122,6 @@ def __getitem__(self, arg):
IndexSlice = _IndexSlice()
-class IndexingError(Exception):
- pass
-
-
class IndexingMixin:
"""
Mixin for adding .loc/.iloc/.at/.iat to Dataframes and Series.
diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py
index e30e5019c3568..b8d9df16311d7 100644
--- a/pandas/errors/__init__.py
+++ b/pandas/errors/__init__.py
@@ -353,3 +353,24 @@ def __init__(self, name: str, is_local: bool | None = None) -> None:
else:
msg = f"name {base_msg}"
super().__init__(msg)
+
+
+class IndexingError(Exception):
+ """
+ Exception is raised when trying to index and there is a mismatch in dimensions.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({'A': [1, 1, 1]})
+ >>> df.loc[..., ..., 'A'] # doctest: +SKIP
+ ... # IndexingError: indexer may only contain one '...' entry
+ >>> df = pd.DataFrame({'A': [1, 1, 1]})
+ >>> df.loc[1, ..., ...] # doctest: +SKIP
+ ... # IndexingError: Too many indexers
+ >>> df[pd.Series([True], dtype=bool)] # doctest: +SKIP
+ ... # IndexingError: Unalignable boolean Series provided as indexer...
+ >>> s = pd.Series(range(2),
+ ... index = pd.MultiIndex.from_product([["a", "b"], ["c"]]))
+ >>> s.loc["a", "c", "d"] # doctest: +SKIP
+ ... # IndexingError: Too many indexers
+ """
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 3d703b54ca301..35335c54cd41e 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -3,6 +3,7 @@
import numpy as np
import pytest
+from pandas.errors import UndefinedVariableError
import pandas.util._test_decorators as td
import pandas as pd
@@ -495,8 +496,6 @@ def test_query_syntax_error(self):
df.query("i - +", engine=engine, parser=parser)
def test_query_scope(self):
- from pandas.errors import UndefinedVariableError
-
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
@@ -522,8 +521,6 @@ def test_query_scope(self):
df.query("@a > b > c", engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
- from pandas.errors import UndefinedVariableError
-
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
@@ -618,8 +615,6 @@ def test_nested_scope(self):
tm.assert_frame_equal(result, expected)
def test_nested_raises_on_local_self_reference(self):
- from pandas.errors import UndefinedVariableError
-
df = DataFrame(np.random.randn(5, 3))
# can't reference ourself b/c we're a local so @ is necessary
@@ -678,8 +673,6 @@ def test_at_inside_string(self):
tm.assert_frame_equal(result, expected)
def test_query_undefined_local(self):
- from pandas.errors import UndefinedVariableError
-
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
@@ -838,8 +831,6 @@ def test_date_index_query_with_NaT_duplicates(self):
df.query("index < 20130101 < dates3", engine=engine, parser=parser)
def test_nested_scope(self):
- from pandas.errors import UndefinedVariableError
-
engine = self.engine
parser = self.parser
# smoke test
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 19dfe20a3a68d..d4354766a203b 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -1,7 +1,10 @@
import numpy as np
import pytest
-from pandas.errors import PerformanceWarning
+from pandas.errors import (
+ IndexingError,
+ PerformanceWarning,
+)
import pandas as pd
from pandas import (
@@ -11,7 +14,6 @@
Series,
)
import pandas._testing as tm
-from pandas.core.indexing import IndexingError
@pytest.fixture
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 44cdc320148e1..2a116c992231b 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -11,6 +11,7 @@
import pytest
from pandas.compat.numpy import is_numpy_min
+from pandas.errors import IndexingError
import pandas.util._test_decorators as td
from pandas import (
@@ -32,7 +33,6 @@
)
import pandas._testing as tm
from pandas.api.types import is_scalar
-from pandas.core.indexing import IndexingError
from pandas.tests.indexing.common import Base
# We pass through the error message from numpy
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index dcb06f1cf778d..9d10e487e0cc2 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -7,6 +7,8 @@
import numpy as np
import pytest
+from pandas.errors import IndexingError
+
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
@@ -24,7 +26,6 @@
)
import pandas._testing as tm
from pandas.core.api import Float64Index
-from pandas.core.indexing import IndexingError
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 9b1cb69cc33ad..01dfeaef1a5a1 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -12,6 +12,7 @@
import numpy as np
import pytest
+from pandas.errors import IndexingError
import pandas.util._test_decorators as td
import pandas as pd
@@ -36,10 +37,7 @@
import pandas._testing as tm
from pandas.api.types import is_scalar
from pandas.core.api import Float64Index
-from pandas.core.indexing import (
- IndexingError,
- _one_ellipsis_message,
-)
+from pandas.core.indexing import _one_ellipsis_message
from pandas.tests.indexing.common import Base
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index e2a5517066ad9..abee8e75e89d9 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -6,6 +6,8 @@
import numpy as np
import pytest
+from pandas.errors import IndexingError
+
from pandas.core.dtypes.common import is_list_like
from pandas import (
@@ -30,7 +32,6 @@
timedelta_range,
)
import pandas._testing as tm
-from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index d4e3fff5c2f86..7e3d5b43f3014 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -27,6 +27,7 @@
"SettingWithCopyError",
"SettingWithCopyWarning",
"NumExprClobberingError",
+ "IndexingError",
],
)
def test_exception_importable(exc):
| - [x] xref #27656. this GitHub issue is being done in multiple parts
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
I moved the `from pandas.errors import UndefinedVariableError` statement to the top for `pandas/tests/frame/test_query_eval.py` as a follow up from a comment on https://github.com/pandas-dev/pandas/pull/47338.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47357 | 2022-06-14T23:56:44Z | 2022-06-15T19:06:10Z | 2022-06-15T19:06:10Z | 2022-06-15T19:06:25Z |
ENH: Timedelta/Timestamp round support non-nano | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index dfd64dd50f213..1df5468869df5 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1653,15 +1653,14 @@ class Timedelta(_Timedelta):
int64_t result, unit, remainder
ndarray[int64_t] arr
- if self._reso != NPY_FR_ns:
- raise NotImplementedError
-
from pandas._libs.tslibs.offsets import to_offset
- unit = to_offset(freq).nanos
+
+ to_offset(freq).nanos # raises on non-fixed freq
+ unit = delta_to_nanoseconds(to_offset(freq), self._reso)
arr = np.array([self.value], dtype="i8")
result = round_nsint64(arr, mode, unit)[0]
- return Timedelta(result, unit="ns")
+ return Timedelta._from_value_and_reso(result, self._reso)
def round(self, freq):
"""
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 711d10222c133..8a2810825fc1d 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -1645,10 +1645,10 @@ class Timestamp(_Timestamp):
def _round(self, freq, mode, ambiguous='raise', nonexistent='raise'):
cdef:
- int64_t nanos = to_offset(freq).nanos
+ int64_t nanos
- if self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
+ to_offset(freq).nanos # raises on non-fixed freq
+ nanos = delta_to_nanoseconds(to_offset(freq), self._reso)
if self.tz is not None:
value = self.tz_localize(None).value
@@ -1659,7 +1659,7 @@ class Timestamp(_Timestamp):
# Will only ever contain 1 element for timestamp
r = round_nsint64(value, mode, nanos)[0]
- result = Timestamp(r, unit='ns')
+ result = Timestamp._from_value_and_reso(r, self._reso, None)
if self.tz is not None:
result = result.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index b9388c8047df9..4b1d50c23c110 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1933,7 +1933,8 @@ def _round(self, freq, mode, ambiguous, nonexistent):
values = self.view("i8")
values = cast(np.ndarray, values)
- nanos = to_offset(freq).nanos
+ nanos = to_offset(freq).nanos # raises on non-fixed frequencies
+ nanos = delta_to_nanoseconds(to_offset(freq), self._reso)
result_i8 = round_nsint64(values, mode, nanos)
result = self._maybe_mask_results(result_i8, fill_value=iNaT)
result = result.view(self._ndarray.dtype)
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 00072e6724a6b..99b3bbf0186bb 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -661,6 +661,22 @@ def test_round_sanity(self, val, method):
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
+ @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+ def test_round_non_nano(self, unit):
+ td = Timedelta("1 days 02:34:57")._as_unit(unit)
+
+ res = td.round("min")
+ assert res == Timedelta("1 days 02:35:00")
+ assert res._reso == td._reso
+
+ res = td.floor("min")
+ assert res == Timedelta("1 days 02:34:00")
+ assert res._reso == td._reso
+
+ res = td.ceil("min")
+ assert res == Timedelta("1 days 02:35:00")
+ assert res._reso == td._reso
+
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index 4ac50e3f4e034..2146e32a437a9 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -148,27 +148,26 @@ def test_round_minute_freq(self, test_input, freq, expected, rounder):
result = func(freq)
assert result == expected
- def test_ceil(self):
- dt = Timestamp("20130101 09:10:11")
+ @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+ def test_ceil(self, unit):
+ dt = Timestamp("20130101 09:10:11")._as_unit(unit)
result = dt.ceil("D")
expected = Timestamp("20130102")
assert result == expected
+ assert result._reso == dt._reso
- def test_floor(self):
- dt = Timestamp("20130101 09:10:11")
+ @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+ def test_floor(self, unit):
+ dt = Timestamp("20130101 09:10:11")._as_unit(unit)
result = dt.floor("D")
expected = Timestamp("20130101")
assert result == expected
+ assert result._reso == dt._reso
@pytest.mark.parametrize("method", ["ceil", "round", "floor"])
@pytest.mark.parametrize(
"unit",
- [
- "ns",
- pytest.param("us", marks=pytest.mark.xfail(reason="round not implemented")),
- pytest.param("ms", marks=pytest.mark.xfail(reason="round not implemented")),
- pytest.param("s", marks=pytest.mark.xfail(reason="round not implemented")),
- ],
+ ["ns", "us", "ms", "s"],
)
def test_round_dst_border_ambiguous(self, method, unit):
# GH 18946 round near "fall back" DST
@@ -203,12 +202,7 @@ def test_round_dst_border_ambiguous(self, method, unit):
)
@pytest.mark.parametrize(
"unit",
- [
- "ns",
- pytest.param("us", marks=pytest.mark.xfail(reason="round not implemented")),
- pytest.param("ms", marks=pytest.mark.xfail(reason="round not implemented")),
- pytest.param("s", marks=pytest.mark.xfail(reason="round not implemented")),
- ],
+ ["ns", "us", "ms", "s"],
)
def test_round_dst_border_nonexistent(self, method, ts_str, freq, unit):
# GH 23324 round near "spring forward" DST
| - [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47356 | 2022-06-14T23:36:29Z | 2022-06-16T01:14:08Z | 2022-06-16T01:14:08Z | 2022-06-16T01:19:04Z |
ENH: Timestamp.tz_localize support non-nano | diff --git a/pandas/_libs/tslibs/ccalendar.pxd b/pandas/_libs/tslibs/ccalendar.pxd
index 511c9f94a47d8..341f2176f5eb4 100644
--- a/pandas/_libs/tslibs/ccalendar.pxd
+++ b/pandas/_libs/tslibs/ccalendar.pxd
@@ -15,8 +15,6 @@ cpdef int32_t get_day_of_year(int year, int month, int day) nogil
cpdef int get_lastbday(int year, int month) nogil
cpdef int get_firstbday(int year, int month) nogil
-cdef int64_t DAY_NANOS
-cdef int64_t HOUR_NANOS
cdef dict c_MONTH_NUMBERS
cdef int32_t* month_offset
diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx
index ff6f1721ca6c9..00ee15b73f551 100644
--- a/pandas/_libs/tslibs/ccalendar.pyx
+++ b/pandas/_libs/tslibs/ccalendar.pyx
@@ -47,11 +47,6 @@ DAYS_FULL = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
int_to_weekday = {num: name for num, name in enumerate(DAYS)}
weekday_to_int = {int_to_weekday[key]: key for key in int_to_weekday}
-DAY_SECONDS = 86400
-HOUR_SECONDS = 3600
-
-cdef const int64_t DAY_NANOS = DAY_SECONDS * 1_000_000_000
-cdef const int64_t HOUR_NANOS = HOUR_SECONDS * 1_000_000_000
# ----------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 0c05037097839..e32b0fd2bba3f 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -60,7 +60,6 @@ from pandas._libs.tslibs.np_datetime cimport (
from pandas._libs.tslibs.timestamps import Timestamp
from pandas._libs.tslibs.ccalendar cimport (
- c_MONTH_NUMBERS,
dayofweek,
get_day_of_year,
get_days_in_month,
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 2694991b54d4a..711d10222c133 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -84,13 +84,13 @@ from pandas._libs.tslibs.np_datetime cimport (
check_dts_bounds,
cmp_dtstructs,
cmp_scalar,
- dt64_to_dtstruct,
get_datetime64_unit,
get_datetime64_value,
get_unit_from_dtype,
npy_datetimestruct,
+ npy_datetimestruct_to_datetime,
pandas_datetime_to_datetimestruct,
- pydatetime_to_dt64,
+ pydatetime_to_dtstruct,
)
from pandas._libs.tslibs.np_datetime import (
@@ -530,7 +530,8 @@ cdef class _Timestamp(ABCTimestamp):
npy_datetimestruct dts
if own_tz is not None and not is_utc(own_tz):
- val = pydatetime_to_dt64(self, &dts) + self.nanosecond
+ pydatetime_to_dtstruct(self, &dts)
+ val = npy_datetimestruct_to_datetime(self._reso, &dts) + self.nanosecond
else:
val = self.value
return val
@@ -2044,11 +2045,6 @@ default 'raise'
>>> pd.NaT.tz_localize()
NaT
"""
- if self._reso != NPY_FR_ns:
- if tz is None and self.tz is None:
- return self
- raise NotImplementedError(self._reso)
-
if ambiguous == 'infer':
raise ValueError('Cannot infer offset with only one time.')
@@ -2077,7 +2073,7 @@ default 'raise'
"Cannot localize tz-aware Timestamp, use tz_convert for conversions"
)
- out = Timestamp(value, tz=tz)
+ out = type(self)._from_value_and_reso(value, self._reso, tz=tz)
if out is not NaT:
out._set_freq(self._freq) # avoid warning in constructor
return out
@@ -2124,7 +2120,6 @@ default 'raise'
>>> pd.NaT.tz_convert(tz='Asia/Tokyo')
NaT
"""
-
if self.tzinfo is None:
# tz naive, use tz_localize
raise TypeError(
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index 7657633c7215a..dffe02ef15148 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -27,11 +27,10 @@ from numpy cimport (
cnp.import_array()
-from pandas._libs.tslibs.ccalendar cimport (
- DAY_NANOS,
- HOUR_NANOS,
+from pandas._libs.tslibs.dtypes cimport (
+ periods_per_day,
+ periods_per_second,
)
-from pandas._libs.tslibs.dtypes cimport periods_per_second
from pandas._libs.tslibs.nattype cimport NPY_NAT
from pandas._libs.tslibs.np_datetime cimport (
NPY_DATETIMEUNIT,
@@ -153,6 +152,7 @@ cdef int64_t tz_localize_to_utc_single(
return val
elif is_utc(tz) or tz is None:
+ # TODO: test with non-nano
return val
elif is_tzlocal(tz) or is_zoneinfo(tz):
@@ -161,6 +161,15 @@ cdef int64_t tz_localize_to_utc_single(
elif is_fixed_offset(tz):
_, deltas, _ = get_dst_info(tz)
delta = deltas[0]
+ # TODO: de-duplicate with Localizer.__init__
+ if reso != NPY_DATETIMEUNIT.NPY_FR_ns:
+ if reso == NPY_DATETIMEUNIT.NPY_FR_us:
+ delta = delta // 1000
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_ms:
+ delta = delta // 1_000_000
+ elif reso == NPY_DATETIMEUNIT.NPY_FR_s:
+ delta = delta // 1_000_000_000
+
return val - delta
else:
@@ -229,6 +238,7 @@ timedelta-like}
bint fill_nonexist = False
str stamp
Localizer info = Localizer(tz, reso=reso)
+ int64_t pph = periods_per_day(reso) // 24
# Vectorized version of DstTzInfo.localize
if info.use_utc:
@@ -242,7 +252,9 @@ timedelta-like}
if v == NPY_NAT:
result[i] = NPY_NAT
else:
- result[i] = v - _tz_localize_using_tzinfo_api(v, tz, to_utc=True, reso=reso)
+ result[i] = v - _tz_localize_using_tzinfo_api(
+ v, tz, to_utc=True, reso=reso
+ )
return result.base # to return underlying ndarray
elif info.use_fixed:
@@ -283,7 +295,7 @@ timedelta-like}
shift_backward = True
elif PyDelta_Check(nonexistent):
from .timedeltas import delta_to_nanoseconds
- shift_delta = delta_to_nanoseconds(nonexistent)
+ shift_delta = delta_to_nanoseconds(nonexistent, reso=reso)
elif nonexistent not in ('raise', None):
msg = ("nonexistent must be one of {'NaT', 'raise', 'shift_forward', "
"shift_backwards} or a timedelta object")
@@ -291,12 +303,14 @@ timedelta-like}
# Determine whether each date lies left of the DST transition (store in
# result_a) or right of the DST transition (store in result_b)
- result_a, result_b =_get_utc_bounds(vals, info.tdata, info.ntrans, info.deltas)
+ result_a, result_b =_get_utc_bounds(
+ vals, info.tdata, info.ntrans, info.deltas, reso=reso
+ )
# silence false-positive compiler warning
dst_hours = np.empty(0, dtype=np.int64)
if infer_dst:
- dst_hours = _get_dst_hours(vals, result_a, result_b)
+ dst_hours = _get_dst_hours(vals, result_a, result_b, reso=reso)
# Pre-compute delta_idx_offset that will be used if we go down non-existent
# paths.
@@ -316,12 +330,15 @@ timedelta-like}
left = result_a[i]
right = result_b[i]
if val == NPY_NAT:
+ # TODO: test with non-nano
result[i] = val
elif left != NPY_NAT and right != NPY_NAT:
if left == right:
+ # TODO: test with non-nano
result[i] = left
else:
if infer_dst and dst_hours[i] != NPY_NAT:
+ # TODO: test with non-nano
result[i] = dst_hours[i]
elif is_dst:
if ambiguous_array[i]:
@@ -329,9 +346,10 @@ timedelta-like}
else:
result[i] = right
elif fill:
+ # TODO: test with non-nano; parametrize test_dt_round_tz_ambiguous
result[i] = NPY_NAT
else:
- stamp = _render_tstamp(val)
+ stamp = _render_tstamp(val, reso=reso)
raise pytz.AmbiguousTimeError(
f"Cannot infer dst time from {stamp}, try using the "
"'ambiguous' argument"
@@ -339,23 +357,24 @@ timedelta-like}
elif left != NPY_NAT:
result[i] = left
elif right != NPY_NAT:
+ # TODO: test with non-nano
result[i] = right
else:
# Handle nonexistent times
if shift_forward or shift_backward or shift_delta != 0:
# Shift the nonexistent time to the closest existing time
- remaining_mins = val % HOUR_NANOS
+ remaining_mins = val % pph
if shift_delta != 0:
# Validate that we don't relocalize on another nonexistent
# time
- if -1 < shift_delta + remaining_mins < HOUR_NANOS:
+ if -1 < shift_delta + remaining_mins < pph:
raise ValueError(
"The provided timedelta will relocalize on a "
f"nonexistent time: {nonexistent}"
)
new_local = val + shift_delta
elif shift_forward:
- new_local = val + (HOUR_NANOS - remaining_mins)
+ new_local = val + (pph - remaining_mins)
else:
# Subtract 1 since the beginning hour is _inclusive_ of
# nonexistent times
@@ -368,7 +387,7 @@ timedelta-like}
elif fill_nonexist:
result[i] = NPY_NAT
else:
- stamp = _render_tstamp(val)
+ stamp = _render_tstamp(val, reso=reso)
raise pytz.NonExistentTimeError(stamp)
return result.base # .base to get underlying ndarray
@@ -404,10 +423,11 @@ cdef inline Py_ssize_t bisect_right_i8(int64_t *data,
return left
-cdef inline str _render_tstamp(int64_t val):
+cdef inline str _render_tstamp(int64_t val, NPY_DATETIMEUNIT reso):
""" Helper function to render exception messages"""
from pandas._libs.tslibs.timestamps import Timestamp
- return str(Timestamp(val))
+ ts = Timestamp._from_value_and_reso(val, reso, None)
+ return str(ts)
cdef _get_utc_bounds(
@@ -415,6 +435,7 @@ cdef _get_utc_bounds(
int64_t* tdata,
Py_ssize_t ntrans,
const int64_t[::1] deltas,
+ NPY_DATETIMEUNIT reso,
):
# Determine whether each date lies left of the DST transition (store in
# result_a) or right of the DST transition (store in result_b)
@@ -424,6 +445,7 @@ cdef _get_utc_bounds(
Py_ssize_t i, n = vals.size
int64_t val, v_left, v_right
Py_ssize_t isl, isr, pos_left, pos_right
+ int64_t ppd = periods_per_day(reso)
result_a = cnp.PyArray_EMPTY(vals.ndim, vals.shape, cnp.NPY_INT64, 0)
result_b = cnp.PyArray_EMPTY(vals.ndim, vals.shape, cnp.NPY_INT64, 0)
@@ -438,8 +460,8 @@ cdef _get_utc_bounds(
if val == NPY_NAT:
continue
- # TODO: be careful of overflow in val-DAY_NANOS
- isl = bisect_right_i8(tdata, val - DAY_NANOS, ntrans) - 1
+ # TODO: be careful of overflow in val-ppd
+ isl = bisect_right_i8(tdata, val - ppd, ntrans) - 1
if isl < 0:
isl = 0
@@ -449,8 +471,8 @@ cdef _get_utc_bounds(
if v_left + deltas[pos_left] == val:
result_a[i] = v_left
- # TODO: be careful of overflow in val+DAY_NANOS
- isr = bisect_right_i8(tdata, val + DAY_NANOS, ntrans) - 1
+ # TODO: be careful of overflow in val+ppd
+ isr = bisect_right_i8(tdata, val + ppd, ntrans) - 1
if isr < 0:
isr = 0
@@ -465,10 +487,11 @@ cdef _get_utc_bounds(
@cython.boundscheck(False)
cdef ndarray[int64_t] _get_dst_hours(
- # vals only needed here to potential render an exception message
+ # vals, reso only needed here to potential render an exception message
const int64_t[:] vals,
ndarray[int64_t] result_a,
ndarray[int64_t] result_b,
+ NPY_DATETIMEUNIT reso,
):
cdef:
Py_ssize_t i, n = vals.shape[0]
@@ -497,7 +520,7 @@ cdef ndarray[int64_t] _get_dst_hours(
if trans_idx.size == 1:
# TODO: not reached in tests 2022-05-02; possible?
- stamp = _render_tstamp(vals[trans_idx[0]])
+ stamp = _render_tstamp(vals[trans_idx[0]], reso=reso)
raise pytz.AmbiguousTimeError(
f"Cannot infer dst time from {stamp} as there "
"are no repeated times"
@@ -519,7 +542,7 @@ cdef ndarray[int64_t] _get_dst_hours(
delta = np.diff(result_a[grp])
if grp.size == 1 or np.all(delta > 0):
# TODO: not reached in tests 2022-05-02; possible?
- stamp = _render_tstamp(vals[grp[0]])
+ stamp = _render_tstamp(vals[grp[0]], reso=reso)
raise pytz.AmbiguousTimeError(stamp)
# Find the index for the switch and pull from a for dst and b
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index 75efe6d4113cf..6201c94ecc155 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -19,7 +19,6 @@ cnp.import_array()
from .dtypes import Resolution
-from .ccalendar cimport DAY_NANOS
from .dtypes cimport (
c_Resolution,
periods_per_day,
diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py
index a7f7393fb3263..874575fa9ad4c 100644
--- a/pandas/tests/scalar/timestamp/test_timezones.py
+++ b/pandas/tests/scalar/timestamp/test_timezones.py
@@ -20,6 +20,7 @@
)
from pandas._libs.tslibs import timezones
+from pandas._libs.tslibs.dtypes import NpyDatetimeUnit
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
@@ -57,10 +58,11 @@ def test_tz_localize_pushes_out_of_bounds(self):
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.max.tz_localize("US/Pacific")
- def test_tz_localize_ambiguous_bool(self):
+ @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+ def test_tz_localize_ambiguous_bool(self, unit):
# make sure that we are correctly accepting bool values as ambiguous
# GH#14402
- ts = Timestamp("2015-11-01 01:00:03")
+ ts = Timestamp("2015-11-01 01:00:03")._as_unit(unit)
expected0 = Timestamp("2015-11-01 01:00:03-0500", tz="US/Central")
expected1 = Timestamp("2015-11-01 01:00:03-0600", tz="US/Central")
@@ -70,9 +72,11 @@ def test_tz_localize_ambiguous_bool(self):
result = ts.tz_localize("US/Central", ambiguous=True)
assert result == expected0
+ assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
result = ts.tz_localize("US/Central", ambiguous=False)
assert result == expected1
+ assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
def test_tz_localize_ambiguous(self):
ts = Timestamp("2014-11-02 01:00")
@@ -245,17 +249,28 @@ def test_timestamp_tz_localize(self, tz):
],
)
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
+ @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
def test_timestamp_tz_localize_nonexistent_shift(
- self, start_ts, tz, end_ts, shift, tz_type
+ self, start_ts, tz, end_ts, shift, tz_type, unit
):
# GH 8917, 24466
tz = tz_type + tz
if isinstance(shift, str):
shift = "shift_" + shift
- ts = Timestamp(start_ts)
+ ts = Timestamp(start_ts)._as_unit(unit)
result = ts.tz_localize(tz, nonexistent=shift)
expected = Timestamp(end_ts).tz_localize(tz)
- assert result == expected
+
+ if unit == "us":
+ assert result == expected.replace(nanosecond=0)
+ elif unit == "ms":
+ micros = expected.microsecond - expected.microsecond % 1000
+ assert result == expected.replace(microsecond=micros, nanosecond=0)
+ elif unit == "s":
+ assert result == expected.replace(microsecond=0, nanosecond=0)
+ else:
+ assert result == expected
+ assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
@pytest.mark.parametrize("offset", [-1, 1])
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
@@ -268,16 +283,18 @@ def test_timestamp_tz_localize_nonexistent_shift_invalid(self, offset, tz_type):
ts.tz_localize(tz, nonexistent=timedelta(seconds=offset))
@pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"])
- def test_timestamp_tz_localize_nonexistent_NaT(self, tz):
+ @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+ def test_timestamp_tz_localize_nonexistent_NaT(self, tz, unit):
# GH 8917
- ts = Timestamp("2015-03-29 02:20:00")
+ ts = Timestamp("2015-03-29 02:20:00")._as_unit(unit)
result = ts.tz_localize(tz, nonexistent="NaT")
assert result is NaT
@pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"])
- def test_timestamp_tz_localize_nonexistent_raise(self, tz):
+ @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+ def test_timestamp_tz_localize_nonexistent_raise(self, tz, unit):
# GH 8917
- ts = Timestamp("2015-03-29 02:20:00")
+ ts = Timestamp("2015-03-29 02:20:00")._as_unit(unit)
msg = "2015-03-29 02:20:00"
with pytest.raises(pytz.NonExistentTimeError, match=msg):
ts.tz_localize(tz, nonexistent="raise")
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index 35065a3c9877c..4ac50e3f4e034 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -161,18 +161,30 @@ def test_floor(self):
assert result == expected
@pytest.mark.parametrize("method", ["ceil", "round", "floor"])
- def test_round_dst_border_ambiguous(self, method):
+ @pytest.mark.parametrize(
+ "unit",
+ [
+ "ns",
+ pytest.param("us", marks=pytest.mark.xfail(reason="round not implemented")),
+ pytest.param("ms", marks=pytest.mark.xfail(reason="round not implemented")),
+ pytest.param("s", marks=pytest.mark.xfail(reason="round not implemented")),
+ ],
+ )
+ def test_round_dst_border_ambiguous(self, method, unit):
# GH 18946 round near "fall back" DST
ts = Timestamp("2017-10-29 00:00:00", tz="UTC").tz_convert("Europe/Madrid")
+ ts = ts._as_unit(unit)
#
result = getattr(ts, method)("H", ambiguous=True)
assert result == ts
+ assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
result = getattr(ts, method)("H", ambiguous=False)
expected = Timestamp("2017-10-29 01:00:00", tz="UTC").tz_convert(
"Europe/Madrid"
)
assert result == expected
+ assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
result = getattr(ts, method)("H", ambiguous="NaT")
assert result is NaT
@@ -189,12 +201,22 @@ def test_round_dst_border_ambiguous(self, method):
["floor", "2018-03-11 03:01:00-0500", "2H"],
],
)
- def test_round_dst_border_nonexistent(self, method, ts_str, freq):
+ @pytest.mark.parametrize(
+ "unit",
+ [
+ "ns",
+ pytest.param("us", marks=pytest.mark.xfail(reason="round not implemented")),
+ pytest.param("ms", marks=pytest.mark.xfail(reason="round not implemented")),
+ pytest.param("s", marks=pytest.mark.xfail(reason="round not implemented")),
+ ],
+ )
+ def test_round_dst_border_nonexistent(self, method, ts_str, freq, unit):
# GH 23324 round near "spring forward" DST
- ts = Timestamp(ts_str, tz="America/Chicago")
+ ts = Timestamp(ts_str, tz="America/Chicago")._as_unit(unit)
result = getattr(ts, method)(freq, nonexistent="shift_forward")
expected = Timestamp("2018-03-11 03:00:00", tz="America/Chicago")
assert result == expected
+ assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
result = getattr(ts, method)(freq, nonexistent="NaT")
assert result is NaT
@@ -466,35 +488,41 @@ def test_replace_across_dst(self, tz, normalize):
ts2b = normalize(ts2)
assert ts2 == ts2b
- def test_replace_dst_border(self):
+ @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+ def test_replace_dst_border(self, unit):
# Gh 7825
- t = Timestamp("2013-11-3", tz="America/Chicago")
+ t = Timestamp("2013-11-3", tz="America/Chicago")._as_unit(unit)
result = t.replace(hour=3)
expected = Timestamp("2013-11-3 03:00:00", tz="America/Chicago")
assert result == expected
+ assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
@pytest.mark.parametrize("fold", [0, 1])
@pytest.mark.parametrize("tz", ["dateutil/Europe/London", "Europe/London"])
- def test_replace_dst_fold(self, fold, tz):
+ @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+ def test_replace_dst_fold(self, fold, tz, unit):
# GH 25017
d = datetime(2019, 10, 27, 2, 30)
- ts = Timestamp(d, tz=tz)
+ ts = Timestamp(d, tz=tz)._as_unit(unit)
result = ts.replace(hour=1, fold=fold)
expected = Timestamp(datetime(2019, 10, 27, 1, 30)).tz_localize(
tz, ambiguous=not fold
)
assert result == expected
+ assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
# --------------------------------------------------------------
# Timestamp.normalize
@pytest.mark.parametrize("arg", ["2013-11-30", "2013-11-30 12:00:00"])
- def test_normalize(self, tz_naive_fixture, arg):
+ @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+ def test_normalize(self, tz_naive_fixture, arg, unit):
tz = tz_naive_fixture
- ts = Timestamp(arg, tz=tz)
+ ts = Timestamp(arg, tz=tz)._as_unit(unit)
result = ts.normalize()
expected = Timestamp("2013-11-30", tz=tz)
assert result == expected
+ assert result._reso == getattr(NpyDatetimeUnit, f"NPY_FR_{unit}").value
def test_normalize_pre_epoch_dates(self):
# GH: 36294
| Sits on top of #47346
Not sure why the .github files are in the diff, they don't show up in the diff locally | https://api.github.com/repos/pandas-dev/pandas/pulls/47355 | 2022-06-14T23:25:00Z | 2022-06-15T22:21:49Z | 2022-06-15T22:21:49Z | 2022-06-15T22:51:48Z |
TST: GH 27185 Test to check df with timedelta & Int NA sums correctly | diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index 9d33e52709bd2..a68595546eb83 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -198,6 +198,18 @@ def test_numpy_reduction_with_tz_aware_dtype(self, tz_aware_fixture, func):
result = getattr(np, func)(expected, expected)
tm.assert_series_equal(result, expected)
+ def test_nan_int_timedelta_sum(self):
+ # GH 27185
+ df = DataFrame(
+ {
+ "A": Series([1, 2, NaT], dtype="timedelta64[ns]"),
+ "B": Series([1, 2, np.nan], dtype="Int64"),
+ }
+ )
+ expected = Series({"A": Timedelta(3), "B": 3})
+ result = df.sum()
+ tm.assert_series_equal(result, expected)
+
class TestIndexReductions:
# Note: the name TestIndexReductions indicates these tests
| - [x] closes #27185
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [NA] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [NA] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
Open to feedback on how to improve the PR - not sure if this is exactly what the original issue was looking for.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47354 | 2022-06-14T20:22:04Z | 2022-06-23T21:40:30Z | 2022-06-23T21:40:29Z | 2022-06-23T21:40:37Z |
Bug: GroupBy raising error with None in first level of MultiIndex | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index a76b682f135db..ec2c395ce1393 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -927,6 +927,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrameGroupBy.cumsum` with ``skipna=False`` giving incorrect results (:issue:`46216`)
- Bug in :meth:`.GroupBy.cumsum` with ``timedelta64[ns]`` dtype failing to recognize ``NaT`` as a null value (:issue:`46216`)
- Bug in :meth:`.GroupBy.cummin` and :meth:`.GroupBy.cummax` with nullable dtypes incorrectly altering the original data in place (:issue:`46220`)
+- Bug in :meth:`DataFrame.groupby` raising error when ``None`` is in first level of :class:`MultiIndex` (:issue:`47348`)
- Bug in :meth:`.GroupBy.cummax` with ``int64`` dtype with leading value being the smallest possible int64 (:issue:`46382`)
- Bug in :meth:`.GroupBy.max` with empty groups and ``uint64`` dtype incorrectly raising ``RuntimeError`` (:issue:`46408`)
- Bug in :meth:`.GroupBy.apply` would fail when ``func`` was a string and args or kwargs were supplied (:issue:`46479`)
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index e524021f3a1b8..0c9b64dc8cec3 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -831,7 +831,11 @@ def get_grouper(
# if the actual grouper should be obj[key]
def is_in_axis(key) -> bool:
+
if not _is_label_like(key):
+ if obj.ndim == 1:
+ return False
+
# items -> .columns for DataFrame, .index for Series
items = obj.axes[-1]
try:
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 97e616ef14cef..920b869ef799b 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2776,3 +2776,22 @@ def test_by_column_values_with_same_starting_value():
).set_index("Name")
tm.assert_frame_equal(result, expected_result)
+
+
+def test_groupby_none_in_first_mi_level():
+ # GH#47348
+ arr = [[None, 1, 0, 1], [2, 3, 2, 3]]
+ ser = Series(1, index=MultiIndex.from_arrays(arr, names=["a", "b"]))
+ result = ser.groupby(level=[0, 1]).sum()
+ expected = Series(
+ [1, 2], MultiIndex.from_tuples([(0.0, 2), (1.0, 3)], names=["a", "b"])
+ )
+ tm.assert_series_equal(result, expected)
+
+
+def test_groupby_none_column_name():
+ # GH#47348
+ df = DataFrame({None: [1, 1, 2, 2], "b": [1, 1, 2, 3], "c": [4, 5, 6, 7]})
+ result = df.groupby(by=[None]).sum()
+ expected = DataFrame({"b": [2, 5], "c": [9, 13]}, index=Index([1, 2], name=None))
+ tm.assert_frame_equal(result, expected)
| - [x] closes #47348 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
@rhshadrach Would you mind having a look? That ``key=None`` matches seems to be an accident to me. | https://api.github.com/repos/pandas-dev/pandas/pulls/47351 | 2022-06-14T18:20:28Z | 2022-06-27T22:55:31Z | 2022-06-27T22:55:31Z | 2022-06-28T14:13:19Z |
REGR: MultiIndex.dtypes has regular Index instead of MultiIndex index | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index ca8b8ca15ec47..65c53cc450514 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :meth:`DataFrame.replace` when the replacement value was explicitly ``None`` when passed in a dictionary to ``to_replace`` also casting other columns to object dtype even when there were no values to replace (:issue:`46634`)
+- Fixed regression in representation of ``dtypes`` attribute of :class:`MultiIndex` (:issue:`46900`)
- Fixed regression when setting values with :meth:`DataFrame.loc` updating :class:`RangeIndex` when index was set as new column and column was updated afterwards (:issue:`47128`)
- Fixed regression in :meth:`DataFrame.nsmallest` led to wrong results when ``np.nan`` in the sorting column (:issue:`46589`)
- Fixed regression in :func:`read_fwf` raising ``ValueError`` when ``widths`` was specified with ``usecols`` (:issue:`46580`)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 351cae6816ace..ef0a2d5bf1b0f 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -763,7 +763,7 @@ def dtypes(self) -> Series:
from pandas import Series
names = com.fill_missing_names([level.name for level in self.levels])
- return Series([level.dtype for level in self.levels], index=names)
+ return Series([level.dtype for level in self.levels], index=Index(names))
def __len__(self) -> int:
return len(self.codes[0])
diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py
index 63b0bd235e57c..7fad59fc6654c 100644
--- a/pandas/tests/indexes/multi/test_constructors.py
+++ b/pandas/tests/indexes/multi/test_constructors.py
@@ -827,3 +827,13 @@ def test_multiindex_inference_consistency():
mi = MultiIndex.from_tuples([(x,) for x in arr])
lev = mi.levels[0]
assert lev.dtype == object
+
+
+def test_dtype_representation():
+ # GH#46900
+ pmidx = MultiIndex.from_arrays([[1], ["a"]], names=[("a", "b"), ("c", "d")])
+ result = pmidx.dtypes
+ expected = Series(
+ ["int64", "object"], index=MultiIndex.from_tuples([("a", "b"), ("c", "d")])
+ )
+ tm.assert_series_equal(result, expected)
| - [x] closes #46900 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47349 | 2022-06-14T17:39:03Z | 2022-06-15T13:20:42Z | 2022-06-15T13:20:42Z | 2022-06-15T21:51:01Z |
REGR: Regression in to_csv for ea dtype categorical | diff --git a/doc/source/whatsnew/v1.4.3.rst b/doc/source/whatsnew/v1.4.3.rst
index 05ace0509e0b7..a4d81533df23d 100644
--- a/doc/source/whatsnew/v1.4.3.rst
+++ b/doc/source/whatsnew/v1.4.3.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :meth:`DataFrame.replace` when the replacement value was explicitly ``None`` when passed in a dictionary to ``to_replace`` also casting other columns to object dtype even when there were no values to replace (:issue:`46634`)
+- Fixed regression in :meth:`DataFrame.to_csv` raising error when :class:`DataFrame` contains extension dtype categorical column (:issue:`46297`, :issue:`46812`)
- Fixed regression in representation of ``dtypes`` attribute of :class:`MultiIndex` (:issue:`46900`)
- Fixed regression when setting values with :meth:`DataFrame.loc` updating :class:`RangeIndex` when index was set as new column and column was updated afterwards (:issue:`47128`)
- Fixed regression in :meth:`DataFrame.nsmallest` led to wrong results when ``np.nan`` in the sorting column (:issue:`46589`)
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 421fac4ea767b..49efecec7472e 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -2262,7 +2262,7 @@ def to_native_types(
**kwargs,
) -> np.ndarray:
"""convert to our native types format"""
- if isinstance(values, Categorical):
+ if isinstance(values, Categorical) and values.categories.dtype.kind in "Mm":
# GH#40754 Convert categorical datetimes to datetime array
values = algos.take_nd(
values.categories._values,
diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py
index 01009d6df3920..f1ecad2f711bc 100644
--- a/pandas/tests/frame/methods/test_to_csv.py
+++ b/pandas/tests/frame/methods/test_to_csv.py
@@ -1285,3 +1285,32 @@ def test_to_csv_na_quoting(self):
)
expected = '""\n""\n'
assert result == expected
+
+ def test_to_csv_categorical_and_ea(self):
+ # GH#46812
+ df = DataFrame({"a": "x", "b": [1, pd.NA]})
+ df["b"] = df["b"].astype("Int16")
+ df["b"] = df["b"].astype("category")
+ result = df.to_csv()
+ expected_rows = [",a,b", "0,x,1", "1,x,"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert result == expected
+
+ def test_to_csv_categorical_and_interval(self):
+ # GH#46297
+ df = DataFrame(
+ {
+ "a": [
+ pd.Interval(
+ Timestamp("2020-01-01"),
+ Timestamp("2020-01-02"),
+ inclusive="both",
+ )
+ ]
+ }
+ )
+ df["a"] = df["a"].astype("category")
+ result = df.to_csv()
+ expected_rows = [",a", '0,"[2020-01-01, 2020-01-02]"']
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert result == expected
| - [x] closes #46812 (Replace xxxx with the Github issue number)
- [x] closes #46297
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/47347 | 2022-06-14T17:15:55Z | 2022-06-16T13:45:26Z | 2022-06-16T13:45:25Z | 2022-06-16T19:45:39Z |
ENH: Timestamp.__sub__(datetimelike) support non-nano | diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 1d21f602fac05..da2377a9b085c 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -400,18 +400,19 @@ cdef class _Timestamp(ABCTimestamp):
new_value = int(self.value) + int(nanos)
try:
- result = type(self)._from_value_and_reso(new_value, reso=self._reso, tz=self.tzinfo)
+ result = type(self)._from_value_and_reso(
+ new_value, reso=self._reso, tz=self.tzinfo
+ )
except OverflowError as err:
# TODO: don't hard-code nanosecond here
- raise OutOfBoundsDatetime(f"Out of bounds nanosecond timestamp: {new_value}") from err
+ raise OutOfBoundsDatetime(
+ f"Out of bounds nanosecond timestamp: {new_value}"
+ ) from err
if result is not NaT:
result._set_freq(self._freq) # avoid warning in constructor
return result
- elif isinstance(self, _Timestamp) and self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
-
elif is_integer_object(other):
raise integer_op_not_supported(self)
@@ -446,9 +447,6 @@ cdef class _Timestamp(ABCTimestamp):
neg_other = -other
return self + neg_other
- elif isinstance(self, _Timestamp) and self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
-
elif is_array(other):
if other.dtype.kind in ['i', 'u']:
raise integer_op_not_supported(self)
@@ -479,10 +477,25 @@ cdef class _Timestamp(ABCTimestamp):
"Cannot subtract tz-naive and tz-aware datetime-like objects."
)
+ # We allow silent casting to the lower resolution if and only
+ # if it is lossless.
+ try:
+ if self._reso < other._reso:
+ other = (<_Timestamp>other)._as_reso(self._reso, round_ok=False)
+ elif self._reso > other._reso:
+ self = (<_Timestamp>self)._as_reso(other._reso, round_ok=False)
+ except ValueError as err:
+ raise ValueError(
+ "Timestamp subtraction with mismatched resolutions is not "
+ "allowed when casting to the lower resolution would require "
+ "lossy rounding."
+ ) from err
+
# scalar Timestamp/datetime - Timestamp/datetime -> yields a
# Timedelta
try:
- return Timedelta(self.value - other.value)
+ res_value = self.value - other.value
+ return Timedelta._from_value_and_reso(res_value, self._reso)
except (OverflowError, OutOfBoundsDatetime, OutOfBoundsTimedelta) as err:
if isinstance(other, _Timestamp):
if both_timestamps:
@@ -503,9 +516,6 @@ cdef class _Timestamp(ABCTimestamp):
return NotImplemented
def __rsub__(self, other):
- if self._reso != NPY_FR_ns:
- raise NotImplementedError(self._reso)
-
if PyDateTime_Check(other):
try:
return type(self)(other) - self
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 79c8a300b34e3..f7f19e49d0bac 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -22,6 +22,7 @@
from pandas._libs.tslibs.timezones import (
dateutil_gettz as gettz,
get_timezone,
+ maybe_get_tz,
tz_compare,
)
from pandas.errors import OutOfBoundsDatetime
@@ -712,6 +713,11 @@ def dt64(self, reso):
def ts(self, dt64):
return Timestamp._from_dt64(dt64)
+ @pytest.fixture
+ def ts_tz(self, ts, tz_aware_fixture):
+ tz = maybe_get_tz(tz_aware_fixture)
+ return Timestamp._from_value_and_reso(ts.value, ts._reso, tz)
+
def test_non_nano_construction(self, dt64, ts, reso):
assert ts.value == dt64.view("i8")
@@ -893,6 +899,70 @@ def test_addsub_timedeltalike_non_nano(self, dt64, ts, td):
assert result._reso == ts._reso
assert result == expected
+ @pytest.mark.xfail(reason="tz_localize not yet implemented for non-nano")
+ def test_addsub_offset(self, ts_tz):
+ # specifically non-Tick offset
+ off = offsets.YearBegin(1)
+ result = ts_tz + off
+
+ assert isinstance(result, Timestamp)
+ assert result._reso == ts_tz._reso
+ # If ts_tz is ever on the last day of the year, the year would be
+ # incremented by one
+ assert result.year == ts_tz.year
+ assert result.day == 31
+ assert result.month == 12
+ assert tz_compare(result.tz, ts_tz.tz)
+
+ result = ts_tz - off
+
+ assert isinstance(result, Timestamp)
+ assert result._reso == ts_tz._reso
+ assert result.year == ts_tz.year - 1
+ assert result.day == 31
+ assert result.month == 12
+ assert tz_compare(result.tz, ts_tz.tz)
+
+ def test_sub_datetimelike_mismatched_reso(self, ts_tz):
+ # case with non-lossy rounding
+ ts = ts_tz
+
+ # choose a unit for `other` that doesn't match ts_tz's;
+ # this construction ensures we get cases with other._reso < ts._reso
+ # and cases with other._reso > ts._reso
+ unit = {
+ NpyDatetimeUnit.NPY_FR_us.value: "ms",
+ NpyDatetimeUnit.NPY_FR_ms.value: "s",
+ NpyDatetimeUnit.NPY_FR_s.value: "us",
+ }[ts._reso]
+ other = ts._as_unit(unit)
+ assert other._reso != ts._reso
+
+ result = ts - other
+ assert isinstance(result, Timedelta)
+ assert result.value == 0
+ assert result._reso == min(ts._reso, other._reso)
+
+ result = other - ts
+ assert isinstance(result, Timedelta)
+ assert result.value == 0
+ assert result._reso == min(ts._reso, other._reso)
+
+ msg = "Timestamp subtraction with mismatched resolutions"
+ if ts._reso < other._reso:
+ # Case where rounding is lossy
+ other2 = other + Timedelta._from_value_and_reso(1, other._reso)
+ with pytest.raises(ValueError, match=msg):
+ ts - other2
+ with pytest.raises(ValueError, match=msg):
+ other2 - ts
+ else:
+ ts2 = ts + Timedelta._from_value_and_reso(1, ts._reso)
+ with pytest.raises(ValueError, match=msg):
+ ts2 - other
+ with pytest.raises(ValueError, match=msg):
+ other - ts2
+
class TestAsUnit:
def test_as_unit(self):
| With mixed resos, cast to the lower reso, and only if doing so is lossless. | https://api.github.com/repos/pandas-dev/pandas/pulls/47346 | 2022-06-14T17:01:14Z | 2022-06-15T00:16:46Z | 2022-06-15T00:16:46Z | 2022-06-15T16:44:08Z |
TST: adding a test for bar plot with intervalrange xaxis | diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py
index ca82c37b8a8b0..4d042579701eb 100644
--- a/pandas/tests/plotting/test_misc.py
+++ b/pandas/tests/plotting/test_misc.py
@@ -10,6 +10,7 @@
Index,
Series,
Timestamp,
+ interval_range,
)
import pandas._testing as tm
from pandas.tests.plotting.common import (
@@ -597,3 +598,19 @@ def test_plot_bar_axis_units_timestamp_conversion(self):
_check_plot_works(df.plot)
s = Series({"A": 1.0})
_check_plot_works(s.plot.bar)
+
+ def test_bar_plt_xaxis_intervalrange(self):
+ # GH 38969
+ # Ensure IntervalIndex x-axis produces a bar plot as expected
+ from matplotlib.text import Text
+
+ expected = [Text(0, 0, "([0, 1],)"), Text(1, 0, "([1, 2],)")]
+ s = Series(
+ [1, 2],
+ index=[interval_range(0, 2)],
+ )
+ _check_plot_works(s.plot.bar)
+ assert all(
+ (a.get_text() == b.get_text())
+ for a, b in zip(s.plot.bar().get_xticklabels(), expected)
+ )
| - [x] closes #38969
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
| https://api.github.com/repos/pandas-dev/pandas/pulls/47344 | 2022-06-14T12:31:41Z | 2022-06-15T21:52:26Z | 2022-06-15T21:52:26Z | 2022-10-20T08:44:21Z |
BUG: Series.setitem losing precision when enlarging | diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 5891eeea98cbb..81af4fe117dc6 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -823,6 +823,7 @@ Indexing
- Bug in :meth:`Series.__setitem__` where setting :attr:`NA` into a numeric-dtpye :class:`Series` would incorrectly upcast to object-dtype rather than treating the value as ``np.nan`` (:issue:`44199`)
- Bug in :meth:`Series.__setitem__` with ``datetime64[ns]`` dtype, an all-``False`` boolean mask, and an incompatible value incorrectly casting to ``object`` instead of retaining ``datetime64[ns]`` dtype (:issue:`45967`)
- Bug in :meth:`Index.__getitem__` raising ``ValueError`` when indexer is from boolean dtype with ``NA`` (:issue:`45806`)
+- Bug in :meth:`Series.__setitem__` losing precision when enlarging :class:`Series` with scalar (:issue:`32346`)
- Bug in :meth:`Series.mask` with ``inplace=True`` or setting values with a boolean mask with small integer dtypes incorrectly raising (:issue:`45750`)
- Bug in :meth:`DataFrame.mask` with ``inplace=True`` and ``ExtensionDtype`` columns incorrectly raising (:issue:`45577`)
- Bug in getting a column from a DataFrame with an object-dtype row index with datetime-like values: the resulting Series now preserves the exact object-dtype Index from the parent DataFrame (:issue:`42950`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 20ac0fedc28d1..67242aeeb49c6 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -21,7 +21,10 @@
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
-from pandas.core.dtypes.cast import can_hold_element
+from pandas.core.dtypes.cast import (
+ can_hold_element,
+ maybe_promote,
+)
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
@@ -41,7 +44,9 @@
)
from pandas.core.dtypes.missing import (
infer_fill_value,
+ is_valid_na_for_dtype,
isna,
+ na_value_for_dtype,
)
from pandas.core import algorithms as algos
@@ -2083,8 +2088,23 @@ def _setitem_with_indexer_missing(self, indexer, value):
# We get only here with loc, so can hard code
return self._setitem_with_indexer(new_indexer, value, "loc")
- # this preserves dtype of the value
- new_values = Series([value])._values
+ # this preserves dtype of the value and of the object
+ if is_valid_na_for_dtype(value, self.obj.dtype):
+ value = na_value_for_dtype(self.obj.dtype, compat=False)
+ new_dtype = maybe_promote(self.obj.dtype, value)[0]
+ elif isna(value):
+ new_dtype = None
+ elif not self.obj.empty and not is_object_dtype(self.obj.dtype):
+ # We should not cast, if we have object dtype because we can
+ # set timedeltas into object series
+ curr_dtype = self.obj.dtype
+ curr_dtype = getattr(curr_dtype, "numpy_dtype", curr_dtype)
+ new_dtype = maybe_promote(curr_dtype, value)[0]
+ else:
+ new_dtype = None
+
+ new_values = Series([value], dtype=new_dtype)._values
+
if len(self.obj._values):
# GH#22717 handle casting compatibility that np.concatenate
# does incorrectly
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index e2a5517066ad9..b73aacae18bc5 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -534,6 +534,33 @@ def test_setitem_not_contained(self, string_series):
expected = concat([string_series, app])
tm.assert_series_equal(ser, expected)
+ def test_setitem_keep_precision(self, any_numeric_ea_dtype):
+ # GH#32346
+ ser = Series([1, 2], dtype=any_numeric_ea_dtype)
+ ser[2] = 10
+ expected = Series([1, 2, 10], dtype=any_numeric_ea_dtype)
+ tm.assert_series_equal(ser, expected)
+
+ @pytest.mark.parametrize("indexer", [1, 2])
+ @pytest.mark.parametrize(
+ "na, target_na, dtype, target_dtype",
+ [
+ (NA, NA, "Int64", "Int64"),
+ (NA, np.nan, "int64", "float64"),
+ (NaT, NaT, "int64", "object"),
+ (np.nan, NA, "Int64", "Int64"),
+ (np.nan, NA, "Float64", "Float64"),
+ (np.nan, np.nan, "int64", "float64"),
+ ],
+ )
+ def test_setitem_enlarge_with_na(self, na, target_na, dtype, target_dtype, indexer):
+ # GH#32346
+ ser = Series([1, 2], dtype=dtype)
+ ser[indexer] = na
+ expected_values = [1, target_na] if indexer == 1 else [1, 2, target_na]
+ expected = Series(expected_values, dtype=target_dtype)
+ tm.assert_series_equal(ser, expected)
+
def test_setitem_scalar_into_readonly_backing_data():
# GH#14359: test that you cannot mutate a read only buffer
| - [x] xref #32346 (Replace xxxx with the Github issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
We have to preserve the dtype here. This only fixes this case for Series, not for DataFames
| https://api.github.com/repos/pandas-dev/pandas/pulls/47342 | 2022-06-14T10:13:35Z | 2022-07-01T22:18:05Z | 2022-07-01T22:18:05Z | 2022-07-01T22:19:08Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.