title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
[ArrowStringArray] Use `utf8_is_*` functions from Apache Arrow if available | diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py
index 76257e1b40f1a..5d9b1c135d7ae 100644
--- a/asv_bench/benchmarks/strings.py
+++ b/asv_bench/benchmarks/strings.py
@@ -50,91 +50,126 @@ def peakmem_cat_frame_construction(self, dtype):
class Methods:
- def setup(self):
- self.s = Series(tm.makeStringIndex(10 ** 5))
+ params = ["str", "string", "arrow_string"]
+ param_names = ["dtype"]
+
+ def setup(self, dtype):
+ from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
+
+ try:
+ self.s = Series(tm.makeStringIndex(10 ** 5), dtype=dtype)
+ except ImportError:
+ raise NotImplementedError
- def time_center(self):
+ def time_center(self, dtype):
self.s.str.center(100)
- def time_count(self):
+ def time_count(self, dtype):
self.s.str.count("A")
- def time_endswith(self):
+ def time_endswith(self, dtype):
self.s.str.endswith("A")
- def time_extract(self):
+ def time_extract(self, dtype):
with warnings.catch_warnings(record=True):
self.s.str.extract("(\\w*)A(\\w*)")
- def time_findall(self):
+ def time_findall(self, dtype):
self.s.str.findall("[A-Z]+")
- def time_find(self):
+ def time_find(self, dtype):
self.s.str.find("[A-Z]+")
- def time_rfind(self):
+ def time_rfind(self, dtype):
self.s.str.rfind("[A-Z]+")
- def time_get(self):
+ def time_get(self, dtype):
self.s.str.get(0)
- def time_len(self):
+ def time_len(self, dtype):
self.s.str.len()
- def time_join(self):
+ def time_join(self, dtype):
self.s.str.join(" ")
- def time_match(self):
+ def time_match(self, dtype):
self.s.str.match("A")
- def time_normalize(self):
+ def time_normalize(self, dtype):
self.s.str.normalize("NFC")
- def time_pad(self):
+ def time_pad(self, dtype):
self.s.str.pad(100, side="both")
- def time_partition(self):
+ def time_partition(self, dtype):
self.s.str.partition("A")
- def time_rpartition(self):
+ def time_rpartition(self, dtype):
self.s.str.rpartition("A")
- def time_replace(self):
+ def time_replace(self, dtype):
self.s.str.replace("A", "\x01\x01")
- def time_translate(self):
+ def time_translate(self, dtype):
self.s.str.translate({"A": "\x01\x01"})
- def time_slice(self):
+ def time_slice(self, dtype):
self.s.str.slice(5, 15, 2)
- def time_startswith(self):
+ def time_startswith(self, dtype):
self.s.str.startswith("A")
- def time_strip(self):
+ def time_strip(self, dtype):
self.s.str.strip("A")
- def time_rstrip(self):
+ def time_rstrip(self, dtype):
self.s.str.rstrip("A")
- def time_lstrip(self):
+ def time_lstrip(self, dtype):
self.s.str.lstrip("A")
- def time_title(self):
+ def time_title(self, dtype):
self.s.str.title()
- def time_upper(self):
+ def time_upper(self, dtype):
self.s.str.upper()
- def time_lower(self):
+ def time_lower(self, dtype):
self.s.str.lower()
- def time_wrap(self):
+ def time_wrap(self, dtype):
self.s.str.wrap(10)
- def time_zfill(self):
+ def time_zfill(self, dtype):
self.s.str.zfill(10)
+ def time_isalnum(self, dtype):
+ self.s.str.isalnum()
+
+ def time_isalpha(self, dtype):
+ self.s.str.isalpha()
+
+ def time_isdecimal(self, dtype):
+ self.s.str.isdecimal()
+
+ def time_isdigit(self, dtype):
+ self.s.str.isdigit()
+
+ def time_islower(self, dtype):
+ self.s.str.islower()
+
+ def time_isnumeric(self, dtype):
+ self.s.str.isnumeric()
+
+ def time_isspace(self, dtype):
+ self.s.str.isspace()
+
+ def time_istitle(self, dtype):
+ self.s.str.istitle()
+
+ def time_isupper(self, dtype):
+ self.s.str.isupper()
+
class Repeat:
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index dd09ef4e585ce..55cb350d3d27c 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -39,6 +39,7 @@
from pandas.core import missing
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.base import ExtensionArray
+from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.indexers import (
check_array_indexer,
validate_indices,
@@ -758,6 +759,69 @@ def _str_map(self, f, na_value=None, dtype: Dtype | None = None):
# -> We don't know the result type. E.g. `.get` can return anything.
return lib.map_infer_mask(arr, f, mask.view("uint8"))
+ def _str_isalnum(self):
+ if hasattr(pc, "utf8_is_alnum"):
+ result = pc.utf8_is_alnum(self._data)
+ return BooleanDtype().__from_arrow__(result)
+ else:
+ return super()._str_isalnum()
+
+ def _str_isalpha(self):
+ if hasattr(pc, "utf8_is_alpha"):
+ result = pc.utf8_is_alpha(self._data)
+ return BooleanDtype().__from_arrow__(result)
+ else:
+ return super()._str_isalpha()
+
+ def _str_isdecimal(self):
+ if hasattr(pc, "utf8_is_decimal"):
+ result = pc.utf8_is_decimal(self._data)
+ return BooleanDtype().__from_arrow__(result)
+ else:
+ return super()._str_isdecimal()
+
+ def _str_isdigit(self):
+ if hasattr(pc, "utf8_is_digit"):
+ result = pc.utf8_is_digit(self._data)
+ return BooleanDtype().__from_arrow__(result)
+ else:
+ return super()._str_isdigit()
+
+ def _str_islower(self):
+ if hasattr(pc, "utf8_is_lower"):
+ result = pc.utf8_is_lower(self._data)
+ return BooleanDtype().__from_arrow__(result)
+ else:
+ return super()._str_islower()
+
+ def _str_isnumeric(self):
+ if hasattr(pc, "utf8_is_numeric"):
+ result = pc.utf8_is_numeric(self._data)
+ return BooleanDtype().__from_arrow__(result)
+ else:
+ return super()._str_isnumeric()
+
+ def _str_isspace(self):
+ if hasattr(pc, "utf8_is_space"):
+ result = pc.utf8_is_space(self._data)
+ return BooleanDtype().__from_arrow__(result)
+ else:
+ return super()._str_isspace()
+
+ def _str_istitle(self):
+ if hasattr(pc, "utf8_is_title"):
+ result = pc.utf8_is_title(self._data)
+ return BooleanDtype().__from_arrow__(result)
+ else:
+ return super()._str_istitle()
+
+ def _str_isupper(self):
+ if hasattr(pc, "utf8_is_upper"):
+ result = pc.utf8_is_upper(self._data)
+ return BooleanDtype().__from_arrow__(result)
+ else:
+ return super()._str_isupper()
+
def _str_lower(self):
return type(self)(pc.utf8_lower(self._data))
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index 0b5613e302175..85a58d3d99795 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -3002,8 +3002,9 @@ def _result_dtype(arr):
# ideally we just pass `dtype=arr.dtype` unconditionally, but this fails
# when the list of values is empty.
from pandas.core.arrays.string_ import StringDtype
+ from pandas.core.arrays.string_arrow import ArrowStringDtype
- if isinstance(arr.dtype, StringDtype):
+ if isinstance(arr.dtype, (StringDtype, ArrowStringDtype)):
return arr.dtype.name
else:
return object
diff --git a/pandas/tests/strings/test_string_array.py b/pandas/tests/strings/test_string_array.py
index 02ccb3a930557..f90d219159c7e 100644
--- a/pandas/tests/strings/test_string_array.py
+++ b/pandas/tests/strings/test_string_array.py
@@ -13,19 +13,11 @@
)
-def test_string_array(nullable_string_dtype, any_string_method, request):
+def test_string_array(nullable_string_dtype, any_string_method):
method_name, args, kwargs = any_string_method
if method_name == "decode":
pytest.skip("decode requires bytes.")
- if nullable_string_dtype == "arrow_string" and method_name in {
- "extract",
- "extractall",
- }:
- reason = "extract/extractall does not yet dispatch to array"
- mark = pytest.mark.xfail(reason=reason)
- request.node.add_marker(mark)
-
data = ["a", "bb", np.nan, "ccc"]
a = Series(data, dtype=object)
b = Series(data, dtype=nullable_string_dtype)
@@ -93,15 +85,10 @@ def test_string_array_boolean_array(nullable_string_dtype, method, expected):
tm.assert_series_equal(result, expected)
-def test_string_array_extract(nullable_string_dtype, request):
+def test_string_array_extract(nullable_string_dtype):
# https://github.com/pandas-dev/pandas/issues/30969
# Only expand=False & multiple groups was failing
- if nullable_string_dtype == "arrow_string":
- reason = "extract does not yet dispatch to array"
- mark = pytest.mark.xfail(reason=reason)
- request.node.add_marker(mark)
-
a = Series(["a1", "b2", "cc"], dtype=nullable_string_dtype)
b = Series(["a1", "b2", "cc"], dtype="object")
pat = r"(\w)(\d)"
diff --git a/pandas/tests/strings/test_strings.py b/pandas/tests/strings/test_strings.py
index 06b22f00a38cf..2a52b3ba3f9e1 100644
--- a/pandas/tests/strings/test_strings.py
+++ b/pandas/tests/strings/test_strings.py
@@ -6,6 +6,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
from pandas import (
DataFrame,
Index,
@@ -17,6 +19,27 @@
import pandas._testing as tm
+@pytest.fixture(
+ params=[
+ "object",
+ "string",
+ pytest.param(
+ "arrow_string", marks=td.skip_if_no("pyarrow", min_version="1.0.0")
+ ),
+ ]
+)
+def any_string_dtype(request):
+ """
+ Parametrized fixture for string dtypes.
+ * 'object'
+ * 'string'
+ * 'arrow_string'
+ """
+ from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
+
+ return request.param
+
+
def assert_series_or_index_equal(left, right):
if isinstance(left, Series):
tm.assert_series_equal(left, right)
@@ -149,10 +172,15 @@ def test_repeat_with_null(nullable_string_dtype):
tm.assert_series_equal(result, expected)
-def test_empty_str_methods():
- empty_str = empty = Series(dtype=object)
- empty_int = Series(dtype="int64")
- empty_bool = Series(dtype=bool)
+def test_empty_str_methods(any_string_dtype):
+ empty_str = empty = Series(dtype=any_string_dtype)
+ if any_string_dtype == "object":
+ empty_int = Series(dtype="int64")
+ empty_bool = Series(dtype=bool)
+ else:
+ empty_int = Series(dtype="Int64")
+ empty_bool = Series(dtype="boolean")
+ empty_object = Series(dtype=object)
empty_bytes = Series(dtype=object)
# GH7241
@@ -184,15 +212,15 @@ def test_empty_str_methods():
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(""))
tm.assert_series_equal(empty_int, empty.str.len())
- tm.assert_series_equal(empty_str, empty_str.str.findall("a"))
+ tm.assert_series_equal(empty_object, empty_str.str.findall("a"))
tm.assert_series_equal(empty_int, empty.str.find("a"))
tm.assert_series_equal(empty_int, empty.str.rfind("a"))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
- tm.assert_series_equal(empty_str, empty.str.split("a"))
- tm.assert_series_equal(empty_str, empty.str.rsplit("a"))
- tm.assert_series_equal(empty_str, empty.str.partition("a", expand=False))
- tm.assert_series_equal(empty_str, empty.str.rpartition("a", expand=False))
+ tm.assert_series_equal(empty_object, empty.str.split("a"))
+ tm.assert_series_equal(empty_object, empty.str.rsplit("a"))
+ tm.assert_series_equal(empty_object, empty.str.partition("a", expand=False))
+ tm.assert_series_equal(empty_object, empty.str.rpartition("a", expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
@@ -200,7 +228,7 @@ def test_empty_str_methods():
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
- tm.assert_series_equal(empty_str, empty_bytes.str.decode("ascii"))
+ tm.assert_series_equal(empty_object, empty_bytes.str.decode("ascii"))
tm.assert_series_equal(empty_bytes, empty.str.encode("ascii"))
# ismethods should always return boolean (GH 29624)
tm.assert_series_equal(empty_bool, empty.str.isalnum())
@@ -227,9 +255,9 @@ def test_empty_str_methods_to_frame():
tm.assert_frame_equal(empty_df, empty.str.rpartition("a"))
-def test_ismethods():
+def test_ismethods(any_string_dtype):
values = ["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "]
- str_s = Series(values)
+ str_s = Series(values, dtype=any_string_dtype)
alnum_e = [True, True, True, True, True, False, True, True, False, False]
alpha_e = [True, True, True, False, False, False, True, False, False, False]
digit_e = [False, False, False, True, False, False, False, True, False, False]
@@ -253,13 +281,14 @@ def test_ismethods():
upper_e = [True, False, False, False, True, False, True, False, False, False]
title_e = [True, False, True, False, True, False, False, False, False, False]
- tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
- tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
- tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
- tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
- tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
- tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
- tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
+ dtype = "bool" if any_string_dtype == "object" else "boolean"
+ tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e, dtype=dtype))
+ tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e, dtype=dtype))
+ tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e, dtype=dtype))
+ tm.assert_series_equal(str_s.str.isspace(), Series(space_e, dtype=dtype))
+ tm.assert_series_equal(str_s.str.islower(), Series(lower_e, dtype=dtype))
+ tm.assert_series_equal(str_s.str.isupper(), Series(upper_e, dtype=dtype))
+ tm.assert_series_equal(str_s.str.istitle(), Series(title_e, dtype=dtype))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
@@ -270,28 +299,30 @@ def test_ismethods():
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
-def test_isnumeric():
+def test_isnumeric(any_string_dtype):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 Em 3
values = ["A", "3", "¼", "★", "፸", "3", "four"]
- s = Series(values)
+ s = Series(values, dtype=any_string_dtype)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
- tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
- tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
+ dtype = "bool" if any_string_dtype == "object" else "boolean"
+ tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e, dtype=dtype))
+ tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e, dtype=dtype))
unicodes = ["A", "3", "¼", "★", "፸", "3", "four"]
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ["A", np.nan, "¼", "★", np.nan, "3", "four"]
- s = Series(values)
+ s = Series(values, dtype=any_string_dtype)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
- tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
- tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
+ dtype = "object" if any_string_dtype == "object" else "boolean"
+ tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e, dtype=dtype))
+ tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e, dtype=dtype))
def test_get_dummies():
| xref https://github.com/xhochy/fletcher/pull/203
marked as draft since AFAICT there is performance issues with BooleanDtype().__from_arrow__ | https://api.github.com/repos/pandas-dev/pandas/pulls/41041 | 2021-04-19T13:47:43Z | 2021-04-25T13:17:15Z | 2021-04-25T13:17:15Z | 2021-04-25T15:48:12Z |
CI: Combined the doctests checks | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index d4b6c0d6ff09d..a2a108924a0f2 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -106,84 +106,31 @@ fi
### DOCTESTS ###
if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
- # Individual files
-
- MSG='Doctests accessor.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/accessor.py
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests aggregation.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/aggregation.py
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests base.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/base.py
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests construction.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/construction.py
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests frame.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/frame.py
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests generic.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/generic.py
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests series.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/series.py
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests strings.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/strings/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests sql.py' ; echo $MSG
- pytest -q --doctest-modules pandas/io/sql.py
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- # Directories
-
- MSG='Doctests arrays'; echo $MSG
- pytest -q --doctest-modules pandas/core/arrays/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests computation' ; echo $MSG
- pytest -q --doctest-modules pandas/core/computation/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests dtypes'; echo $MSG
- pytest -q --doctest-modules pandas/core/dtypes/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests groupby' ; echo $MSG
- pytest -q --doctest-modules pandas/core/groupby/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests indexes' ; echo $MSG
- pytest -q --doctest-modules pandas/core/indexes/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests ops' ; echo $MSG
- pytest -q --doctest-modules pandas/core/ops/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests reshape' ; echo $MSG
- pytest -q --doctest-modules pandas/core/reshape/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests tools' ; echo $MSG
- pytest -q --doctest-modules pandas/core/tools/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests window' ; echo $MSG
- pytest -q --doctest-modules pandas/core/window/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Doctests tseries' ; echo $MSG
- pytest -q --doctest-modules pandas/tseries/
+ MSG='Doctests for individual files' ; echo $MSG
+ pytest -q --doctest-modules \
+ pandas/core/accessor.py \
+ pandas/core/aggregation.py \
+ pandas/core/base.py \
+ pandas/core/construction.py \
+ pandas/core/frame.py \
+ pandas/core/generic.py \
+ pandas/core/series.py \
+ pandas/io/sql.py
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+
+ MSG='Doctests for directories' ; echo $MSG
+ pytest -q --doctest-modules \
+ pandas/core/arrays/ \
+ pandas/core/computation/ \
+ pandas/core/dtypes/ \
+ pandas/core/groupby/ \
+ pandas/core/indexes/ \
+ pandas/core/ops/ \
+ pandas/core/reshape/ \
+ pandas/core/strings/ \
+ pandas/core/tools/ \
+ pandas/core/window/ \
+ pandas/tseries/
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
Doing as was asked [here](https://github.com/pandas-dev/pandas/pull/40903#discussion_r612486254)
| https://api.github.com/repos/pandas-dev/pandas/pulls/41039 | 2021-04-19T13:21:30Z | 2021-04-26T18:25:05Z | 2021-04-26T18:25:05Z | 2021-04-26T19:34:02Z |
DOC: add example for plotting asymmetrical error bars | diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index 8b41cc24829c5..2143d7ea75f6e 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -1455,8 +1455,6 @@ Horizontal and vertical error bars can be supplied to the ``xerr`` and ``yerr``
* As a ``str`` indicating which of the columns of plotting :class:`DataFrame` contain the error values.
* As raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting :class:`DataFrame`/:class:`Series`.
-Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``N`` length :class:`Series`, a ``2xN`` array should be provided indicating lower and upper (or left and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors should be in a ``Mx2xN`` array.
-
Here is an example of one way to easily plot group means with standard deviations from the raw data.
.. ipython:: python
@@ -1464,16 +1462,16 @@ Here is an example of one way to easily plot group means with standard deviation
# Generate the data
ix3 = pd.MultiIndex.from_arrays(
[
- ["a", "a", "a", "a", "b", "b", "b", "b"],
- ["foo", "foo", "bar", "bar", "foo", "foo", "bar", "bar"],
+ ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"],
+ ["foo", "foo", "foo", "bar", "bar", "foo", "foo", "bar", "bar", "bar"],
],
names=["letter", "word"],
)
df3 = pd.DataFrame(
{
- "data1": [3, 2, 4, 3, 2, 4, 3, 2],
- "data2": [6, 5, 7, 5, 4, 5, 6, 5],
+ "data1": [9, 3, 2, 4, 3, 2, 4, 6, 3, 2],
+ "data2": [9, 6, 5, 7, 5, 4, 5, 6, 5, 1],
},
index=ix3,
)
@@ -1496,6 +1494,28 @@ Here is an example of one way to easily plot group means with standard deviation
plt.close("all")
+Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``N`` length :class:`Series`, a ``2xN`` array should be provided indicating lower and upper (or left and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors should be in a ``Mx2xN`` array.
+
+Here is an example of one way to plot the min/max range using asymmetrical error bars.
+
+.. ipython:: python
+
+ mins = gp3.min()
+ maxs = gp3.max()
+
+ # errors should be positive, and defined in the order of lower, upper
+ errors = [[means[c] - mins[c], maxs[c] - means[c]] for c in df3.columns]
+
+ # Plot
+ fig, ax = plt.subplots()
+ @savefig errorbar_asymmetrical_example.png
+ means.plot.bar(yerr=errors, ax=ax, capsize=4, rot=0);
+
+.. ipython:: python
+ :suppress:
+
+ plt.close("all")
+
.. _visualization.table:
Plotting tables
| - [x] closes #41034
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/41035 | 2021-04-19T07:32:11Z | 2021-04-21T04:27:08Z | 2021-04-21T04:27:08Z | 2021-04-21T04:27:21Z |
⬆️ UPGRADE: Autoupdate pre-commit config | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 5b11490479088..2f46190ef5eb7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -35,7 +35,7 @@ repos:
exclude: ^pandas/_libs/src/(klib|headers)/
args: [--quiet, '--extensions=c,h', '--headers=h', --recursive, '--filter=-readability/casting,-runtime/int,-build/include_subdir']
- repo: https://gitlab.com/pycqa/flake8
- rev: 3.9.0
+ rev: 3.9.1
hooks:
- id: flake8
additional_dependencies:
@@ -75,7 +75,7 @@ repos:
hooks:
- id: yesqa
additional_dependencies:
- - flake8==3.9.0
+ - flake8==3.9.1
- flake8-comprehensions==3.1.0
- flake8-bugbear==21.3.2
- pandas-dev-flaker==0.2.0
diff --git a/environment.yml b/environment.yml
index 146bf6db08d8b..0d03ad8e0a46a 100644
--- a/environment.yml
+++ b/environment.yml
@@ -20,7 +20,7 @@ dependencies:
# code checks
- black=20.8b1
- cpplint
- - flake8=3.9.0
+ - flake8=3.9.1
- flake8-bugbear=21.3.2 # used by flake8, find likely bugs
- flake8-comprehensions=3.1.0 # used by flake8, linting of unnecessary comprehensions
- isort>=5.2.1 # check that imports are in the right order
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 33deeef9f1f82..ea7ca43742934 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -8,7 +8,7 @@ asv
cython>=0.29.21
black==20.8b1
cpplint
-flake8==3.9.0
+flake8==3.9.1
flake8-bugbear==21.3.2
flake8-comprehensions==3.1.0
isort>=5.2.1
| <!-- START pr-commits -->
<!-- END pr-commits -->
## Base PullRequest
default branch (https://github.com/pandas-dev/pandas/tree/master)
## Command results
<details>
<summary>Details: </summary>
<details>
<summary><em>add path</em></summary>
```Shell
/home/runner/work/_actions/technote-space/create-pr-action/v2/node_modules/npm-check-updates/bin
```
</details>
<details>
<summary><em>pip install pre-commit</em></summary>
```Shell
Collecting pre-commit
Downloading pre_commit-2.12.1-py2.py3-none-any.whl (189 kB)
Collecting nodeenv>=0.11.1
Downloading nodeenv-1.6.0-py2.py3-none-any.whl (21 kB)
Collecting identify>=1.0.0
Downloading identify-2.2.3-py2.py3-none-any.whl (98 kB)
Collecting toml
Using cached toml-0.10.2-py2.py3-none-any.whl (16 kB)
Collecting cfgv>=2.0.0
Using cached cfgv-3.2.0-py2.py3-none-any.whl (7.3 kB)
Collecting pyyaml>=5.1
Downloading PyYAML-5.4.1-cp39-cp39-manylinux1_x86_64.whl (630 kB)
Collecting virtualenv>=20.0.8
Downloading virtualenv-20.4.3-py2.py3-none-any.whl (7.2 MB)
Collecting six<2,>=1.9.0
Using cached six-1.15.0-py2.py3-none-any.whl (10 kB)
Collecting appdirs<2,>=1.4.3
Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB)
Collecting distlib<1,>=0.3.1
Using cached distlib-0.3.1-py2.py3-none-any.whl (335 kB)
Collecting filelock<4,>=3.0.0
Using cached filelock-3.0.12-py3-none-any.whl (7.6 kB)
Installing collected packages: six, filelock, distlib, appdirs, virtualenv, toml, pyyaml, nodeenv, identify, cfgv, pre-commit
Successfully installed appdirs-1.4.4 cfgv-3.2.0 distlib-0.3.1 filelock-3.0.12 identify-2.2.3 nodeenv-1.6.0 pre-commit-2.12.1 pyyaml-5.4.1 six-1.15.0 toml-0.10.2 virtualenv-20.4.3
```
</details>
<details>
<summary><em>pre-commit autoupdate || (exit 0);</em></summary>
```Shell
Updating https://github.com/MarcoGorelli/absolufy-imports ... [INFO] Initializing environment for https://github.com/MarcoGorelli/absolufy-imports.
already up to date.
Updating https://github.com/python/black ... already up to date.
Updating https://github.com/codespell-project/codespell ... [INFO] Initializing environment for https://github.com/codespell-project/codespell.
already up to date.
Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks.
already up to date.
Updating https://github.com/cpplint/cpplint ... [INFO] Initializing environment for https://github.com/cpplint/cpplint.
=====> /home/runner/.cache/pre-commit/repof0nncua3/.pre-commit-hooks.yaml does not exist
Updating https://gitlab.com/pycqa/flake8 ... [INFO] Initializing environment for https://gitlab.com/pycqa/flake8.
updating 3.9.0 -> 3.9.1.
Updating https://github.com/PyCQA/isort ... [INFO] Initializing environment for https://github.com/PyCQA/isort.
already up to date.
Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade.
already up to date.
Updating https://github.com/pre-commit/pygrep-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pygrep-hooks.
already up to date.
Updating https://github.com/asottile/yesqa ... already up to date.
```
</details>
<details>
<summary><em>pre-commit run -a || (exit 0);</em></summary>
```Shell
[INFO] Initializing environment for https://github.com/cpplint/cpplint.
[INFO] Initializing environment for https://gitlab.com/pycqa/flake8:flake8-bugbear==21.3.2,flake8-comprehensions==3.1.0,pandas-dev-flaker==0.2.0.
[INFO] Initializing environment for https://github.com/asottile/yesqa:flake8-bugbear==21.3.2,flake8-comprehensions==3.1.0,flake8==3.9.0,pandas-dev-flaker==0.2.0.
[INFO] Installing environment for https://github.com/MarcoGorelli/absolufy-imports.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/python/black.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/codespell-project/codespell.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/cpplint/cpplint.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://gitlab.com/pycqa/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://gitlab.com/pycqa/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/PyCQA/isort.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/pyupgrade.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/yesqa.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
absolufy-imports...............................................................................Passed
black..........................................................................................Passed
codespell......................................................................................Passed
Fix End of Files...............................................................................Passed
Trim Trailing Whitespace.......................................................................Passed
cpplint........................................................................................Passed
flake8.........................................................................................Passed
flake8 (cython)................................................................................Passed
flake8 (cython template).......................................................................Passed
isort..........................................................................................Passed
pyupgrade......................................................................................Passed
rst ``code`` is two backticks..................................................................Passed
rst directives end with two colons.............................................................Passed
rst ``inline code`` next to normal text........................................................Passed
Strip unnecessary `# noqa`s....................................................................Passed
flake8-rst.....................................................................................Passed
Unwanted patterns..............................................................................Passed
Generate pip dependency from conda.............................................................Passed
Check flake8 version is synced across flake8, yesqa, and environment.yml.......................Failed
- hook id: sync-flake8-versions
- exit code: 1
flake8 in 'environment.yml' does not match in 'flake8' from 'pre-commit'
Validate correct capitalization among titles in documentation..................................Passed
Import pandas.array as pd_array in core........................................................Passed
Use bool_t instead of bool in pandas/core/generic.py...........................................Passed
```
</details>
</details>
## Changed files
<details>
<summary>Changed file: </summary>
- .pre-commit-config.yaml
</details>
<hr>
[:octocat: Repo](https://github.com/technote-space/create-pr-action) | [:memo: Issues](https://github.com/technote-space/create-pr-action/issues) | [:department_store: Marketplace](https://github.com/marketplace/actions/create-pr-action) | https://api.github.com/repos/pandas-dev/pandas/pulls/41033 | 2021-04-19T07:14:08Z | 2021-04-19T19:38:04Z | 2021-04-19T19:38:03Z | 2021-05-13T22:26:29Z |
REF: remove Categorical._shallow_copy | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 9e2dd846f0379..6f906cf8879ff 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1876,29 +1876,33 @@ def _sort_tuples(values: np.ndarray) -> np.ndarray:
return values[indexer]
-def union_with_duplicates(lvals: np.ndarray, rvals: np.ndarray) -> np.ndarray:
+def union_with_duplicates(lvals: ArrayLike, rvals: ArrayLike) -> ArrayLike:
"""
Extracts the union from lvals and rvals with respect to duplicates and nans in
both arrays.
Parameters
----------
- lvals: np.ndarray
+ lvals: np.ndarray or ExtensionArray
left values which is ordered in front.
- rvals: np.ndarray
+ rvals: np.ndarray or ExtensionArray
right values ordered after lvals.
Returns
-------
- np.ndarray containing the unsorted union of both arrays
+ np.ndarray or ExtensionArray
+ Containing the unsorted union of both arrays.
"""
indexer = []
l_count = value_counts(lvals, dropna=False)
r_count = value_counts(rvals, dropna=False)
l_count, r_count = l_count.align(r_count, fill_value=0)
unique_array = unique(np.append(lvals, rvals))
- if is_extension_array_dtype(lvals) or is_extension_array_dtype(rvals):
- unique_array = pd_array(unique_array)
+ if not isinstance(lvals, np.ndarray):
+ # i.e. ExtensionArray
+ # Note: we only get here with lvals.dtype == rvals.dtype
+ # TODO: are there any cases where union won't be type/dtype preserving?
+ unique_array = type(lvals)._from_sequence(unique_array, dtype=lvals.dtype)
for i, value in enumerate(unique_array):
indexer += [i] * int(max(l_count[value], r_count[value]))
return unique_array.take(indexer)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 310ee4c3a63e3..6cf6c18dbe350 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2982,12 +2982,7 @@ def _union(self, other: Index, sort):
elif not other.is_unique:
# other has duplicates
-
- # error: Argument 1 to "union_with_duplicates" has incompatible type
- # "Union[ExtensionArray, ndarray]"; expected "ndarray"
- # error: Argument 2 to "union_with_duplicates" has incompatible type
- # "Union[ExtensionArray, ndarray]"; expected "ndarray"
- result = algos.union_with_duplicates(lvals, rvals) # type: ignore[arg-type]
+ result = algos.union_with_duplicates(lvals, rvals)
return _maybe_try_sort(result, sort)
# Self may have duplicates
@@ -3002,9 +2997,7 @@ def _union(self, other: Index, sort):
other_diff = rvals.take(missing)
result = concat_compat((lvals, other_diff))
else:
- # error: Incompatible types in assignment (expression has type
- # "Union[ExtensionArray, ndarray]", variable has type "ndarray")
- result = lvals # type: ignore[assignment]
+ result = lvals
if not self.is_monotonic or not other.is_monotonic:
result = _maybe_try_sort(result, sort)
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 5b98b956e33e6..8d15b460a79df 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -11,7 +11,6 @@
from pandas._config import get_option
from pandas._libs import index as libindex
-from pandas._libs.lib import no_default
from pandas._typing import (
ArrayLike,
Dtype,
@@ -234,22 +233,6 @@ def __new__(
# --------------------------------------------------------------------
- @doc(Index._shallow_copy)
- def _shallow_copy(
- self,
- values: Categorical,
- name: Hashable = no_default,
- ) -> CategoricalIndex:
- name = self._name if name is no_default else name
-
- if values is not None:
- # In tests we only get here with Categorical objects that
- # have matching .ordered, and values.categories a subset of
- # our own. However we do _not_ have a dtype match in general.
- values = Categorical(values, dtype=self.dtype)
-
- return super()._shallow_copy(values=values, name=name)
-
def _is_dtype_compat(self, other) -> Categorical:
"""
*this is an internal non-public method*
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index b11ec06120e0c..d28bcd6c5497a 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -331,7 +331,7 @@ def _get_unique_index(self):
return self
result = self._data.unique()
- return self._shallow_copy(result)
+ return type(self)._simple_new(result, name=self.name)
@doc(Index.map)
def map(self, mapper, na_action=None):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
@phofl parts of this may be worth doing in #40967 | https://api.github.com/repos/pandas-dev/pandas/pulls/41030 | 2021-04-19T04:01:55Z | 2021-04-19T13:38:44Z | 2021-04-19T13:38:44Z | 2021-04-19T14:48:38Z |
TYP: aggregations.pyx | diff --git a/pandas/_libs/window/aggregations.pyi b/pandas/_libs/window/aggregations.pyi
new file mode 100644
index 0000000000000..3391edac84224
--- /dev/null
+++ b/pandas/_libs/window/aggregations.pyi
@@ -0,0 +1,126 @@
+from typing import (
+ Any,
+ Callable,
+ Literal,
+)
+
+import numpy as np
+
+def roll_sum(
+ values: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+
+def roll_mean(
+ values: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+
+def roll_var(
+ values: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+ ddof: int = ...,
+) -> np.ndarray: ... # np.ndarray[float]
+
+def roll_skew(
+ values: np.ndarray, # np.ndarray[np.float64]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+
+def roll_kurt(
+ values: np.ndarray, # np.ndarray[np.float64]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+
+def roll_median_c(
+ values: np.ndarray, # np.ndarray[np.float64]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+
+def roll_max(
+ values: np.ndarray, # np.ndarray[np.float64]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+
+def roll_min(
+ values: np.ndarray, # np.ndarray[np.float64]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+
+def roll_quantile(
+ values: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+ quantile: float, # float64_t
+ interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"],
+) -> np.ndarray: ... # np.ndarray[float]
+
+def roll_apply(
+ obj: object,
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+ function: Callable[..., Any],
+ raw: bool,
+ args: tuple[Any, ...],
+ kwargs: dict[str, Any],
+) -> np.ndarray: ... # np.ndarray[float] # FIXME: could also be type(obj) if n==0
+
+def roll_weighted_sum(
+ values: np.ndarray, # const float64_t[:]
+ weights: np.ndarray, # const float64_t[:]
+ minp: int,
+) -> np.ndarray: ... # np.ndarray[np.float64]
+
+def roll_weighted_mean(
+ values: np.ndarray, # const float64_t[:]
+ weights: np.ndarray, # const float64_t[:]
+ minp: int,
+) -> np.ndarray: ... # np.ndarray[np.float64]
+
+def roll_weighted_var(
+ values: np.ndarray, # const float64_t[:]
+ weights: np.ndarray, # const float64_t[:]
+ minp: int, # int64_t
+ ddof: int, # unsigned int
+) -> np.ndarray: ... # np.ndarray[np.float64]
+
+def ewma(
+ vals: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # const int64_t[:]
+ end: np.ndarray, # const int64_t[:]
+ minp: int,
+ com: float, # float64_t
+ adjust: bool,
+ ignore_na: bool,
+ deltas: np.ndarray, # const float64_t[:]
+) -> np.ndarray: ... # np.ndarray[np.float64]
+
+def ewmcov(
+ input_x: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # const int64_t[:]
+ end: np.ndarray, # const int64_t[:]
+ minp: int,
+ input_y: np.ndarray, # const float64_t[:]
+ com: float, # float64_t
+ adjust: bool,
+ ignore_na: bool,
+ bias: bool,
+) -> np.ndarray: ... # np.ndarray[np.float64]
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 8d6f899d6f3ca..3d3a19a1c7a40 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -116,7 +116,7 @@ cdef inline void remove_sum(float64_t val, int64_t *nobs, float64_t *sum_x,
def roll_sum(const float64_t[:] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp) -> np.ndarray:
cdef:
Py_ssize_t i, j
float64_t sum_x = 0, compensation_add = 0, compensation_remove = 0
@@ -128,7 +128,7 @@ def roll_sum(const float64_t[:] values, ndarray[int64_t] start,
is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds(
start, end
)
- output = np.empty(N, dtype=float)
+ output = np.empty(N, dtype=np.float64)
with nogil:
@@ -221,7 +221,7 @@ cdef inline void remove_mean(float64_t val, Py_ssize_t *nobs, float64_t *sum_x,
def roll_mean(const float64_t[:] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp) -> np.ndarray:
cdef:
float64_t val, compensation_add = 0, compensation_remove = 0, sum_x = 0
int64_t s, e
@@ -232,7 +232,7 @@ def roll_mean(const float64_t[:] values, ndarray[int64_t] start,
is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds(
start, end
)
- output = np.empty(N, dtype=float)
+ output = np.empty(N, dtype=np.float64)
with nogil:
@@ -338,7 +338,7 @@ cdef inline void remove_var(float64_t val, float64_t *nobs, float64_t *mean_x,
def roll_var(const float64_t[:] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp, int ddof=1):
+ ndarray[int64_t] end, int64_t minp, int ddof=1) -> np.ndarray:
"""
Numerically stable implementation using Welford's method.
"""
@@ -355,7 +355,7 @@ def roll_var(const float64_t[:] values, ndarray[int64_t] start,
is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds(
start, end
)
- output = np.empty(N, dtype=float)
+ output = np.empty(N, dtype=np.float64)
with nogil:
@@ -490,7 +490,7 @@ cdef inline void remove_skew(float64_t val, int64_t *nobs,
def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp) -> np.ndarray:
cdef:
Py_ssize_t i, j
float64_t val, prev, min_val, mean_val, sum_val = 0
@@ -507,7 +507,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds(
start, end
)
- output = np.empty(N, dtype=float)
+ output = np.empty(N, dtype=np.float64)
min_val = np.nanmin(values)
values_copy = np.copy(values)
@@ -672,7 +672,7 @@ cdef inline void remove_kurt(float64_t val, int64_t *nobs,
def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp) -> np.ndarray:
cdef:
Py_ssize_t i, j
float64_t val, prev, mean_val, min_val, sum_val = 0
@@ -689,7 +689,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
is_monotonic_increasing_bounds = is_monotonic_increasing_start_end_bounds(
start, end
)
- output = np.empty(N, dtype=float)
+ output = np.empty(N, dtype=np.float64)
values_copy = np.copy(values)
min_val = np.nanmin(values)
@@ -753,7 +753,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
def roll_median_c(const float64_t[:] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp) -> np.ndarray:
cdef:
Py_ssize_t i, j
bint err = False, is_monotonic_increasing_bounds
@@ -769,7 +769,7 @@ def roll_median_c(const float64_t[:] values, ndarray[int64_t] start,
# we use the Fixed/Variable Indexer here as the
# actual skiplist ops outweigh any window computation costs
- output = np.empty(N, dtype=float)
+ output = np.empty(N, dtype=np.float64)
if (end - start).max() == 0:
output[:] = NaN
@@ -889,7 +889,7 @@ cdef inline numeric calc_mm(int64_t minp, Py_ssize_t nobs,
def roll_max(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp) -> np.ndarray:
"""
Moving max of 1d array of any numeric type along axis=0 ignoring NaNs.
@@ -904,12 +904,16 @@ def roll_max(ndarray[float64_t] values, ndarray[int64_t] start,
closed : 'right', 'left', 'both', 'neither'
make the interval closed on the right, left,
both or neither endpoints
+
+ Returns
+ -------
+ np.ndarray[float]
"""
return _roll_min_max(values, start, end, minp, is_max=1)
def roll_min(ndarray[float64_t] values, ndarray[int64_t] start,
- ndarray[int64_t] end, int64_t minp):
+ ndarray[int64_t] end, int64_t minp) -> np.ndarray:
"""
Moving min of 1d array of any numeric type along axis=0 ignoring NaNs.
@@ -921,6 +925,10 @@ def roll_min(ndarray[float64_t] values, ndarray[int64_t] start,
is below this, output a NaN
index : ndarray, optional
index for window computation
+
+ Returns
+ -------
+ np.ndarray[float]
"""
return _roll_min_max(values, start, end, minp, is_max=0)
@@ -938,7 +946,7 @@ cdef _roll_min_max(ndarray[numeric] values,
deque W[int64_t] # track the whole window for nobs compute
ndarray[float64_t, ndim=1] output
- output = np.empty(N, dtype=float)
+ output = np.empty(N, dtype=np.float64)
Q = deque[int64_t]()
W = deque[int64_t]()
@@ -1011,7 +1019,7 @@ interpolation_types = {
def roll_quantile(const float64_t[:] values, ndarray[int64_t] start,
ndarray[int64_t] end, int64_t minp,
- float64_t quantile, str interpolation):
+ float64_t quantile, str interpolation) -> np.ndarray:
"""
O(N log(window)) implementation using skip list
"""
@@ -1038,7 +1046,7 @@ def roll_quantile(const float64_t[:] values, ndarray[int64_t] start,
)
# we use the Fixed/Variable Indexer here as the
# actual skiplist ops outweigh any window computation costs
- output = np.empty(N, dtype=float)
+ output = np.empty(N, dtype=np.float64)
win = (end - start).max()
if win == 0:
@@ -1132,7 +1140,7 @@ def roll_apply(object obj,
ndarray[int64_t] start, ndarray[int64_t] end,
int64_t minp,
object function, bint raw,
- tuple args, dict kwargs):
+ tuple args, dict kwargs) -> np.ndarray:
cdef:
ndarray[float64_t] output, counts
ndarray[float64_t, cast=True] arr
@@ -1149,7 +1157,7 @@ def roll_apply(object obj,
counts = roll_sum(np.isfinite(arr).astype(float), start, end, minp)
- output = np.empty(N, dtype=float)
+ output = np.empty(N, dtype=np.float64)
for i in range(N):
@@ -1171,11 +1179,15 @@ def roll_apply(object obj,
# Rolling sum and mean for weighted window
-def roll_weighted_sum(const float64_t[:] values, const float64_t[:] weights, int minp):
+def roll_weighted_sum(
+ const float64_t[:] values, const float64_t[:] weights, int minp
+) -> np.ndaray:
return _roll_weighted_sum_mean(values, weights, minp, avg=0)
-def roll_weighted_mean(const float64_t[:] values, const float64_t[:] weights, int minp):
+def roll_weighted_mean(
+ const float64_t[:] values, const float64_t[:] weights, int minp
+) -> np.ndaray:
return _roll_weighted_sum_mean(values, weights, minp, avg=1)
@@ -1434,7 +1446,7 @@ def roll_weighted_var(const float64_t[:] values, const float64_t[:] weights,
n = len(values)
win_n = len(weights)
- output = np.empty(n, dtype=float)
+ output = np.empty(n, dtype=np.float64)
with nogil:
@@ -1474,7 +1486,7 @@ def roll_weighted_var(const float64_t[:] values, const float64_t[:] weights,
def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
int minp, float64_t com, bint adjust, bint ignore_na,
- const float64_t[:] deltas):
+ const float64_t[:] deltas) -> np.ndarray:
"""
Compute exponentially-weighted moving average using center-of-mass.
@@ -1491,13 +1503,13 @@ def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
Returns
-------
- ndarray
+ np.ndarray[float64_t]
"""
cdef:
Py_ssize_t i, j, s, e, nobs, win_size, N = len(vals), M = len(start)
const float64_t[:] sub_deltas, sub_vals
- ndarray[float64_t] sub_output, output = np.empty(N, dtype=float)
+ ndarray[float64_t] sub_output, output = np.empty(N, dtype=np.float64)
float64_t alpha, old_wt_factor, new_wt, weighted_avg, old_wt, cur
bint is_observation
@@ -1516,7 +1528,7 @@ def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
# conjunction with vals[i+1]
sub_deltas = deltas[s:e - 1]
win_size = len(sub_vals)
- sub_output = np.empty(win_size, dtype=float)
+ sub_output = np.empty(win_size, dtype=np.float64)
weighted_avg = sub_vals[0]
is_observation = weighted_avg == weighted_avg
@@ -1559,7 +1571,7 @@ def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
def ewmcov(const float64_t[:] input_x, const int64_t[:] start, const int64_t[:] end,
int minp, const float64_t[:] input_y, float64_t com, bint adjust,
- bint ignore_na, bint bias):
+ bint ignore_na, bint bias) -> np.ndarray:
"""
Compute exponentially-weighted moving variance using center-of-mass.
@@ -1577,7 +1589,7 @@ def ewmcov(const float64_t[:] input_x, const int64_t[:] start, const int64_t[:]
Returns
-------
- ndarray
+ np.ndarray[float64_t]
"""
cdef:
@@ -1587,7 +1599,7 @@ def ewmcov(const float64_t[:] input_x, const int64_t[:] start, const int64_t[:]
float64_t sum_wt, sum_wt2, old_wt, cur_x, cur_y, old_mean_x, old_mean_y
float64_t numerator, denominator
const float64_t[:] sub_x_vals, sub_y_vals
- ndarray[float64_t] sub_out, output = np.empty(N, dtype=float)
+ ndarray[float64_t] sub_out, output = np.empty(N, dtype=np.float64)
bint is_observation
if M != N:
@@ -1606,7 +1618,7 @@ def ewmcov(const float64_t[:] input_x, const int64_t[:] start, const int64_t[:]
sub_x_vals = input_x[s:e]
sub_y_vals = input_y[s:e]
win_size = len(sub_x_vals)
- sub_out = np.empty(win_size, dtype=float)
+ sub_out = np.empty(win_size, dtype=np.float64)
mean_x = sub_x_vals[0]
mean_y = sub_y_vals[0]
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 9208ec615557e..4a210d8b47e9b 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -449,7 +449,7 @@ def vol(self, bias: bool = False, *args, **kwargs):
def var(self, bias: bool = False, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
window_func = window_aggregations.ewmcov
- window_func = partial(
+ wfunc = partial(
window_func,
com=self._com,
adjust=self.adjust,
@@ -458,7 +458,7 @@ def var(self, bias: bool = False, *args, **kwargs):
)
def var_func(values, begin, end, min_periods):
- return window_func(values, begin, end, min_periods, values)
+ return wfunc(values, begin, end, min_periods, values)
return self._apply(var_func)
@@ -518,7 +518,9 @@ def cov_func(x, y):
x_array,
start,
end,
- self.min_periods,
+ # error: Argument 4 to "ewmcov" has incompatible type
+ # "Optional[int]"; expected "int"
+ self.min_periods, # type: ignore[arg-type]
y_array,
self._com,
self.adjust,
@@ -584,12 +586,12 @@ def _cov(X, Y):
X,
start,
end,
- self.min_periods,
+ min_periods,
Y,
self._com,
self.adjust,
self.ignore_na,
- 1,
+ True,
)
with np.errstate(all="ignore"):
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index e4710254d9311..31b09dc8e5973 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -1051,7 +1051,10 @@ def aggregate(self, func, *args, **kwargs):
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
window_func = window_aggregations.roll_weighted_sum
- return self._apply(window_func, name="sum", **kwargs)
+ # error: Argument 1 to "_apply" of "Window" has incompatible type
+ # "Callable[[ndarray, ndarray, int], ndarray]"; expected
+ # "Callable[[ndarray, int, int], ndarray]"
+ return self._apply(window_func, name="sum", **kwargs) # type: ignore[arg-type]
@doc(
template_header,
@@ -1068,7 +1071,10 @@ def sum(self, *args, **kwargs):
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
window_func = window_aggregations.roll_weighted_mean
- return self._apply(window_func, name="mean", **kwargs)
+ # error: Argument 1 to "_apply" of "Window" has incompatible type
+ # "Callable[[ndarray, ndarray, int], ndarray]"; expected
+ # "Callable[[ndarray, int, int], ndarray]"
+ return self._apply(window_func, name="mean", **kwargs) # type: ignore[arg-type]
@doc(
template_header,
| @mroeschke the type:ignores added in ewm.py look like potential bugs. any thoughts? | https://api.github.com/repos/pandas-dev/pandas/pulls/41029 | 2021-04-19T03:52:38Z | 2021-04-28T00:46:56Z | 2021-04-28T00:46:56Z | 2021-04-28T01:06:37Z |
added test case for iloc function if it returns the same output for b… | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 82971e460a8a2..de053b24de70f 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -44,6 +44,7 @@
isna,
)
+from pandas.core.arrays.integer import IntegerArray
import pandas.core.common as com
from pandas.core.construction import array as pd_array
from pandas.core.indexers import (
@@ -1588,10 +1589,11 @@ def _setitem_with_indexer(self, indexer, value, name="iloc"):
BlockManager methods, see GH#12991, GH#22046, GH#15686.
"""
info_axis = self.obj._info_axis_number
-
# maybe partial set
take_split_path = not self.obj._mgr.is_single_block
+ value = np.array(value) if isinstance(value, IntegerArray) else value
+
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if (
@@ -1705,7 +1707,6 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str):
"""
# Above we only set take_split_path to True for 2D cases
assert self.ndim == 2
-
if not isinstance(indexer, tuple):
indexer = _tuplify(self.ndim, indexer)
if len(indexer) > self.ndim:
@@ -1716,8 +1717,8 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str):
if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
from pandas import Series
+ print("value is ABCseries")
value = self._align_series(indexer, Series(value))
-
# Ensure we have something we can iterate over
info_axis = indexer[1]
ilocs = self._ensure_iterable_column_indexer(info_axis)
@@ -1731,6 +1732,7 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str):
if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
if isinstance(value, ABCDataFrame):
+ print("value is ABCDataFrame")
self._setitem_with_indexer_frame_value(indexer, value, name)
elif np.ndim(value) == 2:
diff --git a/pandas/tests/indexing/multiindex/test_iloc.py b/pandas/tests/indexing/multiindex/test_iloc.py
index db91d5ad88252..1cfdfa7da9fb8 100644
--- a/pandas/tests/indexing/multiindex/test_iloc.py
+++ b/pandas/tests/indexing/multiindex/test_iloc.py
@@ -1,6 +1,7 @@
import numpy as np
import pytest
+import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
@@ -73,6 +74,53 @@ def test_iloc_getitem_multiple_items():
tm.assert_frame_equal(result, expected)
+def test_iloc_pd_arr_value():
+ # test if iloc returns the same output for numpy array input and integer array input
+ df = DataFrame(
+ data={
+ "col1": [1, 2, 3, 4],
+ "col2": [3, 4, 5, 6],
+ "col3": [6, 7, 8, 9],
+ }
+ )
+ df_np = df.copy()
+ df_pd = df.copy()
+ df_int = df.copy()
+
+ pd_arr = pd.array([1, 2, 3])
+ np_arr = np.array([1, 2, 3])
+ int_arr = [1, 2, 3]
+ df_pd.iloc[[1, 2, 3]] = pd_arr
+ df_np.iloc[[1, 2, 3]] = np_arr
+ df_int.iloc[[1, 2, 3]] = int_arr
+
+ tm.assert_frame_equal(df_pd, df_np) and tm.assert_frame_equal(df_int, df_np)
+
+
+def test_iloc_pd_arr_value2():
+ # test if iloc returns the same output for numpy array input and integer array input
+ df = DataFrame(
+ data={
+ "col1": [1, 2, 3, 4],
+ "col2": [3, 4, 5, 6],
+ "col3": [6, 7, 8, 9],
+ }
+ )
+ df_np = df.copy()
+ df_pd = df.copy()
+ df_int = df.copy()
+
+ pd_arr = pd.array([1, 2, 3])
+ np_arr = np.array([1, 2, 3])
+ int_arr = [1, 2, 3]
+
+ df_pd.iloc[[1, 2, 3], :] = pd_arr
+ df_np.iloc[[1, 2, 3], :] = np_arr
+ df_int.iloc[[1, 2, 3], :] = int_arr
+
+ tm.assert_frame_equal(df_pd, df_np) and tm.assert_frame_equal(df_int, df_np)
+
+
def test_iloc_getitem_labels():
# this is basically regular indexing
arr = np.random.randn(4, 3)
diff --git a/pip b/pip
new file mode 160000
index 0000000000000..e6a65fc5852b0
--- /dev/null
+++ b/pip
@@ -0,0 +1 @@
+Subproject commit e6a65fc5852b0237bb588b00e51ea9384b8f23e4
| …oth numpy and pandas arrays to fix the issue #40933
- [x] closes #40933
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/41028 | 2021-04-19T01:17:54Z | 2021-05-25T10:46:55Z | null | 2021-05-25T10:46:55Z |
BUG: Handling NaN case before dtype conversion #40638 | diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 45656459792ba..f23ab6b45b024 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -355,9 +355,14 @@ def to_numpy( # type: ignore[override]
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
- result = np.asarray(self._ndarray, dtype=dtype)
+ temp = self
- if (copy or na_value is not lib.no_default) and result is self._ndarray:
+ if na_value is not lib.no_default:
+ temp.fillna(method="bfill")
+
+ result = np.asarray(temp._ndarray, dtype=dtype)
+
+ if (copy or na_value is not lib.no_default) and result is temp._ndarray:
result = result.copy()
if na_value is not lib.no_default:
| - [ ] closes #40638
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
Handles the NA/NaN case before the dtype conversion | https://api.github.com/repos/pandas-dev/pandas/pulls/41027 | 2021-04-19T01:05:37Z | 2021-05-25T10:55:12Z | null | 2021-05-25T10:55:12Z |
BUG: Changing return is_named_tuble to a tuple sublclass #40682 | diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index 58da2570015b5..c2759be07c857 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -315,7 +315,7 @@ def is_named_tuple(obj) -> bool:
>>> is_named_tuple((1, 2))
False
"""
- return isinstance(obj, tuple) and hasattr(obj, "_fields")
+ return isinstance(obj, abc.Sequence) and hasattr(obj, "_fields")
def is_hashable(obj) -> bool:
| - [ ] closes #40682
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
Changed pandas/core/dtypes/inference.py so that the return of the is_named_tuple function to be a tuple subclass so that it is compatible with sqlalchemy.
Passed test_inference.py | https://api.github.com/repos/pandas-dev/pandas/pulls/41026 | 2021-04-18T20:17:54Z | 2021-06-16T13:04:54Z | null | 2021-06-16T13:04:54Z |
[ArrowStringArray] implement ArrowStringArray._str_contains | diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py
index 5d9b1c135d7ae..45a9053954569 100644
--- a/asv_bench/benchmarks/strings.py
+++ b/asv_bench/benchmarks/strings.py
@@ -213,13 +213,18 @@ def time_cat(self, other_cols, sep, na_rep, na_frac):
class Contains:
- params = [True, False]
- param_names = ["regex"]
+ params = (["str", "string", "arrow_string"], [True, False])
+ param_names = ["dtype", "regex"]
+
+ def setup(self, dtype, regex):
+ from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
- def setup(self, regex):
- self.s = Series(tm.makeStringIndex(10 ** 5))
+ try:
+ self.s = Series(tm.makeStringIndex(10 ** 5), dtype=dtype)
+ except ImportError:
+ raise NotImplementedError
- def time_contains(self, regex):
+ def time_contains(self, dtype, regex):
self.s.str.contains("A", regex=regex)
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 55cb350d3d27c..b7a0e70180ae4 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -759,6 +759,16 @@ def _str_map(self, f, na_value=None, dtype: Dtype | None = None):
# -> We don't know the result type. E.g. `.get` can return anything.
return lib.map_infer_mask(arr, f, mask.view("uint8"))
+ def _str_contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
+ if not regex and case:
+ result = pc.match_substring(self._data, pat)
+ result = BooleanDtype().__from_arrow__(result)
+ if not isna(na):
+ result[isna(result)] = bool(na)
+ return result
+ else:
+ return super()._str_contains(pat, case, flags, na, regex)
+
def _str_isalnum(self):
if hasattr(pc, "utf8_is_alnum"):
result = pc.utf8_is_alnum(self._data)
diff --git a/pandas/tests/strings/test_find_replace.py b/pandas/tests/strings/test_find_replace.py
index ab95b2071ae10..d801d3457027f 100644
--- a/pandas/tests/strings/test_find_replace.py
+++ b/pandas/tests/strings/test_find_replace.py
@@ -4,6 +4,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
import pandas as pd
from pandas import (
Index,
@@ -12,79 +14,118 @@
)
-def test_contains():
+@pytest.fixture(
+ params=[
+ "object",
+ "string",
+ pytest.param(
+ "arrow_string", marks=td.skip_if_no("pyarrow", min_version="1.0.0")
+ ),
+ ]
+)
+def any_string_dtype(request):
+ """
+ Parametrized fixture for string dtypes.
+ * 'object'
+ * 'string'
+ * 'arrow_string'
+ """
+ from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
+
+ return request.param
+
+
+def test_contains(any_string_dtype):
values = np.array(
["foo", np.nan, "fooommm__foo", "mmm_", "foommm[_]+bar"], dtype=np.object_
)
- values = Series(values)
+ values = Series(values, dtype=any_string_dtype)
pat = "mmm[_]+"
result = values.str.contains(pat)
- expected = Series(np.array([False, np.nan, True, True, False], dtype=np.object_))
+ expected_dtype = "object" if any_string_dtype == "object" else "boolean"
+ expected = Series(
+ np.array([False, np.nan, True, True, False], dtype=np.object_),
+ dtype=expected_dtype,
+ )
tm.assert_series_equal(result, expected)
result = values.str.contains(pat, regex=False)
- expected = Series(np.array([False, np.nan, False, False, True], dtype=np.object_))
+ expected = Series(
+ np.array([False, np.nan, False, False, True], dtype=np.object_),
+ dtype=expected_dtype,
+ )
tm.assert_series_equal(result, expected)
- values = Series(np.array(["foo", "xyz", "fooommm__foo", "mmm_"], dtype=object))
+ values = Series(
+ np.array(["foo", "xyz", "fooommm__foo", "mmm_"], dtype=object),
+ dtype=any_string_dtype,
+ )
result = values.str.contains(pat)
- expected = Series(np.array([False, False, True, True]))
- assert result.dtype == np.bool_
+ expected_dtype = np.bool_ if any_string_dtype == "object" else "boolean"
+ expected = Series(np.array([False, False, True, True]), dtype=expected_dtype)
tm.assert_series_equal(result, expected)
# case insensitive using regex
- values = Series(np.array(["Foo", "xYz", "fOOomMm__fOo", "MMM_"], dtype=object))
+ values = Series(
+ np.array(["Foo", "xYz", "fOOomMm__fOo", "MMM_"], dtype=object),
+ dtype=any_string_dtype,
+ )
result = values.str.contains("FOO|mmm", case=False)
- expected = Series(np.array([True, False, True, True]))
+ expected = Series(np.array([True, False, True, True]), dtype=expected_dtype)
tm.assert_series_equal(result, expected)
# case insensitive without regex
- result = Series(values).str.contains("foo", regex=False, case=False)
- expected = Series(np.array([True, False, True, False]))
+ result = values.str.contains("foo", regex=False, case=False)
+ expected = Series(np.array([True, False, True, False]), dtype=expected_dtype)
tm.assert_series_equal(result, expected)
- # mixed
+ # unicode
+ values = Series(
+ np.array(["foo", np.nan, "fooommm__foo", "mmm_"], dtype=np.object_),
+ dtype=any_string_dtype,
+ )
+ pat = "mmm[_]+"
+
+ result = values.str.contains(pat)
+ expected_dtype = "object" if any_string_dtype == "object" else "boolean"
+ expected = Series(
+ np.array([False, np.nan, True, True], dtype=np.object_), dtype=expected_dtype
+ )
+ tm.assert_series_equal(result, expected)
+
+ result = values.str.contains(pat, na=False)
+ expected_dtype = np.bool_ if any_string_dtype == "object" else "boolean"
+ expected = Series(np.array([False, False, True, True]), dtype=expected_dtype)
+ tm.assert_series_equal(result, expected)
+
+ values = Series(
+ np.array(["foo", "xyz", "fooommm__foo", "mmm_"], dtype=np.object_),
+ dtype=any_string_dtype,
+ )
+ result = values.str.contains(pat)
+ expected = Series(np.array([False, False, True, True]), dtype=expected_dtype)
+ tm.assert_series_equal(result, expected)
+
+
+def test_contains_object_mixed():
mixed = Series(
np.array(
["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],
dtype=object,
)
)
- rs = mixed.str.contains("o")
- xp = Series(
+ result = mixed.str.contains("o")
+ expected = Series(
np.array(
[False, np.nan, False, np.nan, np.nan, True, np.nan, np.nan, np.nan],
dtype=np.object_,
)
)
- tm.assert_series_equal(rs, xp)
-
- rs = mixed.str.contains("o")
- xp = Series([False, np.nan, False, np.nan, np.nan, True, np.nan, np.nan, np.nan])
- assert isinstance(rs, Series)
- tm.assert_series_equal(rs, xp)
-
- # unicode
- values = Series(np.array(["foo", np.nan, "fooommm__foo", "mmm_"], dtype=np.object_))
- pat = "mmm[_]+"
-
- result = values.str.contains(pat)
- expected = Series(np.array([False, np.nan, True, True], dtype=np.object_))
- tm.assert_series_equal(result, expected)
-
- result = values.str.contains(pat, na=False)
- expected = Series(np.array([False, False, True, True]))
- tm.assert_series_equal(result, expected)
-
- values = Series(np.array(["foo", "xyz", "fooommm__foo", "mmm_"], dtype=np.object_))
- result = values.str.contains(pat)
- expected = Series(np.array([False, False, True, True]))
- assert result.dtype == np.bool_
tm.assert_series_equal(result, expected)
-def test_contains_for_object_category():
+def test_contains_na_kwarg_for_object_category():
# gh 22158
# na for category
@@ -108,6 +149,29 @@ def test_contains_for_object_category():
tm.assert_series_equal(result, expected)
+@pytest.mark.parametrize(
+ "na, expected",
+ [
+ (None, pd.NA),
+ (True, True),
+ (False, False),
+ (0, False),
+ (3, True),
+ (np.nan, pd.NA),
+ ],
+)
+@pytest.mark.parametrize("regex", [True, False])
+def test_contains_na_kwarg_for_nullable_string_dtype(
+ nullable_string_dtype, na, expected, regex
+):
+ # https://github.com/pandas-dev/pandas/pull/41025#issuecomment-824062416
+
+ values = Series(["a", "b", "c", "a", np.nan], dtype=nullable_string_dtype)
+ result = values.str.contains("a", na=na, regex=regex)
+ expected = Series([True, False, False, True, expected], dtype="boolean")
+ tm.assert_series_equal(result, expected)
+
+
@pytest.mark.parametrize("dtype", [None, "category"])
@pytest.mark.parametrize("null_value", [None, np.nan, pd.NA])
@pytest.mark.parametrize("na", [True, False])
@@ -508,59 +572,73 @@ def _check(result, expected):
tm.assert_series_equal(result, expected)
-def test_contains_moar():
+def test_contains_moar(any_string_dtype):
# PR #1179
- s = Series(["A", "B", "C", "Aaba", "Baca", "", np.nan, "CABA", "dog", "cat"])
+ s = Series(
+ ["A", "B", "C", "Aaba", "Baca", "", np.nan, "CABA", "dog", "cat"],
+ dtype=any_string_dtype,
+ )
result = s.str.contains("a")
+ expected_dtype = "object" if any_string_dtype == "object" else "boolean"
expected = Series(
- [False, False, False, True, True, False, np.nan, False, False, True]
+ [False, False, False, True, True, False, np.nan, False, False, True],
+ dtype=expected_dtype,
)
tm.assert_series_equal(result, expected)
result = s.str.contains("a", case=False)
expected = Series(
- [True, False, False, True, True, False, np.nan, True, False, True]
+ [True, False, False, True, True, False, np.nan, True, False, True],
+ dtype=expected_dtype,
)
tm.assert_series_equal(result, expected)
result = s.str.contains("Aa")
expected = Series(
- [False, False, False, True, False, False, np.nan, False, False, False]
+ [False, False, False, True, False, False, np.nan, False, False, False],
+ dtype=expected_dtype,
)
tm.assert_series_equal(result, expected)
result = s.str.contains("ba")
expected = Series(
- [False, False, False, True, False, False, np.nan, False, False, False]
+ [False, False, False, True, False, False, np.nan, False, False, False],
+ dtype=expected_dtype,
)
tm.assert_series_equal(result, expected)
result = s.str.contains("ba", case=False)
expected = Series(
- [False, False, False, True, True, False, np.nan, True, False, False]
+ [False, False, False, True, True, False, np.nan, True, False, False],
+ dtype=expected_dtype,
)
tm.assert_series_equal(result, expected)
-def test_contains_nan():
+def test_contains_nan(any_string_dtype):
# PR #14171
- s = Series([np.nan, np.nan, np.nan], dtype=np.object_)
+ s = Series([np.nan, np.nan, np.nan], dtype=any_string_dtype)
result = s.str.contains("foo", na=False)
- expected = Series([False, False, False], dtype=np.bool_)
+ expected_dtype = np.bool_ if any_string_dtype == "object" else "boolean"
+ expected = Series([False, False, False], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
result = s.str.contains("foo", na=True)
- expected = Series([True, True, True], dtype=np.bool_)
+ expected = Series([True, True, True], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
result = s.str.contains("foo", na="foo")
- expected = Series(["foo", "foo", "foo"], dtype=np.object_)
+ if any_string_dtype == "object":
+ expected = Series(["foo", "foo", "foo"], dtype=np.object_)
+ else:
+ expected = Series([True, True, True], dtype="boolean")
tm.assert_series_equal(result, expected)
result = s.str.contains("foo")
- expected = Series([np.nan, np.nan, np.nan], dtype=np.object_)
+ expected_dtype = "object" if any_string_dtype == "object" else "boolean"
+ expected = Series([np.nan, np.nan, np.nan], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
@@ -609,14 +687,14 @@ def test_replace_moar():
tm.assert_series_equal(result, expected)
-def test_match_findall_flags():
+def test_flags_kwarg(any_string_dtype):
data = {
"Dave": "dave@google.com",
"Steve": "steve@gmail.com",
"Rob": "rob@gmail.com",
"Wes": np.nan,
}
- data = Series(data)
+ data = Series(data, dtype=any_string_dtype)
pat = r"([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})"
| not yet dealt with `na`. no tests are failing so we need tests for this.
we can either use the fallback when `na` is specified or handle `na` in the array method, which may be more performant.
should we replicate StringArray:
```
>>> s = pd.Series(["Mouse", "dog", "house and parrot", "23", np.NaN], dtype="string")
>>>
>>> s.str.contains("og", na=3, regex=False)
0 False
1 True
2 False
3 False
4 True
dtype: boolean
>>>
>>> s.str.contains("og", na=np.nan, regex=False)
0 False
1 True
2 False
3 False
4 <NA>
dtype: boolean
>>>
```
or return an object array to preserve `na` if not pd.NA, True or False instead of coercing to bool/null? | https://api.github.com/repos/pandas-dev/pandas/pulls/41025 | 2021-04-18T19:41:59Z | 2021-04-26T12:15:39Z | 2021-04-26T12:15:39Z | 2021-05-01T15:51:56Z |
REF: pass arguments to Index._foo_indexer correctly | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 310ee4c3a63e3..7f969ea5a26af 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -302,23 +302,47 @@ class Index(IndexOpsMixin, PandasObject):
# for why we need to wrap these instead of making them class attributes
# Moreover, cython will choose the appropriate-dtyped sub-function
# given the dtypes of the passed arguments
- def _left_indexer_unique(self, left: np.ndarray, right: np.ndarray) -> np.ndarray:
- return libjoin.left_join_indexer_unique(left, right)
+ @final
+ def _left_indexer_unique(self: _IndexT, other: _IndexT) -> np.ndarray:
+ # -> np.ndarray[np.intp]
+ # Caller is responsible for ensuring other.dtype == self.dtype
+ sv = self._get_join_target()
+ ov = other._get_join_target()
+ return libjoin.left_join_indexer_unique(sv, ov)
+
+ @final
def _left_indexer(
- self, left: np.ndarray, right: np.ndarray
- ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
- return libjoin.left_join_indexer(left, right)
+ self: _IndexT, other: _IndexT
+ ) -> tuple[ArrayLike, np.ndarray, np.ndarray]:
+ # Caller is responsible for ensuring other.dtype == self.dtype
+ sv = self._get_join_target()
+ ov = other._get_join_target()
+ joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov)
+ joined = self._from_join_target(joined_ndarray)
+ return joined, lidx, ridx
+ @final
def _inner_indexer(
- self, left: np.ndarray, right: np.ndarray
- ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
- return libjoin.inner_join_indexer(left, right)
+ self: _IndexT, other: _IndexT
+ ) -> tuple[ArrayLike, np.ndarray, np.ndarray]:
+ # Caller is responsible for ensuring other.dtype == self.dtype
+ sv = self._get_join_target()
+ ov = other._get_join_target()
+ joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov)
+ joined = self._from_join_target(joined_ndarray)
+ return joined, lidx, ridx
+ @final
def _outer_indexer(
- self, left: np.ndarray, right: np.ndarray
- ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
- return libjoin.outer_join_indexer(left, right)
+ self: _IndexT, other: _IndexT
+ ) -> tuple[ArrayLike, np.ndarray, np.ndarray]:
+ # Caller is responsible for ensuring other.dtype == self.dtype
+ sv = self._get_join_target()
+ ov = other._get_join_target()
+ joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov)
+ joined = self._from_join_target(joined_ndarray)
+ return joined, lidx, ridx
_typ = "index"
_data: ExtensionArray | np.ndarray
@@ -2965,11 +2989,7 @@ def _union(self, other: Index, sort):
):
# Both are unique and monotonic, so can use outer join
try:
- # error: Argument 1 to "_outer_indexer" of "Index" has incompatible type
- # "Union[ExtensionArray, ndarray]"; expected "ndarray"
- # error: Argument 2 to "_outer_indexer" of "Index" has incompatible type
- # "Union[ExtensionArray, ndarray]"; expected "ndarray"
- return self._outer_indexer(lvals, rvals)[0] # type: ignore[arg-type]
+ return self._outer_indexer(other)[0]
except (TypeError, IncompatibleFrequency):
# incomparable objects
value_list = list(lvals)
@@ -3090,13 +3110,10 @@ def _intersection(self, other: Index, sort=False):
"""
# TODO(EA): setops-refactor, clean all this up
lvals = self._values
- rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
- # error: Argument 1 to "_inner_indexer" of "Index" has incompatible type
- # "Union[ExtensionArray, ndarray]"; expected "ndarray"
- result = self._inner_indexer(lvals, rvals)[0] # type: ignore[arg-type]
+ result = self._inner_indexer(other)[0]
except TypeError:
pass
else:
@@ -4095,8 +4112,8 @@ def _join_non_unique(self, other, how="left"):
# We only get here if dtypes match
assert self.dtype == other.dtype
- lvalues = self._get_engine_target()
- rvalues = other._get_engine_target()
+ lvalues = self._get_join_target()
+ rvalues = other._get_join_target()
left_idx, right_idx = get_join_indexers(
[lvalues], [rvalues], how=how, sort=True
@@ -4109,7 +4126,8 @@ def _join_non_unique(self, other, how="left"):
mask = left_idx == -1
np.putmask(join_array, mask, rvalues.take(right_idx))
- join_index = self._wrap_joined_index(join_array, other)
+ join_arraylike = self._from_join_target(join_array)
+ join_index = self._wrap_joined_index(join_arraylike, other)
return join_index, left_idx, right_idx
@@ -4267,9 +4285,6 @@ def _join_monotonic(self, other: Index, how="left"):
ret_index = other if how == "right" else self
return ret_index, None, None
- sv = self._get_engine_target()
- ov = other._get_engine_target()
-
ridx: np.ndarray | None
lidx: np.ndarray | None
@@ -4278,26 +4293,26 @@ def _join_monotonic(self, other: Index, how="left"):
if how == "left":
join_index = self
lidx = None
- ridx = self._left_indexer_unique(sv, ov)
+ ridx = self._left_indexer_unique(other)
elif how == "right":
join_index = other
- lidx = self._left_indexer_unique(ov, sv)
+ lidx = other._left_indexer_unique(self)
ridx = None
elif how == "inner":
- join_array, lidx, ridx = self._inner_indexer(sv, ov)
+ join_array, lidx, ridx = self._inner_indexer(other)
join_index = self._wrap_joined_index(join_array, other)
elif how == "outer":
- join_array, lidx, ridx = self._outer_indexer(sv, ov)
+ join_array, lidx, ridx = self._outer_indexer(other)
join_index = self._wrap_joined_index(join_array, other)
else:
if how == "left":
- join_array, lidx, ridx = self._left_indexer(sv, ov)
+ join_array, lidx, ridx = self._left_indexer(other)
elif how == "right":
- join_array, ridx, lidx = self._left_indexer(ov, sv)
+ join_array, ridx, lidx = other._left_indexer(self)
elif how == "inner":
- join_array, lidx, ridx = self._inner_indexer(sv, ov)
+ join_array, lidx, ridx = self._inner_indexer(other)
elif how == "outer":
- join_array, lidx, ridx = self._outer_indexer(sv, ov)
+ join_array, lidx, ridx = self._outer_indexer(other)
join_index = self._wrap_joined_index(join_array, other)
@@ -4305,9 +4320,7 @@ def _join_monotonic(self, other: Index, how="left"):
ridx = None if ridx is None else ensure_platform_int(ridx)
return join_index, lidx, ridx
- def _wrap_joined_index(
- self: _IndexT, joined: np.ndarray, other: _IndexT
- ) -> _IndexT:
+ def _wrap_joined_index(self: _IndexT, joined: ArrayLike, other: _IndexT) -> _IndexT:
assert other.dtype == self.dtype
if isinstance(self, ABCMultiIndex):
@@ -4385,6 +4398,19 @@ def _get_engine_target(self) -> np.ndarray:
# ndarray]", expected "ndarray")
return self._values # type: ignore[return-value]
+ def _get_join_target(self) -> np.ndarray:
+ """
+ Get the ndarray that we will pass to libjoin functions.
+ """
+ return self._get_engine_target()
+
+ def _from_join_target(self, result: np.ndarray) -> ArrayLike:
+ """
+ Cast the ndarray returned from one of the libjoin.foo_indexer functions
+ back to type(self)._data.
+ """
+ return result
+
@doc(IndexOpsMixin._memory_usage)
def memory_usage(self, deep: bool = False) -> int:
result = self._memory_usage(deep=deep)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 7bc0655ea9529..b2d2c98c08f68 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -20,7 +20,6 @@
NaT,
Timedelta,
iNaT,
- join as libjoin,
lib,
)
from pandas._libs.tslibs import (
@@ -75,36 +74,6 @@
_T = TypeVar("_T", bound="DatetimeIndexOpsMixin")
-def _join_i8_wrapper(joinf, with_indexers: bool = True):
- """
- Create the join wrapper methods.
- """
-
- # error: 'staticmethod' used with a non-method
- @staticmethod # type: ignore[misc]
- def wrapper(left, right):
- # Note: these only get called with left.dtype == right.dtype
- orig_left = left
-
- left = left.view("i8")
- right = right.view("i8")
-
- results = joinf(left, right)
- if with_indexers:
-
- join_index, left_indexer, right_indexer = results
- if not isinstance(orig_left, np.ndarray):
- # When called from Index._intersection/_union, we have the EA
- join_index = join_index.view(orig_left._ndarray.dtype)
- join_index = orig_left._from_backing_data(join_index)
-
- return join_index, left_indexer, right_indexer
-
- return results
-
- return wrapper
-
-
@inherit_names(
["inferred_freq", "_resolution_obj", "resolution"],
DatetimeLikeArrayMixin,
@@ -603,13 +572,6 @@ def insert(self, loc: int, item):
# --------------------------------------------------------------------
# Join/Set Methods
- _inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer)
- _outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer)
- _left_indexer = _join_i8_wrapper(libjoin.left_join_indexer)
- _left_indexer_unique = _join_i8_wrapper(
- libjoin.left_join_indexer_unique, with_indexers=False
- )
-
def _get_join_freq(self, other):
"""
Get the freq to attach to the result of a join operation.
@@ -621,14 +583,22 @@ def _get_join_freq(self, other):
freq = self.freq if self._can_fast_union(other) else None
return freq
- def _wrap_joined_index(self, joined: np.ndarray, other):
+ def _wrap_joined_index(self, joined, other):
assert other.dtype == self.dtype, (other.dtype, self.dtype)
- assert joined.dtype == "i8" or joined.dtype == self.dtype, joined.dtype
- joined = joined.view(self._data._ndarray.dtype)
result = super()._wrap_joined_index(joined, other)
result._data._freq = self._get_join_freq(other)
return result
+ def _get_join_target(self) -> np.ndarray:
+ return self._data._ndarray.view("i8")
+
+ def _from_join_target(self, result: np.ndarray):
+ # view e.g. i8 back to M8[ns]
+ result = result.view(self._data._ndarray.dtype)
+ return self._data._from_backing_data(result)
+
+ # --------------------------------------------------------------------
+
@doc(Index._convert_arr_indexer)
def _convert_arr_indexer(self, keyarr):
try:
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index b11ec06120e0c..d593ddc640967 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -11,6 +11,7 @@
import numpy as np
+from pandas._typing import ArrayLike
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
@@ -300,6 +301,11 @@ def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
def _get_engine_target(self) -> np.ndarray:
return np.asarray(self._data)
+ def _from_join_target(self, result: np.ndarray) -> ArrayLike:
+ # ATM this is only for IntervalIndex, implicit assumption
+ # about _get_engine_target
+ return type(self._data)._from_sequence(result, dtype=self.dtype)
+
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
@@ -410,6 +416,10 @@ def _simple_new(
def _get_engine_target(self) -> np.ndarray:
return self._data._ndarray
+ def _from_join_target(self, result: np.ndarray) -> ArrayLike:
+ assert result.dtype == self._data._ndarray.dtype
+ return self._data._from_backing_data(result)
+
def insert(self: _T, loc: int, item) -> Index:
"""
Make new Index inserting new item at location. Follows
@@ -458,7 +468,11 @@ def putmask(self, mask, value) -> Index:
return type(self)._simple_new(res_values, name=self.name)
- def _wrap_joined_index(self: _T, joined: np.ndarray, other: _T) -> _T:
+ # error: Argument 1 of "_wrap_joined_index" is incompatible with supertype
+ # "Index"; supertype defines the argument type as "Union[ExtensionArray, ndarray]"
+ def _wrap_joined_index( # type: ignore[override]
+ self: _T, joined: NDArrayBackedExtensionArray, other: _T
+ ) -> _T:
name = get_op_result_name(self, other)
- arr = self._data._from_backing_data(joined)
- return type(self)._simple_new(arr, name=name)
+
+ return type(self)._simple_new(joined, name=name)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 59ff128713aca..794f13bbfb6b1 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3613,14 +3613,12 @@ def _maybe_match_names(self, other):
def _intersection(self, other, sort=False) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
-
- lvals = self._values
- rvals = other._values.astype(object, copy=False)
+ other = other.astype(object, copy=False)
uniq_tuples = None # flag whether _inner_indexer was successful
if self.is_monotonic and other.is_monotonic:
try:
- inner_tuples = self._inner_indexer(lvals, rvals)[0]
+ inner_tuples = self._inner_indexer(other)[0]
sort = False # inner_tuples is already sorted
except TypeError:
pass
diff --git a/pandas/tests/indexes/period/test_join.py b/pandas/tests/indexes/period/test_join.py
index 77dcd38b239ec..b8b15708466cb 100644
--- a/pandas/tests/indexes/period/test_join.py
+++ b/pandas/tests/indexes/period/test_join.py
@@ -15,7 +15,7 @@ class TestJoin:
def test_join_outer_indexer(self):
pi = period_range("1/1/2000", "1/20/2000", freq="D")
- result = pi._outer_indexer(pi._values, pi._values)
+ result = pi._outer_indexer(pi)
tm.assert_extension_array_equal(result[0], pi._values)
tm.assert_numpy_array_equal(result[1], np.arange(len(pi), dtype=np.intp))
tm.assert_numpy_array_equal(result[2], np.arange(len(pi), dtype=np.intp))
| ATM when we call self._inner_indexer in Index._union it raises on CategoricalIndex bc we are calling it incorrectly. Fixing that required moving the unwrapping/wrapping for Index._foo_indexer closer to just around the cython calls. | https://api.github.com/repos/pandas-dev/pandas/pulls/41024 | 2021-04-18T18:27:47Z | 2021-04-19T13:41:50Z | 2021-04-19T13:41:50Z | 2021-04-19T14:55:09Z |
ENH: check for string and convert to list in DataFrame.dropna subset argument | diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 16ee728a4425a..d6ad5eb2003ce 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -177,6 +177,7 @@ Other enhancements
- :meth:`DataFrame.__pos__`, :meth:`DataFrame.__neg__` now retain ``ExtensionDtype`` dtypes (:issue:`43883`)
- The error raised when an optional dependency can't be imported now includes the original exception, for easier investigation (:issue:`43882`)
- Added :meth:`.ExponentialMovingWindow.sum` (:issue:`13297`)
+- :meth:`DataFrame.dropna` now accepts a single label as ``subset`` along with array-like (:issue:`41021`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index c29a67c4942db..2b2c11bc6eeb5 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5931,7 +5931,7 @@ def dropna(
axis: Axis = 0,
how: str = "any",
thresh=None,
- subset=None,
+ subset: IndexLabel = None,
inplace: bool = False,
):
"""
@@ -5963,7 +5963,7 @@ def dropna(
thresh : int, optional
Require that many non-NA values.
- subset : array-like, optional
+ subset : column label or sequence of labels, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
@@ -6047,11 +6047,14 @@ def dropna(
agg_obj = self
if subset is not None:
+ # subset needs to be list
+ if not is_list_like(subset):
+ subset = [subset]
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
- raise KeyError(list(np.compress(check, subset)))
+ raise KeyError(np.array(subset)[check].tolist())
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
diff --git a/pandas/tests/frame/methods/test_dropna.py b/pandas/tests/frame/methods/test_dropna.py
index bc2b48d3312d7..1207c2763db07 100644
--- a/pandas/tests/frame/methods/test_dropna.py
+++ b/pandas/tests/frame/methods/test_dropna.py
@@ -243,3 +243,27 @@ def test_dropna_pos_args_deprecation(self):
result = df.dropna(1)
expected = DataFrame({"a": [1, 2, 3]})
tm.assert_frame_equal(result, expected)
+
+ def test_set_single_column_subset(self):
+ # GH 41021
+ df = DataFrame({"A": [1, 2, 3], "B": list("abc"), "C": [4, np.NaN, 5]})
+ expected = DataFrame(
+ {"A": [1, 3], "B": list("ac"), "C": [4.0, 5.0]}, index=[0, 2]
+ )
+ result = df.dropna(subset="C")
+ tm.assert_frame_equal(result, expected)
+
+ def test_single_column_not_present_in_axis(self):
+ # GH 41021
+ df = DataFrame({"A": [1, 2, 3]})
+
+ # Column not present
+ with pytest.raises(KeyError, match="['D']"):
+ df.dropna(subset="D", axis=0)
+
+ def test_subset_is_nparray(self):
+ # GH 41021
+ df = DataFrame({"A": [1, 2, np.NaN], "B": list("abc"), "C": [4, np.NaN, 5]})
+ expected = DataFrame({"A": [1.0], "B": ["a"], "C": [4.0]})
+ result = df.dropna(subset=np.array(["A", "C"]))
+ tm.assert_frame_equal(result, expected)
| - [x] closes #41021
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/41022 | 2021-04-18T18:04:22Z | 2021-10-18T15:48:48Z | 2021-10-18T15:48:47Z | 2021-10-18T15:48:58Z |
BUG: isna not returning copy for MaskedArray | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 85d9acff353be..1a11fffbf6b4e 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -749,6 +749,7 @@ Missing
- Bug in :class:`Grouper` now correctly propagates ``dropna`` argument and :meth:`DataFrameGroupBy.transform` now correctly handles missing values for ``dropna=True`` (:issue:`35612`)
- Bug in :func:`isna`, and :meth:`Series.isna`, :meth:`Index.isna`, :meth:`DataFrame.isna` (and the corresponding ``notna`` functions) not recognizing ``Decimal("NaN")`` objects (:issue:`39409`)
- Bug in :meth:`DataFrame.fillna` not accepting dictionary for ``downcast`` keyword (:issue:`40809`)
+- Bug in :func:`isna` not returning a copy of the mask for nullable types, causing any subsequent mask modification to change the original array (:issue:`40935`)
MultiIndex
^^^^^^^^^^
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 93de1cd91d625..11f9f645920ec 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -352,7 +352,7 @@ def _hasna(self) -> bool:
return self._mask.any() # type: ignore[return-value]
def isna(self) -> np.ndarray:
- return self._mask
+ return self._mask.copy()
@property
def _na_value(self):
diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py
index c501694a7c2d5..3d43dc47b5280 100644
--- a/pandas/tests/extension/base/missing.py
+++ b/pandas/tests/extension/base/missing.py
@@ -1,7 +1,9 @@
import numpy as np
+import pytest
import pandas as pd
import pandas._testing as tm
+from pandas.api.types import is_sparse
from pandas.tests.extension.base.base import BaseExtensionTests
@@ -21,6 +23,17 @@ def test_isna(self, data_missing):
expected = pd.Series([], dtype=bool)
self.assert_series_equal(result, expected)
+ @pytest.mark.parametrize("na_func", ["isna", "notna"])
+ def test_isna_returns_copy(self, data_missing, na_func):
+ result = pd.Series(data_missing)
+ expected = result.copy()
+ mask = getattr(result, na_func)()
+ if is_sparse(mask):
+ mask = np.array(mask)
+
+ mask[:] = True
+ self.assert_series_equal(result, expected)
+
def test_dropna_array(self, data_missing):
result = data_missing.dropna()
expected = data_missing[[1]]
| - [x] closes #40935
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
Think further improvements like zero-copy access fall under #34873, so #40935 can be closed. | https://api.github.com/repos/pandas-dev/pandas/pulls/41020 | 2021-04-18T17:52:55Z | 2021-04-20T12:52:12Z | 2021-04-20T12:52:12Z | 2021-04-20T13:04:51Z |
Backport PR #41015 on branch 1.2.x (Revert "Skipt failing tests for numpy dev (#40877)") | diff --git a/pandas/tests/arrays/boolean/test_arithmetic.py b/pandas/tests/arrays/boolean/test_arithmetic.py
index 9b854a81f2def..01de64568a011 100644
--- a/pandas/tests/arrays/boolean/test_arithmetic.py
+++ b/pandas/tests/arrays/boolean/test_arithmetic.py
@@ -66,10 +66,7 @@ def test_div(left_array, right_array):
@pytest.mark.parametrize(
"opname",
[
- pytest.param(
- "floordiv",
- marks=pytest.mark.xfail(reason="NumpyDev GH#40874", strict=False),
- ),
+ "floordiv",
"mod",
pytest.param(
"pow", marks=pytest.mark.xfail(reason="TODO follow int8 behaviour? GH34686")
diff --git a/pandas/tests/arrays/masked/test_arithmetic.py b/pandas/tests/arrays/masked/test_arithmetic.py
index 34686f6052131..148b7092abb56 100644
--- a/pandas/tests/arrays/masked/test_arithmetic.py
+++ b/pandas/tests/arrays/masked/test_arithmetic.py
@@ -3,8 +3,6 @@
import numpy as np
import pytest
-from pandas.compat.numpy import is_numpy_dev
-
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import ExtensionArray
@@ -51,8 +49,6 @@ def test_array_scalar_like_equivalence(data, all_arithmetic_operators):
def test_array_NA(data, all_arithmetic_operators):
if "truediv" in all_arithmetic_operators:
pytest.skip("division with pd.NA raises")
- if "floordiv" in all_arithmetic_operators and is_numpy_dev:
- pytest.skip("NumpyDev behavior GH#40874")
data, _ = data
op = tm.get_op_from_name(all_arithmetic_operators)
check_skip(data, all_arithmetic_operators)
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index d15c822f22c14..86a0bc9213256 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -16,8 +16,6 @@
import numpy as np
import pytest
-from pandas.compat.numpy import is_numpy_dev
-
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.boolean import BooleanDtype
@@ -141,21 +139,6 @@ def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
with pytest.raises(exc):
op(s, other)
- def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
- if "floordiv" in all_arithmetic_operators and is_numpy_dev:
- pytest.skip("NumpyDev behavior GH#40874")
- super().test_arith_series_with_scalar(data, all_arithmetic_operators)
-
- def test_arith_series_with_array(self, data, all_arithmetic_operators):
- if "floordiv" in all_arithmetic_operators and is_numpy_dev:
- pytest.skip("NumpyDev behavior GH#40874")
- super().test_arith_series_with_scalar(data, all_arithmetic_operators)
-
- def test_divmod_series_array(self, data, data_for_twos):
- if is_numpy_dev:
- pytest.skip("NumpyDev behavior GH#40874")
- super().test_divmod_series_array(data, data_for_twos)
-
def _check_divmod_op(self, s, op, other, exc=None):
# override to not raise an error
super()._check_divmod_op(s, op, other, None)
| Backport pr #41015 | https://api.github.com/repos/pandas-dev/pandas/pulls/41019 | 2021-04-18T16:02:01Z | 2021-04-18T17:50:27Z | 2021-04-18T17:50:27Z | 2021-04-18T17:50:30Z |
[ArrowStringArray] fix test_astype_int, test_astype_float | diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 219c52c4a65b9..a6835ab1325a0 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -31,14 +31,17 @@
from pandas.util._decorators import doc
from pandas.util._validators import validate_fillna_kwargs
+from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
+ is_dtype_equal,
is_integer,
is_integer_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
+ pandas_dtype,
)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.missing import isna
@@ -48,6 +51,7 @@
from pandas.core.arrays.base import ExtensionArray
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
+from pandas.core.arrays.numeric import NumericDtype
from pandas.core.arrays.string_ import StringDtype
from pandas.core.indexers import (
check_array_indexer,
@@ -290,10 +294,14 @@ def to_numpy( # type: ignore[override]
"""
# TODO: copy argument is ignored
- if na_value is lib.no_default:
- na_value = self._dtype.na_value
- result = self._data.__array__(dtype=dtype)
- result[isna(result)] = na_value
+ result = np.array(self._data, dtype=dtype)
+ if self._data.null_count > 0:
+ if na_value is lib.no_default:
+ if dtype and np.issubdtype(dtype, np.floating):
+ return result
+ na_value = self._dtype.na_value
+ mask = self.isna()
+ result[mask] = na_value
return result
def __len__(self) -> int:
@@ -737,6 +745,24 @@ def value_counts(self, dropna: bool = True) -> Series:
return Series(counts, index=index).astype("Int64")
+ def astype(self, dtype, copy=True):
+ dtype = pandas_dtype(dtype)
+
+ if is_dtype_equal(dtype, self.dtype):
+ if copy:
+ return self.copy()
+ return self
+
+ elif isinstance(dtype, NumericDtype):
+ data = self._data.cast(pa.from_numpy_dtype(dtype.numpy_dtype))
+ return dtype.__from_arrow__(data)
+
+ elif isinstance(dtype, ExtensionDtype):
+ cls = dtype.construct_array_type()
+ return cls._from_sequence(self, dtype=dtype, copy=copy)
+
+ return super().astype(dtype, copy)
+
# ------------------------------------------------------------------------
# String methods interface
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index e3b43c544a477..c9533e239abe0 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -3,6 +3,8 @@
Tests for the str accessors are in pandas/tests/strings/test_string_array.py
"""
+import re
+
import numpy as np
import pytest
@@ -325,12 +327,19 @@ def test_from_sequence_no_mutate(copy, cls, request):
tm.assert_numpy_array_equal(nan_arr, expected)
-def test_astype_int(dtype, request):
- if dtype == "arrow_string":
- reason = "Cannot interpret 'Int64Dtype()' as a data type"
- mark = pytest.mark.xfail(raises=TypeError, reason=reason)
- request.node.add_marker(mark)
+def test_astype_int(dtype):
+ arr = pd.array(["1", "2", "3"], dtype=dtype)
+ result = arr.astype("int64")
+ expected = np.array([1, 2, 3], dtype="int64")
+ tm.assert_numpy_array_equal(result, expected)
+
+ arr = pd.array(["1", pd.NA, "3"], dtype=dtype)
+ msg = re.escape("int() argument must be a string, a bytes-like object or a number")
+ with pytest.raises(TypeError, match=msg):
+ arr.astype("int64")
+
+def test_astype_nullable_int(dtype):
arr = pd.array(["1", pd.NA, "3"], dtype=dtype)
result = arr.astype("Int64")
@@ -338,19 +347,9 @@ def test_astype_int(dtype, request):
tm.assert_extension_array_equal(result, expected)
-def test_astype_float(dtype, any_float_allowed_nullable_dtype, request):
+def test_astype_float(dtype, any_float_allowed_nullable_dtype):
# Don't compare arrays (37974)
-
- if dtype == "arrow_string":
- if any_float_allowed_nullable_dtype in {"Float32", "Float64"}:
- reason = "Cannot interpret 'Float32Dtype()' as a data type"
- else:
- reason = "float() argument must be a string or a number, not 'NAType'"
- mark = pytest.mark.xfail(raises=TypeError, reason=reason)
- request.node.add_marker(mark)
-
ser = pd.Series(["1.1", pd.NA, "3.3"], dtype=dtype)
-
result = ser.astype(any_float_allowed_nullable_dtype)
expected = pd.Series([1.1, np.nan, 3.3], dtype=any_float_allowed_nullable_dtype)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index bebe6948cff9c..ffaecf1576364 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -379,7 +379,9 @@ class TestAstypeString:
# currently no way to parse IntervalArray from a list of strings
],
)
- def test_astype_string_to_extension_dtype_roundtrip(self, data, dtype, request):
+ def test_astype_string_to_extension_dtype_roundtrip(
+ self, data, dtype, request, nullable_string_dtype
+ ):
if dtype == "boolean" or (
dtype in ("period[M]", "datetime64[ns]", "timedelta64[ns]") and NaT in data
):
@@ -389,7 +391,8 @@ def test_astype_string_to_extension_dtype_roundtrip(self, data, dtype, request):
request.node.add_marker(mark)
# GH-40351
s = Series(data, dtype=dtype)
- tm.assert_series_equal(s, s.astype("string").astype(dtype))
+ result = s.astype(nullable_string_dtype).astype(dtype)
+ tm.assert_series_equal(result, s)
class TestAstypeCategorical:
| https://api.github.com/repos/pandas-dev/pandas/pulls/41018 | 2021-04-18T14:11:39Z | 2021-05-31T16:21:07Z | 2021-05-31T16:21:07Z | 2021-05-31T17:23:04Z | |
Fix 33634: If aggregation function returns NaN the order of the index on the resulting df is not maintained | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 1eb22436204a8..6f39dc4917024 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -973,6 +973,7 @@ Other
- Bug in :meth:`DataFrame.equals`, :meth:`Series.equals`, :meth:`Index.equals` with object-dtype containing ``np.datetime64("NaT")`` or ``np.timedelta64("NaT")`` (:issue:`39650`)
- Bug in :func:`pandas.util.show_versions` where console JSON output was not proper JSON (:issue:`39701`)
- Bug in :meth:`DataFrame.convert_dtypes` incorrectly raised ValueError when called on an empty DataFrame (:issue:`40393`)
+- Bug in :meth:`DataFrame.agg()` not sorting the aggregated axis in the order of the provided aggragation functions when one or more aggregation function fails to produce results (:issue:`33634`)
- Bug in :meth:`DataFrame.clip` not interpreting missing values as no threshold (:issue:`40420`)
- Bug in :class:`Series` backed by :class:`DatetimeArray` or :class:`TimedeltaArray` sometimes failing to set the array's ``freq`` to ``None`` (:issue:`41425`)
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index d0c6a1a841edb..00b49c2f4f951 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -376,12 +376,10 @@ def agg_list_like(self) -> FrameOrSeriesUnion:
raise ValueError("no results")
try:
- return concat(results, keys=keys, axis=1, sort=False)
+ concatenated = concat(results, keys=keys, axis=1, sort=False)
except TypeError as err:
-
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
-
from pandas import Series
result = Series(results, index=keys, name=obj.name)
@@ -390,6 +388,16 @@ def agg_list_like(self) -> FrameOrSeriesUnion:
"cannot combine transform and aggregation operations"
) from err
return result
+ else:
+ # Concat uses the first index to determine the final indexing order.
+ # The union of a shorter first index with the other indices causes
+ # the index sorting to be different from the order of the aggregating
+ # functions. Reindex if this is the case.
+ index_size = concatenated.index.size
+ full_ordered_index = next(
+ result.index for result in results if result.index.size == index_size
+ )
+ return concatenated.reindex(full_ordered_index, copy=False)
def agg_dict_like(self) -> FrameOrSeriesUnion:
"""
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py
index fcccd0d846d0f..cc91cdae942fd 100644
--- a/pandas/tests/apply/test_frame_apply.py
+++ b/pandas/tests/apply/test_frame_apply.py
@@ -1110,10 +1110,9 @@ def test_agg_multiple_mixed_no_warning():
with tm.assert_produces_warning(None):
result = mdf[["D", "C", "B", "A"]].agg(["sum", "min"])
- # For backwards compatibility, the result's index is
- # still sorted by function name, so it's ['min', 'sum']
- # not ['sum', 'min'].
- expected = expected[["D", "C", "B", "A"]]
+ # GH40420: the result of .agg should have an index that is sorted
+ # according to the arguments provided to agg.
+ expected = expected[["D", "C", "B", "A"]].reindex(["sum", "min"])
tm.assert_frame_equal(result, expected)
@@ -1521,6 +1520,38 @@ def test_apply_np_reducer(float_frame, op, how):
tm.assert_series_equal(result, expected)
+def test_aggregation_func_column_order():
+ # GH40420: the result of .agg should have an index that is sorted
+ # according to the arguments provided to agg.
+ df = DataFrame(
+ [
+ ("1", 1, 0, 0),
+ ("2", 2, 0, 0),
+ ("3", 3, 0, 0),
+ ("4", 4, 5, 4),
+ ("5", 5, 6, 6),
+ ("6", 6, 7, 7),
+ ],
+ columns=("item", "att1", "att2", "att3"),
+ )
+
+ def foo(s):
+ return s.sum() / 2
+
+ aggs = ["sum", foo, "count", "min"]
+ result = df.agg(aggs)
+ expected = DataFrame(
+ {
+ "item": ["123456", np.nan, 6, "1"],
+ "att1": [21.0, 10.5, 6.0, 1.0],
+ "att2": [18.0, 9.0, 6.0, 0.0],
+ "att3": [17.0, 8.5, 6.0, 0.0],
+ },
+ index=["sum", "foo", "count", "min"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
def test_apply_getitem_axis_1():
# GH 13427
df = DataFrame({"a": [0, 1, 2], "b": [1, 2, 3]})
| - [x] closes #33634
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/41017 | 2021-04-18T13:44:15Z | 2021-05-21T01:16:05Z | 2021-05-21T01:16:05Z | 2021-05-21T01:16:15Z |
Revert "Skipt failing tests for numpy dev (#40877)" | diff --git a/pandas/tests/arrays/boolean/test_arithmetic.py b/pandas/tests/arrays/boolean/test_arithmetic.py
index 8e879372cba31..f8f1af4c3da51 100644
--- a/pandas/tests/arrays/boolean/test_arithmetic.py
+++ b/pandas/tests/arrays/boolean/test_arithmetic.py
@@ -69,10 +69,7 @@ def test_div(left_array, right_array):
@pytest.mark.parametrize(
"opname",
[
- pytest.param(
- "floordiv",
- marks=pytest.mark.xfail(reason="NumpyDev GH#40874", strict=False),
- ),
+ "floordiv",
"mod",
pytest.param(
"pow", marks=pytest.mark.xfail(reason="TODO follow int8 behaviour? GH34686")
diff --git a/pandas/tests/arrays/masked/test_arithmetic.py b/pandas/tests/arrays/masked/test_arithmetic.py
index 088a37f8615c0..adb52fce17f8b 100644
--- a/pandas/tests/arrays/masked/test_arithmetic.py
+++ b/pandas/tests/arrays/masked/test_arithmetic.py
@@ -6,8 +6,6 @@
import numpy as np
import pytest
-from pandas.compat import is_numpy_dev
-
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import ExtensionArray
@@ -54,8 +52,6 @@ def test_array_scalar_like_equivalence(data, all_arithmetic_operators):
def test_array_NA(data, all_arithmetic_operators):
if "truediv" in all_arithmetic_operators:
pytest.skip("division with pd.NA raises")
- if "floordiv" in all_arithmetic_operators and is_numpy_dev:
- pytest.skip("NumpyDev behavior GH#40874")
data, _ = data
op = tm.get_op_from_name(all_arithmetic_operators)
check_skip(data, all_arithmetic_operators)
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index 23ab80f200598..33d82a1d64fb7 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -16,8 +16,6 @@
import numpy as np
import pytest
-from pandas.compat import is_numpy_dev
-
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.boolean import BooleanDtype
@@ -142,26 +140,6 @@ def _check_op(self, obj, op, other, op_name, exc=NotImplementedError):
with pytest.raises(exc):
op(obj, other)
- def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
- if "floordiv" in all_arithmetic_operators and is_numpy_dev:
- pytest.skip("NumpyDev behavior GH#40874")
- super().test_arith_series_with_scalar(data, all_arithmetic_operators)
-
- def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
- if "floordiv" in all_arithmetic_operators and is_numpy_dev:
- pytest.skip("NumpyDev behavior GH#40874")
- super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
-
- def test_arith_series_with_array(self, data, all_arithmetic_operators):
- if "floordiv" in all_arithmetic_operators and is_numpy_dev:
- pytest.skip("NumpyDev behavior GH#40874")
- super().test_arith_series_with_scalar(data, all_arithmetic_operators)
-
- def test_divmod_series_array(self, data, data_for_twos):
- if is_numpy_dev:
- pytest.skip("NumpyDev behavior GH#40874")
- super().test_divmod_series_array(data, data_for_twos)
-
def _check_divmod_op(self, s, op, other, exc=None):
# override to not raise an error
super()._check_divmod_op(s, op, other, None)
| - [x] closes #40874
new numpy wheel is out, bug was fixed during the week
| https://api.github.com/repos/pandas-dev/pandas/pulls/41015 | 2021-04-18T11:13:42Z | 2021-04-18T15:55:39Z | 2021-04-18T15:55:39Z | 2021-04-18T16:03:01Z |
DOC: update `style.ipynb` user guide for recent enhancements | diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 765b2929d3014..86696cc909764 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -1006,7 +1006,30 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "We expect certain styling functions to be common enough that we've included a few \"built-in\" to the `Styler`, so you don't have to write them yourself."
+ "Some styling functions are common enough that we've \"built them in\" to the `Styler`, so you don't have to write them and apply them yourself. The current list of such functions is:\n",
+ "\n",
+ " - [.highlight_null][nullfunc]: for use with identifying missing data. \n",
+ " - [.highlight_min][minfunc] and [.highlight_max][maxfunc]: for use with identifying extremeties in data.\n",
+ " - [.highlight_between][betweenfunc] and [.highlight_quantile][quantilefunc]: for use with identifying classes within data.\n",
+ " - [.background_gradient][bgfunc]: a flexible method for highlighting cells based or their, or other, values on a numeric scale.\n",
+ " - [.bar][barfunc]: to display mini-charts within cell backgrounds.\n",
+ " \n",
+ "The individual documentation on each function often gives more examples of their arguments.\n",
+ "\n",
+ "[nullfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_null.rst\n",
+ "[minfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_min.rst\n",
+ "[maxfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_max.rst\n",
+ "[betweenfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_between.rst\n",
+ "[quantilefunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_quantile.rst\n",
+ "[bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst\n",
+ "[barfunc]: ../reference/api/pandas.io.formats.style.Styler.bar.rst"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Highlight Null"
]
},
{
@@ -1017,14 +1040,14 @@
"source": [
"df2.iloc[0,2] = np.nan\n",
"df2.iloc[4,3] = np.nan\n",
- "df2.loc[:4].style.highlight_null(null_color='red')"
+ "df2.loc[:4].style.highlight_null(null_color='yellow')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "You can create \"heatmaps\" with the `background_gradient` method. These require matplotlib, and we'll use [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap."
+ "### Highlight Min or Max"
]
},
{
@@ -1033,17 +1056,15 @@
"metadata": {},
"outputs": [],
"source": [
- "import seaborn as sns\n",
- "cm = sns.light_palette(\"green\", as_cmap=True)\n",
- "\n",
- "df2.style.background_gradient(cmap=cm)"
+ "df2.loc[:4].style.highlight_max(axis=1, props='color:white; font-weight:bold; background-color:darkblue;')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "`Styler.background_gradient` takes the keyword arguments `low` and `high`. Roughly speaking these extend the range of your data by `low` and `high` percent so that when we convert the colors, the colormap's entire range isn't used. This is useful so that you can actually read the text still."
+ "### Highlight Between\n",
+ "This method accepts ranges as float, or NumPy arrays or Series provided the indexes match."
]
},
{
@@ -1052,8 +1073,16 @@
"metadata": {},
"outputs": [],
"source": [
- "# Uses the full color range\n",
- "df2.loc[:4].style.background_gradient(cmap='viridis')"
+ "left = pd.Series([1.0, 0.0, 1.0], index=[\"A\", \"B\", \"D\"])\n",
+ "df2.loc[:4].style.highlight_between(left=left, right=1.5, axis=1, props='color:white; background-color:purple;')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Highlight Quantile\n",
+ "Useful for detecting the highest or lowest percentile values"
]
},
{
@@ -1062,17 +1091,21 @@
"metadata": {},
"outputs": [],
"source": [
- "# Compress the color range\n",
- "df2.loc[:4].style\\\n",
- " .background_gradient(cmap='viridis', low=.5, high=0)\\\n",
- " .highlight_null('red')"
+ "df2.loc[:4].style.highlight_quantile(q_left=0.85, axis=None, color='yellow')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Background Gradient"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "There's also `.highlight_min` and `.highlight_max`, which is almost identical to the user defined version we created above, and also a `.highlight_null` method. "
+ "You can create \"heatmaps\" with the `background_gradient` method. These require matplotlib, and we'll use [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap."
]
},
{
@@ -1081,7 +1114,19 @@
"metadata": {},
"outputs": [],
"source": [
- "df2.loc[:4].style.highlight_max(axis=0)"
+ "import seaborn as sns\n",
+ "cm = sns.light_palette(\"green\", as_cmap=True)\n",
+ "\n",
+ "df2.style.background_gradient(cmap=cm)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[.background_gradient][bgfunc] has a number of keyword arguments to customise the gradients and colors. See its documentation.\n",
+ "\n",
+ "[bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst"
]
},
{
| I added small changes to the Styler User Guide to exemplify the new builtin highlighting functions.
Also removed some of the longer detail which has been moved to individual documentation pages (for example for `background_gradient`) | https://api.github.com/repos/pandas-dev/pandas/pulls/41013 | 2021-04-18T06:02:13Z | 2021-04-21T18:31:39Z | 2021-04-21T18:31:39Z | 2021-04-22T10:34:56Z |
DOC: Improve describe() documentation | diff --git a/doc/source/getting_started/intro_tutorials/01_table_oriented.rst b/doc/source/getting_started/intro_tutorials/01_table_oriented.rst
index e8e0fef271a74..2dcc8b0abe3b8 100644
--- a/doc/source/getting_started/intro_tutorials/01_table_oriented.rst
+++ b/doc/source/getting_started/intro_tutorials/01_table_oriented.rst
@@ -176,7 +176,7 @@ these are by default not taken into account by the :func:`~DataFrame.describe` m
Many pandas operations return a ``DataFrame`` or a ``Series``. The
:func:`~DataFrame.describe` method is an example of a pandas operation returning a
-pandas ``Series``.
+pandas ``Series`` or a pandas ``DataFrame``.
.. raw:: html
| - [x] closes #40972
- [ ] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
It feels more verbose, but necessary? | https://api.github.com/repos/pandas-dev/pandas/pulls/41012 | 2021-04-18T05:52:26Z | 2021-04-20T04:26:26Z | 2021-04-20T04:26:26Z | 2021-04-20T04:26:36Z |
CLN: remove ensure_int_or_float | diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 59550927299fe..e207dac71752e 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -128,51 +128,6 @@ def ensure_str(value: Union[bytes, Any]) -> str:
return value
-def ensure_int_or_float(arr: ArrayLike, copy: bool = False) -> np.ndarray:
- """
- Ensure that an dtype array of some integer dtype
- has an int64 dtype if possible.
- If it's not possible, potentially because of overflow,
- convert the array to float64 instead.
-
- Parameters
- ----------
- arr : array-like
- The array whose data type we want to enforce.
- copy: bool
- Whether to copy the original array or reuse
- it in place, if possible.
-
- Returns
- -------
- out_arr : The input array cast as int64 if
- possible without overflow.
- Otherwise the input array cast to float64.
-
- Notes
- -----
- If the array is explicitly of type uint64 the type
- will remain unchanged.
- """
- # TODO: GH27506 potential bug with ExtensionArrays
- try:
- # error: Unexpected keyword argument "casting" for "astype"
- return arr.astype("int64", copy=copy, casting="safe") # type: ignore[call-arg]
- except TypeError:
- pass
- try:
- # error: Unexpected keyword argument "casting" for "astype"
- return arr.astype("uint64", copy=copy, casting="safe") # type: ignore[call-arg]
- except TypeError:
- if is_extension_array_dtype(arr.dtype):
- # pandas/core/dtypes/common.py:168: error: Item "ndarray" of
- # "Union[ExtensionArray, ndarray]" has no attribute "to_numpy" [union-attr]
- return arr.to_numpy( # type: ignore[union-attr]
- dtype="float64", na_value=np.nan
- )
- return arr.astype("float64", copy=copy)
-
-
def ensure_python_int(value: Union[int, np.integer]) -> int:
"""
Ensure that a value is a python int.
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 702d67b198e8d..da4e165dc5ceb 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -43,7 +43,6 @@
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
- ensure_int_or_float,
ensure_platform_int,
is_bool_dtype,
is_categorical_dtype,
@@ -582,7 +581,7 @@ def _ea_wrap_cython_operation(
elif is_integer_dtype(values.dtype) or is_bool_dtype(values.dtype):
# IntegerArray or BooleanArray
- values = ensure_int_or_float(values)
+ values = values.to_numpy("float64", na_value=np.nan)
res_values = self._cython_operation(
kind, values, how, axis, min_count, **kwargs
)
@@ -660,9 +659,11 @@ def _cython_operation(
values = values.view("int64")
is_numeric = True
elif is_bool_dtype(dtype):
- values = ensure_int_or_float(values)
+ values = values.astype("int64")
elif is_integer_dtype(dtype):
- values = ensure_int_or_float(values)
+ # e.g. uint8 -> uint64, int16 -> int64
+ dtype = dtype.kind + "8"
+ values = values.astype(dtype, copy=False)
elif is_numeric:
if not is_complex_dtype(dtype):
values = ensure_float64(values)
| - [x] closes #27506
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
No need for a try/except; we can reason out which branch we go down in each of the places this is used. | https://api.github.com/repos/pandas-dev/pandas/pulls/41011 | 2021-04-18T04:01:23Z | 2021-04-19T13:51:57Z | 2021-04-19T13:51:57Z | 2021-04-19T14:43:15Z |
BUG: groupby.rank with MaskedArray incorrect casting | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 1a11fffbf6b4e..2487b5fca591b 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -832,6 +832,7 @@ Groupby/resample/rolling
- Bug in :class:`core.window.RollingGroupby` where ``as_index=False`` argument in ``groupby`` was ignored (:issue:`39433`)
- Bug in :meth:`.GroupBy.any` and :meth:`.GroupBy.all` raising ``ValueError`` when using with nullable type columns holding ``NA`` even with ``skipna=True`` (:issue:`40585`)
- Bug in :meth:`GroupBy.cummin` and :meth:`GroupBy.cummax` incorrectly rounding integer values near the ``int64`` implementations bounds (:issue:`40767`)
+- Bug in :meth:`.GroupBy.rank` with nullable dtypes incorrectly raising ``TypeError`` (:issue:`41010`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 50248d5af8883..6c1d6847a0bde 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -122,8 +122,6 @@ def _gotitem(self, key, ndim, subset=None):
# require postprocessing of the result by transform.
cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"])
-cython_cast_blocklist = frozenset(["rank", "count", "size", "idxmin", "idxmax"])
-
# List of aggregation/reduction functions.
# These map each group to a single numeric value
reduction_kernels = frozenset(
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index d9bf1adf74a5e..6eddf8e9e8773 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -58,7 +58,6 @@
is_timedelta64_dtype,
needs_i8_conversion,
)
-from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCCategoricalIndex
from pandas.core.dtypes.missing import (
isna,
@@ -95,6 +94,10 @@ class WrappedCythonOp:
Dispatch logic for functions defined in _libs.groupby
"""
+ # Functions for which we do _not_ attempt to cast the cython result
+ # back to the original dtype.
+ cast_blocklist = frozenset(["rank", "count", "size", "idxmin", "idxmax"])
+
def __init__(self, kind: str, how: str):
self.kind = kind
self.how = how
@@ -564,11 +567,13 @@ def _ea_wrap_cython_operation(
if is_datetime64tz_dtype(values.dtype) or is_period_dtype(values.dtype):
# All of the functions implemented here are ordinal, so we can
# operate on the tz-naive equivalents
- values = values.view("M8[ns]")
+ npvalues = values.view("M8[ns]")
res_values = self._cython_operation(
- kind, values, how, axis, min_count, **kwargs
+ kind, npvalues, how, axis, min_count, **kwargs
)
if how in ["rank"]:
+ # i.e. how in WrappedCythonOp.cast_blocklist, since
+ # other cast_blocklist methods dont go through cython_operation
# preserve float64 dtype
return res_values
@@ -582,12 +587,16 @@ def _ea_wrap_cython_operation(
res_values = self._cython_operation(
kind, values, how, axis, min_count, **kwargs
)
- dtype = maybe_cast_result_dtype(orig_values.dtype, how)
- if isinstance(dtype, ExtensionDtype):
- cls = dtype.construct_array_type()
- return cls._from_sequence(res_values, dtype=dtype)
+ if how in ["rank"]:
+ # i.e. how in WrappedCythonOp.cast_blocklist, since
+ # other cast_blocklist methods dont go through cython_operation
+ return res_values
- return res_values
+ dtype = maybe_cast_result_dtype(orig_values.dtype, how)
+ # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]"
+ # has no attribute "construct_array_type"
+ cls = dtype.construct_array_type() # type: ignore[union-attr]
+ return cls._from_sequence(res_values, dtype=dtype)
elif is_float_dtype(values.dtype):
# FloatingArray
@@ -595,8 +604,16 @@ def _ea_wrap_cython_operation(
res_values = self._cython_operation(
kind, values, how, axis, min_count, **kwargs
)
- result = type(orig_values)._from_sequence(res_values)
- return result
+ if how in ["rank"]:
+ # i.e. how in WrappedCythonOp.cast_blocklist, since
+ # other cast_blocklist methods dont go through cython_operation
+ return res_values
+
+ dtype = maybe_cast_result_dtype(orig_values.dtype, how)
+ # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]"
+ # has no attribute "construct_array_type"
+ cls = dtype.construct_array_type() # type: ignore[union-attr]
+ return cls._from_sequence(res_values, dtype=dtype)
raise NotImplementedError(
f"function is not implemented for this dtype: {values.dtype}"
@@ -711,9 +728,9 @@ def _cython_operation(
result = result.T
- if how not in base.cython_cast_blocklist:
+ if how not in cy_op.cast_blocklist:
# e.g. if we are int64 and need to restore to datetime64/timedelta64
- # "rank" is the only member of cython_cast_blocklist we get here
+ # "rank" is the only member of cast_blocklist we get here
dtype = maybe_cast_result_dtype(orig_values.dtype, how)
op_result = maybe_downcast_to_dtype(result, dtype)
else:
diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py
index 1317f0f68216a..73b2d8ac2c1f5 100644
--- a/pandas/tests/groupby/test_counting.py
+++ b/pandas/tests/groupby/test_counting.py
@@ -209,6 +209,7 @@ def test_ngroup_respects_groupby_order(self):
[
[Timestamp(f"2016-05-{i:02d} 20:09:25+00:00") for i in range(1, 4)],
[Timestamp(f"2016-05-{i:02d} 20:09:25") for i in range(1, 4)],
+ [Timestamp(f"2016-05-{i:02d} 20:09:25", tz="UTC") for i in range(1, 4)],
[Timedelta(x, unit="h") for x in range(1, 4)],
[Period(freq="2W", year=2017, month=x) for x in range(1, 4)],
],
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index d7020e2ffd701..46985ff956788 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -495,6 +495,8 @@ def test_idxmin_idxmax_returns_int_types(func, values):
df["c_date_tz"] = df["c_date"].dt.tz_localize("US/Pacific")
df["c_timedelta"] = df["c_date"] - df["c_date"].iloc[0]
df["c_period"] = df["c_date"].dt.to_period("W")
+ df["c_Integer"] = df["c_int"].astype("Int64")
+ df["c_Floating"] = df["c_float"].astype("Float64")
result = getattr(df.groupby("name"), func)()
@@ -502,6 +504,8 @@ def test_idxmin_idxmax_returns_int_types(func, values):
expected["c_date_tz"] = expected["c_date"]
expected["c_timedelta"] = expected["c_date"]
expected["c_period"] = expected["c_date"]
+ expected["c_Integer"] = expected["c_int"]
+ expected["c_Floating"] = expected["c_float"]
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 2dab22910a0c9..c5620d6d8c06c 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1732,6 +1732,8 @@ def test_pivot_table_values_key_error():
[to_datetime(0)],
[date_range(0, 1, 1, tz="US/Eastern")],
[pd.array([0], dtype="Int64")],
+ [pd.array([0], dtype="Float64")],
+ [pd.array([False], dtype="boolean")],
],
)
@pytest.mark.parametrize("method", ["attr", "agg", "apply"])
diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py
index 2e666c27386b4..da88ea5f05107 100644
--- a/pandas/tests/groupby/test_rank.py
+++ b/pandas/tests/groupby/test_rank.py
@@ -444,8 +444,19 @@ def test_rank_resets_each_group(pct, exp):
tm.assert_frame_equal(result, exp_df)
-def test_rank_avg_even_vals():
+@pytest.mark.parametrize(
+ "dtype", ["int64", "int32", "uint64", "uint32", "float64", "float32"]
+)
+@pytest.mark.parametrize("upper", [True, False])
+def test_rank_avg_even_vals(dtype, upper):
+ if upper:
+ # use IntegerDtype/FloatingDtype
+ dtype = dtype[0].upper() + dtype[1:]
+ dtype = dtype.replace("Ui", "UI")
df = DataFrame({"key": ["a"] * 4, "val": [1] * 4})
+ df["val"] = df["val"].astype(dtype)
+ assert df["val"].dtype == dtype
+
result = df.groupby("key").rank()
exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=["val"])
tm.assert_frame_equal(result, exp_df)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
In master:
```
df = pd.DataFrame({"key": ["a"] * 4, "val": [1] * 4})
df["val"] = df["val"].astype("Int64")
>>> result = df.groupby("key").rank()
TypeError: cannot safely cast non-equivalent float64 to int64
```
Moves in the direction of sharing/generalizing _ea_wrap_cython_operation.
If I did this right, should avoid overlap with #40651. | https://api.github.com/repos/pandas-dev/pandas/pulls/41010 | 2021-04-18T03:23:27Z | 2021-04-20T22:56:41Z | 2021-04-20T22:56:41Z | 2021-04-20T22:59:10Z |
Fix setting dataframe column to 2d array with more than one col | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 38766d2856cfe..c6a6955c5a7fd 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3682,6 +3682,7 @@ def _set_item_frame_value(self, key, value: DataFrame) -> None:
def _iset_item_mgr(self, loc: int | slice | np.ndarray, value) -> None:
# when called from _set_item_mgr loc can be anything returned from get_loc
+ print(type(self._mgr))
self._mgr.iset(loc, value)
self._clear_item_cache()
@@ -3692,6 +3693,7 @@ def _set_item_mgr(self, key, value: ArrayLike) -> None:
# This item wasn't present, just insert at end
self._mgr.insert(len(self._info_axis), key, value)
else:
+ print("path1")
self._iset_item_mgr(loc, value)
# check if we are modifying a copy
@@ -3733,6 +3735,36 @@ def _set_item(self, key, value) -> None:
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1)).T
+ if (
+ value.ndim == 2
+ and value.shape[1] > 1
+ and not isinstance(value, list)
+ and key in self.columns
+ and (
+ # Second part is for duplicates
+ isinstance(self[key], Series)
+ or len(self.columns.get_indexer_for([key])) != value.shape[1]
+ )
+ ):
+ # Prevents assignment of a single column to multiple column array.
+ if len(self.columns.get_indexer_for([key])) == 1:
+ raise ValueError(
+ "Dataframe column is being assigned to a 2D array with "
+ "different number of columns. Column assignment accepts only "
+ "2D arrays with same number of columns."
+ )
+ elif len(self.columns.get_indexer_for([key])) != value.shape[1]:
+ raise ValueError(
+ "Dataframe column is being assigned to a 2D array with "
+ "different number of columns. Column assignment accepts only "
+ "2D arrays with same number of columns."
+ )
+ # Need a test for multiindexer being
+ # applied to a different multiindexer length
+ # NEed a test for duplicate applied to diff count
+ # Assignment to a column that doesn't exist?
+ # Should be series only as well.
+
self._set_item_mgr(key, value)
def _set_value(
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 3a1b2345ee7f0..d580fa56e2a69 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -43,6 +43,33 @@
class TestDataFrameSetItem:
+ @pytest.mark.parametrize(
+ argnames="arr",
+ argvalues=[(4, 3), (4, 4), (4, 10), (4, 20), (4, 30)],
+ )
+ def test_setitem_size_incompatible_ndarray(self, arr):
+ # GH#40827
+ # Assigning a dataframe column to an ndarray with more than one columns
+ # should raise an exception.
+ data = DataFrame(np.zeros((4, 2)), columns=["A", "B"])
+ msg = (
+ "Dataframe column is being assigned to a 2D array with "
+ "different number of columns. Column assignment accepts only "
+ "2D arrays with same number of columns."
+ )
+ with pytest.raises(ValueError, match=msg):
+ data["A"] = np.random.randn(arr[0], arr[1])
+
+ def test_setitem_size_compatible_ndarray(self):
+ data = DataFrame(np.zeros(4), columns=["A"])
+
+ data_to_set = np.random.randn(4, 1)
+
+ expected = DataFrame(data=data_to_set, columns=["A"])
+
+ data["A"] = data_to_set
+ tm.assert_frame_equal(data, expected)
+
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py
index c9a39eb460cf4..abe4351004771 100644
--- a/pandas/tests/frame/test_nonunique_indexes.py
+++ b/pandas/tests/frame/test_nonunique_indexes.py
@@ -125,6 +125,25 @@ def test_insert_with_duplicate_columns(self):
)
tm.assert_frame_equal(df, expected)
+ def test_dup_with_mismatched_column_lengths(self):
+ # dup across dtypes
+ df = DataFrame(
+ np.random.randn(3, 4),
+ columns=["foo", "bar", "foo", "hello"],
+ )
+ check(df)
+
+ msg = (
+ "Dataframe column is being assigned to a 2D array with "
+ "different number of columns. Column assignment accepts only "
+ "2D arrays with same number of columns."
+ )
+ with pytest.raises(ValueError, match=msg):
+ df["foo"] = np.random.randn(3, 3)
+
+ df["foo"] = np.random.randn(3, 2)
+ df["foo"] = np.random.randn(3, 1)
+
def test_dup_across_dtypes(self):
# dup across dtypes
df = DataFrame(
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index 5d0aeba4aebbc..bd40442872488 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -311,6 +311,47 @@ def test_frame_getitem_setitem_multislice(self):
df.loc[:, :] = 10
tm.assert_frame_equal(df, result)
+ def test_frame_setitem_multi_column_incompatible_column_length(self):
+ df = DataFrame(
+ np.random.randn(10, 5), columns=[["a", "a", "a", "b", "b"], [0, 1, 2, 0, 1]]
+ )
+
+ cp = df.copy()
+ msg = (
+ "Dataframe column is being assigned to a 2D array with "
+ "different number of columns. Column assignment accepts only "
+ "2D arrays with same number of columns."
+ )
+ with pytest.raises(ValueError, match=msg):
+ cp["a"] = cp["b"]
+
+ # set with ndarray
+ cp = df.copy()
+ msg = (
+ "Dataframe column is being assigned to a 2D array with "
+ "different number of columns. Column assignment accepts only "
+ "2D arrays with same number of columns."
+ )
+ with pytest.raises(ValueError, match=msg):
+ cp["a"] = cp["b"].values
+
+ def test_frame_setitem_multi_column_incompatible_column_length2(self):
+ df = DataFrame(np.random.randn(10, 3), columns=[["a", "a", "b"], [0, 1, 0]])
+
+ cp = df.copy()
+ msg = (
+ "Dataframe column is being assigned to a 2D array with "
+ "different number of columns. Column assignment accepts only "
+ "2D arrays with same number of columns."
+ )
+ with pytest.raises(ValueError, match=msg):
+ cp["a"] = cp["b"]
+
+ # set with ndarray
+ cp = df.copy()
+ # Works
+ cp["a"] = cp["b"].values
+
def test_frame_setitem_multi_column(self):
df = DataFrame(
np.random.randn(10, 4), columns=[["a", "a", "b", "b"], [0, 1, 0, 1]]
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 853c7079a3c1b..8cfa28bbe8a2c 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -54,6 +54,16 @@ def test_iloc_getitem_int_and_list_int(self, key):
class TestiLocBaseIndependent:
"""Tests Independent Of Base Class"""
+ def test_iloc_setitem_with_size_compatible_ndarray(self):
+ data = DataFrame(np.zeros(4), columns=["A"])
+
+ data_to_set = np.random.randn(4, 1)
+
+ expected = DataFrame(data=data_to_set, columns=["A"])
+
+ data.iloc[:] = data_to_set
+ tm.assert_frame_equal(data, expected)
+
@pytest.mark.parametrize(
"key",
[
@@ -1112,6 +1122,21 @@ def test_iloc_interval(self):
class TestILocErrors:
+ @pytest.mark.parametrize(
+ argnames="arr", argvalues=[(4, 2), (4, 3), (4, 4), (4, 10), (4, 20), (4, 30)]
+ )
+ def test_iloc_setitem_with_size_incompatible_ndarray(self, arr):
+ # GH#40827
+ # Assigning a dataframe column to an ndarray with more than one columns
+ # should raise an exception.
+ data = DataFrame(np.zeros(4), columns=["A"])
+ msg = re.escape(
+ f"could not broadcast input array from shape (4,{arr[1]}) "
+ "into shape (4,1)"
+ )
+ with pytest.raises(Exception, match=msg):
+ data.iloc[:] = np.random.randn(arr[0], arr[1])
+
# NB: this test should work for _any_ Series we can pass as
# series_with_simple_index
def test_iloc_float_raises(self, series_with_simple_index, frame_or_series):
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 97b3412ce626e..5c0bc91ff5b40 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -2353,6 +2353,32 @@ def test_loc_getitem_listlike_of_datetimelike_keys(self, to_period):
ser.loc[keys]
+@pytest.mark.parametrize(
+ argnames="arr", argvalues=[(4, 2), (4, 3), (4, 4), (4, 10), (4, 20), (4, 30)]
+)
+def test_loc_setitem_with_size_incompatible_ndarray(arr):
+ # GH#40827
+ # Assigning a dataframe column to an ndarray with more than one columns
+ # should raise an exception.
+ data = DataFrame(np.zeros(4), columns=["A"])
+ msg = re.escape(
+ f"could not broadcast input array from shape (4,{arr[1]}) into shape (4,1)"
+ )
+ with pytest.raises(Exception, match=msg):
+ data.iloc[:] = np.random.randn(arr[0], arr[1])
+
+
+def test_loc_setitem_with_size_compatible_ndarray():
+ data = DataFrame(np.zeros(4), columns=["A"])
+
+ data_to_set = np.random.randn(4, 1)
+
+ expected = DataFrame(data=data_to_set, columns=["A"])
+
+ data.iloc[:] = data_to_set
+ tm.assert_frame_equal(data, expected)
+
+
@pytest.mark.parametrize(
"columns, column_key, expected_columns",
[
| - [ ] closes #40827
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
I throw a value error when a dataframe column is assigned to an object with `ndim == 2` with more than one column.
I have added a test to test dataframe column assignment to an array with more than one 'column' and a test to show that assignment still works with an array with only one 'column'.
The tests I added pass but I am unsure why ~600 existing tests fail when I run the entire suite (whether I have my change or not). I doubt the tests are failing because of something on my machine but I want to see if that's the case anyway.
~500 linting tests fail as well when I run `./ci/code_checks.sh`
| https://api.github.com/repos/pandas-dev/pandas/pulls/41008 | 2021-04-17T21:56:18Z | 2021-04-28T10:01:21Z | null | 2021-04-28T10:56:48Z |
TYP: core.computation | diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py
index 5b2dbed7af6ea..7452cf03d0038 100644
--- a/pandas/core/computation/engines.py
+++ b/pandas/core/computation/engines.py
@@ -12,6 +12,7 @@
align_terms,
reconstruct_object,
)
+from pandas.core.computation.expr import Expr
from pandas.core.computation.ops import (
MATHOPS,
REDUCTIONS,
@@ -26,13 +27,13 @@ class NumExprClobberingError(NameError):
pass
-def _check_ne_builtin_clash(expr):
+def _check_ne_builtin_clash(expr: Expr) -> None:
"""
Attempt to prevent foot-shooting in a helpful way.
Parameters
----------
- terms : Term
+ expr : Expr
Terms can contain
"""
names = expr.names
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 51fcbb02fd926..57ba478a9157b 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -1,9 +1,9 @@
"""
Top level ``eval`` module.
"""
+from __future__ import annotations
import tokenize
-from typing import Optional
import warnings
from pandas._libs.lib import no_default
@@ -14,13 +14,14 @@
PARSERS,
Expr,
)
+from pandas.core.computation.ops import BinOp
from pandas.core.computation.parsing import tokenize_string
from pandas.core.computation.scope import ensure_scope
from pandas.io.formats.printing import pprint_thing
-def _check_engine(engine: Optional[str]) -> str:
+def _check_engine(engine: str | None) -> str:
"""
Make sure a valid engine is passed.
@@ -161,9 +162,9 @@ def _check_for_locals(expr: str, stack_level: int, parser: str):
def eval(
- expr,
- parser="pandas",
- engine: Optional[str] = None,
+ expr: str | BinOp, # we leave BinOp out of the docstr bc it isn't for users
+ parser: str = "pandas",
+ engine: str | None = None,
truediv=no_default,
local_dict=None,
global_dict=None,
@@ -309,10 +310,12 @@ def eval(
stacklevel=2,
)
+ exprs: list[str | BinOp]
if isinstance(expr, str):
_check_expression(expr)
exprs = [e.strip() for e in expr.splitlines() if e.strip() != ""]
else:
+ # ops.BinOp; for internal compat, not intended to be passed by users
exprs = [expr]
multi_line = len(exprs) > 1
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 891642bf61d16..0e6a7551ab399 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -546,6 +546,7 @@ class PyTablesExpr(expr.Expr):
_visitor: PyTablesExprVisitor | None
env: PyTablesScope
+ expr: str
def __init__(
self,
@@ -570,7 +571,7 @@ def __init__(
local_dict = where.env.scope
_where = where.expr
- elif isinstance(where, (list, tuple)):
+ elif is_list_like(where):
where = list(where)
for idx, w in enumerate(where):
if isinstance(w, PyTablesExpr):
@@ -580,6 +581,7 @@ def __init__(
where[idx] = w
_where = " & ".join(f"({w})" for w in com.flatten(where))
else:
+ # _validate_where ensures we otherwise have a string
_where = where
self.expr = _where
diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py
index ea92a33f242e9..09067e7eba6e5 100644
--- a/pandas/core/computation/scope.py
+++ b/pandas/core/computation/scope.py
@@ -106,9 +106,13 @@ class Scope:
"""
__slots__ = ["level", "scope", "target", "resolvers", "temps"]
+ level: int
+ scope: DeepChainMap
+ resolvers: DeepChainMap
+ temps: dict
def __init__(
- self, level, global_dict=None, local_dict=None, resolvers=(), target=None
+ self, level: int, global_dict=None, local_dict=None, resolvers=(), target=None
):
self.level = level + 1
@@ -146,8 +150,7 @@ def __init__(
# assumes that resolvers are going from outermost scope to inner
if isinstance(local_dict, Scope):
- # error: Cannot determine type of 'resolvers'
- resolvers += tuple(local_dict.resolvers.maps) # type: ignore[has-type]
+ resolvers += tuple(local_dict.resolvers.maps)
self.resolvers = DeepChainMap(*resolvers)
self.temps = {}
@@ -212,7 +215,7 @@ def resolve(self, key: str, is_local: bool):
raise UndefinedVariableError(key, is_local) from err
- def swapkey(self, old_key: str, new_key: str, new_value=None):
+ def swapkey(self, old_key: str, new_key: str, new_value=None) -> None:
"""
Replace a variable name, with a potentially new value.
@@ -238,7 +241,7 @@ def swapkey(self, old_key: str, new_key: str, new_value=None):
mapping[new_key] = new_value # type: ignore[index]
return
- def _get_vars(self, stack, scopes: list[str]):
+ def _get_vars(self, stack, scopes: list[str]) -> None:
"""
Get specifically scoped variables from a list of stack frames.
@@ -263,7 +266,7 @@ def _get_vars(self, stack, scopes: list[str]):
# scope after the loop
del frame
- def _update(self, level: int):
+ def _update(self, level: int) -> None:
"""
Update the current scope by going back `level` levels.
@@ -313,7 +316,7 @@ def ntemps(self) -> int:
return len(self.temps)
@property
- def full_scope(self):
+ def full_scope(self) -> DeepChainMap:
"""
Return the full scope for use with passing to engines transparently
as a mapping.
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index ddc6e92b04927..4c73e5d594d2b 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -180,7 +180,6 @@ def __internal_pivot_table(
# TODO: why does test_pivot_table_doctest_case fail if
# we don't do this apparently-unnecessary setitem?
agged[v] = agged[v]
- pass
else:
agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index bb6928d2fd95a..c61864bbc0a76 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -673,17 +673,20 @@ def test_coordinates(setup_path):
tm.assert_frame_equal(result, expected)
# invalid
- msg = "cannot process expression"
- with pytest.raises(ValueError, match=msg):
+ msg = (
+ "where must be passed as a string, PyTablesExpr, "
+ "or list-like of PyTablesExpr"
+ )
+ with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df), dtype="float64"))
- with pytest.raises(ValueError, match=msg):
+ with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df) + 1))
- with pytest.raises(ValueError, match=msg):
+ with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df)), start=5)
- with pytest.raises(ValueError, match=msg):
+ with pytest.raises(TypeError, match=msg):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/41007 | 2021-04-17T21:47:41Z | 2021-04-19T13:53:19Z | 2021-04-19T13:53:19Z | 2021-04-19T14:49:41Z |
Bug in to_datetime raising ValueError with None and NaT and more than 50 elements | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 1c7942dfedafa..57516a7d039e6 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -813,6 +813,7 @@ Reshaping
- Bug in :meth:`DataFrame.stack` not preserving ``CategoricalDtype`` in a ``MultiIndex`` (:issue:`36991`)
- Bug in :func:`to_datetime` raising error when input sequence contains unhashable items (:issue:`39756`)
- Bug in :meth:`Series.explode` preserving index when ``ignore_index`` was ``True`` and values were scalars (:issue:`40487`)
+- Bug in :func:`to_datetime` raising ``ValueError`` when :class:`Series` contains ``None`` and ``NaT`` and has more than 50 elements (:issue:`39882`)
Sparse
^^^^^^
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 77bbde62d607e..102cdf4334510 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -84,6 +84,7 @@
Scalar = Union[int, float, str]
DatetimeScalar = TypeVar("DatetimeScalar", Scalar, datetime)
DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible]
+start_caching_at = 50
# ---------------------------------------------------------------------
@@ -130,7 +131,7 @@ def should_cache(
# default realization
if check_count is None:
# in this case, the gain from caching is negligible
- if len(arg) <= 50:
+ if len(arg) <= start_caching_at:
return False
if len(arg) <= 5000:
@@ -193,6 +194,9 @@ def _maybe_cache(
if len(unique_dates) < len(arg):
cache_dates = convert_listlike(unique_dates, format)
cache_array = Series(cache_dates, index=unique_dates)
+ if not cache_array.is_unique:
+ # GH#39882 in case of None and NaT we get duplicates
+ cache_array = cache_array.drop_duplicates()
return cache_array
diff --git a/pandas/tests/series/methods/test_convert_dtypes.py b/pandas/tests/series/methods/test_convert_dtypes.py
index 8283bcd16dbad..81203b944fa92 100644
--- a/pandas/tests/series/methods/test_convert_dtypes.py
+++ b/pandas/tests/series/methods/test_convert_dtypes.py
@@ -13,6 +13,7 @@
# for most cases), and the specific cases where the result deviates from
# this default. Those overrides are defined as a dict with (keyword, val) as
# dictionary key. In case of multiple items, the last override takes precedence.
+
test_cases = [
(
# data
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index cefbea529e366..3e0c12c6a22cc 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -43,6 +43,7 @@
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
from pandas.core.tools import datetimes as tools
+from pandas.core.tools.datetimes import start_caching_at
class TestTimeConversionFormats:
@@ -956,6 +957,19 @@ def test_to_datetime_cache_scalar(self):
expected = Timestamp("20130101 00:00:00")
assert result == expected
+ def test_convert_object_to_datetime_with_cache(self):
+ # GH#39882
+ ser = Series(
+ [None] + [NaT] * start_caching_at + [Timestamp("2012-07-26")],
+ dtype="object",
+ )
+ result = to_datetime(ser, errors="coerce")
+ expected = Series(
+ [NaT] * (start_caching_at + 1) + [Timestamp("2012-07-26")],
+ dtype="datetime64[ns]",
+ )
+ tm.assert_series_equal(result, expected)
+
@pytest.mark.parametrize(
"date, format",
[
| - [x] closes #39882
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
None and NaT are different for unique while convert_listlike casts None to NaT, hence causing dups
Not sure if we could do something better. | https://api.github.com/repos/pandas-dev/pandas/pulls/41006 | 2021-04-17T21:34:34Z | 2021-04-20T22:49:56Z | 2021-04-20T22:49:55Z | 2021-04-21T20:40:13Z |
BUG: Fix Dataframe constructor missing sql column names (#40682) | diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index a26da75d921ef..b5a18a1fd2674 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -25,6 +25,7 @@
"s3fs": "0.4.0",
"scipy": "1.2.0",
"sqlalchemy": "1.2.8",
+ "sql_metadata": None,
"tables": "3.5.1",
"tabulate": "0.8.7",
"xarray": "0.12.3",
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2736560def2cb..b2f6893eb0752 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -437,6 +437,7 @@
3 bar 8
"""
+# DataFrame helper functions
# -----------------------------------------------------------------------
# DataFrame class
@@ -566,7 +567,6 @@ def __init__(
dtype: Dtype | None = None,
copy: bool | None = None,
):
-
if copy is None:
if isinstance(data, dict) or data is None:
# retain pre-GH#38939 default behavior
@@ -669,6 +669,16 @@ def __init__(
# For data is list-like, or Iterable (will consume into list)
elif is_list_like(data):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
+ # For data is a sqlalchemy query, extract column names
+ if str(type(data)) == "<class 'sqlalchemy.orm.query.Query'>":
+ sql_mt = import_optional_dependency("sql_metadata")
+ # Extract column names using sql_metadata
+ columns = list(sql_mt.get_query_columns(str(data)))
+ # Sanitize column names and remove everything before . character
+ for i in range(len(columns)):
+ if columns[i].find(".") != -1:
+ columns[i] = columns[i][columns[i].find(".") + 1 :]
+
data = list(data)
if len(data) > 0:
if is_dataclass(data[0]):
| …ames
- [ ] closes #40682
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/41005 | 2021-04-17T21:22:45Z | 2021-06-25T16:13:00Z | null | 2021-06-25T16:13:00Z |
fix: function _take_with_is_copy was defined final but overriden | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bad42a85aeeee..d69e933164118 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3626,7 +3626,6 @@ class max_speed
)
return self._constructor(new_data).__finalize__(self, method="take")
- @final
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cbec0024d5f9e..4d50c3100a12c 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -863,7 +863,7 @@ def take(self, indices, axis=0, is_copy=None, **kwargs) -> Series:
result = self._constructor(new_values, index=new_index, fastpath=True)
return result.__finalize__(self, method="take")
- def _take_with_is_copy(self, indices, axis=0):
+ def _take_with_is_copy(self, indices, axis=0) -> Series:
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (using in indexing
| - [x] closes #40974
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/41004 | 2021-04-17T20:27:04Z | 2021-04-19T17:22:29Z | 2021-04-19T17:22:28Z | 2021-04-19T17:22:38Z |
CI: suppress warnings | diff --git a/pandas/tests/io/pytables/test_categorical.py b/pandas/tests/io/pytables/test_categorical.py
index 177abdeedb88b..4928a70f90960 100644
--- a/pandas/tests/io/pytables/test_categorical.py
+++ b/pandas/tests/io/pytables/test_categorical.py
@@ -17,7 +17,14 @@
ensure_clean_store,
)
-pytestmark = [pytest.mark.single, td.skip_array_manager_not_yet_implemented]
+pytestmark = [
+ pytest.mark.single,
+ td.skip_array_manager_not_yet_implemented,
+ # pytables https://github.com/PyTables/PyTables/issues/822
+ pytest.mark.filterwarnings(
+ "ignore:a closed node found in the registry:UserWarning"
+ ),
+]
def test_categorical(setup_path):
diff --git a/setup.cfg b/setup.cfg
index 610b30e4422a9..8cdec8ab9feed 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -137,6 +137,7 @@ xfail_strict = True
filterwarnings =
error:Sparse:FutureWarning
error:The SparseArray:FutureWarning
+ ignore:unclosed transport <asyncio.sslproto:ResourceWarning
junit_family = xunit2
[codespell]
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/41003 | 2021-04-17T20:17:48Z | 2021-04-19T13:53:57Z | 2021-04-19T13:53:57Z | 2021-04-19T14:46:40Z |
[ArrowStringArray] fix test_value_counts_na | diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 52bdcd03d3b49..ecbb5367febc5 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -675,13 +675,18 @@ def value_counts(self, dropna: bool = True) -> Series:
vc = self._data.value_counts()
- # Index cannot hold ExtensionArrays yet
- index = Index(type(self)(vc.field(0)).astype(object))
+ values = vc.field(0)
+ counts = vc.field(1)
+ if dropna and self._data.null_count > 0:
+ mask = values.is_valid()
+ values = values.filter(mask)
+ counts = counts.filter(mask)
+
# No missing values so we can adhere to the interface and return a numpy array.
- counts = np.array(vc.field(1))
+ counts = np.array(counts)
- if dropna and self._data.null_count > 0:
- raise NotImplementedError("yo")
+ # Index cannot hold ExtensionArrays yet
+ index = Index(type(self)(values)).astype(object)
return Series(counts, index=index).astype("Int64")
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index 2fec1925149ad..2b2db49c62ba2 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -476,12 +476,7 @@ def test_arrow_roundtrip(dtype, dtype_object):
assert result.loc[2, "a"] is pd.NA
-def test_value_counts_na(dtype, request):
- if dtype == "arrow_string":
- reason = "TypeError: boolean value of NA is ambiguous"
- mark = pytest.mark.xfail(reason=reason)
- request.node.add_marker(mark)
-
+def test_value_counts_na(dtype):
arr = pd.array(["a", "b", "a", pd.NA], dtype=dtype)
result = arr.value_counts(dropna=False)
expected = pd.Series([2, 1, 1], index=["a", "b", pd.NA], dtype="Int64")
@@ -492,12 +487,7 @@ def test_value_counts_na(dtype, request):
tm.assert_series_equal(result, expected)
-def test_value_counts_with_normalize(dtype, request):
- if dtype == "arrow_string":
- reason = "TypeError: boolean value of NA is ambiguous"
- mark = pytest.mark.xfail(reason=reason)
- request.node.add_marker(mark)
-
+def test_value_counts_with_normalize(dtype):
s = pd.Series(["a", "b", "a", pd.NA], dtype=dtype)
result = s.value_counts(normalize=True)
expected = pd.Series([2, 1], index=["a", "b"], dtype="Float64") / 3
| https://api.github.com/repos/pandas-dev/pandas/pulls/41002 | 2021-04-17T20:00:27Z | 2021-04-19T14:00:20Z | 2021-04-19T14:00:20Z | 2021-04-19T15:51:02Z | |
[ArrowStringArray] fix test_repeat_with_null | diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index dc4550484fa3b..b794690ccc5af 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -198,6 +198,7 @@ def scalar_rep(x):
return self._str_map(scalar_rep, dtype=str)
else:
from pandas.core.arrays.string_ import StringArray
+ from pandas.core.arrays.string_arrow import ArrowStringArray
def rep(x, r):
if x is libmissing.NA:
@@ -209,9 +210,9 @@ def rep(x, r):
repeats = np.asarray(repeats, dtype=object)
result = libops.vec_binop(np.asarray(self), repeats, rep)
- if isinstance(self, StringArray):
+ if isinstance(self, (StringArray, ArrowStringArray)):
# Not going through map, so we have to do this here.
- result = StringArray._from_sequence(result)
+ result = type(self)._from_sequence(result)
return result
def _str_match(
diff --git a/pandas/tests/strings/test_strings.py b/pandas/tests/strings/test_strings.py
index a809446f0bc06..06b22f00a38cf 100644
--- a/pandas/tests/strings/test_strings.py
+++ b/pandas/tests/strings/test_strings.py
@@ -136,14 +136,8 @@ def test_repeat():
tm.assert_series_equal(rs, xp)
-def test_repeat_with_null(nullable_string_dtype, request):
+def test_repeat_with_null(nullable_string_dtype):
# GH: 31632
-
- if nullable_string_dtype == "arrow_string":
- reason = 'Attribute "dtype" are different'
- mark = pytest.mark.xfail(reason=reason)
- request.node.add_marker(mark)
-
ser = Series(["a", None], dtype=nullable_string_dtype)
result = ser.str.repeat([3, 4])
expected = Series(["aaa", None], dtype=nullable_string_dtype)
| https://api.github.com/repos/pandas-dev/pandas/pulls/41001 | 2021-04-17T18:15:39Z | 2021-04-19T14:00:40Z | 2021-04-19T14:00:40Z | 2021-04-19T15:35:53Z | |
REF: remove how arg from maybe_cast_result | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 6726374dbe30e..e91927d87d318 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -55,7 +55,6 @@
ensure_str,
is_bool,
is_bool_dtype,
- is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
@@ -79,6 +78,7 @@
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
+ CategoricalDtype,
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
@@ -359,15 +359,15 @@ def trans(x):
return result
-def maybe_cast_result(
+def maybe_cast_pointwise_result(
result: ArrayLike,
dtype: DtypeObj,
numeric_only: bool = False,
- how: str = "",
same_dtype: bool = True,
) -> ArrayLike:
"""
- Try casting result to a different type if appropriate
+ Try casting result of a pointwise operation back to the original dtype if
+ appropriate.
Parameters
----------
@@ -377,8 +377,6 @@ def maybe_cast_result(
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
- how : str, default ""
- How the result was computed.
same_dtype : bool, default True
Specify dtype when calling _from_sequence
@@ -387,12 +385,12 @@ def maybe_cast_result(
result : array-like
result maybe casted to the dtype.
"""
- dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
if isinstance(dtype, ExtensionDtype):
- if not is_categorical_dtype(dtype) and dtype.kind != "M":
+ if not isinstance(dtype, (CategoricalDtype, DatetimeTZDtype)):
+ # TODO: avoid this special-casing
# We have to special case categorical so as not to upcast
# things like counts back to categorical
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 702d67b198e8d..54a10b9b62ec4 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -36,7 +36,7 @@
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
- maybe_cast_result,
+ maybe_cast_pointwise_result,
maybe_cast_result_dtype,
maybe_downcast_to_dtype,
)
@@ -797,7 +797,7 @@ def _aggregate_series_pure_python(self, obj: Series, func: F):
result[label] = res
out = lib.maybe_convert_objects(result, try_float=False)
- out = maybe_cast_result(out, obj.dtype, numeric_only=True)
+ out = maybe_cast_pointwise_result(out, obj.dtype, numeric_only=True)
return out, counts
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cbec0024d5f9e..fd115a550fa77 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -60,7 +60,7 @@
from pandas.core.dtypes.cast import (
convert_dtypes,
maybe_box_native,
- maybe_cast_result,
+ maybe_cast_pointwise_result,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
@@ -3070,22 +3070,26 @@ def combine(self, other, func, fill_value=None) -> Series:
# so do this element by element
new_index = self.index.union(other.index)
new_name = ops.get_op_result_name(self, other)
- new_values = []
- for idx in new_index:
+ new_values = np.empty(len(new_index), dtype=object)
+ for i, idx in enumerate(new_index):
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all="ignore"):
- new_values.append(func(lv, rv))
+ new_values[i] = func(lv, rv)
else:
# Assume that other is a scalar, so apply the function for
# each element in the Series
new_index = self.index
+ new_values = np.empty(len(new_index), dtype=object)
with np.errstate(all="ignore"):
- new_values = [func(lv, other) for lv in self._values]
+ new_values[:] = [func(lv, other) for lv in self._values]
new_name = self.name
- res_values = sanitize_array(new_values, None)
- res_values = maybe_cast_result(res_values, self.dtype, same_dtype=False)
+ # try_float=False is to match _aggregate_series_pure_python
+ res_values = lib.maybe_convert_objects(new_values, try_float=False)
+ res_values = maybe_cast_pointwise_result(
+ res_values, self.dtype, same_dtype=False
+ )
return self._constructor(res_values, index=new_index, name=new_name)
def combine_first(self, other) -> Series:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/41000 | 2021-04-17T14:47:59Z | 2021-04-19T14:01:50Z | 2021-04-19T14:01:50Z | 2021-04-19T14:44:06Z |
Regression modifying obj with all false indexer | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 82971e460a8a2..313ec5af4fe42 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -48,6 +48,7 @@
from pandas.core.construction import array as pd_array
from pandas.core.indexers import (
check_array_indexer,
+ is_empty_indexer,
is_exact_shape_match,
is_list_like_indexer,
length_of_indexer,
@@ -1872,7 +1873,11 @@ def _setitem_single_column(self, loc: int, value, plane_indexer):
# GH#6149 (null slice), GH#10408 (full bounds)
if com.is_null_slice(pi) or com.is_full_slice(pi, len(self.obj)):
ser = value
- elif is_array_like(value) and is_exact_shape_match(ser, value):
+ elif (
+ is_array_like(value)
+ and is_exact_shape_match(ser, value)
+ and not is_empty_indexer(pi, value)
+ ):
if is_list_like(pi):
ser = value[np.argsort(pi)]
else:
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 1d41426b93db6..9c153777c320a 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -888,6 +888,14 @@ def test_setitem_boolean_indexing(self):
with pytest.raises(ValueError, match="Item wrong length"):
df1[df1.index[:-1] > 2] = -1
+ def test_loc_setitem_all_false_boolean_two_blocks(self):
+ # GH#40885
+ df = DataFrame({"a": [1, 2], "b": [3, 4], "c": "a"})
+ expected = df.copy()
+ indexer = Series([False, False], name="c")
+ df.loc[indexer, ["b"]] = DataFrame({"b": [5, 6]}, index=[0, 1])
+ tm.assert_frame_equal(df, expected)
+
class TestDataFrameSetitemCopyViewSemantics:
def test_setitem_always_copy(self, float_frame):
| - [x] closes #40885
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
Regression on master so no whatsnew
| https://api.github.com/repos/pandas-dev/pandas/pulls/40999 | 2021-04-17T14:43:17Z | 2021-04-19T14:02:53Z | 2021-04-19T14:02:52Z | 2021-04-19T23:20:21Z |
[ArrowStringArray] BUG: fix test_astype_string for Float32Dtype | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 8d3a8feb89d67..50e8cc4c82e0d 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -829,6 +829,7 @@ def astype(self, dtype, copy: bool = True):
"""
from pandas import Index
from pandas.core.arrays.string_ import StringDtype
+ from pandas.core.arrays.string_arrow import ArrowStringDtype
if dtype is not None:
dtype = pandas_dtype(dtype)
@@ -851,7 +852,7 @@ def astype(self, dtype, copy: bool = True):
return self._shallow_copy(new_left, new_right)
elif is_categorical_dtype(dtype):
return Categorical(np.asarray(self), dtype=dtype)
- elif isinstance(dtype, StringDtype):
+ elif isinstance(dtype, (StringDtype, ArrowStringDtype)):
return dtype.construct_array_type()._from_sequence(self, copy=False)
# TODO: This try/except will be repeated.
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 180ed51e7fd2b..1692afbf1fc84 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -228,10 +228,21 @@ def _chk_pyarrow_available(cls) -> None:
@classmethod
def _from_sequence(cls, scalars, dtype: Dtype | None = None, copy: bool = False):
+ from pandas.core.arrays.masked import BaseMaskedArray
+
cls._chk_pyarrow_available()
- # convert non-na-likes to str, and nan-likes to ArrowStringDtype.na_value
- scalars = lib.ensure_string_array(scalars, copy=False)
- return cls(pa.array(scalars, type=pa.string(), from_pandas=True))
+
+ if isinstance(scalars, BaseMaskedArray):
+ # avoid costly conversion to object dtype in ensure_string_array and
+ # numerical issues with Float32Dtype
+ na_values = scalars._mask
+ result = scalars._data
+ result = lib.ensure_string_array(result, copy=copy, convert_na_value=False)
+ return cls(pa.array(result, mask=na_values, type=pa.string()))
+
+ # convert non-na-likes to str
+ result = lib.ensure_string_array(scalars, copy=copy)
+ return cls(pa.array(result, type=pa.string(), from_pandas=True))
@classmethod
def _from_sequence_of_strings(
diff --git a/pandas/tests/extension/base/casting.py b/pandas/tests/extension/base/casting.py
index 7c5ef5b3b27d3..47f4f7585243d 100644
--- a/pandas/tests/extension/base/casting.py
+++ b/pandas/tests/extension/base/casting.py
@@ -43,10 +43,10 @@ def test_astype_str(self, data):
expected = pd.Series([str(x) for x in data[:5]], dtype=str)
self.assert_series_equal(result, expected)
- def test_astype_string(self, data):
+ def test_astype_string(self, data, nullable_string_dtype):
# GH-33465
- result = pd.Series(data[:5]).astype("string")
- expected = pd.Series([str(x) for x in data[:5]], dtype="string")
+ result = pd.Series(data[:5]).astype(nullable_string_dtype)
+ expected = pd.Series([str(x) for x in data[:5]], dtype=nullable_string_dtype)
self.assert_series_equal(result, expected)
def test_to_numpy(self, data):
| this also fixes Interval, but that test doesn't fail in #39908 for a single StringDtype parameterized by storage.
| https://api.github.com/repos/pandas-dev/pandas/pulls/40998 | 2021-04-17T11:41:39Z | 2021-04-30T17:24:25Z | 2021-04-30T17:24:25Z | 2021-04-30T17:57:25Z |
CLN: remove CategoricalIndex.unique | diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 5b98b956e33e6..c085d68b36bc3 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -386,15 +386,6 @@ def fillna(self, value, downcast=None):
return type(self)._simple_new(cat, name=self.name)
- @doc(Index.unique)
- def unique(self, level=None):
- if level is not None:
- self._validate_index_level(level)
- result = self._values.unique()
- # Use _simple_new instead of _shallow_copy to ensure we keep dtype
- # of result, not self.
- return type(self)._simple_new(result, name=self.name)
-
def reindex(
self, target, method=None, level=None, limit=None, tolerance=None
) -> tuple[Index, np.ndarray | None]:
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index b11ec06120e0c..d28bcd6c5497a 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -331,7 +331,7 @@ def _get_unique_index(self):
return self
result = self._data.unique()
- return self._shallow_copy(result)
+ return type(self)._simple_new(result, name=self.name)
@doc(Index.map)
def map(self, mapper, na_action=None):
| made possible by #38140 | https://api.github.com/repos/pandas-dev/pandas/pulls/40995 | 2021-04-17T02:25:15Z | 2021-04-19T14:10:34Z | 2021-04-19T14:10:34Z | 2021-04-19T14:40:54Z |
REGR: memory_map with non-UTF8 encoding | diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst
index 16f9284802407..60e146b2212eb 100644
--- a/doc/source/whatsnew/v1.2.5.rst
+++ b/doc/source/whatsnew/v1.2.5.rst
@@ -15,7 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`)
--
+- Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 00966d39dd99d..06b00a9cbb4eb 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -618,7 +618,12 @@ def get_handle(
# memory mapping needs to be the first step
handle, memory_map, handles = _maybe_memory_map(
- handle, memory_map, ioargs.encoding, ioargs.mode, errors
+ handle,
+ memory_map,
+ ioargs.encoding,
+ ioargs.mode,
+ errors,
+ ioargs.compression["method"] not in _compression_to_extension,
)
is_path = isinstance(handle, str)
@@ -820,7 +825,18 @@ class _MMapWrapper(abc.Iterator):
"""
- def __init__(self, f: IO):
+ def __init__(
+ self,
+ f: IO,
+ encoding: str = "utf-8",
+ errors: str = "strict",
+ decode: bool = True,
+ ):
+ self.encoding = encoding
+ self.errors = errors
+ self.decoder = codecs.getincrementaldecoder(encoding)(errors=errors)
+ self.decode = decode
+
self.attributes = {}
for attribute in ("seekable", "readable", "writeable"):
if not hasattr(f, attribute):
@@ -836,19 +852,30 @@ def __getattr__(self, name: str):
def __iter__(self) -> _MMapWrapper:
return self
+ def read(self, size: int = -1) -> str | bytes:
+ # CSV c-engine uses read instead of iterating
+ content: bytes = self.mmap.read(size)
+ if self.decode:
+ # memory mapping is applied before compression. Encoding should
+ # be applied to the de-compressed data.
+ return content.decode(self.encoding, errors=self.errors)
+ return content
+
def __next__(self) -> str:
newbytes = self.mmap.readline()
# readline returns bytes, not str, but Python's CSV reader
# expects str, so convert the output to str before continuing
- newline = newbytes.decode("utf-8")
+ newline = self.decoder.decode(newbytes)
# mmap doesn't raise if reading past the allocated
# data but instead returns an empty string, so raise
# if that is returned
if newline == "":
raise StopIteration
- return newline
+
+ # IncrementalDecoder seems to push newline to the next line
+ return newline.lstrip("\n")
def _maybe_memory_map(
@@ -857,6 +884,7 @@ def _maybe_memory_map(
encoding: str,
mode: str,
errors: str | None,
+ decode: bool,
) -> tuple[FileOrBuffer, bool, list[Buffer]]:
"""Try to memory map file/buffer."""
handles: list[Buffer] = []
@@ -877,7 +905,10 @@ def _maybe_memory_map(
try:
# error: Argument 1 to "_MMapWrapper" has incompatible type "Union[IO[Any],
# RawIOBase, BufferedIOBase, TextIOBase, mmap]"; expected "IO[Any]"
- wrapped = cast(mmap.mmap, _MMapWrapper(handle)) # type: ignore[arg-type]
+ wrapped = cast(
+ mmap.mmap,
+ _MMapWrapper(handle, encoding, errors, decode), # type: ignore[arg-type]
+ )
handle.close()
handles.remove(handle)
handles.append(wrapped)
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index abf6128699a21..fb110706c3fb4 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -30,25 +30,6 @@ def __init__(self, src: FilePathOrBuffer, **kwds):
assert self.handles is not None
for key in ("storage_options", "encoding", "memory_map", "compression"):
kwds.pop(key, None)
- if self.handles.is_mmap and hasattr(self.handles.handle, "mmap"):
- # error: Item "IO[Any]" of "Union[IO[Any], RawIOBase, BufferedIOBase,
- # TextIOBase, TextIOWrapper, mmap]" has no attribute "mmap"
-
- # error: Item "RawIOBase" of "Union[IO[Any], RawIOBase, BufferedIOBase,
- # TextIOBase, TextIOWrapper, mmap]" has no attribute "mmap"
-
- # error: Item "BufferedIOBase" of "Union[IO[Any], RawIOBase, BufferedIOBase,
- # TextIOBase, TextIOWrapper, mmap]" has no attribute "mmap"
-
- # error: Item "TextIOBase" of "Union[IO[Any], RawIOBase, BufferedIOBase,
- # TextIOBase, TextIOWrapper, mmap]" has no attribute "mmap"
-
- # error: Item "TextIOWrapper" of "Union[IO[Any], RawIOBase, BufferedIOBase,
- # TextIOBase, TextIOWrapper, mmap]" has no attribute "mmap"
-
- # error: Item "mmap" of "Union[IO[Any], RawIOBase, BufferedIOBase,
- # TextIOBase, TextIOWrapper, mmap]" has no attribute "mmap"
- self.handles.handle = self.handles.handle.mmap # type: ignore[union-attr]
try:
self._reader = parsers.TextReader(self.handles.handle, **kwds)
diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py
index 89ece3b1a7300..006438df2a5e0 100644
--- a/pandas/tests/io/parser/test_encoding.py
+++ b/pandas/tests/io/parser/test_encoding.py
@@ -220,3 +220,20 @@ def test_parse_encoded_special_characters(encoding):
expected = DataFrame(data=[[":foo", 0], ["bar", 1], ["baz", 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("encoding", ["utf-8", None, "utf-16", "cp1255", "latin-1"])
+def test_encoding_memory_map(all_parsers, encoding):
+ # GH40986
+ parser = all_parsers
+ expected = DataFrame(
+ {
+ "name": ["Raphael", "Donatello", "Miguel Angel", "Leonardo"],
+ "mask": ["red", "purple", "orange", "blue"],
+ "weapon": ["sai", "bo staff", "nunchunk", "katana"],
+ }
+ )
+ with tm.ensure_clean() as file:
+ expected.to_csv(file, index=False, encoding=encoding)
+ df = parser.read_csv(file, encoding=encoding, memory_map=True)
+ tm.assert_frame_equal(df, expected)
| - [x] closes #40986
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
My best guess is that `memory_map=True` always assumed UTF-8 with the python engine. Now that the c and python engine use the same IO code, the c engine assumed UTF-8 as well.
| https://api.github.com/repos/pandas-dev/pandas/pulls/40994 | 2021-04-17T02:09:57Z | 2021-04-26T12:20:57Z | 2021-04-26T12:20:56Z | 2021-06-05T20:50:11Z |
Deprecate joining over a different number of levels | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 1a11fffbf6b4e..f7463218096e5 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -601,6 +601,7 @@ Deprecations
- Deprecated :meth:`.Styler.set_na_rep` and :meth:`.Styler.set_precision` in favour of :meth:`.Styler.format` with ``na_rep`` and ``precision`` as existing and new input arguments respectively (:issue:`40134`, :issue:`40425`)
- Deprecated allowing partial failure in :meth:`Series.transform` and :meth:`DataFrame.transform` when ``func`` is list-like or dict-like and raises anything but ``TypeError``; ``func`` raising anything but a ``TypeError`` will raise in a future version (:issue:`40211`)
- Deprecated support for ``np.ma.mrecords.MaskedRecords`` in the :class:`DataFrame` constructor, pass ``{name: data[name] for name in data.dtype.names}`` instead (:issue:`40363`)
+- Deprecated using :func:`merge` or :func:`join` on a different number of levels (:issue:`34862`)
- Deprecated the use of ``**kwargs`` in :class:`.ExcelWriter`; use the keyword argument ``engine_kwargs`` instead (:issue:`40430`)
- Deprecated the ``level`` keyword for :class:`DataFrame` and :class:`Series` aggregations; use groupby instead (:issue:`39983`)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 8cee0dd2abb88..b2fe18bba43f4 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -669,11 +669,11 @@ def __init__(
# warn user when merging between different levels
if _left.columns.nlevels != _right.columns.nlevels:
msg = (
- "merging between different levels can give an unintended "
- f"result ({left.columns.nlevels} levels on the left,"
+ "merging between different levels is deprecated and will be removed "
+ f"in a future version. ({left.columns.nlevels} levels on the left,"
f"{right.columns.nlevels} on the right)"
)
- warnings.warn(msg, UserWarning)
+ warnings.warn(msg, FutureWarning, stacklevel=3)
self._validate_specification()
diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py
index 1c7f7e3ff674a..36fd5d399c300 100644
--- a/pandas/tests/frame/methods/test_join.py
+++ b/pandas/tests/frame/methods/test_join.py
@@ -338,14 +338,14 @@ def test_merge_join_different_levels(self):
# merge
columns = ["a", "b", ("c", "c1")]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
- with tm.assert_produces_warning(UserWarning):
+ with tm.assert_produces_warning(FutureWarning):
result = pd.merge(df1, df2, on="a")
tm.assert_frame_equal(result, expected)
# join, see discussion in GH#12219
columns = ["a", "b", ("a", ""), ("c", "c1")]
expected = DataFrame(columns=columns, data=[[1, 11, 0, 44], [0, 22, 1, 33]])
- with tm.assert_produces_warning(UserWarning):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df1.join(df2, on="a")
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index fb161e38c7155..2201da8ccb0d2 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -417,7 +417,7 @@ def test_join_hierarchical_mixed(self):
other_df = DataFrame([(1, 2, 3), (7, 10, 6)], columns=["a", "b", "d"])
other_df.set_index("a", inplace=True)
# GH 9455, 12219
- with tm.assert_produces_warning(UserWarning):
+ with tm.assert_produces_warning(FutureWarning):
result = merge(new_df, other_df, left_index=True, right_index=True)
assert ("b", "mean") in result
assert "b" in result
| - [x] closes #34862
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
I think deprecating this is a good idea, I stumbled across this accidentally this week and found this really odd | https://api.github.com/repos/pandas-dev/pandas/pulls/40993 | 2021-04-17T00:22:07Z | 2021-04-20T22:47:54Z | 2021-04-20T22:47:54Z | 2021-04-21T20:40:07Z |
Deprecate suffixes in merge producing duplicate columns | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 1c7942dfedafa..8f900d16ce0ad 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -571,6 +571,7 @@ Deprecations
- Deprecated support for ``np.ma.mrecords.MaskedRecords`` in the :class:`DataFrame` constructor, pass ``{name: data[name] for name in data.dtype.names}`` instead (:issue:`40363`)
- Deprecated the use of ``**kwargs`` in :class:`.ExcelWriter`; use the keyword argument ``engine_kwargs`` instead (:issue:`40430`)
- Deprecated the ``level`` keyword for :class:`DataFrame` and :class:`Series` aggregations; use groupby instead (:issue:`39983`)
+- Deprecated :func:`merge` producing duplicated columns through the ``suffixes`` keyword and already existing columns (:issue:`22818`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 8cee0dd2abb88..5a13506a42011 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -2311,4 +2311,22 @@ def renamer(x, suffix):
lrenamer = partial(renamer, suffix=lsuffix)
rrenamer = partial(renamer, suffix=rsuffix)
- return (left._transform_index(lrenamer), right._transform_index(rrenamer))
+ llabels = left._transform_index(lrenamer)
+ rlabels = right._transform_index(rrenamer)
+
+ dups = []
+ if not llabels.is_unique:
+ # Only warn when duplicates are caused because of suffixes, already duplicated
+ # columns in origin should not warn
+ dups = llabels[(llabels.duplicated()) & (~left.duplicated())].tolist()
+ if not rlabels.is_unique:
+ dups.extend(rlabels[(rlabels.duplicated()) & (~right.duplicated())].tolist())
+ if dups:
+ warnings.warn(
+ f"Passing 'suffixes' which cause duplicate columns {set(dups)} in the "
+ f"result is deprecated and will raise a MergeError in a future version.",
+ FutureWarning,
+ stacklevel=4,
+ )
+
+ return llabels, rlabels
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index fb161e38c7155..166aa3f5e3263 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -629,7 +629,8 @@ def test_join_dups(self):
dta = x.merge(y, left_index=True, right_index=True).merge(
z, left_index=True, right_index=True, how="outer"
)
- dta = dta.merge(w, left_index=True, right_index=True)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x, y, z, w], axis=1)
expected.columns = ["x_x", "y_x", "x_y", "y_y", "x_x", "y_x", "x_y", "y_y"]
tm.assert_frame_equal(dta, expected)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 9699a0dec4891..1495a34274a94 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -2409,3 +2409,40 @@ def test_merge_result_empty_index_and_on():
result = merge(df2, df1, left_index=True, right_on=["b"])
tm.assert_frame_equal(result, expected)
+
+
+def test_merge_suffixes_produce_dup_columns_warns():
+ # GH#22818
+ left = DataFrame({"a": [1, 2, 3], "b": 1, "b_x": 2})
+ right = DataFrame({"a": [1, 2, 3], "b": 2})
+ expected = DataFrame(
+ [[1, 1, 2, 2], [2, 1, 2, 2], [3, 1, 2, 2]], columns=["a", "b_x", "b_x", "b_y"]
+ )
+ with tm.assert_produces_warning(FutureWarning):
+ result = merge(left, right, on="a")
+ tm.assert_frame_equal(result, expected)
+
+ with tm.assert_produces_warning(FutureWarning):
+ merge(right, left, on="a", suffixes=("_y", "_x"))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_merge_duplicate_columns_with_suffix_no_warning():
+ # GH#22818
+ # Do not raise warning when duplicates are caused by duplicates in origin
+ left = DataFrame([[1, 1, 1], [2, 2, 2]], columns=["a", "b", "b"])
+ right = DataFrame({"a": [1, 3], "b": 2})
+ result = merge(left, right, on="a")
+ expected = DataFrame([[1, 1, 1, 2]], columns=["a", "b_x", "b_x", "b_y"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_merge_duplicate_columns_with_suffix_causing_another_duplicate():
+ # GH#22818
+ # This should raise warning because suffixes cause another collision
+ left = DataFrame([[1, 1, 1, 1], [2, 2, 2, 2]], columns=["a", "b", "b", "b_x"])
+ right = DataFrame({"a": [1, 3], "b": 2})
+ with tm.assert_produces_warning(FutureWarning):
+ result = merge(left, right, on="a")
+ expected = DataFrame([[1, 1, 1, 1, 2]], columns=["a", "b_x", "b_x", "b_x", "b_y"])
+ tm.assert_frame_equal(result, expected)
| - [x] closes #22818
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
I think we should not allow these column collisions at all. Hence deprecating and raising in 2.0
Alternative here would be to add an ``errors`` keyword to the merge and join functions as @simonjayhawkins mentioned in the op. | https://api.github.com/repos/pandas-dev/pandas/pulls/40991 | 2021-04-16T23:50:24Z | 2021-04-20T22:47:30Z | 2021-04-20T22:47:29Z | 2021-04-21T20:39:54Z |
TYP: libperiod | diff --git a/pandas/_libs/tslibs/period.pyi b/pandas/_libs/tslibs/period.pyi
new file mode 100644
index 0000000000000..49e630d605310
--- /dev/null
+++ b/pandas/_libs/tslibs/period.pyi
@@ -0,0 +1,158 @@
+from typing import Literal
+
+import numpy as np
+
+from pandas._libs.tslibs.nattype import NaTType
+from pandas._libs.tslibs.offsets import BaseOffset
+from pandas._libs.tslibs.timestamps import Timestamp
+from pandas._typing import (
+ Frequency,
+ Timezone,
+)
+
+INVALID_FREQ_ERR_MSG: str
+DIFFERENT_FREQ: str
+
+class IncompatibleFrequency(ValueError): ...
+
+def periodarr_to_dt64arr(
+ periodarr: np.ndarray, # const int64_t[:]
+ freq: int,
+) -> np.ndarray: ... # np.ndarray[np.int64]
+
+def period_asfreq_arr(
+ arr: np.ndarray, # ndarray[int64_t] arr,
+ freq1: int,
+ freq2: int,
+ end: bool,
+) -> np.ndarray: ... # np.ndarray[np.int64]
+
+def get_period_field_arr(
+ field: str,
+ arr: np.ndarray, # const int64_t[:]
+ freq: int,
+) -> np.ndarray: ... # np.ndarray[np.int64]
+
+def from_ordinals(
+ values: np.ndarray, # const int64_t[:]
+ freq: Frequency,
+) -> np.ndarray: ... # np.ndarray[np.int64]
+
+def extract_ordinals(
+ values: np.ndarray, # np.ndarray[object]
+ freq: Frequency | int,
+) -> np.ndarray: ... # np.ndarray[np.int64]
+
+def extract_freq(
+ values: np.ndarray, # np.ndarray[object]
+) -> BaseOffset: ...
+
+# exposed for tests
+def period_asfreq(ordinal: int, freq1: int, freq2: int, end: bool) -> int: ...
+
+def period_ordinal(
+ y: int, m: int, d: int, h: int, min: int, s: int, us: int, ps: int, freq: int
+) -> int: ...
+
+def freq_to_dtype_code(freq: BaseOffset) -> int: ...
+def validate_end_alias(how: str) -> Literal["E", "S"]: ...
+
+class Period:
+ ordinal: int # int64_t
+ freq: BaseOffset
+
+ # error: "__new__" must return a class instance (got "Union[Period, NaTType]")
+ def __new__( # type: ignore[misc]
+ cls,
+ value=None,
+ freq=None,
+ ordinal=None,
+ year=None,
+ month=None,
+ quarter=None,
+ day=None,
+ hour=None,
+ minute=None,
+ second=None,
+ ) -> Period | NaTType: ...
+
+ @classmethod
+ def _maybe_convert_freq(cls, freq) -> BaseOffset: ...
+
+ @classmethod
+ def _from_ordinal(cls, ordinal: int, freq) -> Period: ...
+
+ @classmethod
+ def now(cls, freq=...) -> Period: ...
+
+ def strftime(self, fmt: str) -> str: ...
+
+ def to_timestamp(
+ self,
+ freq: str | BaseOffset | None =...,
+ how: str = ...,
+ tz: Timezone | None = ...,
+ ) -> Timestamp: ...
+
+ def asfreq(self, freq, how=...) -> Period: ...
+
+ @property
+ def freqstr(self) -> str: ...
+
+ @property
+ def is_leap_year(self) -> bool: ...
+
+ @property
+ def daysinmonth(self) -> int: ...
+
+ @property
+ def days_in_month(self) -> int: ...
+
+ @property
+ def qyear(self) -> int: ...
+
+ @property
+ def quarter(self) -> int: ...
+
+ @property
+ def day_of_year(self) -> int: ...
+
+ @property
+ def weekday(self) -> int: ...
+
+ @property
+ def day_of_week(self) -> int: ...
+
+ @property
+ def week(self) -> int: ...
+
+ @property
+ def weekofyear(self) -> int: ...
+
+ @property
+ def second(self) -> int: ...
+
+ @property
+ def minute(self) -> int: ...
+
+ @property
+ def hour(self) -> int: ...
+
+ @property
+ def day(self) -> int: ...
+
+ @property
+ def month(self) -> int: ...
+
+ @property
+ def year(self) -> int: ...
+
+ @property
+ def end_time(self) -> Timestamp: ...
+
+ @property
+ def start_time(self) -> Timestamp: ...
+
+ def __sub__(self, other) -> Period | BaseOffset: ...
+
+ def __add__(self, other) -> Period: ...
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 165f51d06af6d..0bb431bc8e1cd 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1445,7 +1445,7 @@ def from_ordinals(const int64_t[:] values, freq):
@cython.wraparound(False)
@cython.boundscheck(False)
-def extract_ordinals(ndarray[object] values, freq):
+def extract_ordinals(ndarray[object] values, freq) -> np.ndarray:
# TODO: Change type to const object[:] when Cython supports that.
cdef:
@@ -1483,7 +1483,7 @@ def extract_ordinals(ndarray[object] values, freq):
return ordinals.base # .base to access underlying np.ndarray
-def extract_freq(ndarray[object] values):
+def extract_freq(ndarray[object] values) -> BaseOffset:
# TODO: Change type to const object[:] when Cython supports that.
cdef:
@@ -2539,7 +2539,7 @@ cdef int64_t _ordinal_from_fields(int year, int month, quarter, int day,
minute, second, 0, 0, base)
-def validate_end_alias(how):
+def validate_end_alias(how: str) -> str: # Literal["E", "S"]
how_dict = {'S': 'S', 'E': 'E',
'START': 'S', 'FINISH': 'E',
'BEGIN': 'S', 'END': 'E'}
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 93df88aba2cba..c7548d008efc6 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -584,6 +584,8 @@ def _validate_shift_value(self, fill_value):
elif isinstance(fill_value, self._recognized_scalars):
fill_value = self._scalar_type(fill_value)
else:
+ new_fill: DatetimeLikeScalar
+
# only warn if we're not going to raise
if self._scalar_type is Period and lib.is_integer(fill_value):
# kludge for #31971 since Period(integer) tries to cast to str
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index a9c94b615f49c..e9322b06b96d2 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -295,9 +295,17 @@ def _generate_range(cls, start, end, periods, freq, fields):
# -----------------------------------------------------------------
# DatetimeLike Interface
- def _unbox_scalar(self, value: Period | NaTType, setitem: bool = False) -> np.int64:
+ # error: Argument 1 of "_unbox_scalar" is incompatible with supertype
+ # "DatetimeLikeArrayMixin"; supertype defines the argument type as
+ # "Union[Union[Period, Any, Timedelta], NaTType]"
+ def _unbox_scalar( # type: ignore[override]
+ self,
+ value: Period | NaTType,
+ setitem: bool = False,
+ ) -> np.int64:
if value is NaT:
- return np.int64(value.value)
+ # error: Item "Period" of "Union[Period, NaTType]" has no attribute "value"
+ return np.int64(value.value) # type: ignore[union-attr]
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=setitem)
return np.int64(value.ordinal)
@@ -482,9 +490,9 @@ def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray:
freq = Period._maybe_convert_freq(freq)
base = freq._period_dtype_code
- new_data = self.asfreq(freq, how=how)
+ new_parr = self.asfreq(freq, how=how)
- new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base)
+ new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base)
return DatetimeArray(new_data)._with_freq("infer")
# --------------------------------------------------------------------
@@ -910,7 +918,7 @@ def raise_on_incompatible(left, right):
def period_array(
- data: Sequence[Period | None] | AnyArrayLike,
+ data: Sequence[Period | str | None] | AnyArrayLike,
freq: str | Tick | None = None,
copy: bool = False,
) -> PeriodArray:
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 213c20294025d..aae6314968695 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1734,7 +1734,8 @@ def _get_period_bins(self, ax: PeriodIndex):
# Get offset for bin edge (not label edge) adjustment
start_offset = Period(start, self.freq) - Period(p_start, self.freq)
- bin_shift = start_offset.n % freq_mult
+ # error: Item "Period" of "Union[Period, Any]" has no attribute "n"
+ bin_shift = start_offset.n % freq_mult # type: ignore[union-attr]
start = p_start
labels = binner = period_range(
@@ -1903,17 +1904,17 @@ def _get_period_range_edges(
raise TypeError("'first' and 'last' must be instances of type Period")
# GH 23882
- first = first.to_timestamp()
- last = last.to_timestamp()
- adjust_first = not freq.is_on_offset(first)
- adjust_last = freq.is_on_offset(last)
+ first_ts = first.to_timestamp()
+ last_ts = last.to_timestamp()
+ adjust_first = not freq.is_on_offset(first_ts)
+ adjust_last = freq.is_on_offset(last_ts)
- first, last = _get_timestamp_range_edges(
- first, last, freq, closed=closed, origin=origin, offset=offset
+ first_ts, last_ts = _get_timestamp_range_edges(
+ first_ts, last_ts, freq, closed=closed, origin=origin, offset=offset
)
- first = (first + int(adjust_first) * freq).to_period(freq)
- last = (last - int(adjust_last) * freq).to_period(freq)
+ first = (first_ts + int(adjust_first) * freq).to_period(freq)
+ last = (last_ts - int(adjust_last) * freq).to_period(freq)
return first, last
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40990 | 2021-04-16T21:30:41Z | 2021-05-05T12:52:29Z | 2021-05-05T12:52:29Z | 2021-05-05T14:47:17Z |
Bug in loc returning multiindex in wrong oder with duplicated indexer | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index d357e4a633347..e29e740ddf8fe 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -871,6 +871,7 @@ Indexing
- Bug in :meth:`DataFrame.loc.__setitem__` when setting-with-expansion incorrectly raising when the index in the expanding axis contains duplicates (:issue:`40096`)
- Bug in :meth:`DataFrame.loc` incorrectly matching non-boolean index elements (:issue:`20432`)
- Bug in :meth:`Series.__delitem__` with ``ExtensionDtype`` incorrectly casting to ``ndarray`` (:issue:`40386`)
+- Bug in :meth:`DataFrame.loc` returning :class:`MultiIndex` in wrong order if indexer has duplicates (:issue:`40978`)
- Bug in :meth:`DataFrame.__setitem__` raising ``TypeError`` when using a str subclass as the column name with a :class:`DatetimeIndex` (:issue:`37366`)
Missing
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 1a3719233a1da..532e39487ad7b 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3446,6 +3446,7 @@ def _reorder_indexer(
new_order = np.arange(n)[indexer]
elif is_list_like(k):
# Generate a map with all level codes as sorted initially
+ k = algos.unique(k)
key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
self.levels[i]
)
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 0c6f2faf77f00..cf1ab3eadc7a4 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -764,6 +764,28 @@ def test_loc_getitem_index_differently_ordered_slice_none():
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize("indexer", [[1, 2, 7, 6, 2, 3, 8, 7], [1, 2, 7, 6, 3, 8]])
+def test_loc_getitem_index_differently_ordered_slice_none_duplicates(indexer):
+ # GH#40978
+ df = DataFrame(
+ [1] * 8,
+ index=MultiIndex.from_tuples(
+ [(1, 1), (1, 2), (1, 7), (1, 6), (2, 2), (2, 3), (2, 8), (2, 7)]
+ ),
+ columns=["a"],
+ )
+ result = df.loc[(slice(None), indexer), :]
+ expected = DataFrame(
+ [1] * 8,
+ index=[[1, 1, 2, 1, 2, 1, 2, 2], [1, 2, 2, 7, 7, 6, 3, 8]],
+ columns=["a"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+ result = df.loc[df.index.isin(indexer, level=1), :]
+ tm.assert_frame_equal(result, df)
+
+
def test_loc_getitem_drops_levels_for_one_row_dataframe():
# GH#10521
mi = MultiIndex.from_arrays([["x"], ["y"], ["z"]], names=["a", "b", "c"])
| - [x] closes #40978
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
This fixes the ordering, but I am not sure if an indexer with duplicates in this case should return the same as an indexer which is unique with the same elements. | https://api.github.com/repos/pandas-dev/pandas/pulls/40987 | 2021-04-16T20:26:55Z | 2021-05-26T02:04:16Z | 2021-05-26T02:04:15Z | 2021-05-27T10:26:39Z |
DOC: Reindexing behaviour of dataframe column-assignment missing | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 3707e141bc447..cf7f337799be7 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -287,6 +287,10 @@ def loc(self) -> _LocIndexer:
- An alignable boolean Series. The index of the key will be aligned before
masking.
- An alignable Index. The Index of the returned selection will be the input.
+
+ .. warning:: Preferred syntax to assign a column: `df[]` instead of
+ allocation through `df.loc[]`
+
- A ``callable`` function with one argument (the calling Series or
DataFrame) and that returns valid output for indexing (one of the above)
@@ -388,6 +392,9 @@ def loc(self) -> _LocIndexer:
**Setting values**
+ .. warning:: Preferred syntax to assign a column: `df['shield']`
+ instead of allocation through `df.loc[]` such as `df.loc['shield']`
+
Set value for all items matching the list of labels
>>> df.loc[['viper', 'sidewinder'], ['shield']] = 50
@@ -445,6 +452,36 @@ def loc(self) -> _LocIndexer:
8 4 5
9 7 8
+ **Setting values using Series**
+
+ Assign column using Series
+
+ >>> df.loc[:, 'shield'] = pd.Series({7: 8, 8: 10, 9: 13})
+ >>> df
+ max_speed shield
+ 7 1 8
+ 8 4 10
+ 9 7 13
+
+ Assigning column to a Series with non matching indexes aligns the
+ right hand side to the index of the left
+
+ >>> df.loc[:, 'shield'] = pd.Series({8: 10})
+ >>> df
+ max_speed shield
+ 7 1 NaN
+ 8 4 10.0
+ 9 7 NaN
+
+ Assign column containing value with missing index
+
+ >>> df.loc[:, 'shield'] = pd.Series({11: 14})
+ >>> df
+ max_speed shield
+ 7 1 NaN
+ 8 4 NaN
+ 9 7 NaN
+
**Getting values with a MultiIndex**
A number of examples using a DataFrame with a MultiIndex
| - [X] xref #39845 (.loc[] portion of issue)
- [X] tests added / passed
- [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40985 | 2021-04-16T17:07:15Z | 2021-09-02T00:43:43Z | null | 2021-09-02T00:43:44Z |
TYP: Signature of "reindex" incompatible with supertype "NDFrame" | diff --git a/pandas/core/series.py b/pandas/core/series.py
index f0f5bd7c3e2b2..a53d65edabec2 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4604,8 +4604,17 @@ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
optional_labels=_shared_doc_kwargs["optional_labels"],
optional_axis=_shared_doc_kwargs["optional_axis"],
)
- def reindex(self, index=None, **kwargs):
- return super().reindex(index=index, **kwargs)
+ def reindex(self, *args, **kwargs) -> Series:
+ if len(args) > 1:
+ raise TypeError("Only one positional argument ('index') is allowed")
+ if args:
+ (index,) = args
+ if "index" in kwargs:
+ raise TypeError(
+ "'index' passed as both positional and keyword argument"
+ )
+ kwargs.update({"index": index})
+ return super().reindex(**kwargs)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
def drop(
diff --git a/pandas/tests/series/methods/test_reindex.py b/pandas/tests/series/methods/test_reindex.py
index be9f96c8b509a..4350a5d9ac989 100644
--- a/pandas/tests/series/methods/test_reindex.py
+++ b/pandas/tests/series/methods/test_reindex.py
@@ -348,6 +348,31 @@ def test_reindex_periodindex_with_object(p_values, o_values, values, expected_va
tm.assert_series_equal(result, expected)
+def test_reindex_too_many_args():
+ # GH 40980
+ ser = Series([1, 2])
+ with pytest.raises(
+ TypeError, match=r"Only one positional argument \('index'\) is allowed"
+ ):
+ ser.reindex([2, 3], False)
+
+
+def test_reindex_double_index():
+ # GH 40980
+ ser = Series([1, 2])
+ msg = r"'index' passed as both positional and keyword argument"
+ with pytest.raises(TypeError, match=msg):
+ ser.reindex([2, 3], index=[3, 4])
+
+
+def test_reindex_no_posargs():
+ # GH 40980
+ ser = Series([1, 2])
+ result = ser.reindex(index=[1, 0])
+ expected = Series([2, 1], index=[1, 0])
+ tm.assert_series_equal(result, expected)
+
+
@pytest.mark.parametrize("values", [[["a"], ["x"]], [[], []]])
def test_reindex_empty_with_level(values):
# GH41170
| - [ ] closes #40980
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40984 | 2021-04-16T15:46:51Z | 2021-11-26T15:05:13Z | 2021-11-26T15:05:13Z | 2021-11-26T17:39:52Z |
REF: enforce annotation in maybe_downcast_to_dtype | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index a373b57accaa9..6726374dbe30e 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -4,7 +4,6 @@
from __future__ import annotations
-from contextlib import suppress
from datetime import (
date,
datetime,
@@ -29,7 +28,6 @@
NaT,
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
- Period,
Timedelta,
Timestamp,
conversion,
@@ -87,7 +85,6 @@
PeriodDtype,
)
from pandas.core.dtypes.generic import (
- ABCDataFrame,
ABCExtensionArray,
ABCSeries,
)
@@ -249,9 +246,6 @@ def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLi
try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
- if isinstance(result, ABCDataFrame):
- # see test_pivot_table_doctest_case
- return result
do_round = False
if isinstance(dtype, str):
@@ -278,15 +272,9 @@ def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLi
dtype = np.dtype(dtype)
- elif dtype.type is Period:
- from pandas.core.arrays import PeriodArray
-
- with suppress(TypeError):
- # e.g. TypeError: int() argument must be a string, a
- # bytes-like object or a number, not 'Period
-
- # error: "dtype[Any]" has no attribute "freq"
- return PeriodArray(result, freq=dtype.freq) # type: ignore[attr-defined]
+ if not isinstance(dtype, np.dtype):
+ # enforce our signature annotation
+ raise TypeError(dtype) # pragma: no cover
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
@@ -295,15 +283,7 @@ def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLi
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
- if isinstance(dtype, DatetimeTZDtype):
- # convert to datetime and change timezone
- i8values = result.astype("i8", copy=False)
- cls = dtype.construct_array_type()
- # equiv: DatetimeArray(i8values).tz_localize("UTC").tz_convert(dtype.tz)
- dt64values = i8values.view("M8[ns]")
- result = cls._simple_new(dt64values, dtype=dtype)
- else:
- result = result.astype(dtype)
+ result = result.astype(dtype)
return result
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 37fc5de95b3d2..38766d2856cfe 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7213,13 +7213,14 @@ def combine(
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
- if not is_dtype_equal(this_dtype, new_dtype):
- series = series.astype(new_dtype)
- if not is_dtype_equal(other_dtype, new_dtype):
- otherSeries = otherSeries.astype(new_dtype)
+ series = series.astype(new_dtype, copy=False)
+ otherSeries = otherSeries.astype(new_dtype, copy=False)
arr = func(series, otherSeries)
- arr = maybe_downcast_to_dtype(arr, new_dtype)
+ if isinstance(new_dtype, np.dtype):
+ # if new_dtype is an EA Dtype, then `func` is expected to return
+ # the correct dtype without any additional casting
+ arr = maybe_downcast_to_dtype(arr, new_dtype)
result[col] = arr
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 795f5250012cb..ddc6e92b04927 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -174,7 +174,15 @@ def __internal_pivot_table(
and v in agged
and not is_integer_dtype(agged[v])
):
- agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
+ if isinstance(agged[v], ABCDataFrame):
+ # exclude DataFrame case bc maybe_downcast_to_dtype expects
+ # ArrayLike
+ # TODO: why does test_pivot_table_doctest_case fail if
+ # we don't do this apparently-unnecessary setitem?
+ agged[v] = agged[v]
+ pass
+ else:
+ agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
table = agged
diff --git a/pandas/tests/dtypes/cast/test_downcast.py b/pandas/tests/dtypes/cast/test_downcast.py
index 0c3e9841eba3e..5217b38f155c8 100644
--- a/pandas/tests/dtypes/cast/test_downcast.py
+++ b/pandas/tests/dtypes/cast/test_downcast.py
@@ -5,11 +5,7 @@
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
-from pandas import (
- DatetimeIndex,
- Series,
- Timestamp,
-)
+from pandas import Series
import pandas._testing as tm
@@ -77,7 +73,7 @@ def test_downcast_conversion_nan(float_dtype):
def test_downcast_conversion_empty(any_real_dtype):
dtype = any_real_dtype
arr = np.array([], dtype=dtype)
- result = maybe_downcast_to_dtype(arr, "int64")
+ result = maybe_downcast_to_dtype(arr, np.dtype("int64"))
tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
@@ -89,15 +85,3 @@ def test_datetime_likes_nan(klass):
exp = np.array([1, 2, klass("NaT")], dtype)
res = maybe_downcast_to_dtype(arr, dtype)
tm.assert_numpy_array_equal(res, exp)
-
-
-@pytest.mark.parametrize("as_asi", [True, False])
-def test_datetime_with_timezone(as_asi):
- # see gh-15426
- ts = Timestamp("2016-01-01 12:00:00", tz="US/Pacific")
- exp = DatetimeIndex([ts, ts])._data
-
- obj = exp.asi8 if as_asi else exp
- res = maybe_downcast_to_dtype(obj, exp.dtype)
-
- tm.assert_datetime_array_equal(res, exp)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40982 | 2021-04-16T14:57:25Z | 2021-04-16T19:02:10Z | 2021-04-16T19:02:10Z | 2021-04-16T21:25:37Z |
TST: use expected_html for to_html tests | diff --git a/pandas/tests/io/formats/data/html/gh13828_expected_output.html b/pandas/tests/io/formats/data/html/gh13828_expected_output.html
new file mode 100644
index 0000000000000..690d638c31d5b
--- /dev/null
+++ b/pandas/tests/io/formats/data/html/gh13828_expected_output.html
@@ -0,0 +1,21 @@
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>Group</th>
+ <th>Data</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>A</td>
+ <td>1.22</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>A</td>
+ <td>{na_rep}</td>
+ </tr>
+ </tbody>
+</table>
diff --git a/pandas/tests/io/formats/data/html/gh40024_expected_output.html b/pandas/tests/io/formats/data/html/gh40024_expected_output.html
new file mode 100644
index 0000000000000..0877c29525d2c
--- /dev/null
+++ b/pandas/tests/io/formats/data/html/gh40024_expected_output.html
@@ -0,0 +1,18 @@
+<table border="1" class="dataframe">
+ <thead>
+ <tr style="text-align: right;">
+ <th></th>
+ <th>x</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <th>0</th>
+ <td>1,000</td>
+ </tr>
+ <tr>
+ <th>1</th>
+ <td>test</td>
+ </tr>
+ </tbody>
+</table>
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index ec2f109900f3a..a61e77bec9828 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -851,7 +851,7 @@ def test_to_html_multilevel(multiindex_year_month_day_dataframe_random_data):
@pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
-def test_to_html_na_rep_and_float_format(na_rep):
+def test_to_html_na_rep_and_float_format(na_rep, datapath):
# https://github.com/pandas-dev/pandas/issues/13828
df = DataFrame(
[
@@ -861,51 +861,14 @@ def test_to_html_na_rep_and_float_format(na_rep):
columns=["Group", "Data"],
)
result = df.to_html(na_rep=na_rep, float_format="{:.2f}".format)
- expected = f"""<table border="1" class="dataframe">
- <thead>
- <tr style="text-align: right;">
- <th></th>
- <th>Group</th>
- <th>Data</th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <th>0</th>
- <td>A</td>
- <td>1.22</td>
- </tr>
- <tr>
- <th>1</th>
- <td>A</td>
- <td>{na_rep}</td>
- </tr>
- </tbody>
-</table>"""
+ expected = expected_html(datapath, "gh13828_expected_output")
+ expected = expected.format(na_rep=na_rep)
assert result == expected
-def test_to_html_float_format_object_col():
+def test_to_html_float_format_object_col(datapath):
# GH#40024
df = DataFrame(data={"x": [1000.0, "test"]})
result = df.to_html(float_format=lambda x: f"{x:,.0f}")
- expected = """<table border="1" class="dataframe">
- <thead>
- <tr style="text-align: right;">
- <th></th>
- <th>x</th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <th>0</th>
- <td>1,000</td>
- </tr>
- <tr>
- <th>1</th>
- <td>test</td>
- </tr>
- </tbody>
-</table>"""
-
+ expected = expected_html(datapath, "gh40024_expected_output")
assert result == expected
| xref https://github.com/pandas-dev/pandas/pull/40850#discussion_r611022208 | https://api.github.com/repos/pandas-dev/pandas/pulls/40981 | 2021-04-16T14:33:54Z | 2021-04-16T15:57:20Z | 2021-04-16T15:57:20Z | 2021-04-16T16:03:33Z |
TYP: Signature of "rename" incompatible with supertype "NDFrame" | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 76f68fdaa7845..803d1c914c954 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5073,10 +5073,6 @@ def drop(
errors=errors,
)
- @rewrite_axis_style_signature(
- "mapper",
- [("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
- )
def rename(
self,
mapper: Renamer | None = None,
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index faa32b31a73d7..1dafe1618a9f8 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2082,7 +2082,8 @@ def size(self) -> DataFrame | Series:
result = self._obj_1d_constructor(result)
if not self.as_index:
- result = result.rename("size").reset_index()
+ # Item "None" of "Optional[Series]" has no attribute "reset_index"
+ result = result.rename("size").reset_index() # type: ignore[union-attr]
return self._reindex_output(result, fill_value=0)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 03fac7cceabb7..d5909b8659903 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4468,14 +4468,16 @@ def align(
def rename(
self,
- index=None,
+ mapper=None,
*,
+ index=None,
+ columns=None,
axis=None,
copy=True,
inplace=False,
level=None,
errors="ignore",
- ):
+ ) -> Series | None:
"""
Alter Series index labels or name.
@@ -4491,7 +4493,7 @@ def rename(
----------
axis : {0 or "index"}
Unused. Accepted for compatibility with DataFrame method only.
- index : scalar, hashable sequence, dict-like or function, optional
+ mapper : scalar, hashable sequence, dict-like or function, optional
Functions or dict-like are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
@@ -4539,12 +4541,16 @@ def rename(
# Make sure we raise if an invalid 'axis' is passed.
axis = self._get_axis_number(axis)
- if callable(index) or is_dict_like(index):
+ if index is not None and mapper is not None:
+ raise TypeError("Cannot specify both 'mapper' and 'index'")
+ if mapper is None:
+ mapper = index
+ if callable(mapper) or is_dict_like(mapper):
return super().rename(
- index, copy=copy, inplace=inplace, level=level, errors=errors
+ mapper, copy=copy, inplace=inplace, level=level, errors=errors
)
else:
- return self._set_name(index, inplace=inplace)
+ return self._set_name(mapper, inplace=inplace)
@overload
def set_axis(
diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py
index 2c2c127394fb6..36a7949a9f1e3 100644
--- a/pandas/io/json/_normalize.py
+++ b/pandas/io/json/_normalize.py
@@ -517,7 +517,11 @@ def _recursive_extract(data, path, seen_meta, level=0):
result = DataFrame(records)
if record_prefix is not None:
- result = result.rename(columns=lambda x: f"{record_prefix}{x}")
+ # Incompatible types in assignment (expression has type "Optional[DataFrame]",
+ # variable has type "DataFrame")
+ result = result.rename( # type: ignore[assignment]
+ columns=lambda x: f"{record_prefix}{x}"
+ )
# Data types, a problem
for k, v in meta_vals.items():
diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py
index a78abfa63cff4..3425dd8f019e7 100644
--- a/pandas/tests/series/methods/test_rename.py
+++ b/pandas/tests/series/methods/test_rename.py
@@ -105,6 +105,19 @@ def test_rename_callable(self):
assert result.name == expected.name
+ def test_rename_method_and_index(self):
+ # GH 40977
+ ser = Series([1, 2])
+ with pytest.raises(TypeError, match="Cannot specify both 'mapper' and 'index'"):
+ ser.rename(str, index=str)
+
+ def test_rename_none(self):
+ # GH 40977
+ ser = Series([1, 2], name="foo")
+ result = ser.rename(None)
+ expected = Series([1, 2])
+ tm.assert_series_equal(result, expected)
+
def test_rename_series_with_multiindex(self):
# issue #43659
arrays = [
| - [x] closes #40977
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
---
@WillAyd this should still be backwards compatible, it just makes the signature compatible with its parent's | https://api.github.com/repos/pandas-dev/pandas/pulls/40979 | 2021-04-16T14:17:39Z | 2021-11-26T15:36:25Z | 2021-11-26T15:36:25Z | 2021-11-26T17:44:30Z |
TYP add some missing types to series | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5c605a6b441c6..1944e5e6196b4 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1019,7 +1019,7 @@ def _get_value(self, label, takeable: bool = False):
loc = self.index.get_loc(label)
return self.index._get_values_for_loc(self, loc, label)
- def __setitem__(self, key, value):
+ def __setitem__(self, key, value) -> None:
key = com.apply_if_callable(key, self)
cacher_needs_updating = self._check_is_chained_assignment_possible()
@@ -1058,7 +1058,7 @@ def __setitem__(self, key, value):
if cacher_needs_updating:
self._maybe_update_cacher()
- def _set_with_engine(self, key, value):
+ def _set_with_engine(self, key, value) -> None:
# fails with AttributeError for IntervalIndex
loc = self.index._engine.get_loc(key)
# error: Argument 1 to "validate_numeric_casting" has incompatible type
@@ -1094,7 +1094,7 @@ def _set_with(self, key, value):
else:
self.loc[key] = value
- def _set_labels(self, key, value):
+ def _set_labels(self, key, value) -> None:
key = com.asarray_tuplesafe(key)
indexer: np.ndarray = self.index.get_indexer(key)
mask = indexer == -1
@@ -1102,7 +1102,7 @@ def _set_labels(self, key, value):
raise KeyError(f"{key[mask]} not in index")
self._set_values(indexer, value)
- def _set_values(self, key, value):
+ def _set_values(self, key, value) -> None:
if isinstance(key, Series):
key = key._values
| Noticed this while working on #40973
Those without any types won't be checked by `mypy`
e.g.:
```console
$ cat t.py
from typing import final
class Foo:
@final
def foo(x) -> None:
pass
class Bar(Foo):
def foo(x):
pass
$ mypy t.py
Success: no issues found in 1 source file
```
Adding in `-> None`:
```console
$ cat t.py
from typing import final
class Foo:
@final
def foo(x) -> None:
pass
class Bar(Foo):
def foo(x) -> None:
pass
$ mypy t.py
t.py:9: error: Cannot override final attribute "foo" (previously declared in base class "Foo") [misc]
Found 1 error in 1 file (checked 1 source file)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/40975 | 2021-04-16T10:21:50Z | 2021-04-16T16:54:14Z | 2021-04-16T16:54:14Z | 2021-04-16T17:31:23Z |
STYLE, TYP Turn black formatter off for overloads | diff --git a/.pep8speaks.yml b/.pep8speaks.yml
deleted file mode 100644
index 5a83727ddf5f8..0000000000000
--- a/.pep8speaks.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-# File : .pep8speaks.yml
-
-scanner:
- diff_only: True # If True, errors caused by only the patch are shown
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2f46190ef5eb7..131ef98985ae5 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -71,7 +71,7 @@ repos:
types: [text] # overwrite types: [rst]
types_or: [python, rst]
- repo: https://github.com/asottile/yesqa
- rev: v1.2.2
+ rev: v1.2.3
hooks:
- id: yesqa
additional_dependencies:
diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py
index 93d87f6bb4dfa..bc1602525010e 100644
--- a/pandas/core/array_algos/take.py
+++ b/pandas/core/array_algos/take.py
@@ -26,26 +26,12 @@
from pandas.core.arrays.base import ExtensionArray
+# fmt: off
@overload
-def take_nd(
- arr: np.ndarray,
- indexer,
- axis: int = ...,
- fill_value=...,
- allow_fill: bool = ...,
-) -> np.ndarray:
- ...
-
-
+def take_nd(arr: np.ndarray, indexer, axis: int = ..., fill_value=..., allow_fill: bool = ...) -> np.ndarray: ... # noqa: E501, E704
@overload
-def take_nd(
- arr: ExtensionArray,
- indexer,
- axis: int = ...,
- fill_value=...,
- allow_fill: bool = ...,
-) -> ArrayLike:
- ...
+def take_nd(arr: ExtensionArray, indexer, axis: int = ..., fill_value=..., allow_fill: bool = ...) -> ArrayLike: ... # noqa: E501, E704
+# fmt: on
def take_nd(
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 4a5dca348a8c0..f0ea724ca137f 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -427,21 +427,16 @@ def astype(self, dtype, copy: bool = True):
else:
return np.asarray(self, dtype=dtype)
+ # fmt: off
@overload
- def view(self: DatetimeLikeArrayT) -> DatetimeLikeArrayT:
- ...
-
+ def view(self: DatetimeLikeArrayT) -> DatetimeLikeArrayT: ... # noqa: E704
@overload
- def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray:
- ...
-
+ def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray: ... # noqa: E704
@overload
- def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray:
- ...
-
+ def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray: ... # noqa: E704
@overload
- def view(self, dtype: Dtype | None = ...) -> ArrayLike:
- ...
+ def view(self, dtype: Dtype | None = ...) -> ArrayLike: ... # noqa: E704
+ # fmt: on
def view(self, dtype: Dtype | None = None) -> ArrayLike:
# We handle datetime64, datetime64tz, timedelta64, and period
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 289ed4948934f..eea8a852accbf 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -1918,18 +1918,12 @@ def std(
# Constructor Helpers
+# fmt: off
@overload
-def sequence_to_datetimes(
- data, allow_object: Literal[False] = ..., require_iso8601: bool = ...
-) -> DatetimeArray:
- ...
-
-
+def sequence_to_datetimes(data, allow_object: Literal[False] = ..., require_iso8601: bool = ...) -> DatetimeArray: ... # noqa: E501, E704
@overload
-def sequence_to_datetimes(
- data, allow_object: Literal[True] = ..., require_iso8601: bool = ...
-) -> np.ndarray | DatetimeArray:
- ...
+def sequence_to_datetimes(data, allow_object: Literal[True] = ..., require_iso8601: bool = ...) -> np.ndarray | DatetimeArray: ... # noqa: E501, E704
+# fmt: on
def sequence_to_datetimes(
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index d739b46620032..1d14d007bd14c 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -445,14 +445,12 @@ def maybe_cast_to_extension_array(
return result
+# fmt: off
@overload
-def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype:
- ...
-
-
+def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype: ... # noqa: E704
@overload
-def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype:
- ...
+def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype: ... # noqa: E704
+# fmt: on
def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj:
@@ -1059,18 +1057,12 @@ def astype_td64_unit_conversion(
return result
+# fmt: off
@overload
-def astype_nansafe(
- arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ...
-) -> np.ndarray:
- ...
-
-
+def astype_nansafe(arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ...) -> np.ndarray: ... # noqa: E501, E704
@overload
-def astype_nansafe(
- arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ...
-) -> ExtensionArray:
- ...
+def astype_nansafe(arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ...) -> ExtensionArray: ... # noqa: E501, E704
+# fmt: on
def astype_nansafe(
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 7f970a72cb12c..04178a2491857 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1303,13 +1303,12 @@ def __len__(self) -> int:
"""
return len(self.index)
+ # fmt: off
@overload
- def dot(self, other: Series) -> Series:
- ...
-
+ def dot(self, other: Series) -> Series: ... # noqa: E704
@overload
- def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame:
- ...
+ def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... # noqa: E704
+ # fmt: on
def dot(self, other: AnyArrayLike | FrameOrSeriesUnion) -> FrameOrSeriesUnion:
"""
@@ -1421,15 +1420,12 @@ def dot(self, other: AnyArrayLike | FrameOrSeriesUnion) -> FrameOrSeriesUnion:
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
+ # fmt: off
@overload
- def __matmul__(self, other: Series) -> Series:
- ...
-
+ def __matmul__(self, other: Series) -> Series: ... # noqa: E704
@overload
- def __matmul__(
- self, other: AnyArrayLike | FrameOrSeriesUnion
- ) -> FrameOrSeriesUnion:
- ...
+ def __matmul__(self, other: AnyArrayLike | FrameOrSeriesUnion) -> FrameOrSeriesUnion: ... # noqa: E501, E704
+ # fmt: on
def __matmul__(
self, other: AnyArrayLike | FrameOrSeriesUnion
@@ -4653,25 +4649,16 @@ def align(
broadcast_axis=broadcast_axis,
)
+ # fmt: off
@overload
- def set_axis(
- self, labels, axis: Axis = ..., inplace: Literal[False] = ...
- ) -> DataFrame:
- ...
-
+ def set_axis(self, labels, axis: Axis = ..., inplace: Literal[False] = ...) -> DataFrame: ... # noqa: E501, E704
@overload
- def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None:
- ...
-
+ def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None: ... # noqa: E501, E704
@overload
- def set_axis(self, labels, *, inplace: Literal[True]) -> None:
- ...
-
+ def set_axis(self, labels, *, inplace: Literal[True]) -> None: ... # noqa: E704
@overload
- def set_axis(
- self, labels, axis: Axis = ..., inplace: bool = ...
- ) -> DataFrame | None:
- ...
+ def set_axis(self, labels, axis: Axis = ..., inplace: bool = ...) -> DataFrame | None: ... # noqa: E501, E704
+ # fmt: on
@Appender(
"""
@@ -5011,120 +4998,28 @@ def rename(
errors=errors,
)
+ # fmt: off
@overload
- def fillna(
- self,
- value=...,
- method: str | None = ...,
- axis: Axis | None = ...,
- inplace: Literal[False] = ...,
- limit=...,
- downcast=...,
- ) -> DataFrame:
- ...
-
+ def fillna(self, value=..., method: str | None = ..., axis: Axis | None = ..., inplace: Literal[False] = ..., limit=..., downcast=...) -> DataFrame: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- value,
- method: str | None,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, value, method: str | None, axis: Axis | None, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- *,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, *, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- value,
- *,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, value, *, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- *,
- method: str | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, *, method: str | None, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- *,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, *, axis: Axis | None, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- *,
- method: str | None,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, *, method: str | None, axis: Axis | None, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- value,
- *,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, value, *, axis: Axis | None, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- value,
- method: str | None,
- *,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, value, method: str | None, *, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- value=...,
- method: str | None = ...,
- axis: Axis | None = ...,
- inplace: bool = ...,
- limit=...,
- downcast=...,
- ) -> DataFrame | None:
- ...
+ def fillna(self, value=..., method: str | None = ..., axis: Axis | None = ..., inplace: bool = ..., limit=..., downcast=...) -> DataFrame | None: ... # noqa: E501, E704
+ # fmt: on
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
@@ -5500,70 +5395,20 @@ def set_index(
if not inplace:
return frame
+ # fmt: off
@overload
- def reset_index(
- self,
- level: Hashable | Sequence[Hashable] | None = ...,
- drop: bool = ...,
- inplace: Literal[False] = ...,
- col_level: Hashable = ...,
- col_fill: Hashable = ...,
- ) -> DataFrame:
- ...
-
+ def reset_index(self, level: Hashable | Sequence[Hashable] | None = ..., drop: bool = ..., inplace: Literal[False] = ..., col_level: Hashable = ..., col_fill: Hashable = ...) -> DataFrame: ... # noqa: E501, E704
@overload
- def reset_index(
- self,
- level: Hashable | Sequence[Hashable] | None,
- drop: bool,
- inplace: Literal[True],
- col_level: Hashable = ...,
- col_fill: Hashable = ...,
- ) -> None:
- ...
-
+ def reset_index(self, level: Hashable | Sequence[Hashable] | None, drop: bool, inplace: Literal[True], col_level: Hashable = ..., col_fill: Hashable = ...) -> None: ... # noqa: E501, E704
@overload
- def reset_index(
- self,
- *,
- drop: bool,
- inplace: Literal[True],
- col_level: Hashable = ...,
- col_fill: Hashable = ...,
- ) -> None:
- ...
-
+ def reset_index(self, *, drop: bool, inplace: Literal[True], col_level: Hashable = ..., col_fill: Hashable = ...) -> None: ... # noqa: E501, E704
@overload
- def reset_index(
- self,
- level: Hashable | Sequence[Hashable] | None,
- *,
- inplace: Literal[True],
- col_level: Hashable = ...,
- col_fill: Hashable = ...,
- ) -> None:
- ...
-
+ def reset_index(self, level: Hashable | Sequence[Hashable] | None, *, inplace: Literal[True], col_level: Hashable = ..., col_fill: Hashable = ...) -> None: ... # noqa: E501, E704
@overload
- def reset_index(
- self,
- *,
- inplace: Literal[True],
- col_level: Hashable = ...,
- col_fill: Hashable = ...,
- ) -> None:
- ...
-
+ def reset_index(self, *, inplace: Literal[True], col_level: Hashable = ..., col_fill: Hashable = ...) -> None: ... # noqa: E501, E704
@overload
- def reset_index(
- self,
- level: Hashable | Sequence[Hashable] | None = ...,
- drop: bool = ...,
- inplace: bool = ...,
- col_level: Hashable = ...,
- col_fill: Hashable = ...,
- ) -> DataFrame | None:
- ...
+ def reset_index(self, level: Hashable | Sequence[Hashable] | None = ..., drop: bool = ..., inplace: bool = ..., col_level: Hashable = ..., col_fill: Hashable = ...) -> DataFrame | None: ... # noqa: E501, E704
+ # fmt: on
def reset_index(
self,
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c77a3717c4c03..99483d9737f09 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -681,27 +681,16 @@ def size(self) -> int:
# error: Incompatible return value type (got "number", expected "int")
return np.prod(self.shape) # type: ignore[return-value]
+ # fmt: off
@overload
- def set_axis(
- self: FrameOrSeries, labels, axis: Axis = ..., inplace: Literal[False] = ...
- ) -> FrameOrSeries:
- ...
-
+ def set_axis(self: FrameOrSeries, labels, axis: Axis = ..., inplace: Literal[False] = ...) -> FrameOrSeries: ... # noqa: E501, E704
@overload
- def set_axis(
- self: FrameOrSeries, labels, axis: Axis, inplace: Literal[True]
- ) -> None:
- ...
-
+ def set_axis(self: FrameOrSeries, labels, axis: Axis, inplace: Literal[True]) -> None: ... # noqa: E501, E704
@overload
- def set_axis(self: FrameOrSeries, labels, *, inplace: Literal[True]) -> None:
- ...
-
+ def set_axis(self: FrameOrSeries, labels, *, inplace: Literal[True]) -> None: ... # noqa: E501, E704
@overload
- def set_axis(
- self: FrameOrSeries, labels, axis: Axis = ..., inplace: bool_t = ...
- ) -> FrameOrSeries | None:
- ...
+ def set_axis(self: FrameOrSeries, labels, axis: Axis = ..., inplace: bool_t = ...) -> FrameOrSeries | None: ... # noqa: E501, E704
+ # fmt: on
def set_axis(self, labels, axis: Axis = 0, inplace: bool_t = False):
"""
@@ -7363,113 +7352,28 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace):
# GH 40420
return self.where(subset, threshold, axis=axis, inplace=inplace)
+ # fmt: off
@overload
- def clip(
- self: FrameOrSeries,
- lower=...,
- upper=...,
- axis: Axis | None = ...,
- inplace: Literal[False] = ...,
- *args,
- **kwargs,
- ) -> FrameOrSeries:
- ...
-
+ def clip(self: FrameOrSeries, lower=..., upper=..., axis: Axis | None = ..., inplace: Literal[False] = ..., *args, **kwargs) -> FrameOrSeries: ... # noqa: E501, E704
@overload
- def clip(
- self: FrameOrSeries,
- lower,
- *,
- axis: Axis | None,
- inplace: Literal[True],
- **kwargs,
- ) -> None:
- ...
-
+ def clip(self: FrameOrSeries, lower, *, axis: Axis | None, inplace: Literal[True], **kwargs) -> None: ... # noqa: E501, E704
@overload
- def clip(
- self: FrameOrSeries,
- lower,
- *,
- inplace: Literal[True],
- **kwargs,
- ) -> None:
- ...
-
+ def clip(self: FrameOrSeries, lower, *, inplace: Literal[True], **kwargs) -> None: ... # noqa: E501, E704
@overload
- def clip(
- self: FrameOrSeries,
- *,
- upper,
- axis: Axis | None,
- inplace: Literal[True],
- **kwargs,
- ) -> None:
- ...
-
+ def clip(self: FrameOrSeries, *, upper, axis: Axis | None, inplace: Literal[True], **kwargs) -> None: ... # noqa: E501, E704
@overload
- def clip(
- self: FrameOrSeries,
- *,
- upper,
- inplace: Literal[True],
- **kwargs,
- ) -> None:
- ...
-
+ def clip(self: FrameOrSeries, *, upper, inplace: Literal[True], **kwargs) -> None: ... # noqa: E501, E704
@overload
- def clip(
- self: FrameOrSeries,
- *,
- axis: Axis | None,
- inplace: Literal[True],
- **kwargs,
- ) -> None:
- ...
-
+ def clip(self: FrameOrSeries, *, axis: Axis | None, inplace: Literal[True], **kwargs) -> None: ... # noqa: E501, E704
@overload
- def clip(
- self: FrameOrSeries,
- lower,
- upper,
- axis: Axis | None,
- inplace: Literal[True],
- *args,
- **kwargs,
- ) -> None:
- ...
-
+ def clip(self: FrameOrSeries, lower, upper, axis: Axis | None, inplace: Literal[True], *args, **kwargs) -> None: ... # noqa: E501, E704
@overload
- def clip(
- self: FrameOrSeries,
- lower,
- upper,
- *,
- inplace: Literal[True],
- **kwargs,
- ) -> None:
- ...
-
+ def clip(self: FrameOrSeries, lower, upper, *, inplace: Literal[True], **kwargs) -> None: ... # noqa: E501, E704
@overload
- def clip(
- self: FrameOrSeries,
- *,
- inplace: Literal[True],
- **kwargs,
- ) -> None:
- ...
-
+ def clip(self: FrameOrSeries, *, inplace: Literal[True], **kwargs) -> None: ... # noqa: E501, E704
@overload
- def clip(
- self: FrameOrSeries,
- lower=...,
- upper=...,
- axis: Axis | None = ...,
- inplace: bool_t = ...,
- *args,
- **kwargs,
- ) -> FrameOrSeries | None:
- ...
+ def clip(self: FrameOrSeries, lower=..., upper=..., axis: Axis | None = ..., inplace: bool_t = ..., *args, **kwargs) -> FrameOrSeries | None: ... # noqa: E501, E704
+ # fmt: on
@final
def clip(
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index b3b453ea6355a..7e852a0e1352d 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -52,36 +52,12 @@
# Concatenate DataFrame objects
+# fmt: off
@overload
-def concat(
- objs: Iterable[DataFrame] | Mapping[Hashable, DataFrame],
- axis=0,
- join: str = "outer",
- ignore_index: bool = False,
- keys=None,
- levels=None,
- names=None,
- verify_integrity: bool = False,
- sort: bool = False,
- copy: bool = True,
-) -> DataFrame:
- ...
-
-
+def concat(objs: Iterable[DataFrame] | Mapping[Hashable, DataFrame], axis=0, join: str = "outer", ignore_index: bool = False, keys=None, levels=None, names=None, verify_integrity: bool = False, sort: bool = False, copy: bool = True) -> DataFrame: ... # noqa: E501, E704
@overload
-def concat(
- objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame],
- axis=0,
- join: str = "outer",
- ignore_index: bool = False,
- keys=None,
- levels=None,
- names=None,
- verify_integrity: bool = False,
- sort: bool = False,
- copy: bool = True,
-) -> FrameOrSeriesUnion:
- ...
+def concat(objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame], axis=0, join: str = "outer", ignore_index: bool = False, keys=None, levels=None, names=None, verify_integrity: bool = False, sort: bool = False, copy: bool = True) -> FrameOrSeriesUnion: ... # noqa: E501, E704
+# fmt: on
def concat(
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 85c30096b1001..c4551d93ffb2d 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2007,21 +2007,16 @@ def unique(self) -> ArrayLike:
"""
return super().unique()
+ # fmt: off
@overload
- def drop_duplicates(self, keep=..., inplace: Literal[False] = ...) -> Series:
- ...
-
+ def drop_duplicates(self, keep=..., inplace: Literal[False] = ...) -> Series: ... # noqa: E501, E704
@overload
- def drop_duplicates(self, keep, inplace: Literal[True]) -> None:
- ...
-
+ def drop_duplicates(self, keep, inplace: Literal[True]) -> None: ... # noqa: E704
@overload
- def drop_duplicates(self, *, inplace: Literal[True]) -> None:
- ...
-
+ def drop_duplicates(self, *, inplace: Literal[True]) -> None: ... # noqa: E704
@overload
- def drop_duplicates(self, keep=..., inplace: bool = ...) -> Series | None:
- ...
+ def drop_duplicates(self, keep=..., inplace: bool = ...) -> Series | None: ... # noqa: E501, E704
+ # fmt: on
def drop_duplicates(self, keep="first", inplace=False) -> Series | None:
"""
@@ -4428,23 +4423,16 @@ def rename(
else:
return self._set_name(index, inplace=inplace)
+ # fmt: off
@overload
- def set_axis(
- self, labels, axis: Axis = ..., inplace: Literal[False] = ...
- ) -> Series:
- ...
-
+ def set_axis(self, labels, axis: Axis = ..., inplace: Literal[False] = ...) -> Series: ... # noqa: E501, E704
@overload
- def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None:
- ...
-
+ def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None: ... # noqa: E501, E704
@overload
- def set_axis(self, labels, *, inplace: Literal[True]) -> None:
- ...
-
+ def set_axis(self, labels, *, inplace: Literal[True]) -> None: ... # noqa: E704
@overload
- def set_axis(self, labels, axis: Axis = ..., inplace: bool = ...) -> Series | None:
- ...
+ def set_axis(self, labels, axis: Axis = ..., inplace: bool = ...) -> Series | None: ... # noqa: E501, E704
+ # fmt: on
@Appender(
"""
@@ -4591,120 +4579,28 @@ def drop(
errors=errors,
)
+ # fmt: off
@overload
- def fillna(
- self,
- value=...,
- method: str | None = ...,
- axis: Axis | None = ...,
- inplace: Literal[False] = ...,
- limit=...,
- downcast=...,
- ) -> Series:
- ...
-
+ def fillna(self, value=..., method: str | None = ..., axis: Axis | None = ..., inplace: Literal[False] = ..., limit=..., downcast=...) -> Series: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- value,
- method: str | None,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, value, method: str | None, axis: Axis | None, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- *,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, *, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- value,
- *,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, value, *, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- *,
- method: str | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, *, method: str | None, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- *,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, *, axis: Axis | None, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- *,
- method: str | None,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, *, method: str | None, axis: Axis | None, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- value,
- *,
- axis: Axis | None,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, value, *, axis: Axis | None, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- value,
- method: str | None,
- *,
- inplace: Literal[True],
- limit=...,
- downcast=...,
- ) -> None:
- ...
-
+ def fillna(self, value, method: str | None, *, inplace: Literal[True], limit=..., downcast=...) -> None: ... # noqa: E501, E704
@overload
- def fillna(
- self,
- value=...,
- method: str | None = ...,
- axis: Axis | None = ...,
- inplace: bool = ...,
- limit=...,
- downcast=...,
- ) -> Series | None:
- ...
+ def fillna(self, value=..., method: str | None = ..., axis: Axis | None = ..., inplace: bool = ..., limit=..., downcast=...) -> Series | None: ... # noqa: E501, E704
+ # fmt: on
# error: Cannot determine type of 'fillna'
@doc(NDFrame.fillna, **_shared_doc_kwargs) # type: ignore[has-type]
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index bb37f670ed302..d107118db721c 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -641,55 +641,14 @@ def _adjust_to_origin(arg, origin, unit):
return arg
+# fmt: off
@overload
-def to_datetime(
- arg: DatetimeScalar,
- errors: str = ...,
- dayfirst: bool = ...,
- yearfirst: bool = ...,
- utc: bool | None = ...,
- format: str | None = ...,
- exact: bool = ...,
- unit: str | None = ...,
- infer_datetime_format: bool = ...,
- origin=...,
- cache: bool = ...,
-) -> DatetimeScalar | NaTType:
- ...
-
-
+def to_datetime(arg: DatetimeScalar, errors: str = ..., dayfirst: bool = ..., yearfirst: bool = ..., utc: bool | None = ..., format: str | None = ..., exact: bool = ..., unit: str | None = ..., infer_datetime_format: bool = ..., origin=..., cache: bool = ...) -> DatetimeScalar | NaTType: ... # noqa: E501, E704
@overload
-def to_datetime(
- arg: Series,
- errors: str = ...,
- dayfirst: bool = ...,
- yearfirst: bool = ...,
- utc: bool | None = ...,
- format: str | None = ...,
- exact: bool = ...,
- unit: str | None = ...,
- infer_datetime_format: bool = ...,
- origin=...,
- cache: bool = ...,
-) -> Series:
- ...
-
-
+def to_datetime(arg: Series, errors: str = ..., dayfirst: bool = ..., yearfirst: bool = ..., utc: bool | None = ..., format: str | None = ..., exact: bool = ..., unit: str | None = ..., infer_datetime_format: bool = ..., origin=..., cache: bool = ...) -> Series: ... # noqa: E501, E704
@overload
-def to_datetime(
- arg: list | tuple | np.ndarray,
- errors: str = ...,
- dayfirst: bool = ...,
- yearfirst: bool = ...,
- utc: bool | None = ...,
- format: str | None = ...,
- exact: bool = ...,
- unit: str | None = ...,
- infer_datetime_format: bool = ...,
- origin=...,
- cache: bool = ...,
-) -> DatetimeIndex:
- ...
+def to_datetime(arg: list | tuple | np.ndarray, errors: str = ..., dayfirst: bool = ..., yearfirst: bool = ..., utc: bool | None = ..., format: str | None = ..., exact: bool = ..., unit: str | None = ..., infer_datetime_format: bool = ..., origin=..., cache: bool = ...) -> DatetimeIndex: ... # noqa: E501, E704
+# fmt: on
def to_datetime(
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index b323ce39763a1..704aa58674162 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -42,28 +42,12 @@ def __exit__(self, exc_type, exc_value, traceback):
self.close()
+# fmt: off
@overload
-def read_sas(
- filepath_or_buffer: FilePathOrBuffer,
- format: str | None = ...,
- index: Hashable | None = ...,
- encoding: str | None = ...,
- chunksize: int = ...,
- iterator: bool = ...,
-) -> ReaderBase:
- ...
-
-
+def read_sas(filepath_or_buffer: FilePathOrBuffer, format: str | None = ..., index: Hashable | None = ..., encoding: str | None = ..., chunksize: int = ..., iterator: bool = ...) -> ReaderBase: ... # noqa: E501, E704
@overload
-def read_sas(
- filepath_or_buffer: FilePathOrBuffer,
- format: str | None = ...,
- index: Hashable | None = ...,
- encoding: str | None = ...,
- chunksize: None = ...,
- iterator: bool = ...,
-) -> DataFrame | ReaderBase:
- ...
+def read_sas(filepath_or_buffer: FilePathOrBuffer, format: str | None = ..., index: Hashable | None = ..., encoding: str | None = ..., chunksize: None = ..., iterator: bool = ...) -> DataFrame | ReaderBase: ... # noqa: E501, E704
+# fmt: on
def read_sas(
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index d797fa51984d6..235433ca483af 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -211,32 +211,12 @@ def execute(sql, con, cur=None, params=None):
# -- Read and write to DataFrames
+# fmt: off
@overload
-def read_sql_table(
- table_name,
- con,
- schema=None,
- index_col=None,
- coerce_float=True,
- parse_dates=None,
- columns=None,
- chunksize: None = None,
-) -> DataFrame:
- ...
-
-
+def read_sql_table(table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: None = None) -> DataFrame: ... # noqa: E501, E704
@overload
-def read_sql_table(
- table_name,
- con,
- schema=None,
- index_col=None,
- coerce_float=True,
- parse_dates=None,
- columns=None,
- chunksize: int = 1,
-) -> Iterator[DataFrame]:
- ...
+def read_sql_table(table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: int = 1) -> Iterator[DataFrame]: ... # noqa: E501, E704
+# fmt: on
def read_sql_table(
@@ -334,32 +314,12 @@ def read_sql_table(
raise ValueError(f"Table {table_name} not found", con)
+# fmt: off
@overload
-def read_sql_query(
- sql,
- con,
- index_col=None,
- coerce_float=True,
- params=None,
- parse_dates=None,
- chunksize: None = None,
- dtype: DtypeArg | None = None,
-) -> DataFrame:
- ...
-
-
+def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: None = None, dtype: DtypeArg | None = None) -> DataFrame: ... # noqa: E501, E704
@overload
-def read_sql_query(
- sql,
- con,
- index_col=None,
- coerce_float=True,
- params=None,
- parse_dates=None,
- chunksize: int = 1,
- dtype: DtypeArg | None = None,
-) -> Iterator[DataFrame]:
- ...
+def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: int = 1, dtype: DtypeArg | None = None) -> Iterator[DataFrame]: ... # noqa: E501, E704
+# fmt: on
def read_sql_query(
@@ -441,32 +401,12 @@ def read_sql_query(
)
+# fmt: off
@overload
-def read_sql(
- sql,
- con,
- index_col=None,
- coerce_float=True,
- params=None,
- parse_dates=None,
- columns=None,
- chunksize: None = None,
-) -> DataFrame:
- ...
-
-
+def read_sql(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: None = None) -> DataFrame: ... # noqa: E501, E704
@overload
-def read_sql(
- sql,
- con,
- index_col=None,
- coerce_float=True,
- params=None,
- parse_dates=None,
- columns=None,
- chunksize: int = 1,
-) -> Iterator[DataFrame]:
- ...
+def read_sql(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: int = 1) -> Iterator[DataFrame]: ... # noqa: E501, E704
+# fmt: on
def read_sql(
| Following on from the monthly meeting's discussion, some things were suggested:
- turning off the black formatter for the overloads, keeping them in `.py` files (what I've done here)
- moving overloads to `.pyi` files, writing a script to keep them in sync (I tried that this, and it seems that `mypy` behaves a bit differently with a `.pyi` file than it does with a `.py` file - will look into this more closely, but it'll take more time)
- keeping overloads out of `.py` files and "injecting" them before running `mypy` via a script - IMO this might be slightly disruptive to the usual workflow of just running `mypy pandas`, but other than that seems like a potentially good idea
- doing away with overloads, and just using `cast` and `assert` - TBH this strikes me as more error-prone, and if pandas will expose types to users, then I think it'd be better to have the correct type revealed rather than having them do `cast` / `assert`
Even if `inplace` is removed in version 2, there are other places in the codebase where there's a parameter which changes the return type, so I do think finding a solution here is worthwhile.
For now, I've gone with the first solution, though I will give some more thought to the second one | https://api.github.com/repos/pandas-dev/pandas/pulls/40973 | 2021-04-16T09:48:37Z | 2021-04-25T19:41:22Z | null | 2021-04-25T19:43:53Z |
BUG: Fix pd.read_orc raising AttributeError | diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst
index b6351ac2232ff..89b21d1984ad3 100644
--- a/doc/source/getting_started/install.rst
+++ b/doc/source/getting_started/install.rst
@@ -362,6 +362,21 @@ pyarrow 0.15.0 Parquet, ORC, and feather reading /
pyreadstat SPSS files (.sav) reading
========================= ================== =============================================================
+.. _install.warn_orc:
+
+.. warning::
+
+ * If you want to use :func:`~pandas.read_orc`, it is highly recommended to install pyarrow using conda.
+ The following is a summary of the environment in which :func:`~pandas.read_orc` can work.
+
+ ========================= ================== =============================================================
+ System Conda PyPI
+ ========================= ================== =============================================================
+ Linux Successful Failed(pyarrow==3.0 Successful)
+ macOS Successful Failed
+ Windows Failed Failed
+ ========================= ================== =============================================================
+
Access data in the cloud
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 3b7a6037a9715..5148bb87b0eb0 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -5443,6 +5443,11 @@ Similar to the :ref:`parquet <io.parquet>` format, the `ORC Format <https://orc.
for data frames. It is designed to make reading data frames efficient. pandas provides *only* a reader for the
ORC format, :func:`~pandas.read_orc`. This requires the `pyarrow <https://arrow.apache.org/docs/python/>`__ library.
+.. warning::
+
+ * It is *highly recommended* to install pyarrow using conda due to some issues occurred by pyarrow.
+ * :func:`~pandas.read_orc` is not supported on Windows yet, you can find valid environments on :ref:`install optional dependencies <install.warn_orc>`.
+
.. _io.sql:
SQL queries
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 85d9acff353be..58f46206b6d57 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -783,6 +783,7 @@ I/O
- Bug in :func:`read_sas` raising ``ValueError`` when ``datetimes`` were null (:issue:`39725`)
- Bug in :func:`read_excel` dropping empty values from single-column spreadsheets (:issue:`39808`)
- Bug in :meth:`DataFrame.to_string` misplacing the truncation column when ``index=False`` (:issue:`40907`)
+- Bug in :func:`read_orc` always raising ``AttributeError`` (:issue:`40918`)
Period
^^^^^^
diff --git a/pandas/io/orc.py b/pandas/io/orc.py
index db14a07e4b91b..6bdb4df806b5c 100644
--- a/pandas/io/orc.py
+++ b/pandas/io/orc.py
@@ -1,10 +1,10 @@
""" orc compat """
from __future__ import annotations
-import distutils
from typing import TYPE_CHECKING
from pandas._typing import FilePathOrBuffer
+from pandas.compat._optional import import_optional_dependency
from pandas.io.common import get_handle
@@ -42,13 +42,16 @@ def read_orc(
Returns
-------
DataFrame
+
+ Notes
+ -------
+ Before using this function you should read the :ref:`user guide about ORC <io.orc>`
+ and :ref:`install optional dependencies <install.warn_orc>`.
"""
# we require a newer version of pyarrow than we support for parquet
- import pyarrow
- if distutils.version.LooseVersion(pyarrow.__version__) < "0.13.0":
- raise ImportError("pyarrow must be >= 0.13.0 for read_orc")
+ orc = import_optional_dependency("pyarrow.orc")
with get_handle(path, "rb", is_text=False) as handles:
- orc_file = pyarrow.orc.ORCFile(handles.handle)
+ orc_file = orc.ORCFile(handles.handle)
return orc_file.read(columns=columns, **kwargs).to_pandas()
diff --git a/pandas/tests/io/test_orc.py b/pandas/tests/io/test_orc.py
index a1f9c6f6af51a..f34e9b940317d 100644
--- a/pandas/tests/io/test_orc.py
+++ b/pandas/tests/io/test_orc.py
@@ -9,7 +9,6 @@
from pandas import read_orc
import pandas._testing as tm
-pytest.importorskip("pyarrow", minversion="0.13.0")
pytest.importorskip("pyarrow.orc")
pytestmark = pytest.mark.filterwarnings(
| - [x] closes #40918
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
---
I am not sure if this is a reasonable solution. Cause If users install PyArrow from PyPI on MacOS or Win10(works fine on Linux), it will always raise an `AttributeError` exception.
Should we add some operating system requirments to `pd.read_orc`? ([Related to PyArrow's JIRA #7811](https://issues.apache.org/jira/browse/ARROW-7811)).
| https://api.github.com/repos/pandas-dev/pandas/pulls/40970 | 2021-04-16T06:25:47Z | 2021-04-21T12:41:03Z | 2021-04-21T12:41:02Z | 2021-04-25T02:41:50Z |
Updated qcut for Float64DType Issue #40730 | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 85d9acff353be..4ef8fe116596f 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -695,7 +695,7 @@ Conversion
- Bug in :class:`Index` construction silently ignoring a passed ``dtype`` when the data cannot be cast to that dtype (:issue:`21311`)
- Bug in :meth:`StringArray.astype` falling back to numpy and raising when converting to ``dtype='categorical'`` (:issue:`40450`)
- Bug in :class:`DataFrame` construction with a dictionary containing an arraylike with ``ExtensionDtype`` and ``copy=True`` failing to make a copy (:issue:`38939`)
--
+- Bug in :meth:`qcut` raising error when taking ``Float64DType`` as input (:issue:`40730`)
Strings
^^^^^^^
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 41e1ff41d9ba2..7b9c3883d74e3 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -24,8 +24,8 @@
is_datetime_or_timedelta_dtype,
is_extension_array_dtype,
is_integer,
- is_integer_dtype,
is_list_like,
+ is_numeric_dtype,
is_scalar,
is_timedelta64_dtype,
)
@@ -488,7 +488,7 @@ def _coerce_to_type(x):
# Will properly support in the future.
# https://github.com/pandas-dev/pandas/pull/31290
# https://github.com/pandas-dev/pandas/issues/31389
- elif is_extension_array_dtype(x.dtype) and is_integer_dtype(x.dtype):
+ elif is_extension_array_dtype(x.dtype) and is_numeric_dtype(x.dtype):
x = x.to_numpy(dtype=np.float64, na_value=np.nan)
if dtype is not None:
diff --git a/pandas/tests/reshape/test_qcut.py b/pandas/tests/reshape/test_qcut.py
index 7996c15ae8e64..c12d28f6f1380 100644
--- a/pandas/tests/reshape/test_qcut.py
+++ b/pandas/tests/reshape/test_qcut.py
@@ -293,8 +293,8 @@ def test_qcut_bool_coercion_to_int(bins, box, compare):
@pytest.mark.parametrize("q", [2, 5, 10])
-def test_qcut_nullable_integer(q, any_nullable_int_dtype):
- arr = pd.array(np.arange(100), dtype=any_nullable_int_dtype)
+def test_qcut_nullable_integer(q, any_nullable_numeric_dtype):
+ arr = pd.array(np.arange(100), dtype=any_nullable_numeric_dtype)
arr[::2] = pd.NA
result = qcut(arr, q)
| This PR is used to address #40730 . qcut is now able to support both intdtype and floatdtype | https://api.github.com/repos/pandas-dev/pandas/pulls/40969 | 2021-04-16T04:14:58Z | 2021-04-26T12:04:15Z | 2021-04-26T12:04:15Z | 2021-04-26T12:04:23Z |
CLN: de-duplicate reindex/align code | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bb8de35d22462..cbc353eead464 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8813,15 +8813,7 @@ def _align_frame(
right = right.fillna(method=method, axis=fill_axis, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
- if is_datetime64tz_dtype(left.index.dtype):
- if left.index.tz != right.index.tz:
- if join_index is not None:
- # GH#33671 ensure we don't change the index on
- # our original Series (NB: by default deep=False)
- left = left.copy()
- right = right.copy()
- left.index = join_index
- right.index = join_index
+ left, right = _align_as_utc(left, right, join_index)
return (
left.__finalize__(self),
@@ -8863,27 +8855,18 @@ def _align_series(
else:
# one has > 1 ndim
fdata = self._mgr
- if axis == 0:
- join_index = self.index
+ if axis in [0, 1]:
+ join_index = self.axes[axis]
lidx, ridx = None, None
- if not self.index.equals(other.index):
- join_index, lidx, ridx = self.index.join(
+ if not join_index.equals(other.index):
+ join_index, lidx, ridx = join_index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
- fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
+ bm_axis = self._get_block_manager_axis(axis)
+ fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis)
- elif axis == 1:
- join_index = self.columns
- lidx, ridx = None, None
- if not self.columns.equals(other.index):
- join_index, lidx, ridx = self.columns.join(
- other.index, how=join, level=level, return_indexers=True
- )
-
- if lidx is not None:
- fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError("Must specify axis=0 or 1")
@@ -8905,15 +8888,7 @@ def _align_series(
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
- if is_datetime64tz_dtype(left.index.dtype):
- if left.index.tz != right.index.tz:
- if join_index is not None:
- # GH#33671 ensure we don't change the index on
- # our original Series (NB: by default deep=False)
- left = left.copy()
- right = right.copy()
- left.index = join_index
- right.index = join_index
+ left, right = _align_as_utc(left, right, join_index)
return (
left.__finalize__(self),
@@ -11887,3 +11862,23 @@ def _doc_params(cls):
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
"""
+
+
+def _align_as_utc(
+ left: FrameOrSeries, right: FrameOrSeries, join_index: Index | None
+) -> tuple[FrameOrSeries, FrameOrSeries]:
+ """
+ If we are aligning timezone-aware DatetimeIndexes and the timezones
+ do not match, convert both to UTC.
+ """
+ if is_datetime64tz_dtype(left.index.dtype):
+ if left.index.tz != right.index.tz:
+ if join_index is not None:
+ # GH#33671 ensure we don't change the index on
+ # our original Series (NB: by default deep=False)
+ left = left.copy()
+ right = right.copy()
+ left.index = join_index
+ right.index = join_index
+
+ return left, right
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40968 | 2021-04-15T20:54:49Z | 2021-04-16T01:09:45Z | 2021-04-16T01:09:45Z | 2021-04-16T01:14:56Z |
TST: Add test for union with duplicates | diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index c9d034361d8c4..75fc7a782772a 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -2418,10 +2418,16 @@ def test_diff_low_precision_int(self, dtype):
tm.assert_numpy_array_equal(result, expected)
-def test_union_with_duplicates():
+@pytest.mark.parametrize("op", [np.array, pd.array])
+def test_union_with_duplicates(op):
# GH#36289
- lvals = np.array([3, 1, 3, 4])
- rvals = np.array([2, 3, 1, 1])
- result = algos.union_with_duplicates(lvals, rvals)
- expected = np.array([3, 3, 1, 1, 4, 2])
- tm.assert_numpy_array_equal(result, expected)
+ lvals = op([3, 1, 3, 4])
+ rvals = op([2, 3, 1, 1])
+ expected = op([3, 3, 1, 1, 4, 2])
+ if isinstance(expected, np.ndarray):
+ result = algos.union_with_duplicates(lvals, rvals)
+ tm.assert_numpy_array_equal(result, expected)
+ else:
+ with tm.assert_produces_warning(RuntimeWarning):
+ result = algos.union_with_duplicates(lvals, rvals)
+ tm.assert_extension_array_equal(result, expected)
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
| https://api.github.com/repos/pandas-dev/pandas/pulls/40967 | 2021-04-15T20:24:41Z | 2021-04-20T22:51:39Z | 2021-04-20T22:51:38Z | 2021-04-21T20:40:26Z |
REF: move union_categoricals call outside of cython | diff --git a/pandas/_libs/parsers.pyi b/pandas/_libs/parsers.pyi
index 1051c319b769b..18ae23e7fb90d 100644
--- a/pandas/_libs/parsers.pyi
+++ b/pandas/_libs/parsers.pyi
@@ -58,7 +58,6 @@ class TextReader:
true_values=...,
false_values=...,
allow_leading_cols: bool = ...,
- low_memory: bool = ...,
skiprows=...,
skipfooter: int = ..., # int64_t
verbose: bool = ...,
@@ -75,3 +74,4 @@ class TextReader:
def close(self) -> None: ...
def read(self, rows: int | None = ...) -> dict[int, ArrayLike]: ...
+ def read_low_memory(self, rows: int | None) -> list[dict[int, ArrayLike]]: ...
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 1a5ac31cc821b..2abb7e0ea3ac2 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -94,7 +94,6 @@ from pandas._libs.khash cimport (
)
from pandas.errors import (
- DtypeWarning,
EmptyDataError,
ParserError,
ParserWarning,
@@ -108,9 +107,7 @@ from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
is_object_dtype,
- pandas_dtype,
)
-from pandas.core.dtypes.concat import union_categoricals
cdef:
float64_t INF = <float64_t>np.inf
@@ -317,7 +314,7 @@ cdef class TextReader:
cdef public:
int64_t leading_cols, table_width, skipfooter, buffer_lines
- bint allow_leading_cols, mangle_dupe_cols, low_memory
+ bint allow_leading_cols, mangle_dupe_cols
bint delim_whitespace
object delimiter # bytes or str
object converters
@@ -362,7 +359,6 @@ cdef class TextReader:
true_values=None,
false_values=None,
bint allow_leading_cols=True,
- bint low_memory=False,
skiprows=None,
skipfooter=0, # int64_t
bint verbose=False,
@@ -479,7 +475,6 @@ cdef class TextReader:
self.na_filter = na_filter
self.verbose = verbose
- self.low_memory = low_memory
if float_precision == "round_trip":
# see gh-15140
@@ -492,12 +487,10 @@ cdef class TextReader:
raise ValueError(f'Unrecognized float_precision option: '
f'{float_precision}')
- if isinstance(dtype, dict):
- dtype = {k: pandas_dtype(dtype[k])
- for k in dtype}
- elif dtype is not None:
- dtype = pandas_dtype(dtype)
-
+ # Caller is responsible for ensuring we have one of
+ # - None
+ # - DtypeObj
+ # - dict[Any, DtypeObj]
self.dtype = dtype
# XXX
@@ -774,17 +767,18 @@ cdef class TextReader:
"""
rows=None --> read all rows
"""
- if self.low_memory:
- # Conserve intermediate space
- columns = self._read_low_memory(rows)
- else:
- # Don't care about memory usage
- columns = self._read_rows(rows, 1)
+ # Don't care about memory usage
+ columns = self._read_rows(rows, 1)
return columns
- # -> dict[int, "ArrayLike"]
- cdef _read_low_memory(self, rows):
+ def read_low_memory(self, rows: int | None)-> list[dict[int, "ArrayLike"]]:
+ """
+ rows=None --> read all rows
+ """
+ # Conserve intermediate space
+ # Caller is responsible for concatenating chunks,
+ # see c_parser_wrapper._concatenatve_chunks
cdef:
size_t rows_read = 0
list chunks = []
@@ -819,8 +813,7 @@ cdef class TextReader:
if len(chunks) == 0:
raise StopIteration
- # destructive to chunks
- return _concatenate_chunks(chunks)
+ return chunks
cdef _tokenize_rows(self, size_t nrows):
cdef:
@@ -1908,49 +1901,6 @@ cdef raise_parser_error(object base, parser_t *parser):
raise ParserError(message)
-# chunks: list[dict[int, "ArrayLike"]]
-# -> dict[int, "ArrayLike"]
-def _concatenate_chunks(list chunks) -> dict:
- cdef:
- list names = list(chunks[0].keys())
- object name
- list warning_columns = []
- object warning_names
- object common_type
-
- result = {}
- for name in names:
- arrs = [chunk.pop(name) for chunk in chunks]
- # Check each arr for consistent types.
- dtypes = {a.dtype for a in arrs}
- numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)}
- if len(numpy_dtypes) > 1:
- common_type = np.find_common_type(numpy_dtypes, [])
- if common_type == object:
- warning_columns.append(str(name))
-
- dtype = dtypes.pop()
- if is_categorical_dtype(dtype):
- sort_categories = isinstance(dtype, str)
- result[name] = union_categoricals(arrs,
- sort_categories=sort_categories)
- else:
- if is_extension_array_dtype(dtype):
- array_type = dtype.construct_array_type()
- result[name] = array_type._concat_same_type(arrs)
- else:
- result[name] = np.concatenate(arrs)
-
- if warning_columns:
- warning_names = ','.join(warning_columns)
- warning_message = " ".join([
- f"Columns ({warning_names}) have mixed types."
- f"Specify dtype option on import or set low_memory=False."
- ])
- warnings.warn(warning_message, DtypeWarning, stacklevel=8)
- return result
-
-
# ----------------------------------------------------------------------
# NA values
def _compute_na_values():
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index fb110706c3fb4..fbf2a53207f75 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -1,5 +1,22 @@
+from __future__ import annotations
+
+import warnings
+
+import numpy as np
+
import pandas._libs.parsers as parsers
-from pandas._typing import FilePathOrBuffer
+from pandas._typing import (
+ ArrayLike,
+ FilePathOrBuffer,
+)
+from pandas.errors import DtypeWarning
+
+from pandas.core.dtypes.common import (
+ is_categorical_dtype,
+ pandas_dtype,
+)
+from pandas.core.dtypes.concat import union_categoricals
+from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.indexes.api import ensure_index_from_sequences
@@ -10,12 +27,16 @@
class CParserWrapper(ParserBase):
+ low_memory: bool
+
def __init__(self, src: FilePathOrBuffer, **kwds):
self.kwds = kwds
kwds = kwds.copy()
ParserBase.__init__(self, kwds)
+ self.low_memory = kwds.pop("low_memory", False)
+
# #2442
# error: Cannot determine type of 'index_col'
kwds["allow_leading_cols"] = (
@@ -31,6 +52,7 @@ def __init__(self, src: FilePathOrBuffer, **kwds):
for key in ("storage_options", "encoding", "memory_map", "compression"):
kwds.pop(key, None)
+ kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None))
try:
self._reader = parsers.TextReader(self.handles.handle, **kwds)
except Exception:
@@ -187,7 +209,13 @@ def set_error_bad_lines(self, status):
def read(self, nrows=None):
try:
- data = self._reader.read(nrows)
+ if self.low_memory:
+ chunks = self._reader.read_low_memory(nrows)
+ # destructive to chunks
+ data = _concatenate_chunks(chunks)
+
+ else:
+ data = self._reader.read(nrows)
except StopIteration:
# error: Cannot determine type of '_first_chunk'
if self._first_chunk: # type: ignore[has-type]
@@ -294,7 +322,76 @@ def _get_index_names(self):
return names, idx_names
- def _maybe_parse_dates(self, values, index, try_parse_dates=True):
+ def _maybe_parse_dates(self, values, index: int, try_parse_dates=True):
if try_parse_dates and self._should_parse_dates(index):
values = self._date_conv(values)
return values
+
+
+def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
+ """
+ Concatenate chunks of data read with low_memory=True.
+
+ The tricky part is handling Categoricals, where different chunks
+ may have different inferred categories.
+ """
+ names = list(chunks[0].keys())
+ warning_columns = []
+
+ result = {}
+ for name in names:
+ arrs = [chunk.pop(name) for chunk in chunks]
+ # Check each arr for consistent types.
+ dtypes = {a.dtype for a in arrs}
+ # TODO: shouldn't we exclude all EA dtypes here?
+ numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)}
+ if len(numpy_dtypes) > 1:
+ # error: Argument 1 to "find_common_type" has incompatible type
+ # "Set[Any]"; expected "Sequence[Union[dtype[Any], None, type,
+ # _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any,
+ # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]]"
+ common_type = np.find_common_type(
+ numpy_dtypes, # type: ignore[arg-type]
+ [],
+ )
+ if common_type == object:
+ warning_columns.append(str(name))
+
+ dtype = dtypes.pop()
+ if is_categorical_dtype(dtype):
+ result[name] = union_categoricals(arrs, sort_categories=False)
+ else:
+ if isinstance(dtype, ExtensionDtype):
+ # TODO: concat_compat?
+ array_type = dtype.construct_array_type()
+ # error: Argument 1 to "_concat_same_type" of "ExtensionArray"
+ # has incompatible type "List[Union[ExtensionArray, ndarray]]";
+ # expected "Sequence[ExtensionArray]"
+ result[name] = array_type._concat_same_type(
+ arrs # type: ignore[arg-type]
+ )
+ else:
+ result[name] = np.concatenate(arrs)
+
+ if warning_columns:
+ warning_names = ",".join(warning_columns)
+ warning_message = " ".join(
+ [
+ f"Columns ({warning_names}) have mixed types."
+ f"Specify dtype option on import or set low_memory=False."
+ ]
+ )
+ warnings.warn(warning_message, DtypeWarning, stacklevel=8)
+ return result
+
+
+def ensure_dtype_objs(dtype):
+ """
+ Ensure we have either None, a dtype object, or a dictionary mapping to
+ dtype objects.
+ """
+ if isinstance(dtype, dict):
+ dtype = {k: pandas_dtype(dtype[k]) for k in dtype}
+ elif dtype is not None:
+ dtype = pandas_dtype(dtype)
+ return dtype
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index 104cf56419bfd..7f84c5e378d16 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -21,6 +21,7 @@
TextFileReader,
read_csv,
)
+from pandas.io.parsers.c_parser_wrapper import ensure_dtype_objs
class TestTextReader:
@@ -206,6 +207,8 @@ def test_numpy_string_dtype(self):
aaaaa,5"""
def _make_reader(**kwds):
+ if "dtype" in kwds:
+ kwds["dtype"] = ensure_dtype_objs(kwds["dtype"])
return TextReader(StringIO(data), delimiter=",", header=None, **kwds)
reader = _make_reader(dtype="S5,i4")
@@ -233,6 +236,8 @@ def test_pass_dtype(self):
4,d"""
def _make_reader(**kwds):
+ if "dtype" in kwds:
+ kwds["dtype"] = ensure_dtype_objs(kwds["dtype"])
return TextReader(StringIO(data), delimiter=",", **kwds)
reader = _make_reader(dtype={"one": "u1", 1: "S1"})
| There's no real perf bump to calling union_categoricals inside cython, better to do it in the python code where we can e.g. get the benefit of mypy. Plus gets us closer to dependency structure goals.
| https://api.github.com/repos/pandas-dev/pandas/pulls/40964 | 2021-04-15T17:04:55Z | 2021-04-28T14:53:58Z | 2021-04-28T14:53:58Z | 2021-04-28T15:54:28Z |
[ArrowStringArray] TST: more parameterised testing - part 4 | diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index d23c44733949a..bebe6948cff9c 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -10,6 +10,7 @@
import pytest
from pandas._libs.tslibs import iNaT
+import pandas.util._test_decorators as td
from pandas import (
NA,
@@ -246,25 +247,34 @@ def test_td64_series_astype_object(self):
assert result.dtype == np.object_
@pytest.mark.parametrize(
- "values",
+ "data, dtype",
[
- Series(["x", "y", "z"], dtype="string"),
- Series(["x", "y", "z"], dtype="category"),
- Series(3 * [Timestamp("2020-01-01", tz="UTC")]),
- Series(3 * [Interval(0, 1)]),
+ (["x", "y", "z"], "string"),
+ pytest.param(
+ ["x", "y", "z"],
+ "arrow_string",
+ marks=td.skip_if_no("pyarrow", min_version="1.0.0"),
+ ),
+ (["x", "y", "z"], "category"),
+ (3 * [Timestamp("2020-01-01", tz="UTC")], None),
+ (3 * [Interval(0, 1)], None),
],
)
@pytest.mark.parametrize("errors", ["raise", "ignore"])
- def test_astype_ignores_errors_for_extension_dtypes(self, values, errors):
+ def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors):
# https://github.com/pandas-dev/pandas/issues/35471
+
+ from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
+
+ ser = Series(data, dtype=dtype)
if errors == "ignore":
- expected = values
- result = values.astype(float, errors="ignore")
+ expected = ser
+ result = ser.astype(float, errors="ignore")
tm.assert_series_equal(result, expected)
else:
msg = "(Cannot cast)|(could not convert)"
with pytest.raises((ValueError, TypeError), match=msg):
- values.astype(float, errors=errors)
+ ser.astype(float, errors=errors)
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
def test_astype_from_float_to_str(self, dtype):
diff --git a/pandas/tests/series/methods/test_update.py b/pandas/tests/series/methods/test_update.py
index 4f585a6ea029a..9a64877cb92ff 100644
--- a/pandas/tests/series/methods/test_update.py
+++ b/pandas/tests/series/methods/test_update.py
@@ -1,6 +1,8 @@
import numpy as np
import pytest
+import pandas.util._test_decorators as td
+
from pandas import (
CategoricalDtype,
DataFrame,
@@ -9,6 +11,7 @@
Timestamp,
)
import pandas._testing as tm
+from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
class TestUpdate:
@@ -82,37 +85,38 @@ def test_update_from_non_series(self, series, other, expected):
tm.assert_series_equal(series, expected)
@pytest.mark.parametrize(
- "result, target, expected",
+ "data, other, expected, dtype",
[
- (
- Series(["a", None], dtype="string"),
- Series([None, "b"], dtype="string"),
- Series(["a", "b"], dtype="string"),
- ),
- (
- Series([1, None], dtype="Int64"),
- Series([None, 2], dtype="Int64"),
- Series([1, 2], dtype="Int64"),
+ (["a", None], [None, "b"], ["a", "b"], "string"),
+ pytest.param(
+ ["a", None],
+ [None, "b"],
+ ["a", "b"],
+ "arrow_string",
+ marks=td.skip_if_no("pyarrow", min_version="1.0.0"),
),
+ ([1, None], [None, 2], [1, 2], "Int64"),
+ ([True, None], [None, False], [True, False], "boolean"),
(
- Series([True, None], dtype="boolean"),
- Series([None, False], dtype="boolean"),
- Series([True, False], dtype="boolean"),
+ ["a", None],
+ [None, "b"],
+ ["a", "b"],
+ CategoricalDtype(categories=["a", "b"]),
),
(
- Series(["a", None], dtype=CategoricalDtype(categories=["a", "b"])),
- Series([None, "b"], dtype=CategoricalDtype(categories=["a", "b"])),
- Series(["a", "b"], dtype=CategoricalDtype(categories=["a", "b"])),
- ),
- (
- Series([Timestamp(year=2020, month=1, day=1, tz="Europe/London"), NaT]),
- Series([NaT, Timestamp(year=2020, month=1, day=1, tz="Europe/London")]),
- Series([Timestamp(year=2020, month=1, day=1, tz="Europe/London")] * 2),
+ [Timestamp(year=2020, month=1, day=1, tz="Europe/London"), NaT],
+ [NaT, Timestamp(year=2020, month=1, day=1, tz="Europe/London")],
+ [Timestamp(year=2020, month=1, day=1, tz="Europe/London")] * 2,
+ "datetime64[ns, Europe/London]",
),
],
)
- def test_update_extension_array_series(self, result, target, expected):
- result.update(target)
+ def test_update_extension_array_series(self, data, other, expected, dtype):
+ result = Series(data, dtype=dtype)
+ other = Series(other, dtype=dtype)
+ expected = Series(expected, dtype=dtype)
+
+ result.update(other)
tm.assert_series_equal(result, expected)
def test_update_with_categorical_type(self):
diff --git a/pandas/tests/strings/test_find_replace.py b/pandas/tests/strings/test_find_replace.py
index ef27d582b4e0f..ab95b2071ae10 100644
--- a/pandas/tests/strings/test_find_replace.py
+++ b/pandas/tests/strings/test_find_replace.py
@@ -364,26 +364,28 @@ def test_match():
def test_fullmatch():
# GH 32806
- values = Series(["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"])
- result = values.str.fullmatch(".*BAD[_]+.*BAD")
- exp = Series([True, False, np.nan, False])
- tm.assert_series_equal(result, exp)
-
- # Make sure that the new string arrays work
- string_values = Series(
- ["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"], dtype="string"
- )
- result = string_values.str.fullmatch(".*BAD[_]+.*BAD")
- # Result is nullable boolean with StringDtype
- string_exp = Series([True, False, np.nan, False], dtype="boolean")
- tm.assert_series_equal(result, string_exp)
+ ser = Series(["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"])
+ result = ser.str.fullmatch(".*BAD[_]+.*BAD")
+ expected = Series([True, False, np.nan, False])
+ tm.assert_series_equal(result, expected)
- values = Series(["ab", "AB", "abc", "ABC"])
- result = values.str.fullmatch("ab", case=False)
+ ser = Series(["ab", "AB", "abc", "ABC"])
+ result = ser.str.fullmatch("ab", case=False)
expected = Series([True, True, False, False])
tm.assert_series_equal(result, expected)
+def test_fullmatch_nullable_string_dtype(nullable_string_dtype):
+ ser = Series(
+ ["fooBAD__barBAD", "BAD_BADleroybrown", None, "foo"],
+ dtype=nullable_string_dtype,
+ )
+ result = ser.str.fullmatch(".*BAD[_]+.*BAD")
+ # Result is nullable boolean
+ expected = Series([True, False, np.nan, False], dtype="boolean")
+ tm.assert_series_equal(result, expected)
+
+
def test_findall():
values = Series(["fooBAD__barBAD", np.nan, "foo", "BAD"])
diff --git a/pandas/tests/strings/test_strings.py b/pandas/tests/strings/test_strings.py
index 95ac237597bc4..a809446f0bc06 100644
--- a/pandas/tests/strings/test_strings.py
+++ b/pandas/tests/strings/test_strings.py
@@ -136,17 +136,23 @@ def test_repeat():
tm.assert_series_equal(rs, xp)
-def test_repeat_with_null():
+def test_repeat_with_null(nullable_string_dtype, request):
# GH: 31632
- values = Series(["a", None], dtype="string")
- result = values.str.repeat([3, 4])
- exp = Series(["aaa", None], dtype="string")
- tm.assert_series_equal(result, exp)
- values = Series(["a", "b"], dtype="string")
- result = values.str.repeat([3, None])
- exp = Series(["aaa", None], dtype="string")
- tm.assert_series_equal(result, exp)
+ if nullable_string_dtype == "arrow_string":
+ reason = 'Attribute "dtype" are different'
+ mark = pytest.mark.xfail(reason=reason)
+ request.node.add_marker(mark)
+
+ ser = Series(["a", None], dtype=nullable_string_dtype)
+ result = ser.str.repeat([3, 4])
+ expected = Series(["aaa", None], dtype=nullable_string_dtype)
+ tm.assert_series_equal(result, expected)
+
+ ser = Series(["a", "b"], dtype=nullable_string_dtype)
+ result = ser.str.repeat([3, None])
+ expected = Series(["aaa", None], dtype=nullable_string_dtype)
+ tm.assert_series_equal(result, expected)
def test_empty_str_methods():
| still outstanding:
`test_astype_string` in `pandas/tests/extension/base/casting.py` requires separate PR with fix
`test_convert_dtypes` in `pandas/tests/frame/methods/test_convert_dtypes.py` requires discussion https://github.com/pandas-dev/pandas/pull/40747#issuecomment-812508672... probably need to include in (or follow on to #39908) to at least use the global default
`test_to_html_formatters` in `pandas/tests/io/formats/test_to_html.py`
paramaterisation where pd.StringArray is used directly... could potentially combine with #40962 and expose ArrowStringArray publicly
the changes here to `test_repeat_with_null` need a separate PR to fix so could be excluded here. | https://api.github.com/repos/pandas-dev/pandas/pulls/40963 | 2021-04-15T16:56:11Z | 2021-04-16T01:03:57Z | 2021-04-16T01:03:57Z | 2021-04-16T07:03:33Z |
[ArrowStringArray] API: StringArray -> ObjectStringArray | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 329023ed7ba6a..cf8787e9e97aa 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1120,22 +1120,30 @@ def string_dtype(request):
@pytest.fixture(
params=[
- "string",
- pytest.param(
- "arrow_string", marks=td.skip_if_no("pyarrow", min_version="1.0.0")
- ),
+ "python",
+ pytest.param("pyarrow", marks=td.skip_if_no("pyarrow", min_version="1.0.0")),
]
)
-def nullable_string_dtype(request):
+def string_storage(request):
+ return request.param
+
+
+# Aliases so we can test with cartesian product of string_storage
+string_storage2 = string_storage
+string_storage3 = string_storage
+
+
+@pytest.fixture
+def nullable_string_dtype(string_storage):
"""
- Parametrized fixture for string dtypes.
+ Parametrized fixture for StringDtype with string_storage.
- * 'string'
- * 'arrow_string'
+ * 'string' (python storage)
+ * 'string` (pyarrow storage)
"""
- from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
+ with pd.option_context("string_storage", string_storage):
- return request.param
+ yield "string"
@pytest.fixture(params=tm.BYTES_DTYPES)
@@ -1163,22 +1171,27 @@ def object_dtype(request):
@pytest.fixture(
params=[
"object",
- "string",
- pytest.param(
- "arrow_string", marks=td.skip_if_no("pyarrow", min_version="1.0.0")
- ),
+ "python",
+ pytest.param("pyarrow", marks=td.skip_if_no("pyarrow", min_version="1.0.0")),
]
)
-def any_string_dtype(request):
+def any_string_dtype_param(request):
+ return request.param
+
+
+@pytest.fixture
+def any_string_dtype(any_string_dtype_param):
"""
Parametrized fixture for string dtypes.
* 'object'
- * 'string'
- * 'arrow_string'
+ * 'string' (python storage)
+ * 'string` (pyarrow storage)
"""
- from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
-
- return request.param
+ if any_string_dtype_param == "object":
+ yield "object"
+ else:
+ with pd.option_context("string_storage", any_string_dtype_param):
+ yield "string"
@pytest.fixture(params=tm.DATETIME64_DTYPES)
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index ab1dadf4d2dfa..4c487fa6af7e4 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -1,9 +1,17 @@
from __future__ import annotations
-from typing import TYPE_CHECKING
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Sequence,
+ TypeVar,
+ cast,
+)
import numpy as np
+from pandas._config import get_option
+
from pandas._libs import (
lib,
missing as libmissing,
@@ -11,9 +19,12 @@
from pandas._libs.arrays import NDArrayBacked
from pandas._typing import (
Dtype,
+ NpDtype,
+ PositionalIndexer,
Scalar,
type_t,
)
+from pandas.compat import pa_version_under1p0
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.base import (
@@ -32,20 +43,45 @@
from pandas.core import ops
from pandas.core.array_algos import masked_reductions
+from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import (
FloatingArray,
IntegerArray,
PandasArray,
)
+from pandas.core.arrays.base import ExtensionArray
from pandas.core.arrays.floating import FloatingDtype
from pandas.core.arrays.integer import _IntegerDtype
from pandas.core.construction import extract_array
from pandas.core.indexers import check_array_indexer
from pandas.core.missing import isna
+from pandas.core.strings.object_array import ObjectStringArrayMixin
if TYPE_CHECKING:
+ from typing import Literal
+
import pyarrow
+ from pandas.core.arrays.string_arrow import ArrowStringArray
+
+ StringStorage = Literal["python", "pyarrow"]
+
+
+def _validate_string_storage(storage: StringStorage) -> None:
+ if storage not in {"python", "pyarrow"}:
+ raise ValueError(
+ f"Storage must be 'python' or 'pyarrow'. Got {storage} instead."
+ )
+ if storage == "pyarrow" and pa_version_under1p0:
+ raise ImportError("pyarrow>=1.0.0 is required for PyArrow backed StringArray.")
+
+
+def _get_string_storage(storage: StringStorage | None) -> StringStorage:
+ if storage is None:
+ storage = get_option("mode.string_storage")
+ _validate_string_storage(storage)
+ return storage
+
@register_extension_dtype
class StringDtype(ExtensionDtype):
@@ -125,7 +161,10 @@ def __from_arrow__(
return StringArray(np.array([], dtype="object"))
-class StringArray(PandasArray):
+StringArrayT = TypeVar("StringArrayT", bound="StringArray")
+
+
+class StringArray(OpsMixin, ExtensionArray, ObjectStringArrayMixin):
"""
Extension array for string data.
@@ -152,6 +191,9 @@ class StringArray(PandasArray):
copy : bool, default False
Whether to copy the array of data.
+ storage : {"python", "pyarrow"}, optional
+ If not given, the value of ``pd.options.mode.string_storage``.
+
Attributes
----------
None
@@ -201,6 +243,633 @@ class StringArray(PandasArray):
Length: 3, dtype: boolean
"""
+ _dtype = StringDtype()
+ _array: ObjectStringArray | ArrowStringArray
+ _storage: StringStorage
+
+ @property
+ def storage(self) -> str:
+ return self._storage
+
+ # ------------------------------------------------------------------------
+ # Constructors
+ # ------------------------------------------------------------------------
+
+ def __init__(self, values, copy=False, *, storage: StringStorage | None = None):
+ from pandas.core.arrays.string_arrow import ArrowStringArray
+
+ storage = _get_string_storage(storage)
+ self._storage = storage
+ klass = ObjectStringArray if storage == "python" else ArrowStringArray
+ # error: Incompatible types in assignment (expression has type
+ # "ObjectStringArrayMixin", variable has type "Union[ObjectStringArray,
+ # ArrowStringArray]")
+ self._array = klass(values, copy=copy) # type: ignore[assignment]
+
+ def _from_array(self, array):
+ klass = type(self)
+ new_string_array = klass.__new__(klass)
+ new_string_array._storage = self._storage
+ new_string_array._array = array
+ return new_string_array
+
+ def _maybe_wrap_result(self, result):
+ if isinstance(result, type(self._array)):
+ return self._from_array(result)
+ return result
+
+ @classmethod
+ def _from_sequence(
+ cls,
+ scalars,
+ *,
+ dtype: Dtype | None = None,
+ copy=False,
+ storage: StringStorage | None = None,
+ ):
+ from pandas.core.arrays.string_arrow import ArrowStringArray
+
+ if dtype:
+ assert dtype == "string"
+
+ new_string_array = cls.__new__(cls)
+ storage = _get_string_storage(storage)
+ new_string_array._storage = storage
+ klass = ObjectStringArray if storage == "python" else ArrowStringArray
+ # error: "Type[ObjectStringArrayMixin]" has no attribute "_from_sequence"
+ new_string_array._array = klass._from_sequence( # type: ignore[attr-defined]
+ scalars, dtype=dtype, copy=copy
+ )
+ return new_string_array
+
+ @classmethod
+ def _from_sequence_of_strings(
+ cls,
+ strings,
+ *,
+ dtype: Dtype | None = None,
+ copy=False,
+ storage: StringStorage | None = None,
+ ):
+ from pandas.core.arrays.string_arrow import ArrowStringArray
+
+ if dtype:
+ assert dtype == "string"
+
+ new_string_array = cls.__new__(cls)
+ storage = _get_string_storage(storage)
+ new_string_array._storage = storage
+ klass = ObjectStringArray if storage == "python" else ArrowStringArray
+ # error: "Type[ObjectStringArrayMixin]" has no attribute
+ # "_from_sequence_of_strings"
+ tmp = klass._from_sequence_of_strings # type: ignore[attr-defined]
+ new_string_array._array = tmp(strings, dtype=dtype, copy=copy)
+ return new_string_array
+
+ # ------------------------------------------------------------------------
+ # Must be a Sequence
+ # ------------------------------------------------------------------------
+
+ def __getitem__(self, item: PositionalIndexer) -> Any:
+ result = self._array[item]
+ return self._maybe_wrap_result(result)
+
+ def __setitem__(self, key, value) -> None:
+ if isinstance(value, type(self)):
+ value = value._array
+ self._array[key] = value
+
+ def __len__(self) -> int:
+ return len(self._array)
+
+ def to_numpy(
+ self,
+ dtype=None,
+ copy: bool = False,
+ na_value=lib.no_default,
+ ) -> np.ndarray:
+ return self._array.to_numpy(dtype=dtype, copy=copy, na_value=na_value)
+
+ # ------------------------------------------------------------------------
+ # Required attributes
+ # ------------------------------------------------------------------------
+
+ @property
+ def dtype(self) -> StringDtype:
+ return self._dtype
+
+ @property
+ def nbytes(self) -> int:
+ return self._array.nbytes
+
+ # ------------------------------------------------------------------------
+ # Additional Methods
+ # ------------------------------------------------------------------------
+
+ def astype(self, dtype, copy=True):
+ dtype = pandas_dtype(dtype)
+
+ if is_dtype_equal(dtype, self.dtype):
+ if copy:
+ return self.copy()
+ return self
+
+ return self._array.astype(dtype, copy=copy)
+
+ def isna(self) -> np.ndarray:
+ return self._array.isna()
+
+ # def _values_for_argsort(self) -> np.ndarray:
+ # return self._array._values_for_argsort()
+
+ # def argsort(
+ # self,
+ # ascending: bool = True,
+ # kind: str = "quicksort",
+ # na_position: str = "last",
+ # *args,
+ # **kwargs,
+ # ) -> np.ndarray:
+
+ # def argmin(self, skipna: bool = True) -> int:
+
+ # def argmax(self, skipna: bool = True) -> int:
+
+ # def fillna(
+ # self,
+ # value: object | ArrayLike | None = None,
+ # method: FillnaOptions | None = None,
+ # limit: int | None = None,
+ # ):
+
+ # def dropna(self):
+ # """
+ # Return ExtensionArray without NA values.
+
+ # Returns
+ # -------
+ # valid : ExtensionArray
+ # """
+ # # error: Unsupported operand type for ~ ("ExtensionArray")
+ # return self[~self.isna()] # type: ignore[operator]
+
+ # def shift(self, periods: int = 1, fill_value: object = None) -> ExtensionArray:
+ # """
+ # Shift values by desired number.
+
+ # Newly introduced missing values are filled with
+ # ``self.dtype.na_value``.
+
+ # .. versionadded:: 0.24.0
+
+ # Parameters
+ # ----------
+ # periods : int, default 1
+ # The number of periods to shift. Negative values are allowed
+ # for shifting backwards.
+
+ # fill_value : object, optional
+ # The scalar value to use for newly introduced missing values.
+ # The default is ``self.dtype.na_value``.
+
+ # .. versionadded:: 0.24.0
+
+ # Returns
+ # -------
+ # ExtensionArray
+ # Shifted.
+
+ # Notes
+ # -----
+ # If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
+ # returned.
+
+ # If ``periods > len(self)``, then an array of size
+ # len(self) is returned, with all values filled with
+ # ``self.dtype.na_value``.
+ # """
+ # # Note: this implementation assumes that `self.dtype.na_value` can be
+ # # stored in an instance of your ExtensionArray with `self.dtype`.
+ # if not len(self) or periods == 0:
+ # return self.copy()
+
+ # if isna(fill_value):
+ # fill_value = self.dtype.na_value
+
+ # empty = self._from_sequence(
+ # [fill_value] * min(abs(periods), len(self)), dtype=self.dtype
+ # )
+ # if periods > 0:
+ # a = empty
+ # b = self[:-periods]
+ # else:
+ # a = self[abs(periods) :]
+ # b = empty
+ # return self._concat_same_type([a, b])
+
+ # def unique(self: ExtensionArrayT) -> ExtensionArrayT:
+ # """
+ # Compute the ExtensionArray of unique values.
+
+ # Returns
+ # -------
+ # uniques : ExtensionArray
+ # """
+ # uniques = unique(self.astype(object))
+ # return self._from_sequence(uniques, dtype=self.dtype)
+
+ # def searchsorted(self, value, side="left", sorter=None):
+ # """
+ # Find indices where elements should be inserted to maintain order.
+
+ # .. versionadded:: 0.24.0
+
+ # Find the indices into a sorted array `self` (a) such that, if the
+ # corresponding elements in `value` were inserted before the indices,
+ # the order of `self` would be preserved.
+
+ # Assuming that `self` is sorted:
+
+ # ====== ================================
+ # `side` returned index `i` satisfies
+ # ====== ================================
+ # left ``self[i-1] < value <= self[i]``
+ # right ``self[i-1] <= value < self[i]``
+ # ====== ================================
+
+ # Parameters
+ # ----------
+ # value : array_like
+ # Values to insert into `self`.
+ # side : {'left', 'right'}, optional
+ # If 'left', the index of the first suitable location found is given.
+ # If 'right', return the last such index. If there is no suitable
+ # index, return either 0 or N (where N is the length of `self`).
+ # sorter : 1-D array_like, optional
+ # Optional array of integer indices that sort array a into ascending
+ # order. They are typically the result of argsort.
+
+ # Returns
+ # -------
+ # array of ints
+ # Array of insertion points with the same shape as `value`.
+
+ # See Also
+ # --------
+ # numpy.searchsorted : Similar method from NumPy.
+ # """
+ # # Note: the base tests provided by pandas only test the basics.
+ # # We do not test
+ # # 1. Values outside the range of the `data_for_sorting` fixture
+ # # 2. Values between the values in the `data_for_sorting` fixture
+ # # 3. Missing values.
+ # arr = self.astype(object)
+ # return arr.searchsorted(value, side=side, sorter=sorter)
+
+ def equals(self, other: object) -> bool:
+ # TODO: allow ObjectStringArray and ArrowStringArray to compare equal
+ if isinstance(other, type(self)):
+ other = other._array
+ return self._array.equals(other)
+
+ # def isin(self, values) -> np.ndarray:
+ # """
+ # Pointwise comparison for set containment in the given values.
+
+ # Roughly equivalent to `np.array([x in values for x in self])`
+
+ # Parameters
+ # ----------
+ # values : Sequence
+
+ # Returns
+ # -------
+ # np.ndarray[bool]
+ # """
+ # return isin(np.asarray(self), values)
+
+ # def _values_for_factorize(self) -> tuple[np.ndarray, Any]:
+ # """
+ # Return an array and missing value suitable for factorization.
+
+ # Returns
+ # -------
+ # values : ndarray
+
+ # An array suitable for factorization. This should maintain order
+ # and be a supported dtype (Float64, Int64, UInt64, String, Object).
+ # By default, the extension array is cast to object dtype.
+ # na_value : object
+ # The value in `values` to consider missing. This will be treated
+ # as NA in the factorization routines, so it will be coded as
+ # `na_sentinel` and not included in `uniques`. By default,
+ # ``np.nan`` is used.
+
+ # Notes
+ # -----
+ # The values returned by this method are also used in
+ # :func:`pandas.util.hash_pandas_object`.
+ # """
+ # return self.astype(object), np.nan
+
+ def factorize(self, na_sentinel: int = -1) -> tuple[np.ndarray, ExtensionArray]:
+ return self._array.factorize(na_sentinel=na_sentinel)
+
+ # def factorize(self, na_sentinel: int = -1) -> tuple[np.ndarray, ExtensionArray]:
+ # """
+ # Encode the extension array as an enumerated type.
+
+ # Parameters
+ # ----------
+ # na_sentinel : int, default -1
+ # Value to use in the `codes` array to indicate missing values.
+
+ # Returns
+ # -------
+ # codes : ndarray
+ # An integer NumPy array that's an indexer into the original
+ # ExtensionArray.
+ # uniques : ExtensionArray
+ # An ExtensionArray containing the unique values of `self`.
+
+ # .. note::
+
+ # uniques will *not* contain an entry for the NA value of
+ # the ExtensionArray if there are any missing values present
+ # in `self`.
+
+ # See Also
+ # --------
+ # factorize : Top-level factorize method that dispatches here.
+
+ # Notes
+ # -----
+ # :meth:`pandas.factorize` offers a `sort` keyword as well.
+ # """
+ # # Implementer note: There are two ways to override the behavior of
+ # # pandas.factorize
+ # # 1. _values_for_factorize and _from_factorize.
+ # # Specify the values passed to pandas' internal factorization
+ # # routines, and how to convert from those values back to the
+ # # original ExtensionArray.
+ # # 2. ExtensionArray.factorize.
+ # # Complete control over factorization.
+ # arr, na_value = self._values_for_factorize()
+
+ # codes, uniques = factorize_array(
+ # arr, na_sentinel=na_sentinel, na_value=na_value
+ # )
+
+ # uniques = self._from_factorized(uniques, self)
+ # # error: Incompatible return value type (got "Tuple[ndarray, ndarray]",
+ # # expected "Tuple[ndarray, ExtensionArray]")
+ # return codes, uniques # type: ignore[return-value]
+
+ # _extension_array_shared_docs[
+ # "repeat"
+ # ] = """
+ # Repeat elements of a %(klass)s.
+
+ # Returns a new %(klass)s where each element of the current %(klass)s
+ # is repeated consecutively a given number of times.
+
+ # Parameters
+ # ----------
+ # repeats : int or array of ints
+ # The number of repetitions for each element. This should be a
+ # non-negative integer. Repeating 0 times will return an empty
+ # %(klass)s.
+ # axis : None
+ # Must be ``None``. Has no effect but is accepted for compatibility
+ # with numpy.
+
+ # Returns
+ # -------
+ # repeated_array : %(klass)s
+ # Newly created %(klass)s with repeated elements.
+
+ # See Also
+ # --------
+ # Series.repeat : Equivalent function for Series.
+ # Index.repeat : Equivalent function for Index.
+ # numpy.repeat : Similar method for :class:`numpy.ndarray`.
+ # ExtensionArray.take : Take arbitrary positions.
+
+ # Examples
+ # --------
+ # >>> cat = pd.Categorical(['a', 'b', 'c'])
+ # >>> cat
+ # ['a', 'b', 'c']
+ # Categories (3, object): ['a', 'b', 'c']
+ # >>> cat.repeat(2)
+ # ['a', 'a', 'b', 'b', 'c', 'c']
+ # Categories (3, object): ['a', 'b', 'c']
+ # >>> cat.repeat([1, 2, 3])
+ # ['a', 'b', 'b', 'c', 'c', 'c']
+ # Categories (3, object): ['a', 'b', 'c']
+ # """
+
+ # @Substitution(klass="ExtensionArray")
+ # @Appender(_extension_array_shared_docs["repeat"])
+ # def repeat(self, repeats: int | Sequence[int], axis: int | None = None):
+ # nv.validate_repeat((), {"axis": axis})
+ # ind = np.arange(len(self)).repeat(repeats)
+ # return self.take(ind)
+
+ # ------------------------------------------------------------------------
+ # Indexing methods
+ # ------------------------------------------------------------------------
+
+ def take(
+ self, indices: Sequence[int], allow_fill: bool = False, fill_value: Any = None
+ ):
+ result = self._array.take(indices, allow_fill=allow_fill, fill_value=fill_value)
+ return self._from_array(result)
+
+ def copy(self) -> StringArray:
+ result = self._array.copy()
+ return self._from_array(result)
+
+ # def view(self, dtype: Dtype | None = None) -> ArrayLike:
+ # """
+ # Return a view on the array.
+
+ # Parameters
+ # ----------
+ # dtype : str, np.dtype, or ExtensionDtype, optional
+ # Default None.
+
+ # Returns
+ # -------
+ # ExtensionArray or np.ndarray
+ # A view on the :class:`ExtensionArray`'s data.
+ # """
+ # # NB:
+ # # - This must return a *new* object referencing the same data, not self.
+ # # - The only case that *must* be implemented is with dtype=None,
+ # # giving a view with the same dtype as self.
+ # if dtype is not None:
+ # raise NotImplementedError(dtype)
+ # return self[:]
+
+ # ------------------------------------------------------------------------
+ # Printing
+ # ------------------------------------------------------------------------
+
+ # def __repr__(self) -> str:
+ # from pandas.io.formats.printing import format_object_summary
+
+ # # the short repr has no trailing newline, while the truncated
+ # # repr does. So we include a newline in our template, and strip
+ # # any trailing newlines from format_object_summary
+ # data = format_object_summary(
+ # self, self._formatter(), indent_for_name=False
+ # ).rstrip(", \n")
+ # class_name = f"<{type(self).__name__}>\n"
+ # return f"{class_name}{data}\nLength: {len(self)}, dtype: {self.dtype}"
+
+ # def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
+ # """
+ # Formatting function for scalar values.
+
+ # This is used in the default '__repr__'. The returned formatting
+ # function receives instances of your scalar type.
+
+ # Parameters
+ # ----------
+ # boxed : bool, default False
+ # An indicator of whether or not your array is being printed
+ # within a Series, DataFrame, or Index (True), or just by
+ # itself (False). This may be useful if you want scalar values
+ # to appear differently within a Series versus on its own (e.g.
+ # quoted or not).
+
+ # Returns
+ # -------
+ # Callable[[Any], str]
+ # A callable that gets instances of the scalar type and
+ # returns a string. By default, :func:`repr` is used
+ # when ``boxed=False`` and :func:`str` is used when
+ # ``boxed=True``.
+ # """
+ # if boxed:
+ # return str
+ # return repr
+
+ # ------------------------------------------------------------------------
+ # Reshaping
+ # ------------------------------------------------------------------------
+
+ @classmethod
+ def _concat_same_type(
+ cls: type[StringArrayT], to_concat: Sequence[StringArrayT]
+ ) -> StringArrayT:
+ from pandas.core.arrays.string_arrow import ArrowStringArray
+
+ result: ObjectStringArray | ArrowStringArray
+ if all(arr.storage == "python" for arr in to_concat):
+ to_concat_object = cast(
+ Sequence[ObjectStringArray], [arr._array for arr in to_concat]
+ )
+ result = ObjectStringArray._concat_same_type(to_concat_object)
+ storage = "python"
+ elif all(arr.storage == "pyarrow" for arr in to_concat):
+ to_concat_arrow = [arr._array for arr in to_concat]
+ result = ArrowStringArray._concat_same_type(to_concat_arrow)
+ storage = "pyarrow"
+ else:
+ raise NotImplementedError
+
+ new_string_array = cls.__new__(cls)
+ new_string_array._storage = storage
+ new_string_array._array = result
+ return new_string_array
+
+ # The _can_hold_na attribute is set to True so that pandas internals
+ # will use the ExtensionDtype.na_value as the NA value in operations
+ # such as take(), reindex(), shift(), etc. In addition, those results
+ # will then be of the ExtensionArray subclass rather than an array
+ # of objects
+ _can_hold_na = True
+
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
+ return self._array._reduce(name, skipna=skipna, **kwargs)
+
+ # def __hash__(self) -> int:
+ # raise TypeError(f"unhashable type: {repr(type(self).__name__)}")
+
+ # ------------------------------------------------------------------------
+ # Other
+ # ------------------------------------------------------------------------
+
+ # @classmethod
+ # def _empty(cls, shape, dtype) -> StringArray:
+ # values = np.empty(shape, dtype=object)
+ # values[:] = libmissing.NA
+ # return cls(values).astype(dtype, copy=False)
+
+ # def _values_for_factorize(self):
+ # arr = self._ndarray.copy()
+ # mask = self.isna()
+ # arr[mask] = -1
+ # return arr, -1
+
+ # ------------------------------------------------------------------------
+ # Additional array methods
+ # These are not part of the EA API, but we implement them because
+ # pandas assumes they're there.
+ # ------------------------------------------------------------------------
+
+ def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
+ return np.asarray(self._array, dtype=dtype)
+
+ def __arrow_array__(self, type=None):
+ return self._array.__arrow_array__(type)
+
+ def min(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
+ return self._array.min(axis=axis, skipna=skipna, **kwargs)
+
+ def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
+ return self._array.max(axis=axis, skipna=skipna, **kwargs)
+
+ def value_counts(self, dropna: bool = True):
+ return self._array.value_counts(dropna=dropna)
+
+ def memory_usage(self, deep: bool = False) -> int:
+ return self._array.memory_usage(deep=deep)
+
+ # ------------------------------------------------------------------------
+ # OpsMixin interface
+ # ------------------------------------------------------------------------
+
+ def _cmp_method(self, other, op):
+ return self._array._cmp_method(other, op)
+
+ def _logical_method(self, other, op):
+ return self._array._logical_method(other, op)
+
+ def _arith_method(self, other, op):
+ return self._array._arith_method(other, op)
+
+ # ------------------------------------------------------------------------
+ # String methods interface
+ # ------------------------------------------------------------------------
+
+ _str_na_value = StringDtype.na_value
+
+ def _str_map(
+ self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
+ ):
+ result = self._array._str_map(
+ f, na_value=na_value, dtype=dtype, convert=convert
+ )
+ return self._maybe_wrap_result(result)
+
+ # TODO: dispatch all str accessor methods to array instead of wrapping result of
+ # object fallback (_str_map)
+
+
+class ObjectStringArray(PandasArray):
# undo the PandasArray hack
_typ = "extension"
@@ -258,7 +927,7 @@ def _from_sequence_of_strings(
return cls._from_sequence(strings, dtype=dtype, copy=copy)
@classmethod
- def _empty(cls, shape, dtype) -> StringArray:
+ def _empty(cls, shape, dtype) -> ObjectStringArray:
values = np.empty(shape, dtype=object)
values[:] = libmissing.NA
return cls(values).astype(dtype, copy=False)
@@ -377,7 +1046,7 @@ def memory_usage(self, deep: bool = False) -> int:
def _cmp_method(self, other, op):
from pandas.arrays import BooleanArray
- if isinstance(other, StringArray):
+ if isinstance(other, ObjectStringArray):
other = other._ndarray
mask = isna(self) | isna(other)
@@ -397,7 +1066,7 @@ def _cmp_method(self, other, op):
result = np.empty_like(self._ndarray, dtype="object")
result[mask] = StringDtype.na_value
result[valid] = op(self._ndarray[valid], other)
- return StringArray(result)
+ return type(self)(result)
else:
# logical
result = np.zeros(len(self._ndarray), dtype="bool")
@@ -457,7 +1126,7 @@ def _str_map(
result = lib.map_infer_mask(
arr, f, mask.view("uint8"), convert=False, na_value=na_value
)
- return StringArray(result)
+ return type(self)(result)
else:
# This is when the result type is object. We reach this when
# -> We know the result type is truly object (e.g. .encode returns bytes
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 3cf471e381da9..fde2ab7066a60 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -11,16 +11,12 @@
import numpy as np
-from pandas._libs import (
- lib,
- missing as libmissing,
-)
+from pandas._libs import lib
from pandas._typing import (
Dtype,
NpDtype,
PositionalIndexer,
Scalar,
- type_t,
)
from pandas.compat import (
pa_version_under1p0,
@@ -43,7 +39,6 @@
is_string_dtype,
pandas_dtype,
)
-from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.missing import isna
from pandas.core import missing
@@ -86,93 +81,6 @@ def _chk_pyarrow_available() -> None:
raise ImportError(msg)
-@register_extension_dtype
-class ArrowStringDtype(StringDtype):
- """
- Extension dtype for string data in a ``pyarrow.ChunkedArray``.
-
- .. versionadded:: 1.2.0
-
- .. warning::
-
- ArrowStringDtype is considered experimental. The implementation and
- parts of the API may change without warning.
-
- Attributes
- ----------
- None
-
- Methods
- -------
- None
-
- Examples
- --------
- >>> from pandas.core.arrays.string_arrow import ArrowStringDtype
- >>> ArrowStringDtype()
- ArrowStringDtype
- """
-
- name = "arrow_string"
-
- #: StringDtype.na_value uses pandas.NA
- na_value = libmissing.NA
-
- def __init__(self):
- _chk_pyarrow_available()
-
- @property
- def type(self) -> type[str]:
- return str
-
- @classmethod
- def construct_array_type(cls) -> type_t[ArrowStringArray]: # type: ignore[override]
- """
- Return the array type associated with this dtype.
-
- Returns
- -------
- type
- """
- return ArrowStringArray
-
- def __hash__(self) -> int:
- return hash("ArrowStringDtype")
-
- def __repr__(self) -> str:
- return "ArrowStringDtype"
-
- def __from_arrow__( # type: ignore[override]
- self, array: pa.Array | pa.ChunkedArray
- ) -> ArrowStringArray:
- """
- Construct StringArray from pyarrow Array/ChunkedArray.
- """
- return ArrowStringArray(array)
-
- def __eq__(self, other) -> bool:
- """Check whether 'other' is equal to self.
-
- By default, 'other' is considered equal if
- * it's a string matching 'self.name'.
- * it's an instance of this type.
-
- Parameters
- ----------
- other : Any
-
- Returns
- -------
- bool
- """
- if isinstance(other, ArrowStringDtype):
- return True
- elif isinstance(other, str) and other == "arrow_string":
- return True
- else:
- return False
-
-
# TODO: Inherit directly from BaseStringArrayMethods. Currently we inherit from
# ObjectStringArrayMixin because we want to have the object-dtype based methods as
# fallback for the ones that pyarrow doesn't yet support
@@ -222,8 +130,10 @@ class ArrowStringArray(OpsMixin, ExtensionArray, ObjectStringArrayMixin):
Length: 4, dtype: arrow_string
"""
- def __init__(self, values):
- self._dtype = ArrowStringDtype()
+ _dtype = StringDtype()
+
+ def __init__(self, values, copy: bool = False):
+ # copy is ignored, for compatibility with ObjectStringArray
if isinstance(values, pa.Array):
self._data = pa.chunked_array([values])
elif isinstance(values, pa.ChunkedArray):
@@ -261,9 +171,9 @@ def _from_sequence_of_strings(
return cls._from_sequence(strings, dtype=dtype, copy=copy)
@property
- def dtype(self) -> ArrowStringDtype:
+ def dtype(self) -> StringDtype:
"""
- An instance of 'ArrowStringDtype'.
+ An instance of 'StringDtype'.
"""
return self._dtype
@@ -465,6 +375,12 @@ def _reduce(self, name: str, skipna: bool = True, **kwargs):
raise TypeError(f"Cannot perform reduction '{name}' with string dtype")
+ def min(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
+ raise NotImplementedError
+
+ def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
+ raise NotImplementedError
+
@property
def nbytes(self) -> int:
"""
@@ -472,6 +388,9 @@ def nbytes(self) -> int:
"""
return self._data.nbytes
+ def memory_usage(self, deep: bool = False) -> int:
+ return self.nbytes
+
def isna(self) -> np.ndarray:
"""
Boolean NumPy array indicating if each value is missing.
@@ -761,7 +680,7 @@ def astype(self, dtype, copy=True):
# ------------------------------------------------------------------------
# String methods interface
- _str_na_value = ArrowStringDtype.na_value
+ _str_na_value = StringDtype.na_value
def _str_map(
self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 0db0c5a57207d..57d09cd8d78f5 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -525,6 +525,18 @@ def use_inf_as_na_cb(key):
validator=is_one_of_factory([None, "warn", "raise"]),
)
+string_storage_doc = """
+: string
+ The default storage for StringArray.
+"""
+
+with cf.config_prefix("mode"):
+ cf.register_option(
+ "string_storage",
+ "python",
+ string_storage_doc,
+ validator=is_one_of_factory(["python", "pyarrow"]),
+ )
# Set up the io.excel specific reader configuration.
reader_engine_doc = """
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 4abb5d98202f6..0e40b8c4ff14e 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -420,17 +420,13 @@ def maybe_cast_to_extension_array(
ExtensionArray or obj
"""
from pandas.core.arrays.string_ import StringArray
- from pandas.core.arrays.string_arrow import ArrowStringArray
assert isinstance(cls, type), f"must pass a type: {cls}"
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
# Everything can be converted to StringArrays, but we may not want to convert
- if (
- issubclass(cls, (StringArray, ArrowStringArray))
- and lib.infer_dtype(obj) != "string"
- ):
+ if issubclass(cls, StringArray) and lib.infer_dtype(obj) != "string":
return obj
try:
diff --git a/pandas/core/strings/__init__.py b/pandas/core/strings/__init__.py
index 28aba7c9ce0b3..943686fc85a05 100644
--- a/pandas/core/strings/__init__.py
+++ b/pandas/core/strings/__init__.py
@@ -25,7 +25,6 @@
# - StringArray
# - PandasArray
# - Categorical
-# - ArrowStringArray
from pandas.core.strings.accessor import StringMethods
from pandas.core.strings.base import BaseStringArrayMethods
diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index 7ce4abe904f3b..8fa1459b36df4 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -174,7 +174,6 @@ def scalar_rep(x):
return self._str_map(scalar_rep, dtype=str)
else:
from pandas.core.arrays.string_ import StringArray
- from pandas.core.arrays.string_arrow import ArrowStringArray
def rep(x, r):
if x is libmissing.NA:
@@ -186,7 +185,7 @@ def rep(x, r):
repeats = np.asarray(repeats, dtype=object)
result = libops.vec_binop(np.asarray(self), repeats, rep)
- if isinstance(self, (StringArray, ArrowStringArray)):
+ if isinstance(self, StringArray):
# Not going through map, so we have to do this here.
result = type(self)._from_sequence(result)
return result
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index c9533e239abe0..929e77c2e5609 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -1,5 +1,5 @@
"""
-This module tests the functionality of StringArray and ArrowStringArray.
+This module tests the functionality of StringArray.
Tests for the str accessors are in pandas/tests/strings/test_string_array.py
"""
@@ -14,69 +14,46 @@
import pandas as pd
import pandas._testing as tm
-from pandas.core.arrays.string_arrow import (
- ArrowStringArray,
- ArrowStringDtype,
+from pandas.core.arrays.string_ import (
+ StringArray,
+ StringDtype,
)
skip_if_no_pyarrow = td.skip_if_no("pyarrow", min_version="1.0.0")
-@pytest.fixture(
- params=["string", pytest.param("arrow_string", marks=skip_if_no_pyarrow)]
-)
-def dtype(request):
- return request.param
-
-
-@pytest.fixture
-def dtype_object(dtype):
- if dtype == "string":
- return pd.StringDtype
- else:
- return ArrowStringDtype
-
-
-@pytest.fixture(
- params=[
- pd.arrays.StringArray,
- pytest.param(ArrowStringArray, marks=skip_if_no_pyarrow),
- ]
-)
-def cls(request):
- return request.param
-
-
-def test_repr(dtype):
- df = pd.DataFrame({"A": pd.array(["a", pd.NA, "b"], dtype=dtype)})
+def test_repr(string_storage):
+ with pd.option_context("string_storage", string_storage):
+ df = pd.DataFrame({"A": pd.array(["a", pd.NA, "b"], dtype="string")})
expected = " A\n0 a\n1 <NA>\n2 b"
assert repr(df) == expected
- expected = f"0 a\n1 <NA>\n2 b\nName: A, dtype: {dtype}"
+ expected = "0 a\n1 <NA>\n2 b\nName: A, dtype: string"
assert repr(df.A) == expected
- arr_name = "ArrowStringArray" if dtype == "arrow_string" else "StringArray"
- expected = f"<{arr_name}>\n['a', <NA>, 'b']\nLength: 3, dtype: {dtype}"
+ expected = "<StringArray>\n['a', <NA>, 'b']\nLength: 3, dtype: string"
assert repr(df.A.array) == expected
-def test_none_to_nan(cls):
- a = cls._from_sequence(["a", None, "b"])
+def test_none_to_nan(string_storage):
+ with pd.option_context("string_storage", string_storage):
+ a = StringArray._from_sequence(["a", None, "b"])
assert a[1] is not None
assert a[1] is pd.NA
-def test_setitem_validates(cls):
- arr = cls._from_sequence(["a", "b"])
+def test_setitem_validates(string_storage):
+ with pd.option_context("string_storage", string_storage):
+ arr = StringArray._from_sequence(["a", "b"])
- if cls is pd.arrays.StringArray:
+ if string_storage == "python":
msg = "Cannot set non-string value '10' into a StringArray."
else:
msg = "Scalar must be NA or str"
with pytest.raises(ValueError, match=msg):
arr[0] = 10
- if cls is pd.arrays.StringArray:
+ if string_storage == "python":
msg = "Must provide strings."
else:
msg = "Scalar must be NA or str"
@@ -84,17 +61,18 @@ def test_setitem_validates(cls):
arr[:] = np.array([1, 2])
-def test_setitem_with_scalar_string(dtype):
+def test_setitem_with_scalar_string(string_storage):
# is_float_dtype considers some strings, like 'd', to be floats
# which can cause issues.
- arr = pd.array(["a", "c"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.array(["a", "c"], dtype="string")
arr[0] = "d"
- expected = pd.array(["d", "c"], dtype=dtype)
+ expected = pd.array(["d", "c"], dtype="string")
tm.assert_extension_array_equal(arr, expected)
-def test_astype_roundtrip(dtype, request):
- if dtype == "arrow_string":
+def test_astype_roundtrip(string_storage, request):
+ if string_storage == "pyarrow":
reason = "ValueError: Could not convert object to NumPy datetime"
mark = pytest.mark.xfail(reason=reason, raises=ValueError)
request.node.add_marker(mark)
@@ -107,15 +85,16 @@ def test_astype_roundtrip(dtype, request):
ser = pd.Series(pd.date_range("2000", periods=12))
ser[0] = None
- casted = ser.astype(dtype)
- assert is_dtype_equal(casted.dtype, dtype)
+ with pd.option_context("string_storage", string_storage):
+ casted = ser.astype("string")
+ assert is_dtype_equal(casted.dtype, "string")
result = casted.astype("datetime64[ns]")
tm.assert_series_equal(result, ser)
-def test_add(dtype, request):
- if dtype == "arrow_string":
+def test_add(string_storage, string_storage2, string_storage3, request):
+ if string_storage == "pyarrow" or string_storage2 == "pyarrow":
reason = (
"unsupported operand type(s) for +: 'ArrowStringArray' and "
"'ArrowStringArray'"
@@ -123,32 +102,39 @@ def test_add(dtype, request):
mark = pytest.mark.xfail(raises=TypeError, reason=reason)
request.node.add_marker(mark)
- a = pd.Series(["a", "b", "c", None, None], dtype=dtype)
- b = pd.Series(["x", "y", None, "z", None], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ a = pd.Series(["a", "b", "c", None, None], dtype="string")
+
+ with pd.option_context("string_storage", string_storage2):
+ b = pd.Series(["x", "y", None, "z", None], dtype="string")
result = a + b
- expected = pd.Series(["ax", "by", None, None, None], dtype=dtype)
+ with pd.option_context("string_storage", string_storage3):
+ expected = pd.Series(["ax", "by", None, None, None], dtype="string")
tm.assert_series_equal(result, expected)
result = a.add(b)
tm.assert_series_equal(result, expected)
result = a.radd(b)
- expected = pd.Series(["xa", "yb", None, None, None], dtype=dtype)
+ with pd.option_context("string_storage", string_storage3):
+ expected = pd.Series(["xa", "yb", None, None, None], dtype="string")
tm.assert_series_equal(result, expected)
result = a.add(b, fill_value="-")
- expected = pd.Series(["ax", "by", "c-", "-z", None], dtype=dtype)
+ with pd.option_context("string_storage", string_storage3):
+ expected = pd.Series(["ax", "by", "c-", "-z", None], dtype="string")
tm.assert_series_equal(result, expected)
-def test_add_2d(dtype, request):
- if dtype == "arrow_string":
+def test_add_2d(string_storage, request):
+ if string_storage == "pyarrow":
reason = "Failed: DID NOT RAISE <class 'ValueError'>"
mark = pytest.mark.xfail(raises=None, reason=reason)
request.node.add_marker(mark)
- a = pd.array(["a", "b", "c"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ a = pd.array(["a", "b", "c"], dtype="string")
b = np.array([["a", "b", "c"]], dtype=object)
with pytest.raises(ValueError, match="3 != 1"):
a + b
@@ -158,33 +144,39 @@ def test_add_2d(dtype, request):
s + b
-def test_add_sequence(dtype, request):
- if dtype == "arrow_string":
+def test_add_sequence(string_storage, string_storage2, request):
+ if string_storage == "pyarrow":
reason = "unsupported operand type(s) for +: 'ArrowStringArray' and 'list'"
mark = pytest.mark.xfail(raises=TypeError, reason=reason)
request.node.add_marker(mark)
- a = pd.array(["a", "b", None, None], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ a = pd.array(["a", "b", None, None], dtype="string")
other = ["x", None, "y", None]
result = a + other
- expected = pd.array(["ax", None, None, None], dtype=dtype)
+ with pd.option_context("string_storage", string_storage2):
+ expected = pd.array(["ax", None, None, None], dtype="string")
tm.assert_extension_array_equal(result, expected)
result = other + a
- expected = pd.array(["xa", None, None, None], dtype=dtype)
+ with pd.option_context("string_storage", string_storage2):
+ expected = pd.array(["xa", None, None, None], dtype="string")
tm.assert_extension_array_equal(result, expected)
-def test_mul(dtype, request):
- if dtype == "arrow_string":
+def test_mul(string_storage, string_storage2, request):
+ if string_storage == "pyarrow":
reason = "unsupported operand type(s) for *: 'ArrowStringArray' and 'int'"
mark = pytest.mark.xfail(raises=TypeError, reason=reason)
request.node.add_marker(mark)
- a = pd.array(["a", "b", None], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ a = pd.array(["a", "b", None], dtype="string")
result = a * 2
- expected = pd.array(["aa", "bb", None], dtype=dtype)
+
+ with pd.option_context("string_storage", string_storage2):
+ expected = pd.array(["aa", "bb", None], dtype="string")
tm.assert_extension_array_equal(result, expected)
result = 2 * a
@@ -192,39 +184,46 @@ def test_mul(dtype, request):
@pytest.mark.xfail(reason="GH-28527")
-def test_add_strings(dtype):
- arr = pd.array(["a", "b", "c", "d"], dtype=dtype)
+def test_add_strings(string_storage, string_storage2):
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.array(["a", "b", "c", "d"], dtype="string")
df = pd.DataFrame([["t", "u", "v", "w"]])
assert arr.__add__(df) is NotImplemented
result = arr + df
- expected = pd.DataFrame([["at", "bu", "cv", "dw"]]).astype(dtype)
+ with pd.option_context("string_storage", string_storage2):
+ expected = pd.DataFrame([["at", "bu", "cv", "dw"]]).astype("string")
tm.assert_frame_equal(result, expected)
result = df + arr
- expected = pd.DataFrame([["ta", "ub", "vc", "wd"]]).astype(dtype)
+ with pd.option_context("string_storage", string_storage2):
+ expected = pd.DataFrame([["ta", "ub", "vc", "wd"]]).astype("string")
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH-28527")
-def test_add_frame(dtype):
- arr = pd.array(["a", "b", np.nan, np.nan], dtype=dtype)
+def test_add_frame(string_storage, string_storage2):
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.array(["a", "b", np.nan, np.nan], dtype="string")
df = pd.DataFrame([["x", np.nan, "y", np.nan]])
assert arr.__add__(df) is NotImplemented
result = arr + df
- expected = pd.DataFrame([["ax", np.nan, np.nan, np.nan]]).astype(dtype)
+ with pd.option_context("string_storage", string_storage2):
+ expected = pd.DataFrame([["ax", np.nan, np.nan, np.nan]]).astype("string")
tm.assert_frame_equal(result, expected)
result = df + arr
- expected = pd.DataFrame([["xa", np.nan, np.nan, np.nan]]).astype(dtype)
+ with pd.option_context("string_storage", string_storage2):
+ expected = pd.DataFrame([["xa", np.nan, np.nan, np.nan]]).astype("string")
tm.assert_frame_equal(result, expected)
-def test_comparison_methods_scalar(all_compare_operators, dtype):
+def test_comparison_methods_scalar(all_compare_operators, string_storage):
op_name = all_compare_operators
- a = pd.array(["a", None, "c"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ a = pd.array(["a", None, "c"], dtype="string")
other = "a"
result = getattr(a, op_name)(other)
expected = np.array([getattr(item, op_name)(other) for item in a], dtype=object)
@@ -232,22 +231,26 @@ def test_comparison_methods_scalar(all_compare_operators, dtype):
tm.assert_extension_array_equal(result, expected)
-def test_comparison_methods_scalar_pd_na(all_compare_operators, dtype):
+def test_comparison_methods_scalar_pd_na(all_compare_operators, string_storage):
op_name = all_compare_operators
- a = pd.array(["a", None, "c"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ a = pd.array(["a", None, "c"], dtype="string")
result = getattr(a, op_name)(pd.NA)
expected = pd.array([None, None, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
-def test_comparison_methods_scalar_not_string(all_compare_operators, dtype, request):
+def test_comparison_methods_scalar_not_string(
+ all_compare_operators, string_storage, request
+):
if all_compare_operators not in ["__eq__", "__ne__"]:
reason = "comparison op not supported between instances of 'str' and 'int'"
mark = pytest.mark.xfail(raises=TypeError, reason=reason)
request.node.add_marker(mark)
op_name = all_compare_operators
- a = pd.array(["a", None, "c"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ a = pd.array(["a", None, "c"], dtype="string")
other = 42
result = getattr(a, op_name)(other)
expected_data = {"__eq__": [False, None, False], "__ne__": [True, None, True]}[
@@ -257,8 +260,8 @@ def test_comparison_methods_scalar_not_string(all_compare_operators, dtype, requ
tm.assert_extension_array_equal(result, expected)
-def test_comparison_methods_array(all_compare_operators, dtype, request):
- if dtype == "arrow_string":
+def test_comparison_methods_array(all_compare_operators, string_storage, request):
+ if string_storage == "pyarrow":
mark = pytest.mark.xfail(
raises=AssertionError, reason="left is not an ExtensionArray"
)
@@ -266,7 +269,8 @@ def test_comparison_methods_array(all_compare_operators, dtype, request):
op_name = all_compare_operators
- a = pd.array(["a", None, "c"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ a = pd.array(["a", None, "c"], dtype="string")
other = [None, None, "c"]
result = getattr(a, op_name)(other)
expected = np.empty_like(a, dtype="object")
@@ -279,31 +283,31 @@ def test_comparison_methods_array(all_compare_operators, dtype, request):
tm.assert_extension_array_equal(result, expected)
-def test_constructor_raises(cls):
- if cls is pd.arrays.StringArray:
+def test_constructor_raises(string_storage):
+ if string_storage == "python":
msg = "StringArray requires a sequence of strings or pandas.NA"
else:
msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowStringArray"
with pytest.raises(ValueError, match=msg):
- cls(np.array(["a", "b"], dtype="S1"))
+ StringArray(np.array(["a", "b"], dtype="S1"), storage=string_storage)
with pytest.raises(ValueError, match=msg):
- cls(np.array([]))
+ StringArray(np.array([]), storage=string_storage)
with pytest.raises(ValueError, match=msg):
- cls(np.array(["a", np.nan], dtype=object))
+ StringArray(np.array(["a", np.nan], dtype=object), storage=string_storage)
with pytest.raises(ValueError, match=msg):
- cls(np.array(["a", None], dtype=object))
+ StringArray(np.array(["a", None], dtype=object), storage=string_storage)
with pytest.raises(ValueError, match=msg):
- cls(np.array(["a", pd.NaT], dtype=object))
+ StringArray(np.array(["a", pd.NaT], dtype=object), storage=string_storage)
@pytest.mark.parametrize("copy", [True, False])
-def test_from_sequence_no_mutate(copy, cls, request):
- if cls is ArrowStringArray and copy is False:
+def test_from_sequence_no_mutate(copy, string_storage, request):
+ if string_storage == "pyarrow" and copy is False:
mark = pytest.mark.xfail(
raises=AssertionError, reason="numpy array are different"
)
@@ -312,14 +316,17 @@ def test_from_sequence_no_mutate(copy, cls, request):
nan_arr = np.array(["a", np.nan], dtype=object)
na_arr = np.array(["a", pd.NA], dtype=object)
- result = cls._from_sequence(nan_arr, copy=copy)
+ with pd.option_context("string_storage", string_storage):
+ result = StringArray._from_sequence(nan_arr, copy=copy)
- if cls is ArrowStringArray:
+ if string_storage == "pyarrow":
import pyarrow as pa
- expected = cls(pa.array(na_arr, type=pa.string(), from_pandas=True))
+ expected = StringArray(
+ pa.array(na_arr, type=pa.string(), from_pandas=True), storage="pyarrow"
+ )
else:
- expected = cls(na_arr)
+ expected = StringArray(na_arr, storage="python")
tm.assert_extension_array_equal(result, expected)
@@ -327,29 +334,33 @@ def test_from_sequence_no_mutate(copy, cls, request):
tm.assert_numpy_array_equal(nan_arr, expected)
-def test_astype_int(dtype):
- arr = pd.array(["1", "2", "3"], dtype=dtype)
+def test_astype_int(string_storage):
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.array(["1", "2", "3"], dtype="string")
result = arr.astype("int64")
expected = np.array([1, 2, 3], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
- arr = pd.array(["1", pd.NA, "3"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.array(["1", pd.NA, "3"], dtype="string")
msg = re.escape("int() argument must be a string, a bytes-like object or a number")
with pytest.raises(TypeError, match=msg):
arr.astype("int64")
-def test_astype_nullable_int(dtype):
- arr = pd.array(["1", pd.NA, "3"], dtype=dtype)
+def test_astype_nullable_int(string_storage):
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.array(["1", pd.NA, "3"], dtype="string")
result = arr.astype("Int64")
expected = pd.array([1, pd.NA, 3], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
-def test_astype_float(dtype, any_float_allowed_nullable_dtype):
+def test_astype_float(string_storage, any_float_allowed_nullable_dtype):
# Don't compare arrays (37974)
- ser = pd.Series(["1.1", pd.NA, "3.3"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ ser = pd.Series(["1.1", pd.NA, "3.3"], dtype="string")
result = ser.astype(any_float_allowed_nullable_dtype)
expected = pd.Series([1.1, np.nan, 3.3], dtype=any_float_allowed_nullable_dtype)
tm.assert_series_equal(result, expected)
@@ -357,21 +368,22 @@ def test_astype_float(dtype, any_float_allowed_nullable_dtype):
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.xfail(reason="Not implemented StringArray.sum")
-def test_reduce(skipna, dtype):
- arr = pd.Series(["a", "b", "c"], dtype=dtype)
+def test_reduce(skipna, string_storage):
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.Series(["a", "b", "c"], dtype="string")
result = arr.sum(skipna=skipna)
assert result == "abc"
@pytest.mark.parametrize("method", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
-def test_min_max(method, skipna, dtype, request):
- if dtype == "arrow_string":
- reason = "'ArrowStringArray' object has no attribute 'max'"
- mark = pytest.mark.xfail(raises=AttributeError, reason=reason)
+def test_min_max(method, skipna, string_storage, request):
+ if string_storage == "pyarrow":
+ mark = pytest.mark.xfail(raises=NotImplementedError, reason="not implemented")
request.node.add_marker(mark)
- arr = pd.Series(["a", "b", "c", None], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.Series(["a", "b", "c", None], dtype="string")
result = getattr(arr, method)(skipna=skipna)
if skipna:
expected = "a" if method == "min" else "c"
@@ -382,18 +394,13 @@ def test_min_max(method, skipna, dtype, request):
@pytest.mark.parametrize("method", ["min", "max"])
@pytest.mark.parametrize("box", [pd.Series, pd.array])
-def test_min_max_numpy(method, box, dtype, request):
- if dtype == "arrow_string":
- if box is pd.array:
- raises = TypeError
- reason = "'<=' not supported between instances of 'str' and 'NoneType'"
- else:
- raises = AttributeError
- reason = "'ArrowStringArray' object has no attribute 'max'"
- mark = pytest.mark.xfail(raises=raises, reason=reason)
+def test_min_max_numpy(method, box, string_storage, request):
+ if string_storage == "pyarrow":
+ mark = pytest.mark.xfail(raises=NotImplementedError, reason="not implemented")
request.node.add_marker(mark)
- arr = box(["a", "b", "c", None], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ arr = box(["a", "b", "c", None], dtype="string")
result = getattr(np, method)(arr)
expected = "a" if method == "min" else "c"
assert result == expected
@@ -401,8 +408,9 @@ def test_min_max_numpy(method, box, dtype, request):
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.xfail(reason="Not implemented StringArray.sum")
-def test_reduce_missing(skipna, dtype):
- arr = pd.Series([None, "a", None, "b", "c", None], dtype=dtype)
+def test_reduce_missing(skipna, string_storage):
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.Series([None, "a", None, "b", "c", None], dtype="string")
result = arr.sum(skipna=skipna)
if skipna:
assert result == "abc"
@@ -410,10 +418,10 @@ def test_reduce_missing(skipna, dtype):
assert pd.isna(result)
-def test_fillna_args(dtype, request):
+def test_fillna_args(string_storage, string_storage2, request):
# GH 37987
- if dtype == "arrow_string":
+ if string_storage == "pyarrow":
reason = (
"Regex pattern \"Cannot set non-string value '1' into "
"a StringArray.\" does not match 'Scalar must be NA or str'"
@@ -421,14 +429,17 @@ def test_fillna_args(dtype, request):
mark = pytest.mark.xfail(raises=AssertionError, reason=reason)
request.node.add_marker(mark)
- arr = pd.array(["a", pd.NA], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.array(["a", pd.NA], dtype="string")
res = arr.fillna(value="b")
- expected = pd.array(["a", "b"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage2):
+ expected = pd.array(["a", "b"], dtype="string")
tm.assert_extension_array_equal(res, expected)
res = arr.fillna(value=np.str_("b"))
- expected = pd.array(["a", "b"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage2):
+ expected = pd.array(["a", "b"], dtype="string")
tm.assert_extension_array_equal(res, expected)
msg = "Cannot set non-string value '1' into a StringArray."
@@ -437,53 +448,57 @@ def test_fillna_args(dtype, request):
@td.skip_if_no("pyarrow")
-def test_arrow_array(dtype):
+def test_arrow_array(string_storage):
# protocol added in 0.15.0
import pyarrow as pa
- data = pd.array(["a", "b", "c"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ data = pd.array(["a", "b", "c"], dtype="string")
arr = pa.array(data)
expected = pa.array(list(data), type=pa.string(), from_pandas=True)
- if dtype == "arrow_string":
+ if string_storage == "pyarrow":
expected = pa.chunked_array(expected)
assert arr.equals(expected)
@td.skip_if_no("pyarrow")
-def test_arrow_roundtrip(dtype, dtype_object):
+def test_arrow_roundtrip(string_storage):
# roundtrip possible from arrow 1.0.0
import pyarrow as pa
- data = pd.array(["a", "b", None], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ data = pd.array(["a", "b", None], dtype="string")
df = pd.DataFrame({"a": data})
table = pa.table(df)
assert table.field("a").type == "string"
result = table.to_pandas()
- assert isinstance(result["a"].dtype, dtype_object)
+ assert isinstance(result["a"].dtype, StringDtype)
tm.assert_frame_equal(result, df)
# ensure the missing value is represented by NA and not np.nan or None
assert result.loc[2, "a"] is pd.NA
@td.skip_if_no("pyarrow")
-def test_arrow_load_from_zero_chunks(dtype, dtype_object):
+def test_arrow_load_from_zero_chunks(string_storage):
# GH-41040
import pyarrow as pa
- data = pd.array([], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ data = pd.array([], dtype="string")
df = pd.DataFrame({"a": data})
table = pa.table(df)
assert table.field("a").type == "string"
# Instantiate the same table with no chunks at all
table = pa.table([pa.chunked_array([], type=pa.string())], schema=table.schema)
result = table.to_pandas()
- assert isinstance(result["a"].dtype, dtype_object)
+ assert isinstance(result["a"].dtype, StringDtype)
tm.assert_frame_equal(result, df)
-def test_value_counts_na(dtype):
- arr = pd.array(["a", "b", "a", pd.NA], dtype=dtype)
+def test_value_counts_na(string_storage):
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.array(["a", "b", "a", pd.NA], dtype="string")
result = arr.value_counts(dropna=False)
expected = pd.Series([2, 1, 1], index=["a", "b", pd.NA], dtype="Int64")
tm.assert_series_equal(result, expected)
@@ -493,8 +508,9 @@ def test_value_counts_na(dtype):
tm.assert_series_equal(result, expected)
-def test_value_counts_with_normalize(dtype):
- s = pd.Series(["a", "b", "a", pd.NA], dtype=dtype)
+def test_value_counts_with_normalize(string_storage):
+ with pd.option_context("string_storage", string_storage):
+ s = pd.Series(["a", "b", "a", pd.NA], dtype="string")
result = s.value_counts(normalize=True)
expected = pd.Series([2, 1], index=["a", "b"], dtype="Float64") / 3
tm.assert_series_equal(result, expected)
@@ -507,9 +523,10 @@ def test_value_counts_with_normalize(dtype):
(["a", "b", None], np.array([False, False, True])),
],
)
-def test_use_inf_as_na(values, expected, dtype):
+def test_use_inf_as_na(values, expected, string_storage):
# https://github.com/pandas-dev/pandas/issues/33655
- values = pd.array(values, dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ values = pd.array(values, dtype="string")
with pd.option_context("mode.use_inf_as_na", True):
result = values.isna()
tm.assert_numpy_array_equal(result, expected)
@@ -523,43 +540,49 @@ def test_use_inf_as_na(values, expected, dtype):
tm.assert_frame_equal(result, expected)
-def test_memory_usage(dtype, request):
+def test_memory_usage(string_storage, request):
# GH 33963
- if dtype == "arrow_string":
+ if string_storage == "pyarrow":
pytest.skip("not applicable")
- series = pd.Series(["a", "b", "c"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ series = pd.Series(["a", "b", "c"], dtype="string")
assert 0 < series.nbytes <= series.memory_usage() < series.memory_usage(deep=True)
@pytest.mark.parametrize("float_dtype", [np.float16, np.float32, np.float64])
-def test_astype_from_float_dtype(float_dtype, dtype):
+def test_astype_from_float_dtype(float_dtype, string_storage, string_storage2):
# https://github.com/pandas-dev/pandas/issues/36451
s = pd.Series([0.1], dtype=float_dtype)
- result = s.astype(dtype)
- expected = pd.Series(["0.1"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ result = s.astype("string")
+ with pd.option_context("string_storage", string_storage2):
+ expected = pd.Series(["0.1"], dtype="string")
tm.assert_series_equal(result, expected)
-def test_to_numpy_returns_pdna_default(dtype):
- arr = pd.array(["a", pd.NA, "b"], dtype=dtype)
+def test_to_numpy_returns_pdna_default(string_storage):
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.array(["a", pd.NA, "b"], dtype="string")
result = np.array(arr)
expected = np.array(["a", pd.NA, "b"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
-def test_to_numpy_na_value(dtype, nulls_fixture):
+def test_to_numpy_na_value(string_storage, nulls_fixture):
na_value = nulls_fixture
- arr = pd.array(["a", pd.NA, "b"], dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ arr = pd.array(["a", pd.NA, "b"], dtype="string")
result = arr.to_numpy(na_value=na_value)
expected = np.array(["a", na_value, "b"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
-def test_isin(dtype, request):
- s = pd.Series(["a", "b", None], dtype=dtype)
+def test_isin(string_storage, request):
+ with pd.option_context("string_storage", string_storage):
+ s = pd.Series(["a", "b", None], dtype="string")
result = s.isin(["a", "c"])
expected = pd.Series([True, False, False])
diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py
index 3db8333798e36..137572d28606b 100644
--- a/pandas/tests/arrays/string_/test_string_arrow.py
+++ b/pandas/tests/arrays/string_/test_string_arrow.py
@@ -5,10 +5,8 @@
from pandas.compat import pa_version_under1p0
-from pandas.core.arrays.string_arrow import (
- ArrowStringArray,
- ArrowStringDtype,
-)
+import pandas as pd
+from pandas.core.arrays.string_ import StringArray
@pytest.mark.skipif(
@@ -34,7 +32,7 @@ def test_constructor_not_string_type_raises(array, chunked):
"ArrowStringArray requires a PyArrow (chunked) array of string type"
)
with pytest.raises(ValueError, match=msg):
- ArrowStringArray(arr)
+ StringArray(arr, storage="pyarrow")
@pytest.mark.skipif(
@@ -45,10 +43,8 @@ def test_pyarrow_not_installed_raises():
msg = re.escape("pyarrow>=1.0.0 is required for PyArrow backed StringArray")
with pytest.raises(ImportError, match=msg):
- ArrowStringDtype()
-
- with pytest.raises(ImportError, match=msg):
- ArrowStringArray([])
+ StringArray([], storage="pyarrow")
- with pytest.raises(ImportError, match=msg):
- ArrowStringArray._from_sequence(["a", None, "b"])
+ with pd.option_context("string_storage", "pyarrow"):
+ with pytest.raises(ImportError, match=msg):
+ StringArray._from_sequence(["a", None, "b"])
diff --git a/pandas/tests/extension/base/casting.py b/pandas/tests/extension/base/casting.py
index 99a5666926e10..0d8675e3a85a1 100644
--- a/pandas/tests/extension/base/casting.py
+++ b/pandas/tests/extension/base/casting.py
@@ -46,20 +46,19 @@ def test_astype_str(self, data):
self.assert_series_equal(result, expected)
@pytest.mark.parametrize(
- "nullable_string_dtype",
+ "string_storage",
[
- "string",
+ "python",
pytest.param(
- "arrow_string", marks=td.skip_if_no("pyarrow", min_version="1.0.0")
+ "pyarrow", marks=td.skip_if_no("pyarrow", min_version="1.0.0")
),
],
)
- def test_astype_string(self, data, nullable_string_dtype):
+ def test_astype_string(self, data, string_storage):
# GH-33465
- from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
-
- result = pd.Series(data[:5]).astype(nullable_string_dtype)
- expected = pd.Series([str(x) for x in data[:5]], dtype=nullable_string_dtype)
+ with pd.option_context("string_storage", string_storage):
+ result = pd.Series(data[:5]).astype("string")
+ expected = pd.Series([str(x) for x in data[:5]], dtype="string")
self.assert_series_equal(result, expected)
def test_to_numpy(self, data):
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index 49aee76e10f6a..1e60af52435f5 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -18,28 +18,25 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
-from pandas.core.arrays.string_ import StringDtype
-from pandas.core.arrays.string_arrow import ArrowStringDtype
+from pandas.core.arrays.string_ import StringArray
from pandas.tests.extension import base
def split_array(arr):
- if not isinstance(arr.dtype, ArrowStringDtype):
+ if arr.storage == "python":
pytest.skip("chunked array n/a")
def _split_array(arr):
import pyarrow as pa
- arrow_array = arr._data
+ arrow_array = arr._array._data
split = len(arrow_array) // 2
arrow_array = pa.chunked_array(
[*arrow_array[:split].chunks, *arrow_array[split:].chunks]
)
assert arrow_array.num_chunks == 2
- return type(arr)(arrow_array)
+ return type(arr)(arrow_array, storage="pyarrow")
return _split_array(arr)
@@ -49,44 +46,43 @@ def chunked(request):
return request.param
-@pytest.fixture(
- params=[
- StringDtype,
- pytest.param(
- ArrowStringDtype, marks=td.skip_if_no("pyarrow", min_version="1.0.0")
- ),
- ]
-)
-def dtype(request):
- return request.param()
+@pytest.fixture(autouse=True)
+def string_storage_setting(string_storage):
+ with pd.option_context("string_storage", string_storage):
+ yield
+
+
+@pytest.fixture
+def dtype():
+ return pd.StringDtype()
@pytest.fixture
-def data(dtype, chunked):
+def data(chunked):
strings = np.random.choice(list(string.ascii_letters), size=100)
while strings[0] == strings[1]:
strings = np.random.choice(list(string.ascii_letters), size=100)
- arr = dtype.construct_array_type()._from_sequence(strings)
+ arr = StringArray._from_sequence(strings)
return split_array(arr) if chunked else arr
@pytest.fixture
-def data_missing(dtype, chunked):
+def data_missing(chunked):
"""Length 2 array with [NA, Valid]"""
- arr = dtype.construct_array_type()._from_sequence([pd.NA, "A"])
+ arr = StringArray._from_sequence([pd.NA, "A"])
return split_array(arr) if chunked else arr
@pytest.fixture
-def data_for_sorting(dtype, chunked):
- arr = dtype.construct_array_type()._from_sequence(["B", "C", "A"])
+def data_for_sorting(chunked):
+ arr = StringArray._from_sequence(["B", "C", "A"])
return split_array(arr) if chunked else arr
@pytest.fixture
-def data_missing_for_sorting(dtype, chunked):
- arr = dtype.construct_array_type()._from_sequence(["B", pd.NA, "A"])
+def data_missing_for_sorting(chunked):
+ arr = StringArray._from_sequence(["B", pd.NA, "A"])
return split_array(arr) if chunked else arr
@@ -96,10 +92,8 @@ def na_value():
@pytest.fixture
-def data_for_grouping(dtype, chunked):
- arr = dtype.construct_array_type()._from_sequence(
- ["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"]
- )
+def data_for_grouping(chunked):
+ arr = StringArray._from_sequence(["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"])
return split_array(arr) if chunked else arr
@@ -109,7 +103,7 @@ class TestDtype(base.BaseDtypeTests):
class TestInterface(base.BaseInterfaceTests):
def test_view(self, data, request):
- if isinstance(data.dtype, ArrowStringDtype):
+ if data.storage == "pyarrow":
mark = pytest.mark.xfail(reason="not implemented")
request.node.add_marker(mark)
super().test_view(data)
@@ -120,8 +114,8 @@ class TestConstructors(base.BaseConstructorsTests):
class TestReshaping(base.BaseReshapingTests):
- def test_transpose(self, data, dtype, request):
- if isinstance(dtype, ArrowStringDtype):
+ def test_transpose(self, data, request):
+ if data.storage == "pyarrow":
mark = pytest.mark.xfail(reason="not implemented")
request.node.add_marker(mark)
super().test_transpose(data)
@@ -132,8 +126,8 @@ class TestGetitem(base.BaseGetitemTests):
class TestSetitem(base.BaseSetitemTests):
- def test_setitem_preserves_views(self, data, dtype, request):
- if isinstance(dtype, ArrowStringDtype):
+ def test_setitem_preserves_views(self, data, request):
+ if data.storage == "pyarrow":
mark = pytest.mark.xfail(reason="not implemented")
request.node.add_marker(mark)
super().test_setitem_preserves_views(data)
diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py
index 1583b3f91bea2..c477ce117bf3f 100644
--- a/pandas/tests/frame/methods/test_astype.py
+++ b/pandas/tests/frame/methods/test_astype.py
@@ -3,8 +3,6 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
import pandas as pd
from pandas import (
Categorical,
@@ -585,22 +583,19 @@ def test_astype_empty_dtype_dict(self):
"data, dtype",
[
(["x", "y", "z"], "string"),
- pytest.param(
- ["x", "y", "z"],
- "arrow_string",
- marks=td.skip_if_no("pyarrow", min_version="1.0.0"),
- ),
(["x", "y", "z"], "category"),
(3 * [Timestamp("2020-01-01", tz="UTC")], None),
(3 * [Interval(0, 1)], None),
],
)
@pytest.mark.parametrize("errors", ["raise", "ignore"])
- def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors):
+ def test_astype_ignores_errors_for_extension_dtypes(
+ self, data, dtype, errors, string_storage
+ ):
# https://github.com/pandas-dev/pandas/issues/35471
- from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
- df = DataFrame(Series(data, dtype=dtype))
+ with option_context("string_storage", string_storage):
+ df = DataFrame(Series(data, dtype=dtype))
if errors == "ignore":
expected = df
result = df.astype(float, errors=errors)
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index ae6425cd93ac5..11e68cab058d8 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -811,10 +811,11 @@ def test_additional_extension_arrays(self, pa):
@td.skip_if_no("pyarrow", min_version="1.0.0")
def test_pyarrow_backed_string_array(self, pa):
- # test ArrowStringArray supported through the __arrow_array__ protocol
- from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
+ # test StringArray(..., storage="pyarrow") supported through the __arrow_array__
+ # protocol
- df = pd.DataFrame({"a": pd.Series(["a", None, "c"], dtype="arrow_string")})
+ with pd.option_context("string_storage", "pyarrow"):
+ df = pd.DataFrame({"a": pd.Series(["a", None, "c"], dtype="string")})
check_round_trip(df, pa, expected=df)
@td.skip_if_no("pyarrow")
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index ffaecf1576364..3e439144cf3f5 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -10,8 +10,8 @@
import pytest
from pandas._libs.tslibs import iNaT
-import pandas.util._test_decorators as td
+import pandas as pd
from pandas import (
NA,
Categorical,
@@ -250,23 +250,19 @@ def test_td64_series_astype_object(self):
"data, dtype",
[
(["x", "y", "z"], "string"),
- pytest.param(
- ["x", "y", "z"],
- "arrow_string",
- marks=td.skip_if_no("pyarrow", min_version="1.0.0"),
- ),
(["x", "y", "z"], "category"),
(3 * [Timestamp("2020-01-01", tz="UTC")], None),
(3 * [Interval(0, 1)], None),
],
)
@pytest.mark.parametrize("errors", ["raise", "ignore"])
- def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors):
+ def test_astype_ignores_errors_for_extension_dtypes(
+ self, data, dtype, errors, string_storage
+ ):
# https://github.com/pandas-dev/pandas/issues/35471
- from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
-
- ser = Series(data, dtype=dtype)
+ with pd.option_context("string_storage", string_storage):
+ ser = Series(data, dtype=dtype)
if errors == "ignore":
expected = ser
result = ser.astype(float, errors="ignore")
diff --git a/pandas/tests/series/methods/test_update.py b/pandas/tests/series/methods/test_update.py
index 9a64877cb92ff..7cb4a129d8917 100644
--- a/pandas/tests/series/methods/test_update.py
+++ b/pandas/tests/series/methods/test_update.py
@@ -1,8 +1,7 @@
import numpy as np
import pytest
-import pandas.util._test_decorators as td
-
+import pandas as pd
from pandas import (
CategoricalDtype,
DataFrame,
@@ -11,7 +10,6 @@
Timestamp,
)
import pandas._testing as tm
-from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
class TestUpdate:
@@ -88,13 +86,6 @@ def test_update_from_non_series(self, series, other, expected):
"data, other, expected, dtype",
[
(["a", None], [None, "b"], ["a", "b"], "string"),
- pytest.param(
- ["a", None],
- [None, "b"],
- ["a", "b"],
- "arrow_string",
- marks=td.skip_if_no("pyarrow", min_version="1.0.0"),
- ),
([1, None], [None, 2], [1, 2], "Int64"),
([True, None], [None, False], [True, False], "boolean"),
(
@@ -111,10 +102,13 @@ def test_update_from_non_series(self, series, other, expected):
),
],
)
- def test_update_extension_array_series(self, data, other, expected, dtype):
- result = Series(data, dtype=dtype)
- other = Series(other, dtype=dtype)
- expected = Series(expected, dtype=dtype)
+ def test_update_extension_array_series(
+ self, data, other, expected, dtype, string_storage
+ ):
+ with pd.option_context("string_storage", string_storage):
+ result = Series(data, dtype=dtype)
+ other = Series(other, dtype=dtype)
+ expected = Series(expected, dtype=dtype)
result.update(other)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/strings/test_api.py b/pandas/tests/strings/test_api.py
index ec8b5bfa11ad5..e9e89ea6442b2 100644
--- a/pandas/tests/strings/test_api.py
+++ b/pandas/tests/strings/test_api.py
@@ -125,10 +125,12 @@ def test_api_per_method(
method(*args, **kwargs)
-def test_api_for_categorical(any_string_method, any_string_dtype, request):
+def test_api_for_categorical(
+ any_string_method, any_string_dtype, any_string_dtype_param, request
+):
# https://github.com/pandas-dev/pandas/issues/10661
- if any_string_dtype == "arrow_string":
+ if any_string_dtype_param == "pyarrow":
# unsupported operand type(s) for +: 'ArrowStringArray' and 'str'
mark = pytest.mark.xfail(raises=TypeError, reason="Not Implemented")
request.node.add_marker(mark)
diff --git a/pandas/tests/strings/test_strings.py b/pandas/tests/strings/test_strings.py
index 98f3fc859976e..ec5ae7c67875e 100644
--- a/pandas/tests/strings/test_strings.py
+++ b/pandas/tests/strings/test_strings.py
@@ -366,7 +366,7 @@ def test_len_mixed():
)
def test_index(method, sub, start, end, index_or_series, any_string_dtype, expected):
if index_or_series is Index and not any_string_dtype == "object":
- pytest.skip("Index cannot yet be backed by a StringArray/ArrowStringArray")
+ pytest.skip("Index cannot yet be backed by a StringArray")
obj = index_or_series(
["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype
| xref https://github.com/pandas-dev/pandas/issues/35169#issuecomment-712372349
this PR is draft as this change has not yet been discussed. (maybe could keep the StringArray the same and create a StringArrayBase instead)
also probably want to do this after #39908 (which is not yet ready) and then move the common base class into a separate module along with the dtype. (this could be string_base.py or create subdir with maybe base.py, string_arrow.py and string_python.py)
we should leave moving things around for a follow-up to keep the diff simpler to review.
This PR could be a precusor to a follow-up to #40708 to remove duplication https://github.com/pandas-dev/pandas/pull/40708#discussion_r605106321 (or could be done with common helper function or mixin https://github.com/pandas-dev/pandas/pull/40708#discussion_r605674743)
No deduplication here. just creating base class and changes to make the tests pass
This is API breaking, but for experimental API, so we would probably need some documentation or add to #40747 if we do this after #39908 | https://api.github.com/repos/pandas-dev/pandas/pulls/40962 | 2021-04-15T12:34:43Z | 2021-11-28T21:04:56Z | null | 2021-11-28T21:04:56Z |
[ArrowStringArray] CLN: imports | diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index fd47597b2191f..52bdcd03d3b49 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -25,19 +25,17 @@
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
- is_object_dtype,
- is_string_dtype,
-)
-from pandas.core.dtypes.dtypes import register_extension_dtype
-from pandas.core.dtypes.missing import isna
-
-from pandas.api.types import (
is_array_like,
is_bool_dtype,
is_integer,
is_integer_dtype,
+ is_object_dtype,
is_scalar,
+ is_string_dtype,
)
+from pandas.core.dtypes.dtypes import register_extension_dtype
+from pandas.core.dtypes.missing import isna
+
from pandas.core import missing
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays.base import ExtensionArray
| https://api.github.com/repos/pandas-dev/pandas/pulls/40961 | 2021-04-15T10:23:57Z | 2021-04-15T17:27:59Z | 2021-04-15T17:27:58Z | 2021-04-16T07:12:17Z | |
[ArrowStringArray] CLN: move and rename test_string_methods | diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index 749f3d0aee8a5..2fec1925149ad 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -1,4 +1,7 @@
-import operator
+"""
+This module tests the functionality of StringArray and ArrowStringArray.
+Tests for the str accessors are in pandas/tests/strings/test_string_array.py
+"""
import numpy as np
import pytest
@@ -88,23 +91,6 @@ def test_setitem_with_scalar_string(dtype):
tm.assert_extension_array_equal(arr, expected)
-@pytest.mark.parametrize(
- "input, method",
- [
- (["a", "b", "c"], operator.methodcaller("capitalize")),
- (["a b", "a bc. de"], operator.methodcaller("capitalize")),
- ],
-)
-def test_string_methods(input, method, dtype):
- a = pd.Series(input, dtype=dtype)
- b = pd.Series(input, dtype="object")
- result = method(a.str)
- expected = method(b.str)
-
- assert result.dtype.name == dtype
- tm.assert_series_equal(result.astype(object), expected)
-
-
def test_astype_roundtrip(dtype, request):
if dtype == "arrow_string":
reason = "ValueError: Could not convert object to NumPy datetime"
diff --git a/pandas/tests/strings/test_string_array.py b/pandas/tests/strings/test_string_array.py
index 23c9b14c5a36a..02ccb3a930557 100644
--- a/pandas/tests/strings/test_string_array.py
+++ b/pandas/tests/strings/test_string_array.py
@@ -1,3 +1,5 @@
+import operator
+
import numpy as np
import pytest
@@ -117,3 +119,20 @@ def test_str_get_stringarray_multiple_nans(nullable_string_dtype):
result = s.str.get(2)
expected = Series(pd.array([pd.NA, pd.NA, pd.NA, "c"], dtype=nullable_string_dtype))
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "input, method",
+ [
+ (["a", "b", "c"], operator.methodcaller("capitalize")),
+ (["a b", "a bc. de"], operator.methodcaller("capitalize")),
+ ],
+)
+def test_capitalize(input, method, nullable_string_dtype):
+ a = Series(input, dtype=nullable_string_dtype)
+ b = Series(input, dtype="object")
+ result = method(a.str)
+ expected = method(b.str)
+
+ assert result.dtype.name == nullable_string_dtype
+ tm.assert_series_equal(result.astype(object), expected)
| follow-up to #40708
in #40708 the str accessor was enabled and tests changed in `pandas/tests/arrays/string_/test_string.py` and `pandas/tests/strings/test_string_array.py`
This PR colocates the str accessor test in `pandas/tests/arrays/string_/test_string.py` with those in `pandas/tests/strings/test_string_array.py` and renames it `test_string_methods` -> `test_capitalize` | https://api.github.com/repos/pandas-dev/pandas/pulls/40960 | 2021-04-15T09:11:13Z | 2021-04-15T17:49:44Z | 2021-04-15T17:49:44Z | 2021-04-16T07:11:28Z |
Add 'virt' and 'group' settings for arm64-graviton2 | diff --git a/.travis.yml b/.travis.yml
index 0098e3872bec7..540cd026a43d5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -36,6 +36,8 @@ matrix:
include:
- arch: arm64-graviton2
+ virt: lxd
+ group: edge
env:
- JOB="3.7, arm64" PYTEST_WORKERS="auto" ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)"
| https://github.com/pandas-dev/pandas/pull/40868 tried to change from `arm64` to `arm64-graviton2` but actually changed it to `amd64` because of the missing `virt` and `group` settings, as explained at https://blog.travis-ci.com/2020-09-11-arm-on-aws, section "Quick tips" | https://api.github.com/repos/pandas-dev/pandas/pulls/40959 | 2021-04-15T07:33:27Z | 2021-04-15T17:27:37Z | 2021-04-15T17:27:37Z | 2021-04-16T05:54:39Z |
CLN: refactor tests outside of class setup in `style/test_matplotlib.py` | diff --git a/pandas/tests/io/formats/style/test_matplotlib.py b/pandas/tests/io/formats/style/test_matplotlib.py
index f0158711664ce..496344c59ec04 100644
--- a/pandas/tests/io/formats/style/test_matplotlib.py
+++ b/pandas/tests/io/formats/style/test_matplotlib.py
@@ -10,218 +10,236 @@
pytest.importorskip("matplotlib")
pytest.importorskip("jinja2")
+from pandas.io.formats.style import Styler
-class TestStylerMatplotlibDep:
- def test_background_gradient(self):
- df = DataFrame([[1, 2], [2, 4]], columns=["A", "B"])
-
- for c_map in [None, "YlOrRd"]:
- result = df.style.background_gradient(cmap=c_map)._compute().ctx
- assert all("#" in x[0][1] for x in result.values())
- assert result[(0, 0)] == result[(0, 1)]
- assert result[(1, 0)] == result[(1, 1)]
-
- result = df.style.background_gradient(subset=IndexSlice[1, "A"])._compute().ctx
-
- assert result[(1, 0)] == [("background-color", "#fff7fb"), ("color", "#000000")]
-
- @pytest.mark.parametrize(
- "cmap, expected",
- [
- (
- "PuBu",
- {
- (4, 5): [("background-color", "#86b0d3"), ("color", "#000000")],
- (4, 6): [("background-color", "#83afd3"), ("color", "#f1f1f1")],
- },
- ),
- (
- "YlOrRd",
- {
- (4, 8): [("background-color", "#fd913e"), ("color", "#000000")],
- (4, 9): [("background-color", "#fd8f3d"), ("color", "#f1f1f1")],
- },
- ),
- (
- None,
- {
- (7, 0): [("background-color", "#48c16e"), ("color", "#f1f1f1")],
- (7, 1): [("background-color", "#4cc26c"), ("color", "#000000")],
- },
- ),
- ],
- )
- def test_text_color_threshold(self, cmap, expected):
- df = DataFrame(np.arange(100).reshape(10, 10))
- result = df.style.background_gradient(cmap=cmap, axis=None)._compute().ctx
- for k in expected.keys():
- assert result[k] == expected[k]
-
- def test_background_gradient_axis(self):
- df = DataFrame([[1, 2], [2, 4]], columns=["A", "B"])
-
- low = [("background-color", "#f7fbff"), ("color", "#000000")]
- high = [("background-color", "#08306b"), ("color", "#f1f1f1")]
- mid = [("background-color", "#abd0e6"), ("color", "#000000")]
- result = df.style.background_gradient(cmap="Blues", axis=0)._compute().ctx
- assert result[(0, 0)] == low
- assert result[(0, 1)] == low
- assert result[(1, 0)] == high
- assert result[(1, 1)] == high
-
- result = df.style.background_gradient(cmap="Blues", axis=1)._compute().ctx
- assert result[(0, 0)] == low
- assert result[(0, 1)] == high
- assert result[(1, 0)] == low
- assert result[(1, 1)] == high
-
- result = df.style.background_gradient(cmap="Blues", axis=None)._compute().ctx
- assert result[(0, 0)] == low
- assert result[(0, 1)] == mid
- assert result[(1, 0)] == mid
- assert result[(1, 1)] == high
-
- def test_background_gradient_vmin_vmax(self):
- # GH 12145
- df = DataFrame(range(5))
- ctx = df.style.background_gradient(vmin=1, vmax=3)._compute().ctx
- assert ctx[(0, 0)] == ctx[(1, 0)]
- assert ctx[(4, 0)] == ctx[(3, 0)]
-
- def test_background_gradient_int64(self):
- # GH 28869
- df1 = Series(range(3)).to_frame()
- df2 = Series(range(3), dtype="Int64").to_frame()
- ctx1 = df1.style.background_gradient()._compute().ctx
- ctx2 = df2.style.background_gradient()._compute().ctx
- assert ctx2[(0, 0)] == ctx1[(0, 0)]
- assert ctx2[(1, 0)] == ctx1[(1, 0)]
- assert ctx2[(2, 0)] == ctx1[(2, 0)]
-
- @pytest.mark.parametrize(
- "axis, gmap, expected",
- [
- (
- 0,
- [1, 2],
- {
- (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
- (1, 0): [("background-color", "#023858"), ("color", "#f1f1f1")],
- (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")],
- (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
- },
- ),
- (
- 1,
- [1, 2],
- {
- (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
- (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
- (0, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
- (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
- },
- ),
- (
- None,
- np.array([[2, 1], [1, 2]]),
- {
- (0, 0): [("background-color", "#023858"), ("color", "#f1f1f1")],
- (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
- (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")],
- (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
- },
- ),
- ],
- )
- def test_background_gradient_gmap_array(self, axis, gmap, expected):
- # tests when gmap is given as a sequence and converted to ndarray
- df = DataFrame([[0, 0], [0, 0]])
- result = df.style.background_gradient(axis=axis, gmap=gmap)._compute().ctx
- assert result == expected
-
- @pytest.mark.parametrize(
- "gmap, axis", [([1, 2, 3], 0), ([1, 2], 1), (np.array([[1, 2], [1, 2]]), None)]
- )
- def test_background_gradient_gmap_array_raises(self, gmap, axis):
- # test when gmap as converted ndarray is bad shape
- df = DataFrame([[0, 0, 0], [0, 0, 0]])
- msg = "supplied 'gmap' is not correct shape"
- with pytest.raises(ValueError, match=msg):
- df.style.background_gradient(gmap=gmap, axis=axis)._compute()
-
- @pytest.mark.parametrize(
- "gmap",
- [
- DataFrame( # reverse the columns
- [[2, 1], [1, 2]], columns=["B", "A"], index=["X", "Y"]
- ),
- DataFrame( # reverse the index
- [[2, 1], [1, 2]], columns=["A", "B"], index=["Y", "X"]
- ),
- DataFrame( # reverse the index and columns
- [[1, 2], [2, 1]], columns=["B", "A"], index=["Y", "X"]
- ),
- DataFrame( # add unnecessary columns
- [[1, 2, 3], [2, 1, 3]], columns=["A", "B", "C"], index=["X", "Y"]
- ),
- DataFrame( # add unnecessary index
- [[1, 2], [2, 1], [3, 3]], columns=["A", "B"], index=["X", "Y", "Z"]
- ),
- ],
- )
- @pytest.mark.parametrize(
- "subset, exp_gmap", # exp_gmap is underlying map DataFrame should conform to
- [
- (None, [[1, 2], [2, 1]]),
- (["A"], [[1], [2]]), # slice only column "A" in data and gmap
- (["B", "A"], [[2, 1], [1, 2]]), # reverse the columns in data
- (IndexSlice["X", :], [[1, 2]]), # slice only index "X" in data and gmap
- (IndexSlice[["Y", "X"], :], [[2, 1], [1, 2]]), # reverse the index in data
- ],
- )
- def test_background_gradient_gmap_dataframe_align(self, gmap, subset, exp_gmap):
- # test gmap given as DataFrame that it aligns to the the data including subset
- df = DataFrame([[0, 0], [0, 0]], columns=["A", "B"], index=["X", "Y"])
-
- expected = df.style.background_gradient(axis=None, gmap=exp_gmap, subset=subset)
- result = df.style.background_gradient(axis=None, gmap=gmap, subset=subset)
- assert expected._compute().ctx == result._compute().ctx
-
- @pytest.mark.parametrize(
- "gmap, axis, exp_gmap",
- [
- (Series([2, 1], index=["Y", "X"]), 0, [[1, 1], [2, 2]]), # revrse the index
- (Series([2, 1], index=["B", "A"]), 1, [[1, 2], [1, 2]]), # revrse the cols
- (Series([1, 2, 3], index=["X", "Y", "Z"]), 0, [[1, 1], [2, 2]]), # add idx
- (Series([1, 2, 3], index=["A", "B", "C"]), 1, [[1, 2], [1, 2]]), # add col
- ],
- )
- def test_background_gradient_gmap_series_align(self, gmap, axis, exp_gmap):
- # test gmap given as Series that it aligns to the the data including subset
- df = DataFrame([[0, 0], [0, 0]], columns=["A", "B"], index=["X", "Y"])
-
- expected = df.style.background_gradient(axis=None, gmap=exp_gmap)._compute()
- result = df.style.background_gradient(axis=axis, gmap=gmap)._compute()
- assert expected.ctx == result.ctx
-
- @pytest.mark.parametrize(
- "gmap, axis",
- [
- (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 1),
- (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 0),
- ],
- )
- def test_background_gradient_gmap_wrong_dataframe(self, gmap, axis):
- # test giving a gmap in DataFrame but with wrong axis
- df = DataFrame([[0, 0], [0, 0]], columns=["A", "B"], index=["X", "Y"])
- msg = "'gmap' is a DataFrame but underlying data for operations is a Series"
- with pytest.raises(ValueError, match=msg):
- df.style.background_gradient(gmap=gmap, axis=axis)._compute()
-
- def test_background_gradient_gmap_wrong_series(self):
- # test giving a gmap in Series form but with wrong axis
- df = DataFrame([[0, 0], [0, 0]], columns=["A", "B"], index=["X", "Y"])
- msg = "'gmap' is a Series but underlying data for operations is a DataFrame"
- gmap = Series([1, 2], index=["X", "Y"])
- with pytest.raises(ValueError, match=msg):
- df.style.background_gradient(gmap=gmap, axis=None)._compute()
+
+@pytest.fixture
+def df():
+ return DataFrame([[1, 2], [2, 4]], columns=["A", "B"])
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+@pytest.fixture
+def df_blank():
+ return DataFrame([[0, 0], [0, 0]], columns=["A", "B"], index=["X", "Y"])
+
+
+@pytest.fixture
+def styler_blank(df_blank):
+ return Styler(df_blank, uuid_len=0)
+
+
+def test_background_gradient(styler):
+ for c_map in [None, "YlOrRd"]:
+ result = styler.background_gradient(cmap=c_map)._compute().ctx
+ assert all("#" in x[0][1] for x in result.values())
+ assert result[(0, 0)] == result[(0, 1)]
+ assert result[(1, 0)] == result[(1, 1)]
+
+
+def test_background_gradient_color(styler):
+ result = styler.background_gradient(subset=IndexSlice[1, "A"])._compute().ctx
+ assert result[(1, 0)] == [("background-color", "#fff7fb"), ("color", "#000000")]
+
+
+@pytest.mark.parametrize(
+ "axis, expected",
+ [
+ (0, ["low", "low", "high", "high"]),
+ (1, ["low", "high", "low", "high"]),
+ (None, ["low", "mid", "mid", "high"]),
+ ],
+)
+def test_background_gradient_axis(styler, axis, expected):
+ bg_colors = {
+ "low": [("background-color", "#f7fbff"), ("color", "#000000")],
+ "mid": [("background-color", "#abd0e6"), ("color", "#000000")],
+ "high": [("background-color", "#08306b"), ("color", "#f1f1f1")],
+ }
+ result = styler.background_gradient(cmap="Blues", axis=axis)._compute().ctx
+ for i, cell in enumerate([(0, 0), (0, 1), (1, 0), (1, 1)]):
+ assert result[cell] == bg_colors[expected[i]]
+
+
+@pytest.mark.parametrize(
+ "cmap, expected",
+ [
+ (
+ "PuBu",
+ {
+ (4, 5): [("background-color", "#86b0d3"), ("color", "#000000")],
+ (4, 6): [("background-color", "#83afd3"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ "YlOrRd",
+ {
+ (4, 8): [("background-color", "#fd913e"), ("color", "#000000")],
+ (4, 9): [("background-color", "#fd8f3d"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ None,
+ {
+ (7, 0): [("background-color", "#48c16e"), ("color", "#f1f1f1")],
+ (7, 1): [("background-color", "#4cc26c"), ("color", "#000000")],
+ },
+ ),
+ ],
+)
+def test_text_color_threshold(cmap, expected):
+ # GH 39888
+ df = DataFrame(np.arange(100).reshape(10, 10))
+ result = df.style.background_gradient(cmap=cmap, axis=None)._compute().ctx
+ for k in expected.keys():
+ assert result[k] == expected[k]
+
+
+def test_background_gradient_vmin_vmax():
+ # GH 12145
+ df = DataFrame(range(5))
+ ctx = df.style.background_gradient(vmin=1, vmax=3)._compute().ctx
+ assert ctx[(0, 0)] == ctx[(1, 0)]
+ assert ctx[(4, 0)] == ctx[(3, 0)]
+
+
+def test_background_gradient_int64():
+ # GH 28869
+ df1 = Series(range(3)).to_frame()
+ df2 = Series(range(3), dtype="Int64").to_frame()
+ ctx1 = df1.style.background_gradient()._compute().ctx
+ ctx2 = df2.style.background_gradient()._compute().ctx
+ assert ctx2[(0, 0)] == ctx1[(0, 0)]
+ assert ctx2[(1, 0)] == ctx1[(1, 0)]
+ assert ctx2[(2, 0)] == ctx1[(2, 0)]
+
+
+@pytest.mark.parametrize(
+ "axis, gmap, expected",
+ [
+ (
+ 0,
+ [1, 2],
+ {
+ (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 0): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ 1,
+ [1, 2],
+ {
+ (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (0, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ None,
+ np.array([[2, 1], [1, 2]]),
+ {
+ (0, 0): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ },
+ ),
+ ],
+)
+def test_background_gradient_gmap_array(styler_blank, axis, gmap, expected):
+ # tests when gmap is given as a sequence and converted to ndarray
+ result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "gmap, axis", [([1, 2, 3], 0), ([1, 2], 1), (np.array([[1, 2], [1, 2]]), None)]
+)
+def test_background_gradient_gmap_array_raises(gmap, axis):
+ # test when gmap as converted ndarray is bad shape
+ df = DataFrame([[0, 0, 0], [0, 0, 0]])
+ msg = "supplied 'gmap' is not correct shape"
+ with pytest.raises(ValueError, match=msg):
+ df.style.background_gradient(gmap=gmap, axis=axis)._compute()
+
+
+@pytest.mark.parametrize(
+ "gmap",
+ [
+ DataFrame( # reverse the columns
+ [[2, 1], [1, 2]], columns=["B", "A"], index=["X", "Y"]
+ ),
+ DataFrame( # reverse the index
+ [[2, 1], [1, 2]], columns=["A", "B"], index=["Y", "X"]
+ ),
+ DataFrame( # reverse the index and columns
+ [[1, 2], [2, 1]], columns=["B", "A"], index=["Y", "X"]
+ ),
+ DataFrame( # add unnecessary columns
+ [[1, 2, 3], [2, 1, 3]], columns=["A", "B", "C"], index=["X", "Y"]
+ ),
+ DataFrame( # add unnecessary index
+ [[1, 2], [2, 1], [3, 3]], columns=["A", "B"], index=["X", "Y", "Z"]
+ ),
+ ],
+)
+@pytest.mark.parametrize(
+ "subset, exp_gmap", # exp_gmap is underlying map DataFrame should conform to
+ [
+ (None, [[1, 2], [2, 1]]),
+ (["A"], [[1], [2]]), # slice only column "A" in data and gmap
+ (["B", "A"], [[2, 1], [1, 2]]), # reverse the columns in data
+ (IndexSlice["X", :], [[1, 2]]), # slice only index "X" in data and gmap
+ (IndexSlice[["Y", "X"], :], [[2, 1], [1, 2]]), # reverse the index in data
+ ],
+)
+def test_background_gradient_gmap_dataframe_align(styler_blank, gmap, subset, exp_gmap):
+ # test gmap given as DataFrame that it aligns to the the data including subset
+ expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap, subset=subset)
+ result = styler_blank.background_gradient(axis=None, gmap=gmap, subset=subset)
+ assert expected._compute().ctx == result._compute().ctx
+
+
+@pytest.mark.parametrize(
+ "gmap, axis, exp_gmap",
+ [
+ (Series([2, 1], index=["Y", "X"]), 0, [[1, 1], [2, 2]]), # revrse the index
+ (Series([2, 1], index=["B", "A"]), 1, [[1, 2], [1, 2]]), # revrse the cols
+ (Series([1, 2, 3], index=["X", "Y", "Z"]), 0, [[1, 1], [2, 2]]), # add idx
+ (Series([1, 2, 3], index=["A", "B", "C"]), 1, [[1, 2], [1, 2]]), # add col
+ ],
+)
+def test_background_gradient_gmap_series_align(styler_blank, gmap, axis, exp_gmap):
+ # test gmap given as Series that it aligns to the the data including subset
+ expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap)._compute()
+ result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute()
+ assert expected.ctx == result.ctx
+
+
+@pytest.mark.parametrize(
+ "gmap, axis",
+ [
+ (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 1),
+ (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 0),
+ ],
+)
+def test_background_gradient_gmap_wrong_dataframe(styler_blank, gmap, axis):
+ # test giving a gmap in DataFrame but with wrong axis
+ msg = "'gmap' is a DataFrame but underlying data for operations is a Series"
+ with pytest.raises(ValueError, match=msg):
+ styler_blank.background_gradient(gmap=gmap, axis=axis)._compute()
+
+
+def test_background_gradient_gmap_wrong_series(styler_blank):
+ # test giving a gmap in Series form but with wrong axis
+ msg = "'gmap' is a Series but underlying data for operations is a DataFrame"
+ gmap = Series([1, 2], index=["X", "Y"])
+ with pytest.raises(ValueError, match=msg):
+ styler_blank.background_gradient(gmap=gmap, axis=None)._compute()
| This is a refactor to remove the pytest class setup structure, reverting to class-less fixtures.
It also parametrises some of the tests.
No test is altered or removed.
```
def test_background_gradient(styler):
def test_background_gradient_color(styler):
def test_background_gradient_axis(styler, axis, expected):
def test_text_color_threshold(cmap, expected):
def test_background_gradient_vmin_vmax():
def test_background_gradient_int64():
def test_background_gradient_gmap_array(styler_blank, axis, gmap, expected):
def test_background_gradient_gmap_array_raises(gmap, axis):
def test_background_gradient_gmap_dataframe_align(styler_blank, gmap, subset, exp_gmap):
def test_background_gradient_gmap_series_align(styler_blank, gmap, axis, exp_gmap):
def test_background_gradient_gmap_wrong_dataframe(styler_blank, gmap, axis):
def test_background_gradient_gmap_wrong_series(styler_blank):
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/40958 | 2021-04-15T07:26:55Z | 2021-04-15T17:49:00Z | 2021-04-15T17:49:00Z | 2021-04-15T21:55:44Z |
REF: move union_categoricals call outside of cython | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 153ac4b5f0893..8e7c927b7e2d5 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -94,7 +94,6 @@ from pandas._libs.khash cimport (
)
from pandas.errors import (
- DtypeWarning,
EmptyDataError,
ParserError,
ParserWarning,
@@ -108,9 +107,7 @@ from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
is_object_dtype,
- pandas_dtype,
)
-from pandas.core.dtypes.concat import union_categoricals
cdef:
float64_t INF = <float64_t>np.inf
@@ -492,12 +489,10 @@ cdef class TextReader:
raise ValueError(f'Unrecognized float_precision option: '
f'{float_precision}')
- if isinstance(dtype, dict):
- dtype = {k: pandas_dtype(dtype[k])
- for k in dtype}
- elif dtype is not None:
- dtype = pandas_dtype(dtype)
-
+ # Caller is responsible for ensuring we have one of
+ # - None
+ # - DtypeObj
+ # - dict[Any, DtypeObj]
self.dtype = dtype
# XXX
@@ -775,6 +770,8 @@ cdef class TextReader:
"""
if self.low_memory:
# Conserve intermediate space
+ # Caller is responsible for concatenating chunks,
+ # see c_parser_wrapper._concatenatve_chunks
columns = self._read_low_memory(rows)
else:
# Don't care about memory usage
@@ -818,8 +815,7 @@ cdef class TextReader:
if len(chunks) == 0:
raise StopIteration
- # destructive to chunks
- return _concatenate_chunks(chunks)
+ return chunks
cdef _tokenize_rows(self, size_t nrows):
cdef:
@@ -1907,49 +1903,6 @@ cdef raise_parser_error(object base, parser_t *parser):
raise ParserError(message)
-# chunks: list[dict[int, "ArrayLike"]]
-# -> dict[int, "ArrayLike"]
-def _concatenate_chunks(list chunks) -> dict:
- cdef:
- list names = list(chunks[0].keys())
- object name
- list warning_columns = []
- object warning_names
- object common_type
-
- result = {}
- for name in names:
- arrs = [chunk.pop(name) for chunk in chunks]
- # Check each arr for consistent types.
- dtypes = {a.dtype for a in arrs}
- numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)}
- if len(numpy_dtypes) > 1:
- common_type = np.find_common_type(numpy_dtypes, [])
- if common_type == object:
- warning_columns.append(str(name))
-
- dtype = dtypes.pop()
- if is_categorical_dtype(dtype):
- sort_categories = isinstance(dtype, str)
- result[name] = union_categoricals(arrs,
- sort_categories=sort_categories)
- else:
- if is_extension_array_dtype(dtype):
- array_type = dtype.construct_array_type()
- result[name] = array_type._concat_same_type(arrs)
- else:
- result[name] = np.concatenate(arrs)
-
- if warning_columns:
- warning_names = ','.join(warning_columns)
- warning_message = " ".join([
- f"Columns ({warning_names}) have mixed types."
- f"Specify dtype option on import or set low_memory=False."
- ])
- warnings.warn(warning_message, DtypeWarning, stacklevel=8)
- return result
-
-
# ----------------------------------------------------------------------
# NA values
def _compute_na_values():
diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py
index 8305ff64c42c6..b88ee3d3f479e 100644
--- a/pandas/io/parsers/c_parser_wrapper.py
+++ b/pandas/io/parsers/c_parser_wrapper.py
@@ -1,5 +1,19 @@
+from __future__ import annotations
+
+import warnings
+
+import numpy as np
+
import pandas._libs.parsers as parsers
from pandas._typing import FilePathOrBuffer
+from pandas.errors import DtypeWarning
+
+from pandas.core.dtypes.common import (
+ is_categorical_dtype,
+ is_extension_array_dtype,
+ pandas_dtype,
+)
+from pandas.core.dtypes.concat import union_categoricals
from pandas.core.indexes.api import ensure_index_from_sequences
@@ -47,6 +61,7 @@ def __init__(self, src: FilePathOrBuffer, **kwds):
# TextIOBase, TextIOWrapper, mmap]" has no attribute "mmap"
self.handles.handle = self.handles.handle.mmap # type: ignore[union-attr]
+ kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None))
try:
self._reader = parsers.TextReader(self.handles.handle, **kwds)
except Exception:
@@ -183,6 +198,10 @@ def read(self, nrows=None):
else:
self.close()
raise
+ else:
+ if self._reader.low_memory:
+ # destructive to data
+ data = _concatenate_chunks(data)
# Done with first read, next time raise StopIteration
self._first_chunk = False
@@ -265,7 +284,71 @@ def _get_index_names(self):
return names, idx_names
- def _maybe_parse_dates(self, values, index, try_parse_dates=True):
+ def _maybe_parse_dates(self, values, index: int, try_parse_dates=True):
if try_parse_dates and self._should_parse_dates(index):
values = self._date_conv(values)
return values
+
+
+def _concatenate_chunks(chunks: list[dict]) -> dict:
+ """
+ Concatenate chunks of data read with low_memory=True.
+
+ The tricky part is handling Categoricals, where different chunks
+ may have different inferred categories.
+ """
+ names = list(chunks[0].keys())
+ warning_columns = []
+
+ result = {}
+ for name in names:
+ arrs = [chunk.pop(name) for chunk in chunks]
+ # Check each arr for consistent types.
+ dtypes = {a.dtype for a in arrs}
+ # TODO: shouldn't we exclude all EA dtypes here?
+ numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)}
+ if len(numpy_dtypes) > 1:
+ # error: Argument 1 to "find_common_type" has incompatible type
+ # "Set[Any]"; expected "Sequence[Union[dtype[Any], None, type,
+ # _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any,
+ # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]]"
+ common_type = np.find_common_type(
+ numpy_dtypes, # type: ignore[arg-type]
+ [],
+ )
+ if common_type == object:
+ warning_columns.append(str(name))
+
+ dtype = dtypes.pop()
+ if is_categorical_dtype(dtype):
+ result[name] = union_categoricals(arrs, sort_categories=False)
+ else:
+ if is_extension_array_dtype(dtype):
+ # TODO: concat_compat?
+ array_type = dtype.construct_array_type()
+ result[name] = array_type._concat_same_type(arrs)
+ else:
+ result[name] = np.concatenate(arrs)
+
+ if warning_columns:
+ warning_names = ",".join(warning_columns)
+ warning_message = " ".join(
+ [
+ f"Columns ({warning_names}) have mixed types."
+ f"Specify dtype option on import or set low_memory=False."
+ ]
+ )
+ warnings.warn(warning_message, DtypeWarning, stacklevel=8)
+ return result
+
+
+def ensure_dtype_objs(dtype):
+ """
+ Ensure we have either None, a dtype object, or a dictionary mapping to
+ dtype objects.
+ """
+ if isinstance(dtype, dict):
+ dtype = {k: pandas_dtype(dtype[k]) for k in dtype}
+ elif dtype is not None:
+ dtype = pandas_dtype(dtype)
+ return dtype
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index 104cf56419bfd..7f84c5e378d16 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -21,6 +21,7 @@
TextFileReader,
read_csv,
)
+from pandas.io.parsers.c_parser_wrapper import ensure_dtype_objs
class TestTextReader:
@@ -206,6 +207,8 @@ def test_numpy_string_dtype(self):
aaaaa,5"""
def _make_reader(**kwds):
+ if "dtype" in kwds:
+ kwds["dtype"] = ensure_dtype_objs(kwds["dtype"])
return TextReader(StringIO(data), delimiter=",", header=None, **kwds)
reader = _make_reader(dtype="S5,i4")
@@ -233,6 +236,8 @@ def test_pass_dtype(self):
4,d"""
def _make_reader(**kwds):
+ if "dtype" in kwds:
+ kwds["dtype"] = ensure_dtype_objs(kwds["dtype"])
return TextReader(StringIO(data), delimiter=",", **kwds)
reader = _make_reader(dtype={"one": "u1", 1: "S1"})
| There's no real perf bump to calling union_categoricals inside cython, better to do it in the python code where we can e.g. get the benefit of mypy. Plus gets us closer to dependency structure goals. | https://api.github.com/repos/pandas-dev/pandas/pulls/40957 | 2021-04-15T04:20:35Z | 2021-04-15T15:41:29Z | null | 2021-04-15T15:41:29Z |
TYP: annotations | diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index b80a127be970d..4dc5e7516db7e 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -791,7 +791,8 @@ cdef class StringHashTable(HashTable):
raise KeyError(key)
@cython.boundscheck(False)
- def get_indexer(self, ndarray[object] values):
+ def get_indexer(self, ndarray[object] values) -> ndarray:
+ # -> np.ndarray[np.intp]
cdef:
Py_ssize_t i, n = len(values)
ndarray[intp_t] labels = np.empty(n, dtype=np.intp)
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi
index 477c9fd655a4a..4c647056641f5 100644
--- a/pandas/_libs/lib.pyi
+++ b/pandas/_libs/lib.pyi
@@ -4,6 +4,7 @@
from typing import (
Any,
Callable,
+ Generator,
)
import numpy as np
@@ -52,8 +53,7 @@ def is_bool_array(values: np.ndarray, skipna: bool = False): ...
def fast_multiget(mapping: dict, keys: np.ndarray, default=np.nan) -> ArrayLike: ...
-# TODO: gen: Generator?
-def fast_unique_multiple_list_gen(gen: object, sort: bool = True) -> list: ...
+def fast_unique_multiple_list_gen(gen: Generator, sort: bool = True) -> list: ...
def fast_unique_multiple_list(lists: list, sort: bool = True) -> list: ...
def fast_unique_multiple(arrays: list, sort: bool = True) -> list: ...
@@ -90,10 +90,9 @@ def infer_datetimelike_array(
arr: np.ndarray # np.ndarray[object]
) -> str: ...
-# TODO: new_dtype -> np.dtype?
def astype_intsafe(
arr: np.ndarray, # np.ndarray[object]
- new_dtype,
+ new_dtype: np.dtype,
) -> np.ndarray: ...
def fast_zip(ndarrays: list) -> np.ndarray: ... # np.ndarray[object]
@@ -134,15 +133,13 @@ def memory_usage_of_objects(
) -> int: ... # np.int64
-# TODO: f: Callable?
-# TODO: dtype -> DtypeObj?
def map_infer_mask(
arr: np.ndarray,
f: Callable[[Any], Any],
mask: np.ndarray, # const uint8_t[:]
convert: bool = ...,
na_value: Any = ...,
- dtype: Any = ...,
+ dtype: np.dtype = ...,
) -> ArrayLike: ...
def indices_fast(
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index e816bd4cd4026..a5ed650d72911 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -633,7 +633,7 @@ def array_equivalent_object(left: object[:], right: object[:]) -> bool:
@cython.wraparound(False)
@cython.boundscheck(False)
-def astype_intsafe(ndarray[object] arr, new_dtype) -> ndarray:
+def astype_intsafe(ndarray[object] arr, cnp.dtype new_dtype) -> ndarray:
cdef:
Py_ssize_t i, n = len(arr)
object val
@@ -661,7 +661,8 @@ cpdef ndarray[object] ensure_string_array(
bint copy=True,
bint skipna=True,
):
- """Returns a new numpy array with object dtype and only strings and na values.
+ """
+ Returns a new numpy array with object dtype and only strings and na values.
Parameters
----------
@@ -679,7 +680,7 @@ cpdef ndarray[object] ensure_string_array(
Returns
-------
- ndarray
+ np.ndarray[object]
An array with the input array's elements casted to str or nan-like.
"""
cdef:
@@ -2452,7 +2453,8 @@ no_default = NoDefault.no_default # Sentinel indicating the default value.
@cython.boundscheck(False)
@cython.wraparound(False)
def map_infer_mask(ndarray arr, object f, const uint8_t[:] mask, bint convert=True,
- object na_value=no_default, object dtype=object) -> "ArrayLike":
+ object na_value=no_default, cnp.dtype dtype=np.dtype(object)
+ ) -> "ArrayLike":
"""
Substitute for np.vectorize with pandas-friendly dtype inference.
@@ -2472,7 +2474,7 @@ def map_infer_mask(ndarray arr, object f, const uint8_t[:] mask, bint convert=Tr
Returns
-------
- ndarray
+ np.ndarray or ExtensionArray
"""
cdef:
Py_ssize_t i, n
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index d6ca38e57d2d8..4d55967c1e135 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -93,7 +93,7 @@ def build_field_sarray(const int64_t[:] dtindex):
return out
-def month_position_check(fields, weekdays):
+def month_position_check(fields, weekdays) -> str | None:
cdef:
int32_t daysinmonth, y, m, d
bint calendar_end = True
@@ -755,7 +755,7 @@ cdef inline ndarray[int64_t] _roundup_int64(values, int64_t unit):
return _floor_int64(values + unit // 2, unit)
-def round_nsint64(values: np.ndarray, mode: RoundTo, nanos) -> np.ndarray:
+def round_nsint64(values: np.ndarray, mode: RoundTo, nanos: int) -> np.ndarray:
"""
Applies rounding mode at given frequency
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 02731bd4fbbc1..5a2643dd531ed 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -609,7 +609,7 @@ def argsort(
Returns
-------
- ndarray
+ np.ndarray[np.intp]
Array of indices that sort ``self``. If NaN values are contained,
NaN values are placed at the end.
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index f2b5ad447a0cf..272cf19be559c 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1599,7 +1599,7 @@ def argsort(self, ascending=True, kind="quicksort", **kwargs):
Returns
-------
- numpy.array
+ np.ndarray[np.intp]
See Also
--------
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index ee68f5558a651..087ce415cc4ba 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -135,10 +135,10 @@ class TimedeltaArray(dtl.TimelikeOps):
# define my properties & methods for delegation
_other_ops: list[str] = []
_bool_ops: list[str] = []
- _object_ops = ["freq"]
- _field_ops = ["days", "seconds", "microseconds", "nanoseconds"]
- _datetimelike_ops = _field_ops + _object_ops + _bool_ops
- _datetimelike_methods = [
+ _object_ops: list[str] = ["freq"]
+ _field_ops: list[str] = ["days", "seconds", "microseconds", "nanoseconds"]
+ _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops
+ _datetimelike_methods: list[str] = [
"to_pytimedelta",
"total_seconds",
"round",
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py
index 5656323b82fb7..f56e13775460b 100644
--- a/pandas/core/indexes/api.py
+++ b/pandas/core/indexes/api.py
@@ -164,7 +164,7 @@ def _get_combined_index(
return index
-def union_indexes(indexes, sort=True) -> Index:
+def union_indexes(indexes, sort: bool = True) -> Index:
"""
Return the union of indexes.
@@ -273,7 +273,7 @@ def _sanitize_and_check(indexes):
return indexes, "array"
-def all_indexes_same(indexes):
+def all_indexes_same(indexes) -> bool:
"""
Determine if all indexes contain the same elements.
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c79518702169a..310ee4c3a63e3 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -215,7 +215,7 @@ def join(
return cast(F, join)
-def disallow_kwargs(kwargs: dict[str, Any]):
+def disallow_kwargs(kwargs: dict[str, Any]) -> None:
if kwargs:
raise TypeError(f"Unexpected keyword arguments {repr(set(kwargs))}")
@@ -626,7 +626,7 @@ def _maybe_check_unique(self) -> None:
raise DuplicateLabelError(msg)
@final
- def _format_duplicate_message(self):
+ def _format_duplicate_message(self) -> DataFrame:
"""
Construct the DataFrame for a DuplicateLabelError.
@@ -789,7 +789,7 @@ def __array_wrap__(self, result, context=None):
return Index(result, **attrs)
@cache_readonly
- def dtype(self):
+ def dtype(self) -> DtypeObj:
"""
Return the dtype object of the underlying data.
"""
@@ -1064,11 +1064,11 @@ def copy(
return new_index
@final
- def __copy__(self, **kwargs):
+ def __copy__(self: _IndexT, **kwargs) -> _IndexT:
return self.copy(**kwargs)
@final
- def __deepcopy__(self, memo=None):
+ def __deepcopy__(self: _IndexT, memo=None) -> _IndexT:
"""
Parameters
----------
@@ -1354,7 +1354,7 @@ def to_series(self, index=None, name: Hashable = None) -> Series:
return Series(self._values.copy(), index=index, name=name)
- def to_frame(self, index: bool = True, name=None) -> DataFrame:
+ def to_frame(self, index: bool = True, name: Hashable = None) -> DataFrame:
"""
Create a DataFrame with a column containing the Index.
@@ -1426,7 +1426,7 @@ def name(self):
return self._name
@name.setter
- def name(self, value):
+ def name(self, value: Hashable):
if self._no_setting_name:
# Used in MultiIndex.levels to avoid silently ignoring name updates.
raise RuntimeError(
@@ -2367,7 +2367,7 @@ def _is_all_dates(self) -> bool:
@cache_readonly
@final
- def is_all_dates(self):
+ def is_all_dates(self) -> bool:
"""
Whether or not the index values only consist of dates.
"""
@@ -3380,7 +3380,7 @@ def get_loc(self, key, method=None, tolerance=None):
Returns
-------
- indexer : ndarray of int
+ indexer : np.ndarray[np.intp]
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
@@ -4610,7 +4610,7 @@ def _can_hold_identifiers_and_holds_name(self, name) -> bool:
return name in self
return False
- def append(self, other) -> Index:
+ def append(self, other: Index | Sequence[Index]) -> Index:
"""
Append a collection of Index options together.
@@ -4627,7 +4627,9 @@ def append(self, other) -> Index:
if isinstance(other, (list, tuple)):
to_concat += list(other)
else:
- to_concat.append(other)
+ # error: Argument 1 to "append" of "list" has incompatible type
+ # "Union[Index, Sequence[Index]]"; expected "Index"
+ to_concat.append(other) # type: ignore[arg-type]
for obj in to_concat:
if not isinstance(obj, Index):
@@ -5181,11 +5183,11 @@ def set_value(self, arr, key, value):
Returns
-------
- indexer : ndarray of int
+ indexer : np.ndarray[np.intp]
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
- missing : ndarray of int
+ missing : np.ndarray[np.intp]
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array.
"""
@@ -5227,7 +5229,7 @@ def get_indexer_for(self, target, **kwargs) -> np.ndarray:
Returns
-------
- numpy.ndarray
+ np.ndarray[np.intp]
List of indices.
"""
if self._index_as_unique:
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 724caebd69c23..5b98b956e33e6 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -457,8 +457,8 @@ def reindex(
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
- new_target = Categorical(new_target, dtype=target.dtype)
- new_target = type(self)._simple_new(new_target, name=self.name)
+ cat = Categorical(new_target, dtype=target.dtype)
+ new_target = type(self)._simple_new(cat, name=self.name)
else:
new_target = Index(new_target, name=self.name)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 9f02196466ebf..f77f28deecf57 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -391,7 +391,7 @@ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
# --------------------------------------------------------------------
# Rendering Methods
- def _mpl_repr(self):
+ def _mpl_repr(self) -> np.ndarray:
# how to represent ourselves to matplotlib
return ints_to_pydatetime(self.asi8, self.tz)
@@ -448,7 +448,7 @@ def _maybe_utc_convert(self, other: Index) -> tuple[DatetimeIndex, Index]:
# --------------------------------------------------------------------
- def _get_time_micros(self):
+ def _get_time_micros(self) -> np.ndarray:
"""
Return the number of microseconds since midnight.
@@ -541,7 +541,7 @@ def to_series(self, keep_tz=lib.no_default, index=None, name=None):
return Series(values, index=index, name=name)
- def snap(self, freq="S"):
+ def snap(self, freq="S") -> DatetimeIndex:
"""
Snap time stamps to nearest occurring frequency.
@@ -891,7 +891,7 @@ def indexer_at_time(self, time, asof: bool = False) -> np.ndarray:
else:
time_micros = self._get_time_micros()
micros = _time_to_micros(time)
- return (micros == time_micros).nonzero()[0]
+ return (time_micros == micros).nonzero()[0]
def indexer_between_time(
self, start_time, end_time, include_start: bool = True, include_end: bool = True
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index f7ab09e4f176f..171ab57264f85 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -390,7 +390,7 @@ def from_tuples(
# --------------------------------------------------------------------
@cache_readonly
- def _engine(self):
+ def _engine(self) -> IntervalTree:
left = self._maybe_convert_i8(self.left)
right = self._maybe_convert_i8(self.right)
return IntervalTree(left, right, closed=self.closed)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 5b4f3e1bb9e09..59ff128713aca 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2673,6 +2673,7 @@ def _get_indexer(
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
+ # returned ndarray is np.intp
# empty indexer
if not len(target):
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 8e8c67927c20f..1e974063bd839 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -249,7 +249,7 @@ def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[st
)
@property
- def start(self):
+ def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
@@ -257,7 +257,7 @@ def start(self):
return self._range.start
@property
- def _start(self):
+ def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
@@ -272,14 +272,14 @@ def _start(self):
return self.start
@property
- def stop(self):
+ def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
- def _stop(self):
+ def _stop(self) -> int:
"""
The value of the `stop` parameter.
@@ -295,7 +295,7 @@ def _stop(self):
return self.stop
@property
- def step(self):
+ def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
@@ -303,7 +303,7 @@ def step(self):
return self._range.step
@property
- def _step(self):
+ def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
@@ -405,6 +405,7 @@ def _get_indexer(
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
+ # -> np.ndarray[np.intp]
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
@@ -522,7 +523,7 @@ def argsort(self, *args, **kwargs) -> np.ndarray:
Returns
-------
- argsorted : numpy array
+ np.ndarray[np.intp]
See Also
--------
@@ -532,9 +533,9 @@ def argsort(self, *args, **kwargs) -> np.ndarray:
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
- result = np.arange(len(self))
+ result = np.arange(len(self), dtype=np.intp)
else:
- result = np.arange(len(self) - 1, -1, -1)
+ result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)
if not ascending:
result = result[::-1]
@@ -759,7 +760,7 @@ def symmetric_difference(self, other, result_name: Hashable = None, sort=None):
# --------------------------------------------------------------------
- def _concat(self, indexes, name: Hashable):
+ def _concat(self, indexes: list[Index], name: Hashable):
"""
Overriding parent method for the case of all RangeIndex instances.
@@ -780,7 +781,8 @@ def _concat(self, indexes, name: Hashable):
non_empty_indexes = [obj for obj in indexes if len(obj)]
for obj in non_empty_indexes:
- rng: range = obj._range
+ # error: "Index" has no attribute "_range"
+ rng: range = obj._range # type: ignore[attr-defined]
if start is None:
# This is set by the first non-empty index
@@ -808,7 +810,12 @@ def _concat(self, indexes, name: Hashable):
if non_empty_indexes:
# Get the stop value from "next" or alternatively
# from the last non-empty index
- stop = non_empty_indexes[-1].stop if next_ is None else next_
+ # error: "Index" has no attribute "stop"
+ stop = (
+ non_empty_indexes[-1].stop # type: ignore[attr-defined]
+ if next_ is None
+ else next_
+ )
return RangeIndex(start, stop, step).rename(name)
# Here all "indexes" had 0 length, i.e. were empty.
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5c605a6b441c6..fac87515c7d96 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2756,13 +2756,15 @@ def __rmatmul__(self, other):
return self.dot(np.transpose(other))
@doc(base.IndexOpsMixin.searchsorted, klass="Series")
- def searchsorted(self, value, side="left", sorter=None):
+ def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
# -------------------------------------------------------------------
# Combination
- def append(self, to_append, ignore_index=False, verify_integrity=False):
+ def append(
+ self, to_append, ignore_index: bool = False, verify_integrity: bool = False
+ ):
"""
Concatenate two or more Series.
@@ -2846,7 +2848,7 @@ def append(self, to_append, ignore_index=False, verify_integrity=False):
to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity
)
- def _binop(self, other, func, level=None, fill_value=None):
+ def _binop(self, other: Series, func, level=None, fill_value=None):
"""
Perform generic binary operation with optional fill value.
@@ -3609,7 +3611,7 @@ def argsort(self, axis=0, kind="quicksort", order=None) -> Series:
Returns
-------
- Series
+ Series[np.intp]
Positions of values within the sort order with -1 indicating
nan values.
@@ -3730,7 +3732,7 @@ def nlargest(self, n=5, keep="first") -> Series:
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
- def nsmallest(self, n=5, keep="first") -> Series:
+ def nsmallest(self, n: int = 5, keep: str = "first") -> Series:
"""
Return the smallest `n` elements.
@@ -3942,7 +3944,7 @@ def explode(self, ignore_index: bool = False) -> Series:
return self._constructor(values, index=index, name=self.name)
- def unstack(self, level=-1, fill_value=None):
+ def unstack(self, level=-1, fill_value=None) -> DataFrame:
"""
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
@@ -4294,7 +4296,11 @@ def _reduce(
with np.errstate(all="ignore"):
return op(delegate, skipna=skipna, **kwds)
- def _reindex_indexer(self, new_index, indexer, copy):
+ def _reindex_indexer(
+ self, new_index: Index | None, indexer: np.ndarray | None, copy: bool
+ ) -> Series:
+ # Note: new_index is None iff indexer is None
+ # if not None, indexer is np.intp
if indexer is None:
if copy:
return self.copy()
| getting close to ... who am i kidding, no we're not. | https://api.github.com/repos/pandas-dev/pandas/pulls/40955 | 2021-04-14T22:38:27Z | 2021-04-16T19:08:33Z | 2021-04-16T19:08:33Z | 2021-04-16T21:22:12Z |
Add keyword sort to pivot_table | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 38a1802340c69..afe6a3296f04d 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -219,6 +219,7 @@ Other enhancements
- :meth:`pandas.read_csv` and :meth:`pandas.read_json` expose the argument ``encoding_errors`` to control how encoding errors are handled (:issue:`39450`)
- :meth:`.GroupBy.any` and :meth:`.GroupBy.all` use Kleene logic with nullable data types (:issue:`37506`)
- :meth:`.GroupBy.any` and :meth:`.GroupBy.all` return a ``BooleanDtype`` for columns with nullable data types (:issue:`33449`)
+- Add keyword ``sort`` to :func:`pivot_table` to allow non-sorting of the result (:issue:`39143`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 045776c3f5c50..7224a055fc148 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7664,6 +7664,11 @@ def pivot(self, index=None, columns=None, values=None) -> DataFrame:
.. versionchanged:: 0.25.0
+ sort : bool, default True
+ Specifies if the result should be sorted.
+
+ .. versionadded:: 1.3.0
+
Returns
-------
DataFrame
@@ -7767,6 +7772,7 @@ def pivot_table(
dropna=True,
margins_name="All",
observed=False,
+ sort=True,
) -> DataFrame:
from pandas.core.reshape.pivot import pivot_table
@@ -7781,6 +7787,7 @@ def pivot_table(
dropna=dropna,
margins_name=margins_name,
observed=observed,
+ sort=sort,
)
def stack(self, level: Level = -1, dropna: bool = True):
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 795f5250012cb..e324534e0433f 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -64,6 +64,7 @@ def pivot_table(
dropna=True,
margins_name="All",
observed=False,
+ sort=True,
) -> DataFrame:
index = _convert_by(index)
columns = _convert_by(columns)
@@ -83,6 +84,7 @@ def pivot_table(
dropna=dropna,
margins_name=margins_name,
observed=observed,
+ sort=sort,
)
pieces.append(_table)
keys.append(getattr(func, "__name__", func))
@@ -101,6 +103,7 @@ def pivot_table(
dropna,
margins_name,
observed,
+ sort,
)
return table.__finalize__(data, method="pivot_table")
@@ -116,6 +119,7 @@ def __internal_pivot_table(
dropna: bool,
margins_name: str,
observed: bool,
+ sort: bool,
) -> DataFrame:
"""
Helper of :func:`pandas.pivot_table` for any non-list ``aggfunc``.
@@ -157,7 +161,7 @@ def __internal_pivot_table(
pass
values = list(values)
- grouped = data.groupby(keys, observed=observed)
+ grouped = data.groupby(keys, observed=observed, sort=sort)
agged = grouped.agg(aggfunc)
if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
agged = agged.dropna(how="all")
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 20aa0c9e2ee9a..3d1c3b81c492f 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -2115,6 +2115,28 @@ def test_pivot_table_doctest_case(self):
expected = DataFrame(vals, columns=cols, index=index)
tm.assert_frame_equal(table, expected)
+ def test_pivot_table_sort_false(self):
+ # GH#39143
+ df = DataFrame(
+ {
+ "a": ["d1", "d4", "d3"],
+ "col": ["a", "b", "c"],
+ "num": [23, 21, 34],
+ "year": ["2018", "2018", "2019"],
+ }
+ )
+ result = df.pivot_table(
+ index=["a", "col"], columns="year", values="num", aggfunc="sum", sort=False
+ )
+ expected = DataFrame(
+ [[23, np.nan], [21, np.nan], [np.nan, 34]],
+ columns=Index(["2018", "2019"], name="year"),
+ index=MultiIndex.from_arrays(
+ [["d1", "d4", "d3"], ["a", "b", "c"]], names=["a", "col"]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
class TestPivot:
def test_pivot(self):
| - [x] closes #39143
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
This adds support for sort=False to pivot_table if we want to do this | https://api.github.com/repos/pandas-dev/pandas/pulls/40954 | 2021-04-14T21:14:15Z | 2021-04-20T23:23:08Z | 2021-04-20T23:23:07Z | 2021-04-21T20:40:35Z |
Clarify docs for MultiIndex drops and levels | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 38766d2856cfe..f9ad737bff46c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4749,7 +4749,8 @@ def drop(
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
- the level.
+ the level. See the `user guide <advanced.shown_levels>`
+ for more information about the now unused levels.
Parameters
----------
| - [x] closes #36227
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
I did something like this in the past but this got lost back then between prs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/40953 | 2021-04-14T20:29:23Z | 2021-04-20T22:50:55Z | 2021-04-20T22:50:55Z | 2021-04-21T20:40:19Z |
BUG: various groupby ewm times issues | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 38a1802340c69..065390820d7e6 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -787,6 +787,9 @@ Groupby/resample/rolling
- Bug in :class:`core.window.ewm.ExponentialMovingWindow` when calling ``__getitem__`` would incorrectly raise a ``ValueError`` when providing ``times`` (:issue:`40164`)
- Bug in :class:`core.window.ewm.ExponentialMovingWindow` when calling ``__getitem__`` would not retain ``com``, ``span``, ``alpha`` or ``halflife`` attributes (:issue:`40164`)
- :class:`core.window.ewm.ExponentialMovingWindow` now raises a ``NotImplementedError`` when specifying ``times`` with ``adjust=False`` due to an incorrect calculation (:issue:`40098`)
+- Bug in :meth:`core.window.ewm.ExponentialMovingWindowGroupby.mean` where the times argument was ignored when ``engine='numba'`` (:issue:`40951`)
+- Bug in :meth:`core.window.ewm.ExponentialMovingWindowGroupby.mean` where the wrong times were used in case of multiple groups (:issue:`40951`)
+- Bug in :class:`core.window.ewm.ExponentialMovingWindowGroupby` where the times vector and values became out of sync for non-trivial groups (:issue:`40951`)
- Bug in :meth:`Series.asfreq` and :meth:`DataFrame.asfreq` dropping rows when the index is not sorted (:issue:`39805`)
- Bug in aggregation functions for :class:`DataFrame` not respecting ``numeric_only`` argument when ``level`` keyword was given (:issue:`40660`)
- Bug in :class:`core.window.RollingGroupby` where ``as_index=False`` argument in ``groupby`` was ignored (:issue:`39433`)
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 46041b6a37a17..8c8629ad6f032 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -1485,8 +1485,7 @@ def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
com : float64
adjust : bool
ignore_na : bool
- times : ndarray (float64 type)
- halflife : float64
+ deltas : ndarray (float64 type)
Returns
-------
@@ -1495,7 +1494,7 @@ def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
cdef:
Py_ssize_t i, j, s, e, nobs, win_size, N = len(vals), M = len(start)
- const float64_t[:] sub_vals
+ const float64_t[:] sub_deltas, sub_vals
ndarray[float64_t] sub_output, output = np.empty(N, dtype=float)
float64_t alpha, old_wt_factor, new_wt, weighted_avg, old_wt, cur
bint is_observation
@@ -1511,6 +1510,9 @@ def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
s = start[j]
e = end[j]
sub_vals = vals[s:e]
+ # note that len(deltas) = len(vals) - 1 and deltas[i] is to be used in
+ # conjunction with vals[i+1]
+ sub_deltas = deltas[s:e - 1]
win_size = len(sub_vals)
sub_output = np.empty(win_size, dtype=float)
@@ -1528,7 +1530,7 @@ def ewma(const float64_t[:] vals, const int64_t[:] start, const int64_t[:] end,
if weighted_avg == weighted_avg:
if is_observation or not ignore_na:
- old_wt *= old_wt_factor ** deltas[i - 1]
+ old_wt *= old_wt_factor ** sub_deltas[i - 1]
if is_observation:
# avoid numerical errors on constant series
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 67bcdb0a387dd..eee9cb3976e39 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -78,6 +78,38 @@ def get_center_of_mass(
return float(comass)
+def _calculate_deltas(
+ times: str | np.ndarray | FrameOrSeries | None,
+ halflife: float | TimedeltaConvertibleTypes | None,
+) -> np.ndarray:
+ """
+ Return the diff of the times divided by the half-life. These values are used in
+ the calculation of the ewm mean.
+
+ Parameters
+ ----------
+ times : str, np.ndarray, Series, default None
+ Times corresponding to the observations. Must be monotonically increasing
+ and ``datetime64[ns]`` dtype.
+ halflife : float, str, timedelta, optional
+ Half-life specifying the decay
+
+ Returns
+ -------
+ np.ndarray
+ Diff of the times divided by the half-life
+ """
+ # error: Item "str" of "Union[str, ndarray, FrameOrSeries, None]" has no
+ # attribute "view"
+ # error: Item "None" of "Union[str, ndarray, FrameOrSeries, None]" has no
+ # attribute "view"
+ _times = np.asarray(
+ times.view(np.int64), dtype=np.float64 # type: ignore[union-attr]
+ )
+ _halflife = float(Timedelta(halflife).value)
+ return np.diff(_times) / _halflife
+
+
class ExponentialMovingWindow(BaseWindow):
r"""
Provide exponential weighted (EW) functions.
@@ -268,15 +300,7 @@ def __init__(
)
if isna(self.times).any():
raise ValueError("Cannot convert NaT values to integer")
- # error: Item "str" of "Union[str, ndarray, FrameOrSeries, None]" has no
- # attribute "view"
- # error: Item "None" of "Union[str, ndarray, FrameOrSeries, None]" has no
- # attribute "view"
- _times = np.asarray(
- self.times.view(np.int64), dtype=np.float64 # type: ignore[union-attr]
- )
- _halflife = float(Timedelta(self.halflife).value)
- self._deltas = np.diff(_times) / _halflife
+ self._deltas = _calculate_deltas(self.times, self.halflife)
# Halflife is no longer applicable when calculating COM
# But allow COM to still be calculated if the user passes other decay args
if common.count_not_none(self.com, self.span, self.alpha) > 0:
@@ -585,6 +609,17 @@ class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow)
_attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes
+ def __init__(self, obj, *args, _grouper=None, **kwargs):
+ super().__init__(obj, *args, _grouper=_grouper, **kwargs)
+
+ if not obj.empty and self.times is not None:
+ # sort the times and recalculate the deltas according to the groups
+ groupby_order = np.concatenate(list(self._grouper.indices.values()))
+ self._deltas = _calculate_deltas(
+ self.times.take(groupby_order), # type: ignore[union-attr]
+ self.halflife,
+ )
+
def _get_window_indexer(self) -> GroupbyIndexer:
"""
Return an indexer class that will compute the window start and end bounds
@@ -628,10 +663,7 @@ def mean(self, engine=None, engine_kwargs=None):
"""
if maybe_use_numba(engine):
groupby_ewma_func = generate_numba_groupby_ewma_func(
- engine_kwargs,
- self._com,
- self.adjust,
- self.ignore_na,
+ engine_kwargs, self._com, self.adjust, self.ignore_na, self._deltas
)
return self._apply(
groupby_ewma_func,
diff --git a/pandas/core/window/numba_.py b/pandas/core/window/numba_.py
index c9107c8ed0aa7..d84dea7ee622c 100644
--- a/pandas/core/window/numba_.py
+++ b/pandas/core/window/numba_.py
@@ -85,6 +85,7 @@ def generate_numba_groupby_ewma_func(
com: float,
adjust: bool,
ignore_na: bool,
+ deltas: np.ndarray,
):
"""
Generate a numba jitted groupby ewma function specified by values
@@ -97,6 +98,7 @@ def generate_numba_groupby_ewma_func(
com : float
adjust : bool
ignore_na : bool
+ deltas : numpy.ndarray
Returns
-------
@@ -141,7 +143,9 @@ def groupby_ewma(
if is_observation or not ignore_na:
- old_wt *= old_wt_factor
+ # note that len(deltas) = len(vals) - 1 and deltas[i] is to be
+ # used in conjunction with vals[i+1]
+ old_wt *= old_wt_factor ** deltas[start + j - 1]
if is_observation:
# avoid numerical errors on constant series
diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py
index d394a4b2be548..b1f1bb7086149 100644
--- a/pandas/tests/window/conftest.py
+++ b/pandas/tests/window/conftest.py
@@ -13,6 +13,7 @@
Series,
bdate_range,
notna,
+ to_datetime,
)
@@ -302,6 +303,31 @@ def frame():
)
+@pytest.fixture
+def times_frame():
+ """Frame for testing times argument in EWM groupby."""
+ return DataFrame(
+ {
+ "A": ["a", "b", "c", "a", "b", "c", "a", "b", "c", "a"],
+ "B": [0, 0, 0, 1, 1, 1, 2, 2, 2, 3],
+ "C": to_datetime(
+ [
+ "2020-01-01",
+ "2020-01-01",
+ "2020-01-01",
+ "2020-01-02",
+ "2020-01-10",
+ "2020-01-22",
+ "2020-01-03",
+ "2020-01-23",
+ "2020-01-23",
+ "2020-01-04",
+ ]
+ ),
+ }
+ )
+
+
@pytest.fixture
def series():
"""Make mocked series as fixture."""
diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py
index 51a6288598c32..5d7fc50620ef8 100644
--- a/pandas/tests/window/test_groupby.py
+++ b/pandas/tests/window/test_groupby.py
@@ -926,3 +926,63 @@ def test_pairwise_methods(self, method, expected_data):
expected = df.groupby("A").apply(lambda x: getattr(x.ewm(com=1.0), method)())
tm.assert_frame_equal(result, expected)
+
+ def test_times(self, times_frame):
+ # GH 40951
+ halflife = "23 days"
+ result = times_frame.groupby("A").ewm(halflife=halflife, times="C").mean()
+ expected = DataFrame(
+ {
+ "B": [
+ 0.0,
+ 0.507534,
+ 1.020088,
+ 1.537661,
+ 0.0,
+ 0.567395,
+ 1.221209,
+ 0.0,
+ 0.653141,
+ 1.195003,
+ ]
+ },
+ index=MultiIndex.from_tuples(
+ [
+ ("a", 0),
+ ("a", 3),
+ ("a", 6),
+ ("a", 9),
+ ("b", 1),
+ ("b", 4),
+ ("b", 7),
+ ("c", 2),
+ ("c", 5),
+ ("c", 8),
+ ],
+ names=["A", None],
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_times_vs_apply(self, times_frame):
+ # GH 40951
+ halflife = "23 days"
+ result = times_frame.groupby("A").ewm(halflife=halflife, times="C").mean()
+ expected = (
+ times_frame.groupby("A")
+ .apply(lambda x: x.ewm(halflife=halflife, times="C").mean())
+ .iloc[[0, 3, 6, 9, 1, 4, 7, 2, 5, 8]]
+ .reset_index(drop=True)
+ )
+ tm.assert_frame_equal(result.reset_index(drop=True), expected)
+
+ def test_times_array(self, times_frame):
+ # GH 40951
+ halflife = "23 days"
+ result = times_frame.groupby("A").ewm(halflife=halflife, times="C").mean()
+ expected = (
+ times_frame.groupby("A")
+ .ewm(halflife=halflife, times=times_frame["C"].values)
+ .mean()
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index f64d242a4e820..06b34201e0dba 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -8,6 +8,7 @@
DataFrame,
Series,
option_context,
+ to_datetime,
)
import pandas._testing as tm
from pandas.core.util.numba_ import NUMBA_FUNC_CACHE
@@ -145,6 +146,30 @@ def test_cython_vs_numba(self, nogil, parallel, nopython, ignore_na, adjust):
tm.assert_frame_equal(result, expected)
+ def test_cython_vs_numba_times(self, nogil, parallel, nopython, ignore_na):
+ # GH 40951
+ halflife = "23 days"
+ times = to_datetime(
+ [
+ "2020-01-01",
+ "2020-01-01",
+ "2020-01-02",
+ "2020-01-10",
+ "2020-02-23",
+ "2020-01-03",
+ ]
+ )
+ df = DataFrame({"A": ["a", "b", "a", "b", "b", "a"], "B": [0, 0, 1, 1, 2, 2]})
+ gb_ewm = df.groupby("A").ewm(
+ halflife=halflife, adjust=True, ignore_na=ignore_na, times=times
+ )
+
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
+ result = gb_ewm.mean(engine="numba", engine_kwargs=engine_kwargs)
+ expected = gb_ewm.mean(engine="cython")
+
+ tm.assert_frame_equal(result, expected)
+
@td.skip_if_no("numba", "0.46.0")
def test_use_global_config():
| - [x] closes #40951
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40952 | 2021-04-14T19:55:40Z | 2021-04-16T18:42:55Z | 2021-04-16T18:42:55Z | 2021-04-16T18:43:53Z |
TYP: Index.reindex | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 705a279638097..c79518702169a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3762,7 +3762,9 @@ def _validate_can_reindex(self, indexer: np.ndarray) -> None:
if not self._index_as_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
- def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
+ def reindex(
+ self, target, method=None, level=None, limit=None, tolerance=None
+ ) -> tuple[Index, np.ndarray | None]:
"""
Create index with target's values.
@@ -3774,7 +3776,7 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
-------
new_index : pd.Index
Resulting index.
- indexer : np.ndarray or None
+ indexer : np.ndarray[np.intp] or None
Indices of output values in original index.
"""
# GH6552: preserve names when reindexing to non-named target
@@ -3815,7 +3817,9 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
return target, indexer
- def _reindex_non_unique(self, target):
+ def _reindex_non_unique(
+ self, target: Index
+ ) -> tuple[Index, np.ndarray, np.ndarray | None]:
"""
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target.
@@ -3828,8 +3832,9 @@ def _reindex_non_unique(self, target):
-------
new_index : pd.Index
Resulting index.
- indexer : np.ndarray or None
+ indexer : np.ndarray[np.intp]
Indices of output values in original index.
+ new_indexer : np.ndarray[np.intp] or None
"""
target = ensure_index(target)
@@ -3858,13 +3863,13 @@ def _reindex_non_unique(self, target):
# GH#38906
if not len(self):
- new_indexer = np.arange(0)
+ new_indexer = np.arange(0, dtype=np.intp)
# a unique indexer
elif target.is_unique:
# see GH5553, make sure we use the right indexer
- new_indexer = np.arange(len(indexer))
+ new_indexer = np.arange(len(indexer), dtype=np.intp)
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
@@ -3876,7 +3881,7 @@ def _reindex_non_unique(self, target):
indexer[~check] = -1
# reset the new indexer to account for the new size
- new_indexer = np.arange(len(self.take(indexer)))
+ new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp)
new_indexer[~check] = -1
if isinstance(self, ABCMultiIndex):
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index b5089621313b8..724caebd69c23 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -395,7 +395,9 @@ def unique(self, level=None):
# of result, not self.
return type(self)._simple_new(result, name=self.name)
- def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
+ def reindex(
+ self, target, method=None, level=None, limit=None, tolerance=None
+ ) -> tuple[Index, np.ndarray | None]:
"""
Create index with target's values (move/add/delete values as necessary)
@@ -403,7 +405,7 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
-------
new_index : pd.Index
Resulting index
- indexer : np.ndarray or None
+ indexer : np.ndarray[np.intp] or None
Indices of output values in original index
"""
@@ -440,7 +442,7 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
if not isinstance(cats, CategoricalIndex) or (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
- new_target, indexer, _ = result._reindex_non_unique(np.array(target))
+ new_target, indexer, _ = result._reindex_non_unique(target)
else:
codes = new_target.codes.copy()
@@ -462,25 +464,34 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
return new_target, indexer
- def _reindex_non_unique(self, target):
+ # error: Return type "Tuple[Index, Optional[ndarray], Optional[ndarray]]"
+ # of "_reindex_non_unique" incompatible with return type
+ # "Tuple[Index, ndarray, Optional[ndarray]]" in supertype "Index"
+ def _reindex_non_unique( # type: ignore[override]
+ self, target: Index
+ ) -> tuple[Index, np.ndarray | None, np.ndarray | None]:
"""
reindex from a non-unique; which CategoricalIndex's are almost
always
"""
+ # TODO: rule out `indexer is None` here to make the signature
+ # match the parent class's signature. This should be equivalent
+ # to ruling out `self.equals(target)`
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
- if check.any():
- new_indexer = np.arange(len(self.take(indexer)))
+ # error: Item "bool" of "Union[Any, bool]" has no attribute "any"
+ if check.any(): # type: ignore[union-attr]
+ new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp)
new_indexer[check] = -1
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
- new_target = Categorical(new_target, dtype=self.dtype)
- new_target = type(self)._simple_new(new_target, name=self.name)
+ cat = Categorical(new_target, dtype=self.dtype)
+ new_target = type(self)._simple_new(cat, name=self.name)
return new_target, indexer, new_indexer
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 3305610a4022e..5b4f3e1bb9e09 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2503,7 +2503,9 @@ def sortlevel(
return new_index, indexer
- def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
+ def reindex(
+ self, target, method=None, level=None, limit=None, tolerance=None
+ ) -> tuple[MultiIndex, np.ndarray | None]:
"""
Create index with target's values (move/add/delete values as necessary)
@@ -2511,7 +2513,7 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
-------
new_index : pd.MultiIndex
Resulting index
- indexer : np.ndarray or None
+ indexer : np.ndarray[np.intp] or None
Indices of output values in original index.
"""
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40950 | 2021-04-14T19:15:25Z | 2021-04-16T01:07:49Z | 2021-04-16T01:07:49Z | 2021-04-16T01:15:17Z |
TYP use typeddict to define cssdict | diff --git a/pandas/_typing.py b/pandas/_typing.py
index 9f23fcc56597f..a58dc0dba1bf1 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -36,7 +36,10 @@
# and use a string literal forward reference to it in subsequent types
# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
if TYPE_CHECKING:
- from typing import final
+ from typing import (
+ TypedDict,
+ final,
+ )
from pandas._libs import (
Period,
@@ -70,6 +73,8 @@
else:
# typing.final does not exist until py38
final = lambda x: x
+ # typing.TypedDict does not exist until py38
+ TypedDict = dict
# array-like
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index 15557c993eab4..776cedcf11592 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -12,7 +12,6 @@
Sequence,
Tuple,
Union,
- cast,
)
from uuid import uuid4
@@ -21,7 +20,10 @@
from pandas._config import get_option
from pandas._libs import lib
-from pandas._typing import FrameOrSeriesUnion
+from pandas._typing import (
+ FrameOrSeriesUnion,
+ TypedDict,
+)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.generic import ABCSeries
@@ -45,10 +47,14 @@
CSSPair = Tuple[str, Union[str, int, float]]
CSSList = List[CSSPair]
CSSProperties = Union[str, CSSList]
-CSSStyles = List[Dict[str, CSSProperties]] # = List[CSSDict]
-# class CSSDict(TypedDict): # available when TypedDict is valid in pandas
-# selector: str
-# props: CSSProperties
+
+
+class CSSDict(TypedDict):
+ selector: str
+ props: CSSProperties
+
+
+CSSStyles = List[CSSDict]
class StylerRenderer:
@@ -615,15 +621,9 @@ def _format_table_styles(styles: CSSStyles) -> CSSStyles:
{'selector': 'th', 'props': 'a:v;'}]
"""
return [
- item
- for sublist in [
- [ # this is a CSSDict when TypedDict is available to avoid cast.
- {"selector": x, "props": style["props"]}
- for x in cast(str, style["selector"]).split(",")
- ]
- for style in styles
- ]
- for item in sublist
+ {"selector": selector, "props": css_dict["props"]}
+ for css_dict in styles
+ for selector in css_dict["selector"].split(",")
]
| xref https://github.com/pandas-dev/pandas/pull/39942#issuecomment-783363832 - here's a solution, the [docs](https://docs.python.org/3/library/typing.html#typing.TypedDict) say that
> At runtime it is a plain dict | https://api.github.com/repos/pandas-dev/pandas/pulls/40947 | 2021-04-14T15:45:09Z | 2021-04-27T08:54:41Z | 2021-04-27T08:54:41Z | 2021-04-27T09:00:22Z |
TYP: timestamps.pyi | diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
new file mode 100644
index 0000000000000..8728b700a1f6d
--- /dev/null
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -0,0 +1,205 @@
+from datetime import (
+ date as _date,
+ datetime,
+ time as _time,
+ timedelta,
+ tzinfo as _tzinfo,
+)
+import sys
+from time import struct_time
+from typing import (
+ ClassVar,
+ Optional,
+ Type,
+ TypeVar,
+ overload,
+)
+
+import numpy as np
+
+from pandas._libs.tslibs import (
+ NaT,
+ NaTType,
+ Period,
+ Timedelta,
+)
+
+_S = TypeVar("_S")
+
+
+def integer_op_not_supported(obj) -> None: ...
+
+
+class Timestamp(datetime):
+ min: ClassVar[Timestamp]
+ max: ClassVar[Timestamp]
+
+ resolution: ClassVar[Timedelta]
+ value: int # np.int64
+
+ # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]")
+ def __new__( # type: ignore[misc]
+ cls: Type[_S],
+ ts_input: int | np.integer | float | str | _date | datetime | np.datetime64 = ...,
+ freq=...,
+ tz: str | _tzinfo | None | int= ...,
+ unit=...,
+ year: int | None = ...,
+ month: int | None = ...,
+ day: int | None = ...,
+ hour: int | None = ...,
+ minute: int | None = ...,
+ second: int | None = ...,
+ microsecond: int | None = ...,
+ nanosecond: int | None = ...,
+ tzinfo: _tzinfo | None = ...,
+ *,
+ fold: int | None= ...,
+ ) -> _S | NaTType: ...
+
+ @property
+ def year(self) -> int: ...
+ @property
+ def month(self) -> int: ...
+ @property
+ def day(self) -> int: ...
+ @property
+ def hour(self) -> int: ...
+ @property
+ def minute(self) -> int: ...
+ @property
+ def second(self) -> int: ...
+ @property
+ def microsecond(self) -> int: ...
+ @property
+ def tzinfo(self) -> Optional[_tzinfo]: ...
+ @property
+ def tz(self) -> Optional[_tzinfo]: ...
+
+ @property
+ def fold(self) -> int: ...
+
+ @classmethod
+ def fromtimestamp(cls: Type[_S], t: float, tz: Optional[_tzinfo] = ...) -> _S: ...
+ @classmethod
+ def utcfromtimestamp(cls: Type[_S], t: float) -> _S: ...
+ @classmethod
+ def today(cls: Type[_S]) -> _S: ...
+ @classmethod
+ def fromordinal(cls: Type[_S], n: int) -> _S: ...
+
+ if sys.version_info >= (3, 8):
+ @classmethod
+ def now(cls: Type[_S], tz: _tzinfo | str | None = ...) -> _S: ...
+ else:
+ @overload
+ @classmethod
+ def now(cls: Type[_S], tz: None = ...) -> _S: ...
+ @overload
+ @classmethod
+ def now(cls, tz: _tzinfo) -> datetime: ...
+
+ @classmethod
+ def utcnow(cls: Type[_S]) -> _S: ...
+ @classmethod
+ def combine(cls, date: _date, time: _time, tzinfo: Optional[_tzinfo] = ...) -> datetime: ...
+
+ @classmethod
+ def fromisoformat(cls: Type[_S], date_string: str) -> _S: ...
+
+ def strftime(self, fmt: str) -> str: ...
+ def __format__(self, fmt: str) -> str: ...
+
+ def toordinal(self) -> int: ...
+ def timetuple(self) -> struct_time: ...
+
+ def timestamp(self) -> float: ...
+
+ def utctimetuple(self) -> struct_time: ...
+ def date(self) -> _date: ...
+ def time(self) -> _time: ...
+ def timetz(self) -> _time: ...
+
+ def replace(
+ self,
+ year: int = ...,
+ month: int = ...,
+ day: int = ...,
+ hour: int = ...,
+ minute: int = ...,
+ second: int = ...,
+ microsecond: int = ...,
+ tzinfo: Optional[_tzinfo] = ...,
+ *,
+ fold: int = ...,
+ ) -> datetime: ...
+
+ if sys.version_info >= (3, 8):
+ def astimezone(self: _S, tz: Optional[_tzinfo] = ...) -> _S: ...
+ else:
+ def astimezone(self, tz: Optional[_tzinfo] = ...) -> datetime: ...
+
+ def ctime(self) -> str: ...
+ def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ...
+
+ @classmethod
+ def strptime(cls, date_string: str, format: str) -> datetime: ...
+
+ def utcoffset(self) -> Optional[timedelta]: ...
+ def tzname(self) -> Optional[str]: ...
+ def dst(self) -> Optional[timedelta]: ...
+
+ def __le__(self, other: datetime) -> bool: ... # type: ignore
+ def __lt__(self, other: datetime) -> bool: ... # type: ignore
+ def __ge__(self, other: datetime) -> bool: ... # type: ignore
+ def __gt__(self, other: datetime) -> bool: ... # type: ignore
+ if sys.version_info >= (3, 8):
+ def __add__(self: _S, other: timedelta) -> _S: ...
+ def __radd__(self: _S, other: timedelta) -> _S: ...
+ else:
+ def __add__(self, other: timedelta) -> datetime: ...
+ def __radd__(self, other: timedelta) -> datetime: ...
+ @overload # type: ignore
+ def __sub__(self, other: datetime) -> timedelta: ...
+ @overload
+ def __sub__(self, other: timedelta) -> datetime: ...
+
+ def __hash__(self) -> int: ...
+ def weekday(self) -> int: ...
+ def isoweekday(self) -> int: ...
+ def isocalendar(self) -> tuple[int, int, int]: ...
+
+ @property
+ def is_leap_year(self) -> bool: ...
+ @property
+ def is_month_start(self) -> bool: ...
+ @property
+ def is_quarter_start(self) -> bool: ...
+ @property
+ def is_year_start(self) -> bool: ...
+ @property
+ def is_month_end(self) -> bool: ...
+ @property
+ def is_quarter_end(self) -> bool: ...
+ @property
+ def is_year_end(self) -> bool: ...
+
+ def to_pydatetime(self, warn: bool = ...) -> datetime: ...
+ def to_datetime64(self) -> np.datetime64: ...
+ def to_period(self, freq) -> Period: ...
+ def to_julian_date(self) -> np.float64: ...
+
+ @property
+ def asm8(self) -> np.datetime64: ...
+
+ def tz_convert(self: _S, tz) -> _S: ...
+
+ # TODO: could return NaT?
+ def tz_localize(self: _S, tz, ambiguous: str = ..., nonexistent: str = ...) -> _S: ...
+
+ def normalize(self: _S) -> _S: ...
+
+ # TODO: round/floor/ceil could return NaT?
+ def round(self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ...) -> _S: ...
+ def floor(self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ...) -> _S: ...
+ def ceil(self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ...) -> _S: ...
diff --git a/pandas/core/arrays/_ranges.py b/pandas/core/arrays/_ranges.py
index 34d5ea6cfb20d..a537951786646 100644
--- a/pandas/core/arrays/_ranges.py
+++ b/pandas/core/arrays/_ranges.py
@@ -41,20 +41,20 @@ def generate_regular_range(
-------
ndarray[np.int64] Representing nanoseconds.
"""
- start = start.value if start is not None else None
- end = end.value if end is not None else None
+ istart = start.value if start is not None else None
+ iend = end.value if end is not None else None
stride = freq.nanos
if periods is None:
- b = start
+ b = istart
# cannot just use e = Timestamp(end) + 1 because arange breaks when
# stride is too large, see GH10887
- e = b + (end - b) // stride * stride + stride // 2 + 1
- elif start is not None:
- b = start
+ e = b + (iend - b) // stride * stride + stride // 2 + 1
+ elif istart is not None:
+ b = istart
e = _generate_range_overflow_safe(b, periods, stride, side="start")
- elif end is not None:
- e = end + stride
+ elif iend is not None:
+ e = iend + stride
b = _generate_range_overflow_safe(e, periods, stride, side="end")
else:
raise ValueError(
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 289ed4948934f..117b267fd49e5 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -742,7 +742,9 @@ def _sub_datetimelike_scalar(self, other):
assert isinstance(other, (datetime, np.datetime64))
assert other is not NaT
other = Timestamp(other)
- if other is NaT:
+ # error: Non-overlapping identity check (left operand type: "Timestamp",
+ # right operand type: "NaTType")
+ if other is NaT: # type: ignore[comparison-overlap]
return self - NaT
if not self._has_same_tz(other):
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index d739b46620032..d4ecec667cc86 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -189,13 +189,13 @@ def maybe_box_native(value: Scalar) -> Scalar:
value = maybe_box_datetimelike(value)
elif is_float(value):
# error: Argument 1 to "float" has incompatible type
- # "Union[Union[str, int, float, bool], Union[Any, Any, Timedelta, Any]]";
+ # "Union[Union[str, int, float, bool], Union[Any, Timestamp, Timedelta, Any]]";
# expected "Union[SupportsFloat, _SupportsIndex, str]"
value = float(value) # type: ignore[arg-type]
elif is_integer(value):
# error: Argument 1 to "int" has incompatible type
- # "Union[Union[str, int, float, bool], Union[Any, Any, Timedelta, Any]]";
- # pected "Union[str, SupportsInt, _SupportsIndex, _SupportsTrunc]"
+ # "Union[Union[str, int, float, bool], Union[Any, Timestamp, Timedelta, Any]]";
+ # expected "Union[str, SupportsInt, _SupportsIndex, _SupportsTrunc]"
value = int(value) # type: ignore[arg-type]
elif is_bool(value):
value = bool(value)
@@ -729,7 +729,9 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> tuple[DtypeObj,
except OutOfBoundsDatetime:
return np.dtype(object), val
- if val is NaT or val.tz is None:
+ # error: Non-overlapping identity check (left operand type: "Timestamp",
+ # right operand type: "NaTType")
+ if val is NaT or val.tz is None: # type: ignore[comparison-overlap]
dtype = np.dtype("M8[ns]")
val = val.to_datetime64()
else:
@@ -2056,7 +2058,7 @@ def validate_numeric_casting(dtype: np.dtype, value: Scalar) -> None:
ValueError
"""
# error: Argument 1 to "__call__" of "ufunc" has incompatible type
- # "Union[Union[str, int, float, bool], Union[Any, Any, Timedelta, Any]]";
+ # "Union[Union[str, int, float, bool], Union[Any, Timestamp, Timedelta, Any]]";
# expected "Union[Union[int, float, complex, str, bytes, generic],
# Sequence[Union[int, float, complex, str, bytes, generic]],
# Sequence[Sequence[Any]], _SupportsArray]"
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 2d7d83d6a2bc3..61396fdf372d5 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1892,7 +1892,9 @@ def get_block_type(values, dtype: Dtype | None = None):
cls = ExtensionBlock
elif isinstance(dtype, CategoricalDtype):
cls = CategoricalBlock
- elif vtype is Timestamp:
+ # error: Non-overlapping identity check (left operand type: "Type[generic]",
+ # right operand type: "Type[Timestamp]")
+ elif vtype is Timestamp: # type: ignore[comparison-overlap]
cls = DatetimeTZBlock
elif isinstance(dtype, ExtensionDtype):
# Note: need to be sure PandasArray is unwrapped before we get here
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index bb37f670ed302..8577bb5dc311b 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -628,16 +628,16 @@ def _adjust_to_origin(arg, origin, unit):
if offset.tz is not None:
raise ValueError(f"origin offset {offset} must be tz-naive")
- offset -= Timestamp(0)
+ td_offset = offset - Timestamp(0)
# convert the offset to the unit of the arg
# this should be lossless in terms of precision
- offset = offset // Timedelta(1, unit=unit)
+ ioffset = td_offset // Timedelta(1, unit=unit)
# scalars & ndarray-like can handle the addition
if is_list_like(arg) and not isinstance(arg, (ABCSeries, Index, np.ndarray)):
arg = np.asarray(arg)
- arg = arg + offset
+ arg = arg + ioffset
return arg
@@ -887,13 +887,17 @@ def to_datetime(
infer_datetime_format=infer_datetime_format,
)
+ result: Timestamp | NaTType | Series | Index
+
if isinstance(arg, Timestamp):
result = arg
if tz is not None:
if arg.tz is not None:
- result = result.tz_convert(tz)
+ # error: Too many arguments for "tz_convert" of "NaTType"
+ result = result.tz_convert(tz) # type: ignore[call-arg]
else:
- result = result.tz_localize(tz)
+ # error: Too many arguments for "tz_localize" of "NaTType"
+ result = result.tz_localize(tz) # type: ignore[call-arg]
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
@@ -928,7 +932,10 @@ def to_datetime(
else:
result = convert_listlike(np.array([arg]), format)[0]
- return result
+ # error: Incompatible return value type (got "Union[Timestamp, NaTType,
+ # Series, Index]", expected "Union[DatetimeIndex, Series, float, str,
+ # NaTType, None]")
+ return result # type: ignore[return-value]
# mappings for assembling units
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 1324485f49bdb..c105465cddd95 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -1,7 +1,4 @@
-from typing import (
- List,
- cast,
-)
+from typing import List
import numpy as np
@@ -200,10 +197,9 @@ def _get_cell_value(self, cell, convert_float: bool) -> Scalar:
cell_value = cell.attributes.get((OFFICENS, "date-value"))
return pd.to_datetime(cell_value)
elif cell_type == "time":
- result = pd.to_datetime(str(cell))
- result = cast(pd.Timestamp, result)
+ stamp = pd.to_datetime(str(cell))
# error: Item "str" of "Union[float, str, NaTType]" has no attribute "time"
- return result.time() # type: ignore[union-attr]
+ return stamp.time() # type: ignore[union-attr]
else:
self.close()
raise ValueError(f"Unrecognized type {cell_type}")
diff --git a/pandas/tests/indexes/test_engines.py b/pandas/tests/indexes/test_engines.py
index 52af29d999fcc..9f41c68909f6e 100644
--- a/pandas/tests/indexes/test_engines.py
+++ b/pandas/tests/indexes/test_engines.py
@@ -61,7 +61,13 @@ class TestTimedeltaEngine:
@pytest.mark.parametrize(
"scalar",
[
- pd.Timestamp(pd.Timedelta(days=42).asm8.view("datetime64[ns]")),
+ # error: Argument 1 to "Timestamp" has incompatible type "timedelta64";
+ # expected "Union[integer[Any], float, str, date, datetime64]"
+ pd.Timestamp(
+ pd.Timedelta(days=42).asm8.view(
+ "datetime64[ns]"
+ ) # type: ignore[arg-type]
+ ),
pd.Timedelta(days=42).value,
pd.Timedelta(days=42).to_pytimedelta(),
pd.Timedelta(days=42).to_timedelta64(),
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index 663892cefb5e6..98ec4de614a07 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -331,7 +331,9 @@ def test_constructor_fromordinal(self):
tz="UTC",
),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
- Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC),
+ # error: Argument 9 to "Timestamp" has incompatible type "_UTCclass";
+ # expected "Optional[int]"
+ Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC), # type: ignore[arg-type]
],
)
def test_constructor_nanosecond(self, result):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40945 | 2021-04-14T14:44:23Z | 2021-04-26T09:31:48Z | 2021-04-26T09:31:47Z | 2021-04-26T17:18:07Z |
REF: handle dtype dispatch in libhashtable, share Vector/Hashtable code | diff --git a/pandas/_libs/hashtable.pxd b/pandas/_libs/hashtable.pxd
index a5679af44ac06..80d7ab58dc559 100644
--- a/pandas/_libs/hashtable.pxd
+++ b/pandas/_libs/hashtable.pxd
@@ -128,10 +128,12 @@ cdef struct Int64VectorData:
int64_t *data
Py_ssize_t n, m
-cdef class Int64Vector:
+cdef class Vector:
+ cdef bint external_view_exists
+
+cdef class Int64Vector(Vector):
cdef Int64VectorData *data
cdef ndarray ao
- cdef bint external_view_exists
cdef resize(self)
cpdef ndarray to_array(self)
diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi
index b6278b3956a1d..0612acd25a5d5 100644
--- a/pandas/_libs/hashtable.pyi
+++ b/pandas/_libs/hashtable.pyi
@@ -12,34 +12,28 @@ def unique_label_indices(
class Factorizer:
- table: PyObjectHashTable
- uniques: ObjectVector
count: int
def __init__(self, size_hint: int): ...
def get_count(self) -> int: ...
+
+class ObjectFactorizer(Factorizer):
+ table: PyObjectHashTable
+ uniques: ObjectVector
+
def factorize(
self,
- values: np.ndarray, # np.ndarray[object]
+ values: np.ndarray, # ndarray[object]
sort: bool = ...,
na_sentinel=...,
na_value=...,
) -> np.ndarray: ... # np.ndarray[intp]
- def unique(
- self,
- values: np.ndarray, # np.ndarray[object]
- ) -> np.ndarray: ... # np.ndarray[object]
-
-class Int64Factorizer:
+class Int64Factorizer(Factorizer):
table: Int64HashTable
uniques: Int64Vector
- count: int
-
- def __init__(self, size_hint: int): ...
- def get_count(self) -> int: ...
def factorize(
self,
@@ -240,3 +234,26 @@ def value_count_int64(
np.ndarray, # np.ndarray[np.int64]
np.ndarray, # np.ndarray[np.int64]
]: ...
+
+
+def duplicated(
+ values: np.ndarray,
+ keep: Literal["last", "first", False] = ...,
+) -> np.ndarray: ... # np.ndarray[bool]
+
+def mode(values: np.ndarray, dropna: bool) -> np.ndarray: ...
+
+def value_count(
+ values: np.ndarray,
+ dropna: bool,
+) -> tuple[
+ np.ndarray,
+ np.ndarray, # np.ndarray[np.int64]
+]: ...
+
+
+# arr and values should have same dtype
+def ismember(
+ arr: np.ndarray,
+ values: np.ndarray,
+) -> np.ndarray: ... # np.ndarray[bool]
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index 4566f22be2c36..7df3f69337643 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -56,19 +56,25 @@ include "hashtable_class_helper.pxi"
include "hashtable_func_helper.pxi"
cdef class Factorizer:
- cdef public:
- PyObjectHashTable table
- ObjectVector uniques
+ cdef readonly:
Py_ssize_t count
- def __init__(self, size_hint: int):
- self.table = PyObjectHashTable(size_hint)
- self.uniques = ObjectVector()
+ def __cinit__(self, size_hint: int):
self.count = 0
def get_count(self) -> int:
return self.count
+
+cdef class ObjectFactorizer(Factorizer):
+ cdef public:
+ PyObjectHashTable table
+ ObjectVector uniques
+
+ def __cinit__(self, size_hint: int):
+ self.table = PyObjectHashTable(size_hint)
+ self.uniques = ObjectVector()
+
def factorize(
self, ndarray[object] values, sort=False, na_sentinel=-1, na_value=None
) -> np.ndarray:
@@ -105,24 +111,15 @@ cdef class Factorizer:
self.count = len(self.uniques)
return labels
- def unique(self, ndarray[object] values):
- # just for fun
- return self.table.unique(values)
-
-cdef class Int64Factorizer:
+cdef class Int64Factorizer(Factorizer):
cdef public:
Int64HashTable table
Int64Vector uniques
- Py_ssize_t count
- def __init__(self, size_hint: int):
+ def __cinit__(self, size_hint: int):
self.table = Int64HashTable(size_hint)
self.uniques = Int64Vector()
- self.count = 0
-
- def get_count(self) -> int:
- return self.count
def factorize(self, const int64_t[:] values, sort=False,
na_sentinel=-1, na_value=None) -> np.ndarray:
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index 4cacd3245f9d8..6d51ea7d5de7b 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -127,6 +127,8 @@ dtypes = [('Complex128', 'complex128', 'khcomplex128_t'),
{{if dtype != 'int64'}}
+# Int64VectorData is defined in the .pxd file because it is needed (indirectly)
+# by IntervalTree
ctypedef struct {{name}}VectorData:
{{c_type}} *data
@@ -167,6 +169,14 @@ cdef inline bint needs_resize(vector_data *data) nogil:
# Vector
# ----------------------------------------------------------------------
+cdef class Vector:
+ # cdef readonly:
+ # bint external_view_exists
+
+ def __cinit__(self):
+ self.external_view_exists = False
+
+
{{py:
# name, dtype, c_type
@@ -187,11 +197,12 @@ dtypes = [('Complex128', 'complex128', 'khcomplex128_t'),
{{for name, dtype, c_type in dtypes}}
-cdef class {{name}}Vector:
+cdef class {{name}}Vector(Vector):
+ # For int64 we have to put this declaration in the .pxd file;
+ # Int64Vector is the only one we need exposed for other cython files.
{{if dtype != 'int64'}}
cdef:
- bint external_view_exists
{{name}}VectorData *data
ndarray ao
{{endif}}
@@ -201,7 +212,6 @@ cdef class {{name}}Vector:
sizeof({{name}}VectorData))
if not self.data:
raise MemoryError()
- self.external_view_exists = False
self.data.n = 0
self.data.m = _INIT_VEC_CAP
self.ao = np.empty(self.data.m, dtype=np.{{dtype}})
@@ -246,17 +256,15 @@ cdef class {{name}}Vector:
{{endfor}}
-cdef class StringVector:
+cdef class StringVector(Vector):
cdef:
StringVectorData *data
- bint external_view_exists
def __cinit__(self):
self.data = <StringVectorData *>PyMem_Malloc(sizeof(StringVectorData))
if not self.data:
raise MemoryError()
- self.external_view_exists = False
self.data.n = 0
self.data.m = _INIT_VEC_CAP
self.data.data = <char **>malloc(self.data.m * sizeof(char *))
@@ -314,16 +322,14 @@ cdef class StringVector:
self.append(x[i])
-cdef class ObjectVector:
+cdef class ObjectVector(Vector):
cdef:
PyObject **data
Py_ssize_t n, m
ndarray ao
- bint external_view_exists
def __cinit__(self):
- self.external_view_exists = False
self.n = 0
self.m = _INIT_VEC_CAP
self.ao = np.empty(_INIT_VEC_CAP, dtype=object)
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index 772d83e67394c..ceb473a0b06af 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -31,9 +31,9 @@ dtypes = [('Complex128', 'complex128', 'complex128',
@cython.wraparound(False)
@cython.boundscheck(False)
{{if dtype == 'object'}}
-cpdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna, navalue=np.NaN):
+cdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna, navalue=np.NaN):
{{else}}
-cpdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
+cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
{{endif}}
cdef:
Py_ssize_t i = 0
@@ -107,9 +107,9 @@ cpdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
@cython.wraparound(False)
@cython.boundscheck(False)
{{if dtype == 'object'}}
-def duplicated_{{dtype}}(ndarray[{{dtype}}] values, object keep='first'):
+cdef duplicated_{{dtype}}(ndarray[{{dtype}}] values, object keep='first'):
{{else}}
-def duplicated_{{dtype}}(const {{dtype}}_t[:] values, object keep='first'):
+cdef duplicated_{{dtype}}(const {{dtype}}_t[:] values, object keep='first'):
{{endif}}
cdef:
int ret = 0
@@ -189,9 +189,9 @@ def duplicated_{{dtype}}(const {{dtype}}_t[:] values, object keep='first'):
@cython.wraparound(False)
@cython.boundscheck(False)
{{if dtype == 'object'}}
-def ismember_{{dtype}}(ndarray[{{c_type}}] arr, ndarray[{{c_type}}] values):
+cdef ismember_{{dtype}}(ndarray[{{c_type}}] arr, ndarray[{{c_type}}] values):
{{else}}
-def ismember_{{dtype}}(const {{dtype}}_t[:] arr, const {{dtype}}_t[:] values):
+cdef ismember_{{dtype}}(const {{dtype}}_t[:] arr, const {{dtype}}_t[:] values):
{{endif}}
"""
Return boolean of values in arr on an
@@ -256,9 +256,9 @@ def ismember_{{dtype}}(const {{dtype}}_t[:] arr, const {{dtype}}_t[:] values):
@cython.wraparound(False)
@cython.boundscheck(False)
{{if dtype == 'object'}}
-def mode_{{dtype}}(ndarray[{{dtype}}] values, bint dropna):
+cdef mode_{{dtype}}(ndarray[{{dtype}}] values, bint dropna):
{{else}}
-def mode_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
+cdef mode_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
{{endif}}
cdef:
{{if dtype == 'object'}}
@@ -310,3 +310,163 @@ def mode_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
return modes[:j + 1]
{{endfor}}
+
+
+ctypedef fused htfunc_t:
+ complex128_t
+ complex64_t
+ float64_t
+ float32_t
+ uint64_t
+ uint32_t
+ uint16_t
+ uint8_t
+ int64_t
+ int32_t
+ int16_t
+ int8_t
+ object
+
+
+cpdef value_count(ndarray[htfunc_t] values, bint dropna):
+ if htfunc_t is object:
+ return value_count_object(values, dropna)
+
+ elif htfunc_t is int8_t:
+ return value_count_int8(values, dropna)
+ elif htfunc_t is int16_t:
+ return value_count_int16(values, dropna)
+ elif htfunc_t is int32_t:
+ return value_count_int32(values, dropna)
+ elif htfunc_t is int64_t:
+ return value_count_int64(values, dropna)
+
+ elif htfunc_t is uint8_t:
+ return value_count_uint8(values, dropna)
+ elif htfunc_t is uint16_t:
+ return value_count_uint16(values, dropna)
+ elif htfunc_t is uint32_t:
+ return value_count_uint32(values, dropna)
+ elif htfunc_t is uint64_t:
+ return value_count_uint64(values, dropna)
+
+ elif htfunc_t is float64_t:
+ return value_count_float64(values, dropna)
+ elif htfunc_t is float32_t:
+ return value_count_float32(values, dropna)
+
+ elif htfunc_t is complex128_t:
+ return value_count_complex128(values, dropna)
+ elif htfunc_t is complex64_t:
+ return value_count_complex64(values, dropna)
+
+ else:
+ raise TypeError(values.dtype)
+
+
+cpdef duplicated(ndarray[htfunc_t] values, object keep="first"):
+ if htfunc_t is object:
+ return duplicated_object(values, keep)
+
+ elif htfunc_t is int8_t:
+ return duplicated_int8(values, keep)
+ elif htfunc_t is int16_t:
+ return duplicated_int16(values, keep)
+ elif htfunc_t is int32_t:
+ return duplicated_int32(values, keep)
+ elif htfunc_t is int64_t:
+ return duplicated_int64(values, keep)
+
+ elif htfunc_t is uint8_t:
+ return duplicated_uint8(values, keep)
+ elif htfunc_t is uint16_t:
+ return duplicated_uint16(values, keep)
+ elif htfunc_t is uint32_t:
+ return duplicated_uint32(values, keep)
+ elif htfunc_t is uint64_t:
+ return duplicated_uint64(values, keep)
+
+ elif htfunc_t is float64_t:
+ return duplicated_float64(values, keep)
+ elif htfunc_t is float32_t:
+ return duplicated_float32(values, keep)
+
+ elif htfunc_t is complex128_t:
+ return duplicated_complex128(values, keep)
+ elif htfunc_t is complex64_t:
+ return duplicated_complex64(values, keep)
+
+ else:
+ raise TypeError(values.dtype)
+
+
+cpdef ismember(ndarray[htfunc_t] arr, ndarray[htfunc_t] values):
+ if htfunc_t is object:
+ return ismember_object(arr, values)
+
+ elif htfunc_t is int8_t:
+ return ismember_int8(arr, values)
+ elif htfunc_t is int16_t:
+ return ismember_int16(arr, values)
+ elif htfunc_t is int32_t:
+ return ismember_int32(arr, values)
+ elif htfunc_t is int64_t:
+ return ismember_int64(arr, values)
+
+ elif htfunc_t is uint8_t:
+ return ismember_uint8(arr, values)
+ elif htfunc_t is uint16_t:
+ return ismember_uint16(arr, values)
+ elif htfunc_t is uint32_t:
+ return ismember_uint32(arr, values)
+ elif htfunc_t is uint64_t:
+ return ismember_uint64(arr, values)
+
+ elif htfunc_t is float64_t:
+ return ismember_float64(arr, values)
+ elif htfunc_t is float32_t:
+ return ismember_float32(arr, values)
+
+ elif htfunc_t is complex128_t:
+ return ismember_complex128(arr, values)
+ elif htfunc_t is complex64_t:
+ return ismember_complex64(arr, values)
+
+ else:
+ raise TypeError(values.dtype)
+
+
+cpdef mode(ndarray[htfunc_t] values, bint dropna):
+ if htfunc_t is object:
+ return mode_object(values, dropna)
+
+ elif htfunc_t is int8_t:
+ return mode_int8(values, dropna)
+ elif htfunc_t is int16_t:
+ return mode_int16(values, dropna)
+ elif htfunc_t is int32_t:
+ return mode_int32(values, dropna)
+ elif htfunc_t is int64_t:
+ return mode_int64(values, dropna)
+
+ elif htfunc_t is uint8_t:
+ return mode_uint8(values, dropna)
+ elif htfunc_t is uint16_t:
+ return mode_uint16(values, dropna)
+ elif htfunc_t is uint32_t:
+ return mode_uint32(values, dropna)
+ elif htfunc_t is uint64_t:
+ return mode_uint64(values, dropna)
+
+ elif htfunc_t is float64_t:
+ return mode_float64(values, dropna)
+ elif htfunc_t is float32_t:
+ return mode_float32(values, dropna)
+
+ elif htfunc_t is complex128_t:
+ return mode_complex128(values, dropna)
+ elif htfunc_t is complex64_t:
+ return mode_complex64(values, dropna)
+
+ else:
+ raise TypeError(values.dtype)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index ce718d9c9c810..f8f5e5e05bc35 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -84,6 +84,8 @@
from pandas.core.indexers import validate_indices
if TYPE_CHECKING:
+ from typing import Literal
+
from pandas import (
Categorical,
DataFrame,
@@ -188,7 +190,7 @@ def _reconstruct_data(
Parameters
----------
values : np.ndarray or ExtensionArray
- dtype : np.ndtype or ExtensionDtype
+ dtype : np.dtype or ExtensionDtype
original : AnyArrayLike
Returns
@@ -516,10 +518,7 @@ def f(c, v):
)
values = values.astype(common, copy=False)
comps = comps.astype(common, copy=False)
- name = common.name
- if name == "bool":
- name = "uint8"
- f = getattr(htable, f"ismember_{name}")
+ f = htable.ismember
return f(comps, values)
@@ -888,30 +887,24 @@ def value_counts_arraylike(values, dropna: bool):
values = _ensure_arraylike(values)
original = values
values, _ = _ensure_data(values)
- ndtype = values.dtype.name
+
+ # TODO: handle uint8
+ keys, counts = htable.value_count(values, dropna)
if needs_i8_conversion(original.dtype):
# datetime, timedelta, or period
- keys, counts = htable.value_count_int64(values, dropna)
-
if dropna:
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
- else:
- # ndarray like
-
- # TODO: handle uint8
- f = getattr(htable, f"value_count_{ndtype}")
- keys, counts = f(values, dropna)
-
res_keys = _reconstruct_data(keys, original.dtype, original)
-
return res_keys, counts
-def duplicated(values: ArrayLike, keep: str | bool = "first") -> np.ndarray:
+def duplicated(
+ values: ArrayLike, keep: Literal["first", "last", False] = "first"
+) -> np.ndarray:
"""
Return boolean ndarray denoting duplicate values.
@@ -931,9 +924,7 @@ def duplicated(values: ArrayLike, keep: str | bool = "first") -> np.ndarray:
duplicated : ndarray[bool]
"""
values, _ = _ensure_data(values)
- ndtype = values.dtype.name
- f = getattr(htable, f"duplicated_{ndtype}")
- return f(values, keep=keep)
+ return htable.duplicated(values, keep=keep)
def mode(values, dropna: bool = True) -> Series:
@@ -971,16 +962,14 @@ def mode(values, dropna: bool = True) -> Series:
values = values[~mask]
values, _ = _ensure_data(values)
- ndtype = values.dtype.name
- f = getattr(htable, f"mode_{ndtype}")
- result = f(values, dropna=dropna)
+ npresult = htable.mode(values, dropna=dropna)
try:
- result = np.sort(result)
+ npresult = np.sort(npresult)
except TypeError as err:
warn(f"Unable to sort modes: {err}")
- result = _reconstruct_data(result, original.dtype, original)
+ result = _reconstruct_data(npresult, original.dtype, original)
# Ensure index is type stable (should always use int index)
return Series(result, index=ibase.default_index(len(result)))
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index a82c75f4b2557..26c582561cd3d 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2183,11 +2183,9 @@ def mode(self, dropna=True):
if dropna:
good = self._codes != -1
codes = self._codes[good]
- # error: Incompatible types in assignment (expression has type "List[Any]",
- # variable has type "ndarray")
- codes = sorted( # type: ignore[assignment]
- htable.mode_int64(ensure_int64(codes), dropna)
- )
+
+ codes = htable.mode(codes, dropna)
+ codes.sort()
codes = coerce_indexer_dtype(codes, self.dtype.categories)
return self._from_backing_data(codes)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 3270e3dd82f7d..adc904d80fea8 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -61,6 +61,8 @@
import pandas.core.nanops as nanops
if TYPE_CHECKING:
+ from typing import Literal
+
from pandas import Categorical
_shared_docs: dict[str, str] = {}
@@ -1258,5 +1260,7 @@ def drop_duplicates(self, keep="first"):
return self[~duplicated] # type: ignore[index]
@final
- def _duplicated(self, keep: str | bool = "first") -> np.ndarray:
+ def _duplicated(
+ self, keep: Literal["first", "last", False] = "first"
+ ) -> np.ndarray:
return duplicated(self._values, keep=keep)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6d3042507d930..899526694f4d9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -43,6 +43,7 @@
lib,
properties,
)
+from pandas._libs.hashtable import duplicated
from pandas._libs.lib import no_default
from pandas._typing import (
AggFuncType,
@@ -6141,7 +6142,6 @@ def duplicated(
4 True
dtype: bool
"""
- from pandas._libs.hashtable import duplicated_int64
if self.empty:
return self._constructor_sliced(dtype=bool)
@@ -6181,7 +6181,7 @@ def f(vals) -> tuple[np.ndarray, int]:
sort=False,
xnull=False,
)
- result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)
+ result = self._constructor_sliced(duplicated(ids, keep), index=self.index)
return result.__finalize__(self, method="duplicated")
# ----------------------------------------------------------------------
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 84f1245299d53..5895d12622aa1 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2685,7 +2685,7 @@ def drop_duplicates(self: _IndexT, keep: str_t | bool = "first") -> _IndexT:
return super().drop_duplicates(keep=keep)
- def duplicated(self, keep: str_t | bool = "first") -> np.ndarray:
+ def duplicated(self, keep: Literal["first", "last", False] = "first") -> np.ndarray:
"""
Indicate duplicate index values.
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index a68238af003e4..4e4bcd570391d 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -25,7 +25,7 @@
index as libindex,
lib,
)
-from pandas._libs.hashtable import duplicated_int64
+from pandas._libs.hashtable import duplicated
from pandas._typing import (
AnyArrayLike,
DtypeObj,
@@ -1614,7 +1614,7 @@ def duplicated(self, keep="first") -> np.ndarray:
shape = tuple(len(lev) for lev in self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
- return duplicated_int64(ids, keep)
+ return duplicated(ids, keep)
# error: Cannot override final attribute "_duplicated"
# (previously declared in base class "IndexOpsMixin")
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 8478e2a17efa5..f8085b2bab1ed 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -2153,7 +2153,7 @@ def _factorize_keys(
rk = ensure_int64(np.asarray(rk, dtype=np.int64))
else:
- klass = libhashtable.Factorizer
+ klass = libhashtable.ObjectFactorizer
lk = ensure_object(lk)
rk = ensure_object(rk)
diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py
index bc0b6e0b028a8..ea59d55989f8b 100644
--- a/pandas/tests/indexes/multi/test_duplicates.py
+++ b/pandas/tests/indexes/multi/test_duplicates.py
@@ -253,7 +253,7 @@ def test_duplicated_large(keep):
mi = MultiIndex(levels=levels, codes=codes)
result = mi.duplicated(keep=keep)
- expected = hashtable.duplicated_object(mi.values, keep=keep)
+ expected = hashtable.duplicated(mi.values, keep=keep)
tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py
index 04a8aeefbfcd6..aeff591e3f0dc 100644
--- a/pandas/tests/libs/test_hashtable.py
+++ b/pandas/tests/libs/test_hashtable.py
@@ -278,7 +278,7 @@ def test_unique(self, table_type, dtype):
def get_ht_function(fun_name, type_suffix):
- return getattr(ht, fun_name + "_" + type_suffix)
+ return getattr(ht, fun_name)
@pytest.mark.parametrize(
@@ -374,7 +374,7 @@ def test_modes_with_nans():
values = np.array([True, pd.NA, np.nan], dtype=np.object_)
# pd.Na and np.nan will have the same representative: np.nan
# thus we have 2 nans and 1 True
- modes = ht.mode_object(values, False)
+ modes = ht.mode(values, False)
assert modes.size == 1
assert np.isnan(modes[0])
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 964dd9bdd0e0a..4df95d895e475 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -194,7 +194,7 @@ def test_factorize_nan(self):
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype="O")
- rizer = ht.Factorizer(len(key))
+ rizer = ht.ObjectFactorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype="int32")
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40944 | 2021-04-14T14:23:49Z | 2021-05-10T13:52:45Z | 2021-05-10T13:52:45Z | 2021-05-10T14:07:16Z |
Backport PR #40924: BUG: concat with DTI and all-None Index | diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst
index cdfc2e5686b91..16f9284802407 100644
--- a/doc/source/whatsnew/v1.2.5.rst
+++ b/doc/source/whatsnew/v1.2.5.rst
@@ -14,7 +14,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
-
+- Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`)
-
-
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 11d191597d61e..25860d6a4ecb3 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2742,7 +2742,8 @@ def _union(self, other, sort):
# worth making this faster? a very unusual case
value_set = set(lvals)
result.extend([x for x in rvals if x not in value_set])
- result = Index(result)._values # do type inference here
+ # If objects are unorderable, we must have object dtype.
+ return np.array(result, dtype=object)
else:
# find indexes of things in "other" that are not in "self"
if self.is_unique:
diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py
index cd58df4fc5da6..3b11a5dbda41c 100644
--- a/pandas/tests/reshape/concat/test_concat.py
+++ b/pandas/tests/reshape/concat/test_concat.py
@@ -572,3 +572,23 @@ def test_concat_repeated_keys(keys, integrity):
tuples = list(zip(keys, ["a", "b", "c"]))
expected = Series([1, 2, 3], index=MultiIndex.from_tuples(tuples))
tm.assert_series_equal(result, expected)
+
+
+def test_concat_null_object_with_dti():
+ # GH#40841
+ dti = pd.DatetimeIndex(
+ ["2021-04-08 21:21:14+00:00"], dtype="datetime64[ns, UTC]", name="Time (UTC)"
+ )
+ right = DataFrame(data={"C": [0.5274]}, index=dti)
+
+ idx = Index([None], dtype="object", name="Maybe Time (UTC)")
+ left = DataFrame(data={"A": [None], "B": [np.nan]}, index=idx)
+
+ result = concat([left, right], axis="columns")
+
+ exp_index = Index([None, dti[0]], dtype=object)
+ expected = DataFrame(
+ {"A": [None, None], "B": [np.nan, np.nan], "C": [np.nan, 0.5274]},
+ index=exp_index,
+ )
+ tm.assert_frame_equal(result, expected)
| Backport PR #40924 | https://api.github.com/repos/pandas-dev/pandas/pulls/40942 | 2021-04-14T14:11:39Z | 2021-04-14T15:41:27Z | 2021-04-14T15:41:27Z | 2021-04-14T15:41:31Z |
BUG: Dataframe mask method does not work properly with pd.StringDtype() | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 2b0b62ab7facf..8ebb5437978ea 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -830,7 +830,7 @@ ExtensionArray
- Bug in :meth:`DataFrame.where` when ``other`` is a :class:`Series` with :class:`ExtensionArray` dtype (:issue:`38729`)
- Fixed bug where :meth:`Series.idxmax`, :meth:`Series.idxmin` and ``argmax/min`` fail when the underlying data is :class:`ExtensionArray` (:issue:`32749`, :issue:`33719`, :issue:`36566`)
- Fixed a bug where some properties of subclasses of :class:`PandasExtensionDtype` where improperly cached (:issue:`40329`)
--
+- Bug in :meth:`DataFrame.mask` where masking a :class:`Dataframe` with an :class:`ExtensionArray` dtype raises ``ValueError`` (:issue:`40941`)
Styler
^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index cbc353eead464..bad42a85aeeee 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8958,7 +8958,7 @@ def _where(
join="left",
axis=axis,
level=level,
- fill_value=np.nan,
+ fill_value=None,
copy=False,
)
diff --git a/pandas/tests/frame/indexing/test_mask.py b/pandas/tests/frame/indexing/test_mask.py
index afa8c757c23e4..364475428e529 100644
--- a/pandas/tests/frame/indexing/test_mask.py
+++ b/pandas/tests/frame/indexing/test_mask.py
@@ -5,7 +5,10 @@
import numpy as np
from pandas import (
+ NA,
DataFrame,
+ Series,
+ StringDtype,
isna,
)
import pandas._testing as tm
@@ -99,3 +102,24 @@ def test_mask_try_cast_deprecated(frame_or_series):
with tm.assert_produces_warning(FutureWarning):
# try_cast keyword deprecated
obj.mask(mask, -1, try_cast=True)
+
+
+def test_mask_stringdtype():
+ # GH 40824
+ df = DataFrame(
+ {"A": ["foo", "bar", "baz", NA]},
+ index=["id1", "id2", "id3", "id4"],
+ dtype=StringDtype(),
+ )
+ filtered_df = DataFrame(
+ {"A": ["this", "that"]}, index=["id2", "id3"], dtype=StringDtype()
+ )
+ filter_ser = Series([False, True, True, False])
+ result = df.mask(filter_ser, filtered_df)
+
+ expected = DataFrame(
+ {"A": [NA, "this", "that", NA]},
+ index=["id1", "id2", "id3", "id4"],
+ dtype=StringDtype(),
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py
index 574fa46d10f67..7ffe2fb9ab1ff 100644
--- a/pandas/tests/frame/indexing/test_where.py
+++ b/pandas/tests/frame/indexing/test_where.py
@@ -10,6 +10,7 @@
DataFrame,
DatetimeIndex,
Series,
+ StringDtype,
Timestamp,
date_range,
isna,
@@ -709,3 +710,22 @@ def test_where_copies_with_noop(frame_or_series):
where_res *= 2
tm.assert_equal(result, expected)
+
+
+def test_where_string_dtype(frame_or_series):
+ # GH40824
+ obj = frame_or_series(
+ ["a", "b", "c", "d"], index=["id1", "id2", "id3", "id4"], dtype=StringDtype()
+ )
+ filtered_obj = frame_or_series(
+ ["b", "c"], index=["id2", "id3"], dtype=StringDtype()
+ )
+ filter_ser = Series([False, True, True, False])
+
+ result = obj.where(filter_ser, filtered_obj)
+ expected = frame_or_series(
+ [pd.NA, "b", "c", pd.NA],
+ index=["id1", "id2", "id3", "id4"],
+ dtype=StringDtype(),
+ )
+ tm.assert_equal(result, expected)
diff --git a/pandas/tests/series/indexing/test_mask.py b/pandas/tests/series/indexing/test_mask.py
index dc4fb530dbb52..a4dda3a5c0c5b 100644
--- a/pandas/tests/series/indexing/test_mask.py
+++ b/pandas/tests/series/indexing/test_mask.py
@@ -1,7 +1,11 @@
import numpy as np
import pytest
-from pandas import Series
+from pandas import (
+ NA,
+ Series,
+ StringDtype,
+)
import pandas._testing as tm
@@ -63,3 +67,22 @@ def test_mask_inplace():
rs = s.copy()
rs.mask(cond, -s, inplace=True)
tm.assert_series_equal(rs, s.mask(cond, -s))
+
+
+def test_mask_stringdtype():
+ # GH 40824
+ ser = Series(
+ ["foo", "bar", "baz", NA],
+ index=["id1", "id2", "id3", "id4"],
+ dtype=StringDtype(),
+ )
+ filtered_ser = Series(["this", "that"], index=["id2", "id3"], dtype=StringDtype())
+ filter_ser = Series([False, True, True, False])
+ result = ser.mask(filter_ser, filtered_ser)
+
+ expected = Series(
+ [NA, "this", "that", NA],
+ index=["id1", "id2", "id3", "id4"],
+ dtype=StringDtype(),
+ )
+ tm.assert_series_equal(result, expected)
| - [x] closes #40824
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40941 | 2021-04-14T14:04:23Z | 2021-04-16T16:14:42Z | 2021-04-16T16:14:42Z | 2021-04-16T16:16:25Z |
DOC: clarify (un)aware logic in tz_localize() docstring | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 1be2ec0dd92d7..06ff6ffa61559 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -857,8 +857,9 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArr
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
- Time zone localization helps to switch from time zone aware to time
- zone unaware objects.
+
+ This method can also be used to do the inverse -- to create a time
+ zone unaware object from an aware object. To that end, pass `tz=None`.
Parameters
----------
| A simple hopefully no-brainer documentation patch; came across this while reading https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DatetimeIndex.tz_localize.html#pandas-datetimeindex-tz-localize.
| https://api.github.com/repos/pandas-dev/pandas/pulls/40940 | 2021-04-14T10:54:48Z | 2021-04-16T16:24:04Z | 2021-04-16T16:24:04Z | 2021-04-16T16:24:09Z |
call pytest with python3 -m | diff --git a/test_fast.sh b/test_fast.sh
index 6444b81b3c6da..e64d21b330824 100755
--- a/test_fast.sh
+++ b/test_fast.sh
@@ -5,4 +5,4 @@
# https://github.com/pytest-dev/pytest/issues/1075
export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 4294967295))')
-pytest pandas --skip-slow --skip-network --skip-db -m "not single" -n 4 -r sxX --strict-markers "$@"
+python3 -m pytest pandas --skip-slow --skip-network --skip-db -m "not single" -n 4 -r sxX --strict-markers "$@"
| This runs pytest as
```sh
python3 -m pytest # ...
```
This allows running the tests in a venv. (Otherwise, if pytest is not installed in the venv, it falls back to the system pytest, leading to some surprises down the road.) | https://api.github.com/repos/pandas-dev/pandas/pulls/40939 | 2021-04-14T10:10:24Z | 2021-04-14T12:47:18Z | null | 2021-04-14T12:47:23Z |
DOC/CI: add missing import to ipython directive in `whatsnew/v0.11.0.rst` | diff --git a/doc/source/whatsnew/v0.11.0.rst b/doc/source/whatsnew/v0.11.0.rst
index a69d1ad1dec3b..0fba784e36661 100644
--- a/doc/source/whatsnew/v0.11.0.rst
+++ b/doc/source/whatsnew/v0.11.0.rst
@@ -306,6 +306,7 @@ Astype conversion on ``datetime64[ns]`` to ``object``, implicitly converts ``NaT
.. ipython:: python
+ import datetime
s = pd.Series([datetime.datetime(2001, 1, 2, 0, 0) for i in range(3)])
s.dtype
s[1] = np.nan
| some of the builds have failed recently due to this ipython directive, so I added the fix as advised in the error warning. | https://api.github.com/repos/pandas-dev/pandas/pulls/40937 | 2021-04-14T08:17:33Z | 2021-04-15T08:56:14Z | 2021-04-15T08:56:14Z | 2021-04-15T11:41:31Z |
CLN: remove unused out kwd from take functions | diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py
index b97a777400134..93d87f6bb4dfa 100644
--- a/pandas/core/array_algos/take.py
+++ b/pandas/core/array_algos/take.py
@@ -100,14 +100,13 @@ def take_nd(
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
arr = np.asarray(arr)
- return _take_nd_ndarray(arr, indexer, axis, None, fill_value, allow_fill)
+ return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)
def _take_nd_ndarray(
arr: np.ndarray,
indexer,
axis: int,
- out: np.ndarray | None,
fill_value,
allow_fill: bool,
) -> np.ndarray:
@@ -119,7 +118,7 @@ def _take_nd_ndarray(
indexer = ensure_platform_int(indexer)
indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
- arr, indexer, out, fill_value, allow_fill
+ arr, indexer, fill_value, allow_fill
)
flip_order = False
@@ -129,23 +128,20 @@ def _take_nd_ndarray(
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
- if out is not None:
- out = out.T
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
- if out is None:
- out_shape_ = list(arr.shape)
- out_shape_[axis] = len(indexer)
- out_shape = tuple(out_shape_)
- if arr.flags.f_contiguous and axis == arr.ndim - 1:
- # minor tweak that can make an order-of-magnitude difference
- # for dataframes initialized directly from 2-d ndarrays
- # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its
- # f-contiguous transpose)
- out = np.empty(out_shape, dtype=dtype, order="F")
- else:
- out = np.empty(out_shape, dtype=dtype)
+ out_shape_ = list(arr.shape)
+ out_shape_[axis] = len(indexer)
+ out_shape = tuple(out_shape_)
+ if arr.flags.f_contiguous and axis == arr.ndim - 1:
+ # minor tweak that can make an order-of-magnitude difference
+ # for dataframes initialized directly from 2-d ndarrays
+ # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its
+ # f-contiguous transpose)
+ out = np.empty(out_shape, dtype=dtype, order="F")
+ else:
+ out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(
arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info
@@ -190,7 +186,7 @@ def take_1d(
return arr.take(indexer)
indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
- arr, indexer, None, fill_value, True
+ arr, indexer, fill_value, True
)
# at this point, it's guaranteed that dtype can hold both the arr values
@@ -516,7 +512,6 @@ def _take_2d_multi_object(
def _take_preprocess_indexer_and_fill_value(
arr: np.ndarray,
indexer: np.ndarray,
- out: np.ndarray | None,
fill_value,
allow_fill: bool,
):
@@ -534,10 +529,7 @@ def _take_preprocess_indexer_and_fill_value(
mask = indexer == -1
needs_masking = mask.any()
mask_info = mask, needs_masking
- if needs_masking:
- if out is not None and out.dtype != dtype:
- raise TypeError("Incompatible type for fill_value")
- else:
+ if not needs_masking:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
| Un-revert part of #40510 in the hopes of tracking down where the perf impact was. xref #40852 which un-reverted a different part.
Let's not merge for a few days to make sure that any surprising effects from #40852 show up in the asvs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/40934 | 2021-04-13T22:34:56Z | 2021-04-20T22:48:34Z | 2021-04-20T22:48:33Z | 2021-04-20T23:05:19Z |
TYP: fix mypy on master | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index d6c5935ecf685..a381a7bcb33f5 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -177,9 +177,6 @@ def pinner(cls):
class SeriesGroupBy(GroupBy[Series]):
_apply_allowlist = base.series_apply_allowlist
- # Defined as a cache_readonly in SelectionMixin
- _obj_with_exclusions: Series
-
def _iterate_slices(self) -> Iterable[Series]:
yield self._selected_obj
@@ -930,9 +927,6 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None):
@pin_allowlisted_properties(DataFrame, base.dataframe_apply_allowlist)
class DataFrameGroupBy(GroupBy[DataFrame]):
- # Defined as a cache_readonly in SelectionMixin
- _obj_with_exclusions: DataFrame
-
_apply_allowlist = base.dataframe_apply_allowlist
_agg_examples_doc = dedent(
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40930 | 2021-04-13T16:49:25Z | 2021-04-13T20:55:24Z | 2021-04-13T20:55:24Z | 2021-04-13T20:55:35Z |
CLN: preliminary refactor before `Styler.highlight_quantile` | diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 9b4673ddb7906..f51f81d7c3504 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -1558,55 +1558,10 @@ def highlight_between(
.. figure:: ../../_static/style/hbetw_props.png
"""
-
- def f(
- data: FrameOrSeries,
- props: str,
- left: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None,
- right: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None,
- inclusive: bool | str = True,
- ) -> np.ndarray:
- if np.iterable(left) and not isinstance(left, str):
- left = _validate_apply_axis_arg(
- left, "left", None, data # type: ignore[arg-type]
- )
-
- if np.iterable(right) and not isinstance(right, str):
- right = _validate_apply_axis_arg(
- right, "right", None, data # type: ignore[arg-type]
- )
-
- # get ops with correct boundary attribution
- if inclusive == "both":
- ops = (operator.ge, operator.le)
- elif inclusive == "neither":
- ops = (operator.gt, operator.lt)
- elif inclusive == "left":
- ops = (operator.ge, operator.lt)
- elif inclusive == "right":
- ops = (operator.gt, operator.le)
- else:
- raise ValueError(
- f"'inclusive' values can be 'both', 'left', 'right', or 'neither' "
- f"got {inclusive}"
- )
-
- g_left = (
- ops[0](data, left)
- if left is not None
- else np.full(data.shape, True, dtype=bool)
- )
- l_right = (
- ops[1](data, right)
- if right is not None
- else np.full(data.shape, True, dtype=bool)
- )
- return np.where(g_left & l_right, props, "")
-
if props is None:
props = f"background-color: {color};"
return self.apply(
- f, # type: ignore[arg-type]
+ _highlight_between, # type: ignore[arg-type]
axis=axis,
subset=subset,
props=props,
@@ -1831,3 +1786,51 @@ def css(rgba) -> str:
index=data.index,
columns=data.columns,
)
+
+
+def _highlight_between(
+ data: FrameOrSeries,
+ props: str,
+ left: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None,
+ right: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None,
+ inclusive: bool | str = True,
+) -> np.ndarray:
+ """
+ Return an array of css props based on condition of data values within given range.
+ """
+ if np.iterable(left) and not isinstance(left, str):
+ left = _validate_apply_axis_arg(
+ left, "left", None, data # type: ignore[arg-type]
+ )
+
+ if np.iterable(right) and not isinstance(right, str):
+ right = _validate_apply_axis_arg(
+ right, "right", None, data # type: ignore[arg-type]
+ )
+
+ # get ops with correct boundary attribution
+ if inclusive == "both":
+ ops = (operator.ge, operator.le)
+ elif inclusive == "neither":
+ ops = (operator.gt, operator.lt)
+ elif inclusive == "left":
+ ops = (operator.ge, operator.lt)
+ elif inclusive == "right":
+ ops = (operator.gt, operator.le)
+ else:
+ raise ValueError(
+ f"'inclusive' values can be 'both', 'left', 'right', or 'neither' "
+ f"got {inclusive}"
+ )
+
+ g_left = (
+ ops[0](data, left)
+ if left is not None
+ else np.full(data.shape, True, dtype=bool)
+ )
+ l_right = (
+ ops[1](data, right)
+ if right is not None
+ else np.full(data.shape, True, dtype=bool)
+ )
+ return np.where(g_left & l_right, props, "")
| The current structure is:
```
def highlight_between:
def f:
return x
return self.apply(f)
```
So that `Styler.highlight_quantile` can reuse code this PR refactors the above to:
```
def highlight_between:
return self.apply(_new_name)
def _new_name:
return x
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/40928 | 2021-04-13T15:34:33Z | 2021-04-13T23:19:12Z | 2021-04-13T23:19:12Z | 2021-04-14T05:08:38Z |
Fix 40420: Interpret NaN in clip() as no bound. | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 34269185bccd6..34d833bb52d99 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -910,6 +910,7 @@ Other
- Bug in :meth:`DataFrame.equals`, :meth:`Series.equals`, :meth:`Index.equals` with object-dtype containing ``np.datetime64("NaT")`` or ``np.timedelta64("NaT")`` (:issue:`39650`)
- Bug in :func:`pandas.util.show_versions` where console JSON output was not proper JSON (:issue:`39701`)
- Bug in :meth:`DataFrame.convert_dtypes` incorrectly raised ValueError when called on an empty DataFrame (:issue:`40393`)
+- Bug in :meth:`DataFrame.clip` not interpreting missing values as no threshold (:issue:`40420`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d69e933164118..c77a3717c4c03 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7341,8 +7341,6 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace):
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
- subset = method(threshold, axis=axis) | isna(self)
-
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
@@ -7351,6 +7349,18 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = align_method_FRAME(self, threshold, axis, flex=None)[1]
+
+ # GH 40420
+ # Treat missing thresholds as no bounds, not clipping the values
+ if is_list_like(threshold):
+ fill_value = np.inf if method.__name__ == "le" else -np.inf
+ threshold_inf = threshold.fillna(fill_value)
+ else:
+ threshold_inf = threshold
+
+ subset = method(threshold_inf, axis=axis) | isna(self)
+
+ # GH 40420
return self.where(subset, threshold, axis=axis, inplace=inplace)
@overload
@@ -7482,10 +7492,12 @@ def clip(
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
- threshold will be set to it.
+ threshold will be set to it. A missing
+ threshold (e.g `NA`) will not clip the value.
upper : float or array_like, default None
Maximum threshold value. All values above this
- threshold will be set to it.
+ threshold will be set to it. A missing
+ threshold (e.g `NA`) will not clip the value.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
@@ -7546,6 +7558,25 @@ def clip(
2 0 3
3 6 8
4 5 3
+
+ Clips using specific lower threshold per column element, with missing values:
+
+ >>> t = pd.Series([2, -4, np.NaN, 6, 3])
+ >>> t
+ 0 2.0
+ 1 -4.0
+ 2 NaN
+ 3 6.0
+ 4 3.0
+ dtype: float64
+
+ >>> df.clip(t, axis=0)
+ col_0 col_1
+ 0 9 2
+ 1 -3 -4
+ 2 0 6
+ 3 6 8
+ 4 5 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
@@ -7558,9 +7589,17 @@ def clip(
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
- if not is_list_like(lower) and np.any(isna(lower)):
+ isna_lower = isna(lower)
+ if not is_list_like(lower):
+ if np.any(isna_lower):
+ lower = None
+ elif np.all(isna_lower):
lower = None
- if not is_list_like(upper) and np.any(isna(upper)):
+ isna_upper = isna(upper)
+ if not is_list_like(upper):
+ if np.any(isna_upper):
+ upper = None
+ elif np.all(isna_upper):
upper = None
# GH 2747 (arguments were reversed)
diff --git a/pandas/tests/frame/methods/test_clip.py b/pandas/tests/frame/methods/test_clip.py
index 8a2374a414482..6525109da4394 100644
--- a/pandas/tests/frame/methods/test_clip.py
+++ b/pandas/tests/frame/methods/test_clip.py
@@ -144,17 +144,25 @@ def test_clip_with_na_args(self, float_frame):
tm.assert_frame_equal(float_frame.clip(np.nan), float_frame)
tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan), float_frame)
- # GH#19992
+ # GH#19992 and adjusted in GH#40420
df = DataFrame({"col_0": [1, 2, 3], "col_1": [4, 5, 6], "col_2": [7, 8, 9]})
result = df.clip(lower=[4, 5, np.nan], axis=0)
expected = DataFrame(
- {"col_0": [4, 5, np.nan], "col_1": [4, 5, np.nan], "col_2": [7, 8, np.nan]}
+ {"col_0": [4, 5, 3], "col_1": [4, 5, 6], "col_2": [7, 8, 9]}
)
tm.assert_frame_equal(result, expected)
result = df.clip(lower=[4, 5, np.nan], axis=1)
expected = DataFrame(
- {"col_0": [4, 4, 4], "col_1": [5, 5, 6], "col_2": [np.nan, np.nan, np.nan]}
+ {"col_0": [4, 4, 4], "col_1": [5, 5, 6], "col_2": [7, 8, 9]}
)
tm.assert_frame_equal(result, expected)
+
+ # GH#40420
+ data = {"col_0": [9, -3, 0, -1, 5], "col_1": [-2, -7, 6, 8, -5]}
+ df = DataFrame(data)
+ t = Series([2, -4, np.NaN, 6, 3])
+ result = df.clip(lower=t, axis=0)
+ expected = DataFrame({"col_0": [9, -3, 0, 6, 5], "col_1": [2, -4, 6, 8, 3]})
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_clip.py b/pandas/tests/series/methods/test_clip.py
index 528e95f65c8f4..442718d677101 100644
--- a/pandas/tests/series/methods/test_clip.py
+++ b/pandas/tests/series/methods/test_clip.py
@@ -49,8 +49,13 @@ def test_clip_with_na_args(self):
tm.assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3]))
# GH#19992
- tm.assert_series_equal(s.clip(lower=[0, 4, np.nan]), Series([1, 4, np.nan]))
- tm.assert_series_equal(s.clip(upper=[1, np.nan, 1]), Series([1, np.nan, 1]))
+ tm.assert_series_equal(s.clip(lower=[0, 4, np.nan]), Series([1, 4, 3]))
+ tm.assert_series_equal(s.clip(upper=[1, np.nan, 1]), Series([1, 2, 1]))
+
+ # GH#40420
+ s = Series([1, 2, 3])
+ result = s.clip(0, [np.nan, np.nan, np.nan])
+ tm.assert_series_equal(s, result)
def test_clip_against_series(self):
# GH#6966
| - [x] closes #40420
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40927 | 2021-04-13T15:34:11Z | 2021-04-23T14:58:08Z | 2021-04-23T14:58:07Z | 2021-12-15T06:44:52Z |
ENH: `Styler.highlight_quantile` method | diff --git a/doc/source/_static/style/hq_ax1.png b/doc/source/_static/style/hq_ax1.png
new file mode 100644
index 0000000000000..95d840b7c8f99
Binary files /dev/null and b/doc/source/_static/style/hq_ax1.png differ
diff --git a/doc/source/_static/style/hq_axNone.png b/doc/source/_static/style/hq_axNone.png
new file mode 100644
index 0000000000000..40a33b194e640
Binary files /dev/null and b/doc/source/_static/style/hq_axNone.png differ
diff --git a/doc/source/_static/style/hq_props.png b/doc/source/_static/style/hq_props.png
new file mode 100644
index 0000000000000..1f11749096690
Binary files /dev/null and b/doc/source/_static/style/hq_props.png differ
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 85d9acff353be..bba71b0d62e92 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -119,7 +119,9 @@ to accept more universal CSS language for arguments, such as ``'color:red;'`` in
to allow custom CSS highlighting instead of default background coloring (:issue:`40242`).
Enhancements to other built-in methods include extending the :meth:`.Styler.background_gradient`
method to shade elements based on a given gradient map and not be restricted only to
-values in the DataFrame (:issue:`39930` :issue:`22727` :issue:`28901`).
+values in the DataFrame (:issue:`39930` :issue:`22727` :issue:`28901`). Additional
+built-in methods such as :meth:`.Styler.highlight_between` and :meth:`.Styler.highlight_quantile`
+have been added (:issue:`39821` and :issue:`40926`).
The :meth:`.Styler.apply` now consistently allows functions with ``ndarray`` output to
allow more flexible development of UDFs when ``axis`` is ``None`` ``0`` or ``1`` (:issue:`39393`).
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index f51f81d7c3504..7998365234682 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -1355,6 +1355,7 @@ def highlight_null(
Styler.highlight_max: Highlight the maximum with a style.
Styler.highlight_min: Highlight the minimum with a style.
Styler.highlight_between: Highlight a defined range with a style.
+ Styler.highlight_quantile: Highlight values defined by a quantile with a style.
"""
def f(data: DataFrame, props: str) -> np.ndarray:
@@ -1403,6 +1404,7 @@ def highlight_max(
Styler.highlight_null: Highlight missing values with a style.
Styler.highlight_min: Highlight the minimum with a style.
Styler.highlight_between: Highlight a defined range with a style.
+ Styler.highlight_quantile: Highlight values defined by a quantile with a style.
"""
def f(data: FrameOrSeries, props: str) -> np.ndarray:
@@ -1451,6 +1453,7 @@ def highlight_min(
Styler.highlight_null: Highlight missing values with a style.
Styler.highlight_max: Highlight the maximum with a style.
Styler.highlight_between: Highlight a defined range with a style.
+ Styler.highlight_quantile: Highlight values defined by a quantile with a style.
"""
def f(data: FrameOrSeries, props: str) -> np.ndarray:
@@ -1507,6 +1510,7 @@ def highlight_between(
Styler.highlight_null: Highlight missing values with a style.
Styler.highlight_max: Highlight the maximum with a style.
Styler.highlight_min: Highlight the minimum with a style.
+ Styler.highlight_quantile: Highlight values defined by a quantile with a style.
Notes
-----
@@ -1570,6 +1574,110 @@ def highlight_between(
inclusive=inclusive,
)
+ def highlight_quantile(
+ self,
+ subset: IndexLabel | None = None,
+ color: str = "yellow",
+ axis: Axis | None = 0,
+ q_left: float = 0.0,
+ q_right: float = 1.0,
+ interpolation: str = "linear",
+ inclusive: str = "both",
+ props: str | None = None,
+ ) -> Styler:
+ """
+ Highlight values defined by a quantile with a style.
+
+ .. versionadded:: 1.3.0
+
+ Parameters
+ ----------
+ subset : IndexSlice, default None
+ A valid slice for ``data`` to limit the style application to.
+ color : str, default 'yellow'
+ Background color to use for highlighting
+ axis : {0 or 'index', 1 or 'columns', None}, default 0
+ Axis along which to determine and highlight quantiles. If ``None`` quantiles
+ are measured over the entire DataFrame. See examples.
+ q_left : float, default 0
+ Left bound, in [0, q_right), for the target quantile range.
+ q_right : float, default 1
+ Right bound, in (q_left, 1], for the target quantile range.
+ interpolation : {‘linear’, ‘lower’, ‘higher’, ‘midpoint’, ‘nearest’}
+ Argument passed to ``Series.quantile`` or ``DataFrame.quantile`` for
+ quantile estimation.
+ inclusive : {'both', 'neither', 'left', 'right'}
+ Identify whether quantile bounds are closed or open.
+ props : str, default None
+ CSS properties to use for highlighting. If ``props`` is given, ``color``
+ is not used.
+
+ Returns
+ -------
+ self : Styler
+
+ See Also
+ --------
+ Styler.highlight_null: Highlight missing values with a style.
+ Styler.highlight_max: Highlight the maximum with a style.
+ Styler.highlight_min: Highlight the minimum with a style.
+ Styler.highlight_between: Highlight a defined range with a style.
+
+ Notes
+ -----
+ This function does not work with ``str`` dtypes.
+
+ Examples
+ --------
+ Using ``axis=None`` and apply a quantile to all collective data
+
+ >>> df = pd.DataFrame(np.arange(10).reshape(2,5) + 1)
+ >>> df.style.highlight_quantile(axis=None, q_left=0.8, color="#fffd75")
+
+ .. figure:: ../../_static/style/hq_axNone.png
+
+ Or highlight quantiles row-wise or column-wise, in this case by row-wise
+
+ >>> df.style.highlight_quantile(axis=1, q_left=0.8, color="#fffd75")
+
+ .. figure:: ../../_static/style/hq_ax1.png
+
+ Use ``props`` instead of default background coloring
+
+ >>> df.style.highlight_quantile(axis=None, q_left=0.2, q_right=0.8,
+ ... props='font-weight:bold;color:#e83e8c')
+
+ .. figure:: ../../_static/style/hq_props.png
+ """
+ subset_ = slice(None) if subset is None else subset
+ subset_ = non_reducing_slice(subset_)
+ data = self.data.loc[subset_]
+
+ # after quantile is found along axis, e.g. along rows,
+ # applying the calculated quantile to alternate axis, e.g. to each column
+ kwargs = {"q": [q_left, q_right], "interpolation": interpolation}
+ if axis in [0, "index"]:
+ q = data.quantile(axis=axis, numeric_only=False, **kwargs)
+ axis_apply: int | None = 1
+ elif axis in [1, "columns"]:
+ q = data.quantile(axis=axis, numeric_only=False, **kwargs)
+ axis_apply = 0
+ else: # axis is None
+ q = Series(data.to_numpy().ravel()).quantile(**kwargs)
+ axis_apply = None
+
+ if props is None:
+ props = f"background-color: {color};"
+ return self.apply(
+ _highlight_between, # type: ignore[arg-type]
+ axis=axis_apply,
+ subset=subset,
+ props=props,
+ left=q.iloc[0],
+ right=q.iloc[1],
+ inclusive=inclusive,
+ )
+
@classmethod
def from_custom_template(cls, searchpath, name):
"""
diff --git a/pandas/tests/io/formats/style/test_highlight.py b/pandas/tests/io/formats/style/test_highlight.py
index b8c194f8955ab..9e956e055d1aa 100644
--- a/pandas/tests/io/formats/style/test_highlight.py
+++ b/pandas/tests/io/formats/style/test_highlight.py
@@ -142,3 +142,54 @@ def test_highlight_between_inclusive(styler, inclusive, expected):
kwargs = {"left": 0, "right": 1, "subset": IndexSlice[[0, 1], :]}
result = styler.highlight_between(**kwargs, inclusive=inclusive)._compute()
assert result.ctx == expected
+
+
+@pytest.mark.parametrize(
+ "kwargs",
+ [
+ {"q_left": 0.5, "q_right": 1, "axis": 0}, # base case
+ {"q_left": 0.5, "q_right": 1, "axis": None}, # test axis
+ {"q_left": 0, "q_right": 1, "subset": IndexSlice[2, :]}, # test subset
+ {"q_left": 0.5, "axis": 0}, # test no high
+ {"q_right": 1, "subset": IndexSlice[2, :], "axis": 1}, # test no low
+ {"q_left": 0.5, "axis": 0, "props": "background-color: yellow"}, # tst prop
+ ],
+)
+def test_highlight_quantile(styler, kwargs):
+ expected = {
+ (2, 0): [("background-color", "yellow")],
+ (2, 1): [("background-color", "yellow")],
+ }
+ result = styler.highlight_quantile(**kwargs)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.skipif(np.__version__[:4] in ["1.16", "1.17"], reason="Numpy Issue #14831")
+@pytest.mark.parametrize(
+ "f,kwargs",
+ [
+ ("highlight_min", {"axis": 1, "subset": IndexSlice[1, :]}),
+ ("highlight_max", {"axis": 0, "subset": [0]}),
+ ("highlight_quantile", {"axis": None, "q_left": 0.6, "q_right": 0.8}),
+ ("highlight_between", {"subset": [0]}),
+ ],
+)
+@pytest.mark.parametrize(
+ "df",
+ [
+ DataFrame([[0, 10], [20, 30]], dtype=int),
+ DataFrame([[0, 10], [20, 30]], dtype=float),
+ DataFrame([[0, 10], [20, 30]], dtype="datetime64[ns]"),
+ DataFrame([[0, 10], [20, 30]], dtype=str),
+ DataFrame([[0, 10], [20, 30]], dtype="timedelta64[ns]"),
+ ],
+)
+def test_all_highlight_dtypes(f, kwargs, df):
+ if f == "highlight_quantile" and isinstance(df.iloc[0, 0], (str)):
+ return None # quantile incompatible with str
+ if f == "highlight_between":
+ kwargs["left"] = df.iloc[1, 0] # set the range low for testing
+
+ expected = {(1, 0): [("background-color", "yellow")]}
+ result = getattr(df.style, f)(**kwargs)._compute().ctx
+ assert result == expected
|

| https://api.github.com/repos/pandas-dev/pandas/pulls/40926 | 2021-04-13T15:24:37Z | 2021-04-20T23:21:23Z | 2021-04-20T23:21:23Z | 2021-04-21T06:08:34Z |
DOC: inconsistency in doc argument types #40560 | diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 41e1ff41d9ba2..cba1a15934bb8 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -24,7 +24,7 @@
is_datetime_or_timedelta_dtype,
is_extension_array_dtype,
is_integer,
- is_integer_dtype,
+ is_numeric_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
@@ -488,7 +488,7 @@ def _coerce_to_type(x):
# Will properly support in the future.
# https://github.com/pandas-dev/pandas/pull/31290
# https://github.com/pandas-dev/pandas/issues/31389
- elif is_extension_array_dtype(x.dtype) and is_integer_dtype(x.dtype):
+ elif is_extension_array_dtype(x.dtype) and is_numeric_dtype(x.dtype):
x = x.to_numpy(dtype=np.float64, na_value=np.nan)
if dtype is not None:
| This PR is used to address documentation inconsistencies as in issue #40560. All Array_like has been changed to array-like. However, Array_like that is used as a parameter cannot be changed as it would be identified as "array minus like". | https://api.github.com/repos/pandas-dev/pandas/pulls/40925 | 2021-04-13T14:24:34Z | 2021-04-16T04:13:24Z | null | 2021-04-16T04:13:24Z |
BUG: concat with DTI and all-None Index | diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst
index cdfc2e5686b91..16f9284802407 100644
--- a/doc/source/whatsnew/v1.2.5.rst
+++ b/doc/source/whatsnew/v1.2.5.rst
@@ -14,7 +14,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
-
+- Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`)
-
-
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 119326622ff3f..28dfdc23eb76e 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2977,7 +2977,8 @@ def _union(self, other: Index, sort):
# worth making this faster? a very unusual case
value_set = set(lvals)
value_list.extend([x for x in rvals if x not in value_set])
- return Index(value_list)._values # do type inference here
+ # If objects are unorderable, we must have object dtype.
+ return np.array(value_list, dtype=object)
elif not other.is_unique and not self.is_unique:
# self and other both have duplicates
diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py
index 46029b8a695ea..2ed38670e88a6 100644
--- a/pandas/tests/reshape/concat/test_concat.py
+++ b/pandas/tests/reshape/concat/test_concat.py
@@ -607,3 +607,23 @@ def test_concat_repeated_keys(keys, integrity):
tuples = list(zip(keys, ["a", "b", "c"]))
expected = Series([1, 2, 3], index=MultiIndex.from_tuples(tuples))
tm.assert_series_equal(result, expected)
+
+
+def test_concat_null_object_with_dti():
+ # GH#40841
+ dti = pd.DatetimeIndex(
+ ["2021-04-08 21:21:14+00:00"], dtype="datetime64[ns, UTC]", name="Time (UTC)"
+ )
+ right = DataFrame(data={"C": [0.5274]}, index=dti)
+
+ idx = Index([None], dtype="object", name="Maybe Time (UTC)")
+ left = DataFrame(data={"A": [None], "B": [np.nan]}, index=idx)
+
+ result = concat([left, right], axis="columns")
+
+ exp_index = Index([None, dti[0]], dtype=object)
+ expected = DataFrame(
+ {"A": [None, None], "B": [np.nan, np.nan], "C": [np.nan, 0.5274]},
+ index=exp_index,
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #40841
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40924 | 2021-04-13T14:19:16Z | 2021-04-14T12:53:22Z | 2021-04-14T12:53:21Z | 2021-04-14T14:27:06Z |
PR for DOC: inconsistency in doc argument types #40560 | diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 9e2dd846f0379..486e424003103 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1546,13 +1546,13 @@ def searchsorted(arr, value, side="left", sorter=None) -> np.ndarray:
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
- value : array_like
+ value : array-like
Values to insert into `arr`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
- sorter : 1-D array_like, optional
+ sorter : 1-D array-like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py
index 201b9fdcc51cc..ac34ae7e56036 100644
--- a/pandas/core/array_algos/replace.py
+++ b/pandas/core/array_algos/replace.py
@@ -45,14 +45,14 @@ def compare_or_regex_search(
a: ArrayLike, b: Union[Scalar, Pattern], regex: bool, mask: np.ndarray
) -> Union[ArrayLike, bool]:
"""
- Compare two array_like inputs of the same shape or two scalar values
+ Compare two array-like inputs of the same shape or two scalar values
Calls operator.eq or re.search, depending on regex argument. If regex is
True, perform an element-wise regex matching.
Parameters
----------
- a : array_like
+ a : array-like
b : scalar or regex pattern
regex : bool
mask : np.ndarray[bool]
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 933b829e0b29f..be9b57f698a6f 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -826,13 +826,13 @@ def searchsorted(self, value, side="left", sorter=None):
Parameters
----------
- value : array_like
+ value : array-like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
- sorter : 1-D array_like, optional
+ sorter : 1-D array-like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 05ec9d543976a..13271c227fea6 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1159,13 +1159,13 @@ def factorize(self, sort: bool = False, na_sentinel: Optional[int] = -1):
Parameters
----------
- value : array_like
+ value : array-like
Values to insert into `self`.
side : {{'left', 'right'}}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
- sorter : 1-D array_like, optional
+ sorter : 1-D array-like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index a0727500ecc81..aaea5976a2acf 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -6010,8 +6010,8 @@ def any(self, *args, **kwargs):
Returns
-------
- any : bool or array_like (if axis is specified)
- A single element array_like may be converted to bool.
+ any : bool or array-like (if axis is specified)
+ A single element array-like may be converted to bool.
See Also
--------
@@ -6054,7 +6054,7 @@ def all(self, *args, **kwargs):
Returns
-------
- all : bool or array_like (if axis is specified)
+ all : bool or array-like (if axis is specified)
A single element array_like may be converted to bool.
See Also
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 953796a35db7c..3ea212234edc0 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3893,7 +3893,7 @@ def maybe_droplevels(index: Index, key) -> Index:
return index
-def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
+def _coerce_indexer_frozen(array-like, categories, copy: bool = False) -> np.ndarray:
"""
Coerce the array_like indexer to the smallest integer dtype that can encode all
of the given categories.
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 603cc6a6ff1f2..14052894c8ecc 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1280,7 +1280,7 @@ def _unstack(self, unstacker, fill_value, new_placement):
-------
blocks : list of Block
New blocks of unstacked values.
- mask : array_like of bool
+ mask : array-like of bool
The mask of columns of `blocks` we should keep.
"""
new_values, mask = unstacker.get_new_values(
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 8849eb0670faa..642c7666a4894 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -524,11 +524,11 @@ def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
Parameters
----------
- xi : array_like
+ xi : array-like
sorted 1D array of x-coordinates
- yi : array_like or list of array-likes
+ yi : array-like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
- order: None or int or array_like of ints. Default: None.
+ order: None or int or array-like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 55097054fec88..783e2274f9971 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -427,7 +427,7 @@ def hist_frame(
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
- color : str, array_like, or dict, optional
+ color : str, array-like, or dict, optional
The color for each of the DataFrame's columns. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
@@ -1568,7 +1568,7 @@ def scatter(self, x, y, s=None, c=None, **kwargs):
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
- s : str, scalar or array_like, optional
+ s : str, scalar or array-like, optional
The size of each point. Possible values are:
- A string with the name of the column to be used for marker's size.
| This PR is used to address documentation inconsistencies as in issue #40560. Array_like have been changed to array-like | https://api.github.com/repos/pandas-dev/pandas/pulls/40923 | 2021-04-13T13:56:53Z | 2021-04-13T14:16:32Z | null | 2021-04-13T14:16:32Z |
Backport PR #40761 on branch 1.2.x (DOC: collapse subpages in sidebar for API reference docs) | diff --git a/doc/_templates/sidebar-nav-bs.html b/doc/_templates/sidebar-nav-bs.html
new file mode 100644
index 0000000000000..7e0043e771e72
--- /dev/null
+++ b/doc/_templates/sidebar-nav-bs.html
@@ -0,0 +1,9 @@
+<nav class="bd-links" id="bd-docs-nav" aria-label="Main navigation">
+ <div class="bd-toc-item active">
+ {% if pagename.startswith("reference") %}
+ {{ generate_nav_html("sidebar", maxdepth=4, collapse=True, includehidden=True, titles_only=True) }}
+ {% else %}
+ {{ generate_nav_html("sidebar", maxdepth=4, collapse=False, includehidden=True, titles_only=True) }}
+ {% endif %}
+ </div>
+</nav>
| Backport PR #40761: DOC: collapse subpages in sidebar for API reference docs | https://api.github.com/repos/pandas-dev/pandas/pulls/40922 | 2021-04-13T12:26:39Z | 2021-04-13T14:21:25Z | 2021-04-13T14:21:25Z | 2021-04-13T14:21:26Z |
ENH: Nullable integer/boolean/floating support in lib inferencing functions | diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi
index 4c647056641f5..11f578d9e4d60 100644
--- a/pandas/_libs/lib.pyi
+++ b/pandas/_libs/lib.pyi
@@ -5,6 +5,8 @@ from typing import (
Any,
Callable,
Generator,
+ Literal,
+ overload,
)
import numpy as np
@@ -70,12 +72,24 @@ def maybe_convert_objects(
convert_to_nullable_integer: bool = False,
) -> ArrayLike: ...
+@overload
def maybe_convert_numeric(
values: np.ndarray, # np.ndarray[object]
na_values: set,
convert_empty: bool = True,
coerce_numeric: bool = False,
-) -> np.ndarray: ...
+ convert_to_masked_nullable: Literal[False] = ...,
+) -> tuple[np.ndarray, None]: ...
+
+@overload
+def maybe_convert_numeric(
+ values: np.ndarray, # np.ndarray[object]
+ na_values: set,
+ convert_empty: bool = True,
+ coerce_numeric: bool = False,
+ *,
+ convert_to_masked_nullable: Literal[True],
+) -> tuple[np.ndarray, np.ndarray]: ...
# TODO: restrict `arr`?
def ensure_string_array(
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 77375cac39921..7b42c07b65c89 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -2029,7 +2029,8 @@ def maybe_convert_numeric(
set na_values,
bint convert_empty=True,
bint coerce_numeric=False,
-) -> ndarray:
+ bint convert_to_masked_nullable=False,
+) -> tuple[np.ndarray, np.ndarray | None]:
"""
Convert object array to a numeric array if possible.
@@ -2053,14 +2054,20 @@ def maybe_convert_numeric(
numeric array has no suitable numerical dtype to return (i.e. uint64,
int32, uint8). If set to False, the original object array will be
returned. Otherwise, a ValueError will be raised.
-
+ convert_to_masked_nullable : bool, default False
+ Whether to return a mask for the converted values. This also disables
+ upcasting for ints with nulls to float64.
Returns
-------
np.ndarray
Array of converted object values to numerical ones.
+
+ Optional[np.ndarray]
+ If convert_to_masked_nullable is True,
+ returns a boolean mask for the converted values, otherwise returns None.
"""
if len(values) == 0:
- return np.array([], dtype='i8')
+ return (np.array([], dtype='i8'), None)
# fastpath for ints - try to convert all based on first value
cdef:
@@ -2070,7 +2077,7 @@ def maybe_convert_numeric(
try:
maybe_ints = values.astype('i8')
if (maybe_ints == values).all():
- return maybe_ints
+ return (maybe_ints, None)
except (ValueError, OverflowError, TypeError):
pass
@@ -2084,21 +2091,40 @@ def maybe_convert_numeric(
ndarray[int64_t] ints = np.empty(n, dtype='i8')
ndarray[uint64_t] uints = np.empty(n, dtype='u8')
ndarray[uint8_t] bools = np.empty(n, dtype='u1')
+ ndarray[uint8_t] mask = np.zeros(n, dtype="u1")
float64_t fval
+ bint allow_null_in_int = convert_to_masked_nullable
for i in range(n):
val = values[i]
+ # We only want to disable NaNs showing as float if
+ # a) convert_to_masked_nullable = True
+ # b) no floats have been seen ( assuming an int shows up later )
+ # However, if no ints present (all null array), we need to return floats
+ allow_null_in_int = convert_to_masked_nullable and not seen.float_
if val.__hash__ is not None and val in na_values:
- seen.saw_null()
+ if allow_null_in_int:
+ seen.null_ = True
+ mask[i] = 1
+ else:
+ if convert_to_masked_nullable:
+ mask[i] = 1
+ seen.saw_null()
floats[i] = complexes[i] = NaN
elif util.is_float_object(val):
fval = val
if fval != fval:
seen.null_ = True
-
+ if allow_null_in_int:
+ mask[i] = 1
+ else:
+ if convert_to_masked_nullable:
+ mask[i] = 1
+ seen.float_ = True
+ else:
+ seen.float_ = True
floats[i] = complexes[i] = fval
- seen.float_ = True
elif util.is_integer_object(val):
floats[i] = complexes[i] = val
@@ -2121,7 +2147,13 @@ def maybe_convert_numeric(
floats[i] = uints[i] = ints[i] = bools[i] = val
seen.bool_ = True
elif val is None or val is C_NA:
- seen.saw_null()
+ if allow_null_in_int:
+ seen.null_ = True
+ mask[i] = 1
+ else:
+ if convert_to_masked_nullable:
+ mask[i] = 1
+ seen.saw_null()
floats[i] = complexes[i] = NaN
elif hasattr(val, '__len__') and len(val) == 0:
if convert_empty or seen.coerce_numeric:
@@ -2142,9 +2174,11 @@ def maybe_convert_numeric(
if fval in na_values:
seen.saw_null()
floats[i] = complexes[i] = NaN
+ mask[i] = 1
else:
if fval != fval:
seen.null_ = True
+ mask[i] = 1
floats[i] = fval
@@ -2152,7 +2186,10 @@ def maybe_convert_numeric(
as_int = int(val)
if as_int in na_values:
- seen.saw_null()
+ mask[i] = 1
+ seen.null_ = True
+ if not allow_null_in_int:
+ seen.float_ = True
else:
seen.saw_int(as_int)
@@ -2180,22 +2217,34 @@ def maybe_convert_numeric(
floats[i] = NaN
if seen.check_uint64_conflict():
- return values
+ return (values, None)
+
+ # This occurs since we disabled float nulls showing as null in anticipation
+ # of seeing ints that were never seen. So then, we return float
+ if allow_null_in_int and seen.null_ and not seen.int_:
+ seen.float_ = True
if seen.complex_:
- return complexes
+ return (complexes, None)
elif seen.float_:
- return floats
+ if seen.null_ and convert_to_masked_nullable:
+ return (floats, mask.view(np.bool_))
+ return (floats, None)
elif seen.int_:
+ if seen.null_ and convert_to_masked_nullable:
+ if seen.uint_:
+ return (uints, mask.view(np.bool_))
+ else:
+ return (ints, mask.view(np.bool_))
if seen.uint_:
- return uints
+ return (uints, None)
else:
- return ints
+ return (ints, None)
elif seen.bool_:
- return bools.view(np.bool_)
+ return (bools.view(np.bool_), None)
elif seen.uint_:
- return uints
- return ints
+ return (uints, None)
+ return (ints, None)
@cython.boundscheck(False)
diff --git a/pandas/_libs/ops.pyi b/pandas/_libs/ops.pyi
index b4f42f217a5db..11d67dfb93d5f 100644
--- a/pandas/_libs/ops.pyi
+++ b/pandas/_libs/ops.pyi
@@ -1,6 +1,8 @@
from typing import (
Any,
Callable,
+ Literal,
+ overload,
)
import numpy as np
@@ -35,9 +37,19 @@ def vec_binop(
op: _BinOp, # binary operator
) -> np.ndarray: ...
+@overload
+def maybe_convert_bool(
+ arr: np.ndarray, # np.ndarray[object]
+ true_values=...,
+ false_values=...,
+ convert_to_masked_nullable: Literal[False] = ...,
+) -> tuple[np.ndarray, None]: ...
+@overload
def maybe_convert_bool(
arr: np.ndarray, # np.ndarray[object]
true_values=...,
- false_values=...
-) -> np.ndarray: ...
+ false_values=...,
+ *,
+ convert_to_masked_nullable: Literal[True],
+) -> tuple[np.ndarray, np.ndarray]: ...
diff --git a/pandas/_libs/ops.pyx b/pandas/_libs/ops.pyx
index 7951bb5c093ef..ac8a7f2cc57f7 100644
--- a/pandas/_libs/ops.pyx
+++ b/pandas/_libs/ops.pyx
@@ -24,10 +24,7 @@ import_array()
from pandas._libs.missing cimport checknull
-from pandas._libs.util cimport (
- UINT8_MAX,
- is_nan,
-)
+from pandas._libs.util cimport is_nan
@cython.wraparound(False)
@@ -212,7 +209,7 @@ def scalar_binop(object[:] values, object val, object op) -> ndarray:
else:
result[i] = op(x, val)
- return maybe_convert_bool(result.base)
+ return maybe_convert_bool(result.base)[0]
@cython.wraparound(False)
@@ -254,21 +251,25 @@ def vec_binop(object[:] left, object[:] right, object op) -> ndarray:
else:
raise
- return maybe_convert_bool(result.base) # `.base` to access np.ndarray
+ return maybe_convert_bool(result.base)[0] # `.base` to access np.ndarray
def maybe_convert_bool(ndarray[object] arr,
- true_values=None, false_values=None) -> ndarray:
+ true_values=None,
+ false_values=None,
+ convert_to_masked_nullable=False
+ ) -> tuple[np.ndarray, np.ndarray | None]:
cdef:
Py_ssize_t i, n
ndarray[uint8_t] result
+ ndarray[uint8_t] mask
object val
set true_vals, false_vals
- int na_count = 0
+ bint has_na = False
n = len(arr)
result = np.empty(n, dtype=np.uint8)
-
+ mask = np.zeros(n, dtype=np.uint8)
# the defaults
true_vals = {'True', 'TRUE', 'true'}
false_vals = {'False', 'FALSE', 'false'}
@@ -291,16 +292,19 @@ def maybe_convert_bool(ndarray[object] arr,
result[i] = 1
elif val in false_vals:
result[i] = 0
- elif isinstance(val, float):
- result[i] = UINT8_MAX
- na_count += 1
+ elif is_nan(val):
+ mask[i] = 1
+ result[i] = 0 # Value here doesn't matter, will be replaced w/ nan
+ has_na = True
else:
- return arr
+ return (arr, None)
- if na_count > 0:
- mask = result == UINT8_MAX
- arr = result.view(np.bool_).astype(object)
- np.putmask(arr, mask, np.nan)
- return arr
+ if has_na:
+ if convert_to_masked_nullable:
+ return (result.view(np.bool_), mask.view(np.bool_))
+ else:
+ arr = result.view(np.bool_).astype(object)
+ np.putmask(arr, mask, np.nan)
+ return (arr, None)
else:
- return result.view(np.bool_)
+ return (result.view(np.bool_), None)
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index d4ecec667cc86..46dc97214e2f6 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1356,7 +1356,7 @@ def soft_convert_objects(
return converted
if numeric and is_object_dtype(values.dtype):
- converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)
+ converted, _ = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)
# If all NaNs, then do not-alter
values = converted if not isna(converted).all() else values
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index b7116ee95949b..6f5e8ab900dfd 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -180,7 +180,7 @@ def to_numeric(arg, errors="raise", downcast=None):
values = ensure_object(values)
coerce_numeric = errors not in ("ignore", "raise")
try:
- values = lib.maybe_convert_numeric(
+ values, _ = lib.maybe_convert_numeric(
values, set(), coerce_numeric=coerce_numeric
)
except (ValueError, TypeError):
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py
index a011a789bf17c..11fbdb860592e 100644
--- a/pandas/io/parsers/base_parser.py
+++ b/pandas/io/parsers/base_parser.py
@@ -676,7 +676,7 @@ def _infer_types(self, values, na_values, try_num_bool=True):
if try_num_bool and is_object_dtype(values.dtype):
# exclude e.g DatetimeIndex here
try:
- result = lib.maybe_convert_numeric(values, na_values, False)
+ result, _ = lib.maybe_convert_numeric(values, na_values, False)
except (ValueError, TypeError):
# e.g. encountering datetime string gets ValueError
# TypeError can be raised in floatify
@@ -690,7 +690,7 @@ def _infer_types(self, values, na_values, try_num_bool=True):
na_count = parsers.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
- result = libops.maybe_convert_bool(
+ result, _ = libops.maybe_convert_bool(
np.asarray(values),
true_values=self.true_values,
false_values=self.false_values,
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index d1e6409307915..076cc155f3626 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -24,6 +24,7 @@
from pandas._libs import (
lib,
missing as libmissing,
+ ops as libops,
)
import pandas.util._test_decorators as td
@@ -61,7 +62,11 @@
Timestamp,
)
import pandas._testing as tm
-from pandas.core.arrays import IntegerArray
+from pandas.core.arrays import (
+ BooleanArray,
+ FloatingArray,
+ IntegerArray,
+)
@pytest.fixture(params=[True, False], ids=str)
@@ -416,73 +421,116 @@ def test_isneginf_scalar(self, value, expected):
result = libmissing.isneginf_scalar(value)
assert result is expected
+ @pytest.mark.parametrize(
+ "convert_to_masked_nullable, exp",
+ [
+ (
+ True,
+ BooleanArray(
+ np.array([True, False], dtype="bool"), np.array([False, True])
+ ),
+ ),
+ (False, np.array([True, np.nan], dtype="object")),
+ ],
+ )
+ def test_maybe_convert_nullable_boolean(self, convert_to_masked_nullable, exp):
+ # GH 40687
+ arr = np.array([True, np.NaN], dtype=object)
+ result = libops.maybe_convert_bool(
+ arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
+ )
+ if convert_to_masked_nullable:
+ tm.assert_extension_array_equal(BooleanArray(*result), exp)
+ else:
+ result = result[0]
+ tm.assert_numpy_array_equal(result, exp)
+
+ @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
@pytest.mark.parametrize("coerce_numeric", [True, False])
@pytest.mark.parametrize(
"infinity", ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"]
)
@pytest.mark.parametrize("prefix", ["", "-", "+"])
- def test_maybe_convert_numeric_infinities(self, coerce_numeric, infinity, prefix):
+ def test_maybe_convert_numeric_infinities(
+ self, coerce_numeric, infinity, prefix, convert_to_masked_nullable
+ ):
# see gh-13274
- result = lib.maybe_convert_numeric(
+ result, _ = lib.maybe_convert_numeric(
np.array([prefix + infinity], dtype=object),
na_values={"", "NULL", "nan"},
coerce_numeric=coerce_numeric,
+ convert_to_masked_nullable=convert_to_masked_nullable,
)
expected = np.array([np.inf if prefix in ["", "+"] else -np.inf])
tm.assert_numpy_array_equal(result, expected)
- def test_maybe_convert_numeric_infinities_raises(self):
+ @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
+ def test_maybe_convert_numeric_infinities_raises(self, convert_to_masked_nullable):
msg = "Unable to parse string"
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(["foo_inf"], dtype=object),
na_values={"", "NULL", "nan"},
coerce_numeric=False,
+ convert_to_masked_nullable=convert_to_masked_nullable,
)
- def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
+ @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
+ def test_maybe_convert_numeric_post_floatify_nan(
+ self, coerce, convert_to_masked_nullable
+ ):
# see gh-13314
data = np.array(["1.200", "-999.000", "4.500"], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
- out = lib.maybe_convert_numeric(data, nan_values, coerce)
- tm.assert_numpy_array_equal(out, expected)
+ out = lib.maybe_convert_numeric(
+ data,
+ nan_values,
+ coerce,
+ convert_to_masked_nullable=convert_to_masked_nullable,
+ )
+ if convert_to_masked_nullable:
+ expected = FloatingArray(expected, np.isnan(expected))
+ tm.assert_extension_array_equal(expected, FloatingArray(*out))
+ else:
+ out = out[0]
+ tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(["inf", "inf", "inf"], dtype="O")
- result = lib.maybe_convert_numeric(arr, set(), False)
+ result, _ = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(["-inf", "-inf", "-inf"], dtype="O")
- result = lib.maybe_convert_numeric(arr, set(), False)
+ result, _ = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(["42E", "2E", "99e", "6e"], dtype="O")
- result = lib.maybe_convert_numeric(arr, set(), False, True)
+ result, _ = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, "apple"], dtype=object)
- result = lib.maybe_convert_numeric(arr, set(), False, True)
+ result, _ = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2 ** 63], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
- tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
+ tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
arr = np.array([str(2 ** 63)], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
- tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
+ tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
arr = np.array([np.uint64(2 ** 63)], dtype=object)
exp = np.array([2 ** 63], dtype=np.uint64)
- tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
+ tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)
@pytest.mark.parametrize(
"arr",
@@ -495,17 +543,33 @@ def test_convert_numeric_uint64(self):
)
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
- result = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce)
+ result, _ = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
- def test_convert_numeric_uint64_nan_values(self, coerce):
+ @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
+ def test_convert_numeric_uint64_nan_values(
+ self, coerce, convert_to_masked_nullable
+ ):
arr = np.array([2 ** 63, 2 ** 63 + 1], dtype=object)
na_values = {2 ** 63}
expected = (
np.array([np.nan, 2 ** 63 + 1], dtype=float) if coerce else arr.copy()
)
- result = lib.maybe_convert_numeric(arr, na_values, coerce_numeric=coerce)
+ result = lib.maybe_convert_numeric(
+ arr,
+ na_values,
+ coerce_numeric=coerce,
+ convert_to_masked_nullable=convert_to_masked_nullable,
+ )
+ if convert_to_masked_nullable and coerce:
+ expected = IntegerArray(
+ np.array([0, 2 ** 63 + 1], dtype="u8"),
+ np.array([True, False], dtype="bool"),
+ )
+ result = IntegerArray(*result)
+ else:
+ result = result[0] # discard mask
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
@@ -519,16 +583,33 @@ def test_convert_numeric_uint64_nan_values(self, coerce):
np.array([str(-1), str(2 ** 63)], dtype=object),
],
)
- def test_convert_numeric_int64_uint64(self, case, coerce):
+ @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
+ def test_convert_numeric_int64_uint64(
+ self, case, coerce, convert_to_masked_nullable
+ ):
expected = case.astype(float) if coerce else case.copy()
- result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
+ result, _ = lib.maybe_convert_numeric(
+ case,
+ set(),
+ coerce_numeric=coerce,
+ convert_to_masked_nullable=convert_to_masked_nullable,
+ )
+
tm.assert_almost_equal(result, expected)
- def test_convert_numeric_string_uint64(self):
+ @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
+ def test_convert_numeric_string_uint64(self, convert_to_masked_nullable):
# GH32394
result = lib.maybe_convert_numeric(
- np.array(["uint64"], dtype=object), set(), coerce_numeric=True
+ np.array(["uint64"], dtype=object),
+ set(),
+ coerce_numeric=True,
+ convert_to_masked_nullable=convert_to_masked_nullable,
)
+ if convert_to_masked_nullable:
+ result = FloatingArray(*result)
+ else:
+ result = result[0]
assert np.isnan(result)
@pytest.mark.parametrize("value", [-(2 ** 63) - 1, 2 ** 64])
@@ -608,6 +689,54 @@ def test_maybe_convert_objects_nullable_integer(self, exp):
tm.assert_extension_array_equal(result, exp)
+ @pytest.mark.parametrize(
+ "convert_to_masked_nullable, exp",
+ [
+ (True, IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True]))),
+ (False, np.array([2, np.nan], dtype="float64")),
+ ],
+ )
+ def test_maybe_convert_numeric_nullable_integer(
+ self, convert_to_masked_nullable, exp
+ ):
+ # GH 40687
+ arr = np.array([2, np.NaN], dtype=object)
+ result = lib.maybe_convert_numeric(
+ arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
+ )
+ if convert_to_masked_nullable:
+ result = IntegerArray(*result)
+ tm.assert_extension_array_equal(result, exp)
+ else:
+ result = result[0]
+ tm.assert_numpy_array_equal(result, exp)
+
+ @pytest.mark.parametrize(
+ "convert_to_masked_nullable, exp",
+ [
+ (
+ True,
+ FloatingArray(
+ np.array([2.0, 0.0], dtype="float64"), np.array([False, True])
+ ),
+ ),
+ (False, np.array([2.0, np.nan], dtype="float64")),
+ ],
+ )
+ def test_maybe_convert_numeric_floating_array(
+ self, convert_to_masked_nullable, exp
+ ):
+ # GH 40687
+ arr = np.array([2.0, np.nan], dtype=object)
+ result = lib.maybe_convert_numeric(
+ arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
+ )
+ if convert_to_masked_nullable:
+ tm.assert_extension_array_equal(FloatingArray(*result), exp)
+ else:
+ result = result[0]
+ tm.assert_numpy_array_equal(result, exp)
+
def test_maybe_convert_objects_bool_nan(self):
# GH32146
ind = Index([True, False, np.nan], dtype=object)
| Precursor for #40687
| https://api.github.com/repos/pandas-dev/pandas/pulls/40914 | 2021-04-13T03:00:36Z | 2021-05-05T12:46:39Z | 2021-05-05T12:46:39Z | 2021-05-29T17:55:11Z |
TST: Add test to verify DataFrame's constructor doesn't convert Nones to strings on string columns | diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index ca68885fdc470..cb9fbdf6fa4a8 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2396,6 +2396,20 @@ def check_views():
# assert b[0] == 0
assert df.iloc[0, 2] == 0
+ def test_consistency_of_string_columns_with_none(self):
+ # https://github.com/pandas-dev/pandas/issues/32218
+
+ df = DataFrame(["1", "2", None], columns=["a"], dtype="str")
+
+ assert isinstance(df.loc[0, "a"], str)
+ assert df.loc[2, "a"] is None
+
+ # Equivalent df created from dict, None remains NoneType
+ df = DataFrame({"a": ["1", "2", None]}, dtype="str")
+
+ assert isinstance(df.loc[0, "a"], str)
+ assert df.loc[2, "a"] is None
+
class TestDataFrameConstructorWithDatetimeTZ:
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
| - [x] closes #32218
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
### whatsnew
Adds a very simple test to verify that `None`s are not casted to strings as `"None"` inside the DataFrame's constructor for string columns.
Questions:
* Should the test be more general?
* What is the proper location for the test?
| https://api.github.com/repos/pandas-dev/pandas/pulls/40912 | 2021-04-13T01:31:14Z | 2021-09-01T00:10:51Z | null | 2021-09-01T00:10:51Z |
Add type to arg of series/drop-duplicates | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5ba68aaa5c16d..93e3fbf769118 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2004,11 +2004,13 @@ def unique(self) -> ArrayLike:
return super().unique()
@overload
- def drop_duplicates(self, keep=..., inplace: Literal[False] = ...) -> Series:
+ def drop_duplicates(
+ self, keep: str | bool = ..., inplace: Literal[False] = ...
+ ) -> Series:
...
@overload
- def drop_duplicates(self, keep, inplace: Literal[True]) -> None:
+ def drop_duplicates(self, keep: str | bool, inplace: Literal[True]) -> None:
...
@overload
@@ -2016,10 +2018,14 @@ def drop_duplicates(self, *, inplace: Literal[True]) -> None:
...
@overload
- def drop_duplicates(self, keep=..., inplace: bool = ...) -> Series | None:
+ def drop_duplicates(
+ self, keep: str | bool = ..., inplace: bool = ...
+ ) -> Series | None:
...
- def drop_duplicates(self, keep="first", inplace=False) -> Series | None:
+ def drop_duplicates(
+ self, keep: str | bool = "first", inplace=False
+ ) -> Series | None:
"""
Return Series with duplicate values removed.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
I noticed in https://github.com/pandas-dev/pandas/pull/40867 that keep didn't specify a type, so adding it here.
| https://api.github.com/repos/pandas-dev/pandas/pulls/40910 | 2021-04-12T23:08:28Z | 2021-05-22T16:04:59Z | null | 2021-05-22T16:04:59Z |
REF: re-use maybe_cast_result in Series.combine | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index f62aa95e1e814..b68ec3c473a41 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -102,7 +102,6 @@
if TYPE_CHECKING:
from typing import Literal
- from pandas import Series
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
@@ -375,7 +374,11 @@ def trans(x):
def maybe_cast_result(
- result: ArrayLike, obj: Series, numeric_only: bool = False, how: str = ""
+ result: ArrayLike,
+ dtype: DtypeObj,
+ numeric_only: bool = False,
+ how: str = "",
+ same_dtype: bool = True,
) -> ArrayLike:
"""
Try casting result to a different type if appropriate
@@ -384,19 +387,20 @@ def maybe_cast_result(
----------
result : array-like
Result to cast.
- obj : Series
+ dtype : np.dtype or ExtensionDtype
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
how : str, default ""
How the result was computed.
+ same_dtype : bool, default True
+ Specify dtype when calling _from_sequence
Returns
-------
result : array-like
result maybe casted to the dtype.
"""
- dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
@@ -407,7 +411,10 @@ def maybe_cast_result(
# things like counts back to categorical
cls = dtype.construct_array_type()
- result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
+ if same_dtype:
+ result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
+ else:
+ result = maybe_cast_to_extension_array(cls, result)
elif (numeric_only and is_numeric_dtype(dtype)) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index bc5318a1f367c..2e7031ab2888e 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -788,7 +788,7 @@ def _aggregate_series_pure_python(self, obj: Series, func: F):
result[label] = res
out = lib.maybe_convert_objects(result, try_float=False)
- out = maybe_cast_result(out, obj, numeric_only=True)
+ out = maybe_cast_result(out, obj.dtype, numeric_only=True)
return out, counts
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5ba68aaa5c16d..1c49a28ef93ed 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -60,15 +60,13 @@
from pandas.core.dtypes.cast import (
convert_dtypes,
maybe_box_native,
- maybe_cast_to_extension_array,
+ maybe_cast_result,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
is_bool,
- is_categorical_dtype,
is_dict_like,
- is_extension_array_dtype,
is_integer,
is_iterator,
is_list_like,
@@ -3079,22 +3077,9 @@ def combine(self, other, func, fill_value=None) -> Series:
new_values = [func(lv, other) for lv in self._values]
new_name = self.name
- if is_categorical_dtype(self.dtype):
- pass
- elif is_extension_array_dtype(self.dtype):
- # TODO: can we do this for only SparseDtype?
- # The function can return something of any type, so check
- # if the type is compatible with the calling EA.
-
- # error: Incompatible types in assignment (expression has type
- # "Union[ExtensionArray, ndarray]", variable has type "List[Any]")
- new_values = maybe_cast_to_extension_array( # type: ignore[assignment]
- # error: Argument 2 to "maybe_cast_to_extension_array" has incompatible
- # type "List[Any]"; expected "Union[ExtensionArray, ndarray]"
- type(self._values),
- new_values, # type: ignore[arg-type]
- )
- return self._constructor(new_values, index=new_index, name=new_name)
+ res_values = sanitize_array(new_values, None)
+ res_values = maybe_cast_result(res_values, self.dtype, same_dtype=False)
+ return self._constructor(res_values, index=new_index, name=new_name)
def combine_first(self, other) -> Series:
"""
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 55f9d85574f94..7a3f88d0d6c41 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -362,13 +362,18 @@ def _create_arithmetic_method(cls, op):
DecimalArrayWithoutCoercion._add_arithmetic_ops()
-def test_combine_from_sequence_raises():
+def test_combine_from_sequence_raises(monkeypatch):
# https://github.com/pandas-dev/pandas/issues/22850
- ser = pd.Series(
- DecimalArrayWithoutFromSequence(
- [decimal.Decimal("1.0"), decimal.Decimal("2.0")]
- )
- )
+ cls = DecimalArrayWithoutFromSequence
+
+ @classmethod
+ def construct_array_type(cls):
+ return DecimalArrayWithoutFromSequence
+
+ monkeypatch.setattr(DecimalDtype, "construct_array_type", construct_array_type)
+
+ arr = cls([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
+ ser = pd.Series(arr)
result = ser.combine(ser, operator.add)
# note: object dtype
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40909 | 2021-04-12T22:23:22Z | 2021-04-13T15:25:30Z | 2021-04-13T15:25:30Z | 2021-04-13T16:54:33Z |
ENH: Make maybe_convert_object respect dtype itemsize | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index a286d152f03c3..8cebdbe5ca7b0 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -221,6 +221,7 @@ Other enhancements
- :meth:`pandas.read_csv` and :meth:`pandas.read_json` expose the argument ``encoding_errors`` to control how encoding errors are handled (:issue:`39450`)
- :meth:`.GroupBy.any` and :meth:`.GroupBy.all` use Kleene logic with nullable data types (:issue:`37506`)
- :meth:`.GroupBy.any` and :meth:`.GroupBy.all` return a ``BooleanDtype`` for columns with nullable data types (:issue:`33449`)
+- Constructing a :class:`DataFrame` or :class:`Series` with the ``data`` argument being a Python iterable that is *not* a NumPy ``ndarray`` consisting of NumPy scalars will now result in a dtype with a precision the maximum of the NumPy scalars; this was already the case when ``data`` is a NumPy ``ndarray`` (:issue:`40908`)
- Add keyword ``sort`` to :func:`pivot_table` to allow non-sorting of the result (:issue:`39143`)
-
@@ -689,7 +690,7 @@ Numeric
- Bug in :meth:`DataFrame.apply` and :meth:`DataFrame.agg` when passed argument ``func="size"`` would operate on the entire ``DataFrame`` instead of rows or columns (:issue:`39934`)
- Bug in :meth:`DataFrame.transform` would raise ``SpecificationError`` when passed a dictionary and columns were missing; will now raise a ``KeyError`` instead (:issue:`40004`)
- Bug in :meth:`DataFrameGroupBy.rank` giving incorrect results with ``pct=True`` and equal values between consecutive groups (:issue:`40518`)
--
+- Bug in :meth:`Series.count` would result in an ``int32`` result on 32-bit platforms when argument ``level=None`` (:issue:`40908`)
Conversion
^^^^^^^^^^
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index a5ed650d72911..77375cac39921 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -68,6 +68,9 @@ cdef extern from "numpy/arrayobject.h":
object fields
tuple names
+cdef extern from "numpy/ndarrayobject.h":
+ bint PyArray_CheckScalar(obj) nogil
+
cdef extern from "src/parse_helper.h":
int floatify(object, float64_t *result, int *maybe_int) except -1
@@ -209,6 +212,24 @@ def is_scalar(val: object) -> bool:
or is_offset_object(val))
+cdef inline int64_t get_itemsize(object val):
+ """
+ Get the itemsize of a NumPy scalar, -1 if not a NumPy scalar.
+
+ Parameters
+ ----------
+ val : object
+
+ Returns
+ -------
+ is_ndarray : bool
+ """
+ if PyArray_CheckScalar(val):
+ return cnp.PyArray_DescrFromScalar(val).itemsize
+ else:
+ return -1
+
+
def is_iterator(obj: object) -> bool:
"""
Check if the object is an iterator.
@@ -2188,7 +2209,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False,
Parameters
----------
- values : ndarray[object]
+ objects : ndarray[object]
Array of object elements to convert.
try_float : bool, default False
If an array-like object contains only float or NaN values is
@@ -2212,7 +2233,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False,
Array of converted object values to more specific dtypes if applicable.
"""
cdef:
- Py_ssize_t i, n
+ Py_ssize_t i, n, itemsize_max = 0
ndarray[float64_t] floats
ndarray[complex128_t] complexes
ndarray[int64_t] ints
@@ -2245,6 +2266,10 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False,
for i in range(n):
val = objects[i]
+ if itemsize_max != -1:
+ itemsize = get_itemsize(val)
+ if itemsize > itemsize_max or itemsize == -1:
+ itemsize_max = itemsize
if val is None:
seen.null_ = True
@@ -2346,50 +2371,51 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False,
seen.object_ = True
if not seen.object_:
+ result = None
if not safe:
if seen.null_ or seen.nan_:
if seen.is_float_or_complex:
if seen.complex_:
- return complexes
+ result = complexes
elif seen.float_:
- return floats
+ result = floats
elif seen.int_:
if convert_to_nullable_integer:
from pandas.core.arrays import IntegerArray
- return IntegerArray(ints, mask)
+ result = IntegerArray(ints, mask)
else:
- return floats
+ result = floats
elif seen.nan_:
- return floats
+ result = floats
else:
if not seen.bool_:
if seen.datetime_:
if not seen.numeric_ and not seen.timedelta_:
- return datetimes
+ result = datetimes
elif seen.timedelta_:
if not seen.numeric_:
- return timedeltas
+ result = timedeltas
elif seen.nat_:
if not seen.numeric_:
if convert_datetime and convert_timedelta:
# TODO: array full of NaT ambiguity resolve here needed
pass
elif convert_datetime:
- return datetimes
+ result = datetimes
elif convert_timedelta:
- return timedeltas
+ result = timedeltas
else:
if seen.complex_:
- return complexes
+ result = complexes
elif seen.float_:
- return floats
+ result = floats
elif seen.int_:
if seen.uint_:
- return uints
+ result = uints
else:
- return ints
+ result = ints
elif seen.is_bool:
- return bools.view(np.bool_)
+ result = bools.view(np.bool_)
else:
# don't cast int to float, etc.
@@ -2397,41 +2423,49 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=False,
if seen.is_float_or_complex:
if seen.complex_:
if not seen.int_:
- return complexes
+ result = complexes
elif seen.float_ or seen.nan_:
if not seen.int_:
- return floats
+ result = floats
else:
if not seen.bool_:
if seen.datetime_:
if not seen.numeric_ and not seen.timedelta_:
- return datetimes
+ result = datetimes
elif seen.timedelta_:
if not seen.numeric_:
- return timedeltas
+ result = timedeltas
elif seen.nat_:
if not seen.numeric_:
if convert_datetime and convert_timedelta:
# TODO: array full of NaT ambiguity resolve here needed
pass
elif convert_datetime:
- return datetimes
+ result = datetimes
elif convert_timedelta:
- return timedeltas
+ result = timedeltas
else:
if seen.complex_:
if not seen.int_:
- return complexes
+ result = complexes
elif seen.float_ or seen.nan_:
if not seen.int_:
- return floats
+ result = floats
elif seen.int_:
if seen.uint_:
- return uints
+ result = uints
else:
- return ints
+ result = ints
elif seen.is_bool and not seen.nan_:
- return bools.view(np.bool_)
+ result = bools.view(np.bool_)
+
+ if result is uints or result is ints or result is floats or result is complexes:
+ # cast to the largest itemsize when all values are NumPy scalars
+ if itemsize_max > 0 and itemsize_max != result.dtype.itemsize:
+ result = result.astype(result.dtype.kind + str(itemsize_max))
+ return result
+ elif result is not None:
+ return result
return objects
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 440bc4c89e647..e19d521bda3df 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1891,7 +1891,7 @@ def count(self, level=None):
2
"""
if level is None:
- return notna(self._values).sum()
+ return notna(self._values).sum().astype("int64")
else:
warnings.warn(
"Using the level keyword in DataFrame and Series aggregations is "
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 907991b97ead1..d1e6409307915 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -31,6 +31,7 @@
from pandas.core.dtypes.common import (
ensure_int32,
is_bool,
+ is_complex,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
@@ -614,6 +615,69 @@ def test_maybe_convert_objects_bool_nan(self):
out = lib.maybe_convert_objects(ind.values, safe=1)
tm.assert_numpy_array_equal(out, exp)
+ @pytest.mark.parametrize(
+ "data0",
+ [
+ True,
+ 1,
+ 1.0,
+ 1.0 + 1.0j,
+ np.int8(1),
+ np.int16(1),
+ np.int32(1),
+ np.int64(1),
+ np.float16(1),
+ np.float32(1),
+ np.float64(1),
+ np.complex64(1),
+ np.complex128(1),
+ ],
+ )
+ @pytest.mark.parametrize(
+ "data1",
+ [
+ True,
+ 1,
+ 1.0,
+ 1.0 + 1.0j,
+ np.int8(1),
+ np.int16(1),
+ np.int32(1),
+ np.int64(1),
+ np.float16(1),
+ np.float32(1),
+ np.float64(1),
+ np.complex64(1),
+ np.complex128(1),
+ ],
+ )
+ def test_maybe_convert_objects_itemsize(self, data0, data1):
+ # GH 40908
+ data = [data0, data1]
+ arr = np.array(data, dtype="object")
+
+ common_kind = np.find_common_type(
+ [type(data0), type(data1)], scalar_types=[]
+ ).kind
+ kind0 = "python" if not hasattr(data0, "dtype") else data0.dtype.kind
+ kind1 = "python" if not hasattr(data1, "dtype") else data1.dtype.kind
+ if kind0 != "python" and kind1 != "python":
+ kind = common_kind
+ itemsize = max(data0.dtype.itemsize, data1.dtype.itemsize)
+ elif is_bool(data0) or is_bool(data1):
+ kind = "bool" if (is_bool(data0) and is_bool(data1)) else "object"
+ itemsize = ""
+ elif is_complex(data0) or is_complex(data1):
+ kind = common_kind
+ itemsize = 16
+ else:
+ kind = common_kind
+ itemsize = 8
+
+ expected = np.array(data, dtype=f"{kind}{itemsize}")
+ result = lib.maybe_convert_objects(arr)
+ tm.assert_numpy_array_equal(result, expected)
+
def test_mixed_dtypes_remain_object_array(self):
# GH14956
arr = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object)
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 759277a47f62b..f0d3fb7ff9e1b 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -16,10 +16,6 @@
import numpy as np
import pytest
-from pandas.compat import (
- IS64,
- is_platform_windows,
-)
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import is_object_dtype
@@ -428,9 +424,6 @@ def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
]:
mark = pytest.mark.xfail(reason="result dtype.fill_value mismatch")
request.node.add_marker(mark)
- elif is_platform_windows() or not IS64:
- mark = pytest.mark.xfail(reason="results are int32, expected int64")
- request.node.add_marker(mark)
super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py
index e8d0a789e7cbd..35ad9f3e9693b 100644
--- a/pandas/tests/frame/constructors/test_from_records.py
+++ b/pandas/tests/frame/constructors/test_from_records.py
@@ -117,7 +117,7 @@ def test_from_records_sequencelike(self):
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result["C"], df["C"])
- tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))
+ tm.assert_series_equal(result["E1"], df["E1"])
def test_from_records_sequencelike_empty(self):
# empty case
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index d8f93f047e74b..e6ed60dc2bb08 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -10,6 +10,8 @@
import numpy as np
import pytest
+from pandas.compat import np_version_under1p20
+
import pandas as pd
from pandas import (
DataFrame,
@@ -1514,8 +1516,14 @@ def test_replace_commutative(self, df, to_replace, exp):
np.float64(1),
],
)
- def test_replace_replacer_dtype(self, replacer):
+ def test_replace_replacer_dtype(self, request, replacer):
# GH26632
+ if np.isscalar(replacer) and replacer.dtype.itemsize < 8:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ np_version_under1p20, reason="np.putmask doesn't coerce dtype"
+ )
+ )
df = DataFrame(["a"])
result = df.replace({"a": replacer, "b": replacer})
expected = DataFrame([replacer])
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index ca68885fdc470..c565567754da0 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1924,12 +1924,12 @@ def test_constructor_for_list_with_dtypes(self):
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.dtypes
- expected = Series([np.dtype("int64")] * 5)
+ expected = Series([np.dtype("int")] * 5)
tm.assert_series_equal(result, expected)
df = DataFrame([np.array(np.arange(5), dtype="int32") for x in range(5)])
result = df.dtypes
- expected = Series([np.dtype("int64")] * 5)
+ expected = Series([np.dtype("int32")] * 5)
tm.assert_series_equal(result, expected)
# overflow issue? (we always expected int64 upcasting here)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index c5620d6d8c06c..3f6485be871f1 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -99,10 +99,7 @@ def max_value(group):
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
- expected = Series(
- [np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
- index=["A", "B", "C", "D", "value"],
- )
+ expected = df.dtypes
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 7642f78076dcb..2bb9b51df2285 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -641,7 +641,7 @@ def test_where_series_complex128(self, fill_val, exp_dtype):
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
- exp = klass([1 + 1j, values[1], 3 + 3j, values[3]])
+ exp = klass([1 + 1j, values[1], 3 + 3j, values[3]], dtype=exp_dtype)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
| - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
Precursor to #40790
This adds support for e.g. `float32` NumPy dtypes to maybe_convert_object. If any non-NumPy scalar is hit, the behavior is the same as master. This is my first foray into the NumPy C-API, so any tips are appreciated. In particular, I couldn't figure out how to use the C API to do the cast:
result = result.astype(result.dtype.kind + str(itemsize))
Not sure if there should also be specific logic for EAs/nullable types.
From a full ASV run:
```
before after ratio
[7d4757b4] [c1288962]
<maybe_convert_object_itemsize~12> <maybe_convert_object_itemsize>
+ 768±8μs 1.01±0ms 1.32 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, False, 'float')
+ 779±9μs 1.02±0ms 1.31 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, True, 'float')
+ 579±5μs 710±20μs 1.23 arithmetic.NumericInferOps.time_divide(<class 'numpy.int8'>)
+ 608±30μs 715±20μs 1.18 arithmetic.NumericInferOps.time_divide(<class 'numpy.uint8'>)
+ 3.18±0.2μs 3.69±0.3μs 1.16 index_cached_properties.IndexCache.time_engine('UInt64Index')
+ 1.58±0.01ms 1.83±0.01ms 1.16 ctors.SeriesConstructors.time_series_constructor(<function arr_dict at 0x7f5964729820>, False, 'float')
+ 3.30±0.2μs 3.80±0.5μs 1.15 index_cached_properties.IndexCache.time_engine('TimedeltaIndex')
+ 943±80ns 1.08±0.1μs 1.15 index_cached_properties.IndexCache.time_inferred_type('Float64Index')
+ 1.73±0.01ms 1.98±0ms 1.14 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, True, 'int')
+ 1.72±0.01ms 1.96±0ms 1.14 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, False, 'int')
+ 5.19±0.03μs 5.93±0.3μs 1.14 tslibs.period.TimeDT64ArrToPeriodArr.time_dt64arr_to_periodarr(1, 6000, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 1.65±0.01ms 1.89±0.01ms 1.14 ctors.SeriesConstructors.time_series_constructor(<function arr_dict at 0x7f5964729820>, True, 'float')
+ 6.06±0.06μs 6.91±0.1μs 1.14 tslibs.tz_convert.TimeTZConvert.time_tz_convert_from_utc(1, datetime.timezone(datetime.timedelta(seconds=3600)))
+ 7.46±0.04μs 8.47±0.5μs 1.14 tslibs.offsets.OffestDatetimeArithmetic.time_subtract(<BusinessDay>)
+ 1.15±0.09μs 1.27±0.08μs 1.11 index_cached_properties.IndexCache.time_values('UInt64Index')
- 11.7±1ms 10.5±0.1ms 0.90 algos.isin.IsinAlmostFullWithRandomInt.time_isin(<class 'numpy.object_'>, 18, 'outside')
- 641±30ns 577±20ns 0.90 index_cached_properties.IndexCache.time_is_monotonic('RangeIndex')
- 641±20ns 576±20ns 0.90 index_cached_properties.IndexCache.time_shape('Int64Index')
- 3.37±0.03ms 3.02±0.02ms 0.90 timeseries.ResampleSeries.time_resample('period', '5min', 'ohlc')
- 787±20ns 702±20ns 0.89 index_cached_properties.IndexCache.time_is_monotonic_decreasing('RangeIndex')
- 1.25±0.04μs 1.12±0.04μs 0.89 index_cached_properties.IndexCache.time_is_all_dates('Int64Index')
- 1.50±0.1ms 1.33±0.02ms 0.88 dtypes.SelectDtypes.time_select_dtype_string_exclude('float32')
- 1.19±0.09μs 1.05±0.08μs 0.88 index_cached_properties.IndexCache.time_inferred_type('UInt64Index')
- 57.6±1μs 50.3±0.2μs 0.87 frame_methods.Dtypes.time_frame_dtypes
- 168±3μs 146±0.6μs 0.87 algos.isin.IsinWithArangeSorted.time_isin(<class 'numpy.uint64'>, 8000)
- 552±20ns 480±20ns 0.87 index_cached_properties.IndexCache.time_is_monotonic_increasing('RangeIndex')
- 1.50±0.1ms 1.30±0ms 0.87 dtypes.SelectDtypes.time_select_dtype_string_exclude('complex64')
- 1.89±0ms 1.64±0.01ms 0.87 period.DataFramePeriodColumn.time_set_index
- 11.0±0.2μs 9.50±0.08μs 0.87 period.Indexing.time_series_loc
- 110±20ms 93.1±0.06ms 0.85 algos.isin.IsInLongSeriesLookUpDominates.time_isin('float32', 1000, 'random_hits')
- 392±30ns 330±10ns 0.84 index_cached_properties.IndexCache.time_inferred_type('RangeIndex')
- 4.45±0.4ms 3.68±0.02ms 0.83 algorithms.Factorize.time_factorize(True, True, 'Int64')
- 9.60±0.8ms 7.75±0.01ms 0.81 algorithms.Factorize.time_factorize(True, False, 'string')
- 11.2±0.8ms 8.90±0.1ms 0.80 algos.isin.IsinAlmostFullWithRandomInt.time_isin(<class 'numpy.object_'>, 18, 'inside')
- 615±30ns 484±30ns 0.79 index_cached_properties.IndexCache.time_is_monotonic_increasing('Int64Index')
- 73.8±20ms 55.4±0.5ms 0.75 algos.isin.IsInLongSeriesLookUpDominates.time_isin('float32', 1000, 'monotone_hits')
- 180±0.1μs 128±0.5μs 0.71 indexing_engines.NumericEngineIndexing.time_get_loc((<class 'pandas._libs.index.Int8Engine'>, <class 'numpy.int8'>), 'monotonic_incr')
- 128±40ms 90.8±0.03ms 0.71 algos.isin.IsInLongSeriesLookUpDominates.time_isin('object', 5, 'monotone_hits')
- 186±0.3μs 131±2μs 0.70 indexing_engines.NumericEngineIndexing.time_get_loc((<class 'pandas._libs.index.Int16Engine'>, <class 'numpy.int16'>), 'monotonic_incr')
- 1.72±0.4μs 1.17±0.1μs 0.68 index_cached_properties.IndexCache.time_inferred_type('TimedeltaIndex')
- 216±0.8μs 140±3μs 0.65 indexing_engines.NumericEngineIndexing.time_get_loc((<class 'pandas._libs.index.UInt32Engine'>, <class 'numpy.uint32'>), 'monotonic_incr')
- 1.55±0.01ms 972±7μs 0.63 algos.isin.IsinWithArangeSorted.time_isin(<class 'numpy.int64'>, 100000)
SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.
PERFORMANCE DECREASED.
```
Specific timings via %timeit on `maybe_convert_objects` directly:
```
np.array of python integers:
127 ms ± 80.9 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- PR
120 ms ± 155 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- master
np.array of int32:
179 ms ± 1.21 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- PR
133 ms ± 274 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- master
np.array of int32 with last one a python int:
178 ms ± 1.71 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- PR
133 ms ± 106 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) <--- master
```
<details>
<summary>timeit code</summary>
```
values = np.array(range(1_000_000), dtype="object")
print('np.array of python integers:')
%timeit maybe_convert_objects(values)
print()
values = np.array([np.int32(1)] * 1_000_000, dtype="object")
print('np.array of int32:')
%timeit maybe_convert_objects(values)
print()
values = np.array([np.int32(1)] * 999_999 + [1], dtype="object")
print('np.array of int32 with last one a python int:')
%timeit maybe_convert_objects(values)
print()
```
<details> | https://api.github.com/repos/pandas-dev/pandas/pulls/40908 | 2021-04-12T22:18:39Z | 2021-04-21T12:53:28Z | 2021-04-21T12:53:28Z | 2021-04-21T15:37:42Z |
BUG: to_string truncation column with index=False | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 0ec9758477eba..fcfac2d2d63f1 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -740,6 +740,7 @@ I/O
- Bug in :func:`read_hdf` returning unexpected records when filtering on categorical string columns using ``where`` parameter (:issue:`39189`)
- Bug in :func:`read_sas` raising ``ValueError`` when ``datetimes`` were null (:issue:`39725`)
- Bug in :func:`read_excel` dropping empty values from single-column spreadsheets (:issue:`39808`)
+- Bug in :meth:`DataFrame.to_string` misplacing the truncation column when ``index=False`` (:issue:`40907`)
Period
^^^^^^
diff --git a/pandas/io/formats/string.py b/pandas/io/formats/string.py
index 84333cfc441b2..de53646b5f95f 100644
--- a/pandas/io/formats/string.py
+++ b/pandas/io/formats/string.py
@@ -77,7 +77,8 @@ def _insert_dot_separators(self, strcols: List[List[str]]) -> List[List[str]]:
def _insert_dot_separator_horizontal(
self, strcols: List[List[str]], index_length: int
) -> List[List[str]]:
- strcols.insert(self.fmt.tr_col_num + 1, [" ..."] * index_length)
+ tr_col_num = self.fmt.tr_col_num + 1 if self.fmt.index else self.fmt.tr_col_num
+ strcols.insert(tr_col_num, [" ..."] * index_length)
return strcols
def _insert_dot_separator_vertical(
diff --git a/pandas/tests/io/formats/test_to_string.py b/pandas/tests/io/formats/test_to_string.py
index 551734f343dfa..f9b3cac3527ef 100644
--- a/pandas/tests/io/formats/test_to_string.py
+++ b/pandas/tests/io/formats/test_to_string.py
@@ -106,6 +106,40 @@ def test_format_remove_leading_space_dataframe(input_array, expected):
assert df == expected
+@pytest.mark.parametrize(
+ "max_cols, expected",
+ [
+ (
+ 10,
+ [
+ " 0 1 2 3 4 ... 6 7 8 9 10",
+ " 0 0 0 0 0 ... 0 0 0 0 0",
+ " 0 0 0 0 0 ... 0 0 0 0 0",
+ ],
+ ),
+ (
+ 9,
+ [
+ " 0 1 2 3 ... 7 8 9 10",
+ " 0 0 0 0 ... 0 0 0 0",
+ " 0 0 0 0 ... 0 0 0 0",
+ ],
+ ),
+ (
+ 1,
+ [
+ " 0 ...",
+ " 0 ...",
+ " 0 ...",
+ ],
+ ),
+ ],
+)
+def test_truncation_col_placement_no_index(max_cols, expected):
+ df = DataFrame([[0] * 11] * 2)
+ assert df.to_string(index=False, max_cols=max_cols).split("\n") == expected
+
+
def test_to_string_unicode_columns(float_frame):
df = DataFrame({"\u03c3": np.arange(10.0)})
| - [x] closes #40904
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40907 | 2021-04-12T21:02:05Z | 2021-04-16T01:11:25Z | 2021-04-16T01:11:25Z | 2021-04-16T01:11:32Z |
STYLE use pandas-dev-flaker | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9424b2f34eaff..5b11490479088 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -38,7 +38,10 @@ repos:
rev: 3.9.0
hooks:
- id: flake8
- additional_dependencies: [flake8-comprehensions>=3.1.0, flake8-bugbear>=21.3.2]
+ additional_dependencies:
+ - flake8-comprehensions==3.1.0
+ - flake8-bugbear==21.3.2
+ - pandas-dev-flaker==0.2.0
- id: flake8
name: flake8 (cython)
types: [cython]
@@ -71,7 +74,11 @@ repos:
rev: v1.2.2
hooks:
- id: yesqa
- additional_dependencies: [flake8==3.9.0]
+ additional_dependencies:
+ - flake8==3.9.0
+ - flake8-comprehensions==3.1.0
+ - flake8-bugbear==21.3.2
+ - pandas-dev-flaker==0.2.0
- repo: local
hooks:
- id: flake8-rst
@@ -82,28 +89,6 @@ repos:
types: [rst]
args: [--filename=*.rst]
additional_dependencies: [flake8-rst==0.7.0, flake8==3.7.9]
- - id: frame-or-series-union
- name: Check for use of Union[Series, DataFrame] instead of FrameOrSeriesUnion alias
- entry: Union\[.*(Series,.*DataFrame|DataFrame,.*Series).*\]
- language: pygrep
- types: [python]
- exclude: ^pandas/_typing\.py$
- - id: inconsistent-namespace-usage
- name: 'Check for inconsistent use of pandas namespace'
- entry: python scripts/check_for_inconsistent_pandas_namespace.py
- language: python
- types: [python]
- - id: no-os-remove
- name: Check code for instances of os.remove
- entry: os\.remove
- language: pygrep
- types: [python]
- files: ^pandas/tests/
- exclude: |
- (?x)^
- pandas/tests/io/excel/test_writers\.py
- |pandas/tests/io/pytables/common\.py
- |pandas/tests/io/pytables/test_store\.py$
- id: unwanted-patterns
name: Unwanted patterns
language: pygrep
@@ -113,52 +98,10 @@ repos:
\#\ type:\ (?!ignore)
|\#\ type:\s?ignore(?!\[)
- # foo._class__ instead of type(foo)
- |\.__class__
-
- # np.bool/np.object instead of np.bool_/np.object_
- |np\.bool[^_8]
- |np\.object[^_8]
-
- # imports from pandas.core.common instead of `import pandas.core.common as com`
- |from\ pandas\.core\.common\ import
- |from\ pandas\.core\ import\ common
-
- # imports from collections.abc instead of `from collections import abc`
- |from\ collections\.abc\ import
-
- # Numpy
- |from\ numpy\ import\ random
- |from\ numpy\.random\ import
-
# Incorrect code-block / IPython directives
|\.\.\ code-block\ ::
|\.\.\ ipython\ ::
types_or: [python, cython, rst]
- exclude: ^doc/source/development/code_style\.rst # contains examples of patterns to avoid
- - id: unwanted-patterns-in-tests
- name: Unwanted patterns in tests
- language: pygrep
- entry: |
- (?x)
- # pytest.xfail instead of pytest.mark.xfail
- pytest\.xfail
-
- # imports from pandas._testing instead of `import pandas._testing as tm`
- |from\ pandas\._testing\ import
- |from\ pandas\ import\ _testing\ as\ tm
-
- # No direct imports from conftest
- |conftest\ import
- |import\ conftest
-
- # pandas.testing instead of tm
- |pd\.testing\.
-
- # pd.api.types instead of from pandas.api.types import ...
- |(pd|pandas)\.api\.types\.
- files: ^pandas/tests/
- types_or: [python, cython, rst]
- id: pip-to-conda
name: Generate pip dependency from conda
description: This hook checks if the conda environment.yml and requirements-dev.txt are equal
@@ -180,35 +123,6 @@ repos:
language: python
types: [rst]
files: ^doc/source/(development|reference)/
- - id: unwanted-patterns-bare-pytest-raises
- name: Check for use of bare pytest raises
- language: python
- entry: python scripts/validate_unwanted_patterns.py --validation-type="bare_pytest_raises"
- types: [python]
- files: ^pandas/tests/
- exclude: ^pandas/tests/extension/
- - id: unwanted-patterns-private-function-across-module
- name: Check for use of private functions across modules
- language: python
- entry: python scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module"
- types: [python]
- exclude: ^(asv_bench|pandas/tests|doc)/
- - id: unwanted-patterns-private-import-across-module
- name: Check for import of private attributes across modules
- language: python
- entry: python scripts/validate_unwanted_patterns.py --validation-type="private_import_across_module"
- types: [python]
- exclude: ^(asv_bench|pandas/tests|doc)/
- - id: unwanted-patterns-strings-to-concatenate
- name: Check for use of not concatenated strings
- language: python
- entry: python scripts/validate_unwanted_patterns.py --validation-type="strings_to_concatenate"
- types_or: [python, cython]
- - id: unwanted-patterns-strings-with-wrong-placed-whitespace
- name: Check for strings with wrong placed spaces
- language: python
- entry: python scripts/validate_unwanted_patterns.py --validation-type="strings_with_wrong_placed_whitespace"
- types_or: [python, cython]
- id: use-pd_array-in-core
name: Import pandas.array as pd_array in core
language: python
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index 459046d2decfb..ac7cd87c846d5 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -31,7 +31,7 @@
except ImportError:
from pandas import algos
try:
- from pandas._testing import test_parallel
+ from pandas._testing import test_parallel # noqa: PDF014
have_real_test_parallel = True
except ImportError:
diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
index 7bd4d639633b3..ed44102700dc6 100644
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -70,7 +70,7 @@ class BaseIO:
def remove(self, f):
"""Remove created files"""
try:
- os.remove(f)
+ os.remove(f) # noqa: PDF008
except OSError:
# On Windows, attempting to remove a file that is in use
# causes an exception to be raised
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 3b1774ade6f85..d4b6c0d6ff09d 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -64,27 +64,6 @@ fi
### PATTERNS ###
if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
- MSG='Check for use of exec' ; echo $MSG
- invgrep -R --include="*.py*" -E "[^a-zA-Z0-9_]exec\(" pandas
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Check for pytest warns' ; echo $MSG
- invgrep -r -E --include '*.py' 'pytest\.warns' pandas/tests/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Check for pytest raises without context' ; echo $MSG
- invgrep -r -E --include '*.py' "[[:space:]] pytest.raises" pandas/tests/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Check for use of builtin filter function' ; echo $MSG
- invgrep -R --include="*.py" -P '(?<!def)[\(\s]filter\(' pandas
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- # Check for the following code in testing: `np.testing` and `np.array_equal`
- MSG='Check for invalid testing' ; echo $MSG
- invgrep -r -E --include '*.py' --exclude testing.py '(numpy|np)(\.testing|\.array_equal)' pandas/tests/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
# Check for the following code in the extension array base tests: `tm.assert_frame_equal` and `tm.assert_series_equal`
MSG='Check for invalid EA testing' ; echo $MSG
invgrep -r -E --include '*.py' --exclude base.py 'tm.assert_(series|frame)_equal' pandas/tests/extension/base
@@ -98,15 +77,6 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
invgrep -R --include="*.rst" -E "[a-zA-Z0-9]\`\`?[a-zA-Z0-9]" doc/source/
RET=$(($RET + $?)) ; echo $MSG "DONE"
- # Check for the following code in testing: `unittest.mock`, `mock.Mock()` or `mock.patch`
- MSG='Check that unittest.mock is not used (pytest builtin monkeypatch fixture should be used instead)' ; echo $MSG
- invgrep -r -E --include '*.py' '(unittest(\.| import )mock|mock\.Mock\(\)|mock\.patch)' pandas/tests/
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
- MSG='Check for use of {foo!r} instead of {repr(foo)}' ; echo $MSG
- invgrep -R --include=*.{py,pyx} '!r}' pandas
- RET=$(($RET + $?)) ; echo $MSG "DONE"
- echo $MSG "DONE"
fi
### CODE ###
diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index 19d83eb75e5bd..8f399ef6f1192 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -19,147 +19,8 @@ consistent code format throughout the project. We encourage you to use
Patterns
========
-Using foo.__class__
--------------------
-
-
-pandas uses 'type(foo)' instead 'foo.__class__' as it is making the code more
-readable.
-For example:
-
-**Good:**
-
-.. code-block:: python
-
- foo = "bar"
- type(foo)
-
-**Bad:**
-
-.. code-block:: python
-
- foo = "bar"
- foo.__class__
-
-
-String formatting
-=================
-
-Concatenated strings
---------------------
-
-Using f-strings
-~~~~~~~~~~~~~~~
-
-pandas uses f-strings formatting instead of '%' and '.format()' string formatters.
-
-The convention of using f-strings on a string that is concatenated over several lines,
-is to prefix only the lines containing values which need to be interpreted.
-
-For example:
-
-**Good:**
-
-.. code-block:: python
-
- foo = "old_function"
- bar = "new_function"
-
- my_warning_message = (
- f"Warning, {foo} is deprecated, "
- "please use the new and way better "
- f"{bar}"
- )
-
-**Bad:**
-
-.. code-block:: python
-
- foo = "old_function"
- bar = "new_function"
-
- my_warning_message = (
- f"Warning, {foo} is deprecated, "
- f"please use the new and way better "
- f"{bar}"
- )
-
-White spaces
-~~~~~~~~~~~~
-
-Only put white space at the end of the previous line, so
-there is no whitespace at the beginning of the concatenated string.
-
-For example:
-
-**Good:**
-
-.. code-block:: python
-
- example_string = (
- "Some long concatenated string, "
- "with good placement of the "
- "whitespaces"
- )
-
-**Bad:**
-
-.. code-block:: python
-
- example_string = (
- "Some long concatenated string,"
- " with bad placement of the"
- " whitespaces"
- )
-
-Representation function (aka 'repr()')
---------------------------------------
-
-pandas uses 'repr()' instead of '%r' and '!r'.
-
-The use of 'repr()' will only happen when the value is not an obvious string.
-
-For example:
-
-**Good:**
-
-.. code-block:: python
-
- value = str
- f"Unknown received value, got: {repr(value)}"
-
-**Good:**
-
-.. code-block:: python
-
- value = str
- f"Unknown received type, got: '{type(value).__name__}'"
-
-
-Imports (aim for absolute)
-==========================
-
-In Python 3, absolute imports are recommended. Using absolute imports, doing something
-like ``import string`` will import the string module rather than ``string.py``
-in the same directory. As much as possible, you should try to write out
-absolute imports that show the whole import chain from top-level pandas.
-
-Explicit relative imports are also supported in Python 3 but it is not
-recommended to use them. Implicit relative imports should never be used
-and are removed in Python 3.
-
-For example:
-
-::
-
- # preferred
- import pandas.core.common as com
-
- # not preferred
- from .common import test_base
-
- # wrong
- from common import test_base
+We use a ``flake8`` plugin, `pandas-dev-flaker <https://github.com/pandas-dev/pandas-dev-flaker>`_, to
+check our codebase for unwanted patterns. See its ``README`` for the up-to-date list of rules we enforce.
Testing
=======
diff --git a/environment.yml b/environment.yml
index 90a9186aa017f..146bf6db08d8b 100644
--- a/environment.yml
+++ b/environment.yml
@@ -21,8 +21,8 @@ dependencies:
- black=20.8b1
- cpplint
- flake8=3.9.0
- - flake8-bugbear>=21.3.2 # used by flake8, find likely bugs
- - flake8-comprehensions>=3.1.0 # used by flake8, linting of unnecessary comprehensions
+ - flake8-bugbear=21.3.2 # used by flake8, find likely bugs
+ - flake8-comprehensions=3.1.0 # used by flake8, linting of unnecessary comprehensions
- isort>=5.2.1 # check that imports are in the right order
- mypy=0.812
- pre-commit>=2.9.2
@@ -117,3 +117,4 @@ dependencies:
- pip:
- git+https://github.com/pydata/pydata-sphinx-theme.git@master
- numpydoc < 1.2 # 2021-02-09 1.2dev breaking CI
+ - pandas-dev-flaker==0.2.0
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index a603222094bdb..aaf58f1fcb150 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -915,7 +915,7 @@ def external_error_raised(expected_exception: type[Exception]) -> ContextManager
"""
import pytest
- return pytest.raises(expected_exception, match=None)
+ return pytest.raises(expected_exception, match=None) # noqa: PDF010
cython_table = pd.core.common._cython_table.items()
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 67bcdb0a387dd..0628aa5add4a3 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -21,7 +21,7 @@
from pandas.core.dtypes.common import is_datetime64_ns_dtype
from pandas.core.dtypes.missing import isna
-import pandas.core.common as common
+import pandas.core.common as common # noqa: PDF018
from pandas.core.util.numba_ import maybe_use_numba
from pandas.core.window.common import zsqrt
from pandas.core.window.doc import (
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 33b1ceee6e529..e4710254d9311 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -56,7 +56,7 @@
DataError,
SelectionMixin,
)
-import pandas.core.common as common
+import pandas.core.common as com
from pandas.core.indexes.api import (
Index,
MultiIndex,
@@ -643,7 +643,7 @@ def _apply_pairwise(
)
gb_pairs = (
- common.maybe_make_list(pair) for pair in self._grouper.indices.keys()
+ com.maybe_make_list(pair) for pair in self._grouper.indices.keys()
)
groupby_codes = []
groupby_levels = []
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index ee90dfa3e9a52..c36552f59da71 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -274,7 +274,7 @@ class TestTesting(Base):
]
def test_testing(self):
- from pandas import testing
+ from pandas import testing # noqa: PDF015
self.check(testing, self.funcs)
diff --git a/pandas/tests/indexes/object/test_astype.py b/pandas/tests/indexes/object/test_astype.py
index 42c7b8eb4aeec..9bfc0c1312200 100644
--- a/pandas/tests/indexes/object/test_astype.py
+++ b/pandas/tests/indexes/object/test_astype.py
@@ -1,5 +1,5 @@
from pandas import Index
-import pandas.testing as tm
+import pandas._testing as tm
def test_astype_str_from_bytes():
diff --git a/pandas/tests/io/pytables/common.py b/pandas/tests/io/pytables/common.py
index 7e7a76e287d32..6a9d5745ab457 100644
--- a/pandas/tests/io/pytables/common.py
+++ b/pandas/tests/io/pytables/common.py
@@ -16,7 +16,7 @@
def safe_remove(path):
if path is not None:
try:
- os.remove(path)
+ os.remove(path) # noqa: PDF008
except OSError:
pass
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 24a4d35b5d94d..bb6928d2fd95a 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -911,7 +911,7 @@ def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
os.close(fd)
except (OSError, ValueError):
pass
- os.remove(new_f)
+ os.remove(new_f) # noqa: PDF008
# new table
df = tm.makeDataFrame()
diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py
index 30d6436c7e250..e863fb45b1f81 100644
--- a/pandas/tests/tools/test_to_numeric.py
+++ b/pandas/tests/tools/test_to_numeric.py
@@ -232,9 +232,7 @@ def test_type_check(errors):
# see gh-11776
df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = {"errors": errors} if errors is not None else {}
- error_ctx = pytest.raises(TypeError, match="1-d array")
-
- with error_ctx:
+ with pytest.raises(TypeError, match="1-d array"):
to_numeric(df, **kwargs)
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 02a4e63374305..33deeef9f1f82 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -9,8 +9,8 @@ cython>=0.29.21
black==20.8b1
cpplint
flake8==3.9.0
-flake8-bugbear>=21.3.2
-flake8-comprehensions>=3.1.0
+flake8-bugbear==21.3.2
+flake8-comprehensions==3.1.0
isort>=5.2.1
mypy==0.812
pre-commit>=2.9.2
@@ -80,3 +80,4 @@ tabulate>=0.8.3
natsort
git+https://github.com/pydata/pydata-sphinx-theme.git@master
numpydoc < 1.2
+pandas-dev-flaker==0.2.0
diff --git a/scripts/check_for_inconsistent_pandas_namespace.py b/scripts/check_for_inconsistent_pandas_namespace.py
deleted file mode 100644
index 3c21821e794a9..0000000000000
--- a/scripts/check_for_inconsistent_pandas_namespace.py
+++ /dev/null
@@ -1,142 +0,0 @@
-"""
-Check that test suite file doesn't use the pandas namespace inconsistently.
-
-We check for cases of ``Series`` and ``pd.Series`` appearing in the same file
-(likewise for other pandas objects).
-
-This is meant to be run as a pre-commit hook - to run it manually, you can do:
-
- pre-commit run inconsistent-namespace-usage --all-files
-
-To automatically fixup a given file, you can pass `--replace`, e.g.
-
- python scripts/check_for_inconsistent_pandas_namespace.py test_me.py --replace
-
-though note that you may need to manually fixup some imports and that you will also
-need the additional dependency `tokenize-rt` (which is left out from the pre-commit
-hook so that it uses the same virtualenv as the other local ones).
-
-The general structure is similar to that of some plugins from
-https://github.com/asottile/pyupgrade .
-"""
-
-import argparse
-import ast
-import sys
-from typing import (
- MutableMapping,
- NamedTuple,
- Optional,
- Sequence,
- Set,
-)
-
-ERROR_MESSAGE = (
- "{path}:{lineno}:{col_offset}: "
- "Found both '{prefix}.{name}' and '{name}' in {path}"
-)
-
-
-class OffsetWithNamespace(NamedTuple):
- lineno: int
- col_offset: int
- namespace: str
-
-
-class Visitor(ast.NodeVisitor):
- def __init__(self) -> None:
- self.pandas_namespace: MutableMapping[OffsetWithNamespace, str] = {}
- self.imported_from_pandas: Set[str] = set()
-
- def visit_Attribute(self, node: ast.Attribute) -> None:
- if isinstance(node.value, ast.Name) and node.value.id in {"pandas", "pd"}:
- offset_with_namespace = OffsetWithNamespace(
- node.lineno, node.col_offset, node.value.id
- )
- self.pandas_namespace[offset_with_namespace] = node.attr
- self.generic_visit(node)
-
- def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
- if node.module is not None and "pandas" in node.module:
- self.imported_from_pandas.update(name.name for name in node.names)
- self.generic_visit(node)
-
-
-def replace_inconsistent_pandas_namespace(visitor: Visitor, content: str) -> str:
- from tokenize_rt import (
- reversed_enumerate,
- src_to_tokens,
- tokens_to_src,
- )
-
- tokens = src_to_tokens(content)
- for n, i in reversed_enumerate(tokens):
- offset_with_namespace = OffsetWithNamespace(i.offset[0], i.offset[1], i.src)
- if (
- offset_with_namespace in visitor.pandas_namespace
- and visitor.pandas_namespace[offset_with_namespace]
- in visitor.imported_from_pandas
- ):
- # Replace `pd`
- tokens[n] = i._replace(src="")
- # Replace `.`
- tokens[n + 1] = tokens[n + 1]._replace(src="")
-
- new_src: str = tokens_to_src(tokens)
- return new_src
-
-
-def check_for_inconsistent_pandas_namespace(
- content: str, path: str, *, replace: bool
-) -> Optional[str]:
- tree = ast.parse(content)
-
- visitor = Visitor()
- visitor.visit(tree)
-
- inconsistencies = visitor.imported_from_pandas.intersection(
- visitor.pandas_namespace.values()
- )
-
- if not inconsistencies:
- # No inconsistent namespace usage, nothing to replace.
- return None
-
- if not replace:
- inconsistency = inconsistencies.pop()
- lineno, col_offset, prefix = next(
- key for key, val in visitor.pandas_namespace.items() if val == inconsistency
- )
- msg = ERROR_MESSAGE.format(
- lineno=lineno,
- col_offset=col_offset,
- prefix=prefix,
- name=inconsistency,
- path=path,
- )
- sys.stdout.write(msg)
- sys.exit(1)
-
- return replace_inconsistent_pandas_namespace(visitor, content)
-
-
-def main(argv: Optional[Sequence[str]] = None) -> None:
- parser = argparse.ArgumentParser()
- parser.add_argument("paths", nargs="*")
- parser.add_argument("--replace", action="store_true")
- args = parser.parse_args(argv)
-
- for path in args.paths:
- with open(path, encoding="utf-8") as fd:
- content = fd.read()
- new_content = check_for_inconsistent_pandas_namespace(
- content, path, replace=args.replace
- )
- if not args.replace or new_content is None:
- continue
- with open(path, "w", encoding="utf-8") as fd:
- fd.write(new_content)
-
-
-if __name__ == "__main__":
- main()
diff --git a/scripts/sync_flake8_versions.py b/scripts/sync_flake8_versions.py
index 8dd7abcf47f02..cb6bb1eb0986e 100644
--- a/scripts/sync_flake8_versions.py
+++ b/scripts/sync_flake8_versions.py
@@ -1,5 +1,5 @@
"""
-Check that the flake8 pins are the same in:
+Check that the flake8 (and pandas-dev-flaker) pins are the same in:
- environment.yml
- .pre-commit-config.yaml, in the flake8 hook
@@ -12,68 +12,152 @@
- ``python scripts/sync_flake8_versions.py``, or
- ``pre-commit run sync-flake8-versions --all-files``.
"""
+from __future__ import annotations
+
+from dataclasses import (
+ dataclass,
+ replace,
+)
import sys
from typing import (
Any,
Mapping,
- NamedTuple,
Sequence,
- Tuple,
TypeVar,
)
import yaml
-class Revisions(NamedTuple):
- precommit_rev: str
- precommit_yesqa_rev: str
- environment_rev: str
+@dataclass
+class Revision:
+ name: str
+ compare: str
+ version: str
+
+
+@dataclass
+class Revisions:
+ name: str
+ pre_commit: Revision | None = None
+ yesqa: Revision | None = None
+ environment: Revision | None = None
YamlMapping = Mapping[str, Any]
Repo = TypeVar("Repo", bound=YamlMapping)
+COMPARE = ("<=", "==", ">=", "<", ">", "=")
-def _get_repo_hook(repos: Sequence[Repo], hook_name: str) -> Tuple[Repo, YamlMapping]:
+
+def _get_repo_hook(repos: Sequence[Repo], hook_name: str) -> tuple[Repo, YamlMapping]:
for repo in repos:
for hook in repo["hooks"]:
if hook["id"] == hook_name:
return repo, hook
- else:
+ else: # pragma: no cover
raise RuntimeError(f"Repo with hook {hook_name} not found")
-def get_revisions(precommit_config: YamlMapping, environment: YamlMapping) -> Revisions:
- repos = precommit_config["repos"]
- flake8_repo, _ = _get_repo_hook(repos, "flake8")
- precommit_rev = flake8_repo["rev"]
-
- _, yesqa_hook = _get_repo_hook(repos, "yesqa")
- additional_dependencies = yesqa_hook.get("additional_dependencies", [])
- for dep in additional_dependencies:
- if "==" in dep:
- pkg, rev = dep.split("==", maxsplit=1)
- if pkg == "flake8":
- precommit_yesqa_rev = rev
- break
+def _conda_to_pip_compat(dep):
+ if dep.compare == "=":
+ return replace(dep, compare="==")
else:
- raise RuntimeError(
- "flake8 not found, or not pinned, in additional dependencies of yesqa "
- "hook in .pre-commit-config.yaml"
+ return dep
+
+
+def _validate_additional_dependencies(
+ flake8_additional_dependencies,
+ yesqa_additional_dependencies,
+ environment_additional_dependencies,
+) -> None:
+ for dep in flake8_additional_dependencies:
+ if dep not in yesqa_additional_dependencies:
+ sys.stdout.write(
+ f"Mismatch of '{dep.name}' version between 'flake8' "
+ "and 'yesqa' in '.pre-commit-config.yaml'\n"
+ )
+ sys.exit(1)
+ if dep not in environment_additional_dependencies:
+ sys.stdout.write(
+ f"Mismatch of '{dep.name}' version between 'enviroment.yml' "
+ "and additional dependencies of 'flake8' in '.pre-commit-config.yaml'\n"
+ )
+ sys.exit(1)
+
+
+def _validate_revisions(revisions):
+ if revisions.environment != revisions.pre_commit:
+ sys.stdout.write(
+ f"{revisions.name} in 'environment.yml' does not "
+ "match in 'flake8' from 'pre-commit'\n"
)
+ sys.exit(1)
+
+ if revisions.yesqa != revisions.pre_commit:
+ sys.stdout.write(
+ f"{revisions.name} in 'yesqa' does not match "
+ "in 'flake8' from 'pre-commit'\n"
+ )
+ sys.exit(1)
- deps = environment["dependencies"]
+
+def _process_dependencies(deps):
for dep in deps:
- if isinstance(dep, str) and "=" in dep:
- pkg, rev = dep.split("=", maxsplit=1)
- if pkg == "flake8":
- environment_rev = rev
- break
- else:
- raise RuntimeError("flake8 not found, or not pinned, in environment.yml")
+ if isinstance(dep, str):
+ for compare in COMPARE:
+ if compare in dep:
+ pkg, rev = dep.split(compare, maxsplit=1)
+ yield _conda_to_pip_compat(Revision(pkg, compare, rev))
+ break
+ else:
+ yield from _process_dependencies(dep["pip"])
+
+
+def get_revisions(
+ precommit_config: YamlMapping, environment: YamlMapping
+) -> tuple[Revisions, Revisions]:
+ flake8_revisions = Revisions(name="flake8")
+ pandas_dev_flaker_revisions = Revisions(name="pandas-dev-flaker")
+
+ repos = precommit_config["repos"]
+ flake8_repo, flake8_hook = _get_repo_hook(repos, "flake8")
+ flake8_revisions.pre_commit = Revision("flake8", "==", flake8_repo["rev"])
+ flake8_additional_dependencies = []
+ for dep in _process_dependencies(flake8_hook.get("additional_dependencies", [])):
+ if dep.name == "pandas-dev-flaker":
+ pandas_dev_flaker_revisions.pre_commit = dep
+ else:
+ flake8_additional_dependencies.append(dep)
- return Revisions(precommit_rev, precommit_yesqa_rev, environment_rev)
+ _, yesqa_hook = _get_repo_hook(repos, "yesqa")
+ yesqa_additional_dependencies = []
+ for dep in _process_dependencies(yesqa_hook.get("additional_dependencies", [])):
+ if dep.name == "flake8":
+ flake8_revisions.yesqa = dep
+ elif dep.name == "pandas-dev-flaker":
+ pandas_dev_flaker_revisions.yesqa = dep
+ else:
+ yesqa_additional_dependencies.append(dep)
+
+ environment_dependencies = environment["dependencies"]
+ environment_additional_dependencies = []
+ for dep in _process_dependencies(environment_dependencies):
+ if dep.name == "flake8":
+ flake8_revisions.environment = dep
+ elif dep.name == "pandas-dev-flaker":
+ pandas_dev_flaker_revisions.environment = dep
+ else:
+ environment_additional_dependencies.append(dep)
+
+ _validate_additional_dependencies(
+ flake8_additional_dependencies,
+ yesqa_additional_dependencies,
+ environment_additional_dependencies,
+ )
+
+ for revisions in flake8_revisions, pandas_dev_flaker_revisions:
+ _validate_revisions(revisions)
if __name__ == "__main__":
@@ -81,21 +165,5 @@ def get_revisions(precommit_config: YamlMapping, environment: YamlMapping) -> Re
precommit_config = yaml.safe_load(fd)
with open("environment.yml") as fd:
environment = yaml.safe_load(fd)
-
- revisions = get_revisions(precommit_config, environment)
-
- if revisions.environment_rev != revisions.precommit_rev:
- sys.stdout.write(
- f"flake8 pin in environment.yml is {revisions.environment_rev}, "
- f"should be {revisions.precommit_rev}\n"
- )
- sys.exit(1)
-
- if revisions.precommit_yesqa_rev != revisions.precommit_rev:
- sys.stdout.write(
- f"flake8 pin in yesqa is {revisions.precommit_yesqa_rev}, "
- f"should be {revisions.precommit_rev}\n"
- )
- sys.exit(1)
-
+ get_revisions(precommit_config, environment)
sys.exit(0)
diff --git a/scripts/tests/test_inconsistent_namespace_check.py b/scripts/tests/test_inconsistent_namespace_check.py
deleted file mode 100644
index eb995158d8cb4..0000000000000
--- a/scripts/tests/test_inconsistent_namespace_check.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import pytest
-
-from ..check_for_inconsistent_pandas_namespace import (
- check_for_inconsistent_pandas_namespace,
-)
-
-BAD_FILE_0 = (
- "from pandas import Categorical\n"
- "cat_0 = Categorical()\n"
- "cat_1 = pd.Categorical()"
-)
-BAD_FILE_1 = (
- "from pandas import Categorical\n"
- "cat_0 = pd.Categorical()\n"
- "cat_1 = Categorical()"
-)
-BAD_FILE_2 = (
- "from pandas import Categorical\n"
- "cat_0 = pandas.Categorical()\n"
- "cat_1 = Categorical()"
-)
-GOOD_FILE_0 = (
- "from pandas import Categorical\ncat_0 = Categorical()\ncat_1 = Categorical()"
-)
-GOOD_FILE_1 = "cat_0 = pd.Categorical()\ncat_1 = pd.Categorical()"
-GOOD_FILE_2 = "from array import array\nimport pandas as pd\narr = pd.array([])"
-PATH = "t.py"
-
-
-@pytest.mark.parametrize(
- "content, expected",
- [
- (BAD_FILE_0, "t.py:3:8: Found both 'pd.Categorical' and 'Categorical' in t.py"),
- (BAD_FILE_1, "t.py:2:8: Found both 'pd.Categorical' and 'Categorical' in t.py"),
- (
- BAD_FILE_2,
- "t.py:2:8: Found both 'pandas.Categorical' and 'Categorical' in t.py",
- ),
- ],
-)
-def test_inconsistent_usage(content, expected, capsys):
- with pytest.raises(SystemExit):
- check_for_inconsistent_pandas_namespace(content, PATH, replace=False)
- result, _ = capsys.readouterr()
- assert result == expected
-
-
-@pytest.mark.parametrize("content", [GOOD_FILE_0, GOOD_FILE_1, GOOD_FILE_2])
-@pytest.mark.parametrize("replace", [True, False])
-def test_consistent_usage(content, replace):
- # should not raise
- check_for_inconsistent_pandas_namespace(content, PATH, replace=replace)
-
-
-@pytest.mark.parametrize("content", [BAD_FILE_0, BAD_FILE_1, BAD_FILE_2])
-def test_inconsistent_usage_with_replace(content):
- result = check_for_inconsistent_pandas_namespace(content, PATH, replace=True)
- expected = (
- "from pandas import Categorical\ncat_0 = Categorical()\ncat_1 = Categorical()"
- )
- assert result == expected
diff --git a/scripts/tests/test_sync_flake8_versions.py b/scripts/tests/test_sync_flake8_versions.py
index fc559f3e5e982..d9b6dbe8c3f0a 100644
--- a/scripts/tests/test_sync_flake8_versions.py
+++ b/scripts/tests/test_sync_flake8_versions.py
@@ -1,25 +1,221 @@
-from ..sync_flake8_versions import (
- Revisions,
- get_revisions,
-)
+import pytest
+from ..sync_flake8_versions import get_revisions
-def test_get_revisions():
+
+def test_wrong_yesqa_flake8(capsys):
+ precommit_config = {
+ "repos": [
+ {
+ "repo": "https://gitlab.com/pycqa/flake8",
+ "rev": "0.1.1",
+ "hooks": [
+ {
+ "id": "flake8",
+ }
+ ],
+ },
+ {
+ "repo": "https://github.com/asottile/yesqa",
+ "rev": "v1.2.2",
+ "hooks": [
+ {
+ "id": "yesqa",
+ "additional_dependencies": [
+ "flake8==0.4.2",
+ ],
+ }
+ ],
+ },
+ ]
+ }
+ environment = {
+ "dependencies": [
+ "flake8=0.1.1",
+ ]
+ }
+ with pytest.raises(SystemExit, match=None):
+ get_revisions(precommit_config, environment)
+ result, _ = capsys.readouterr()
+ expected = "flake8 in 'yesqa' does not match in 'flake8' from 'pre-commit'\n"
+ assert result == expected
+
+
+def test_wrong_env_flake8(capsys):
+ precommit_config = {
+ "repos": [
+ {
+ "repo": "https://gitlab.com/pycqa/flake8",
+ "rev": "0.1.1",
+ "hooks": [
+ {
+ "id": "flake8",
+ }
+ ],
+ },
+ {
+ "repo": "https://github.com/asottile/yesqa",
+ "rev": "v1.2.2",
+ "hooks": [
+ {
+ "id": "yesqa",
+ "additional_dependencies": [
+ "flake8==0.4.2",
+ ],
+ }
+ ],
+ },
+ ]
+ }
+ environment = {
+ "dependencies": [
+ "flake8=1.5.6",
+ ]
+ }
+ with pytest.raises(SystemExit, match=None):
+ get_revisions(precommit_config, environment)
+ result, _ = capsys.readouterr()
+ expected = (
+ "flake8 in 'environment.yml' does not match in 'flake8' from 'pre-commit'\n"
+ )
+ assert result == expected
+
+
+def test_wrong_yesqa_add_dep(capsys):
+ precommit_config = {
+ "repos": [
+ {
+ "repo": "https://gitlab.com/pycqa/flake8",
+ "rev": "0.1.1",
+ "hooks": [
+ {
+ "id": "flake8",
+ "additional_dependencies": [
+ "flake8-bugs==1.1.1",
+ ],
+ }
+ ],
+ },
+ {
+ "repo": "https://github.com/asottile/yesqa",
+ "rev": "v1.2.2",
+ "hooks": [
+ {
+ "id": "yesqa",
+ "additional_dependencies": [
+ "flake8==0.4.2",
+ "flake8-bugs>=1.1.1",
+ ],
+ }
+ ],
+ },
+ ]
+ }
+ environment = {
+ "dependencies": [
+ "flake8=1.5.6",
+ "flake8-bugs=1.1.1",
+ ]
+ }
+ with pytest.raises(SystemExit, match=None):
+ get_revisions(precommit_config, environment)
+ result, _ = capsys.readouterr()
+ expected = (
+ "Mismatch of 'flake8-bugs' version between 'flake8' and 'yesqa' in "
+ "'.pre-commit-config.yaml'\n"
+ )
+ assert result == expected
+
+
+def test_wrong_env_add_dep(capsys):
precommit_config = {
"repos": [
{
"repo": "https://gitlab.com/pycqa/flake8",
- "rev": "foo",
- "hooks": [{"id": "flake8"}],
+ "rev": "0.1.1",
+ "hooks": [
+ {
+ "id": "flake8",
+ "additional_dependencies": [
+ "flake8-bugs==1.1.1",
+ ],
+ }
+ ],
},
{
"repo": "https://github.com/asottile/yesqa",
"rev": "v1.2.2",
- "hooks": [{"id": "yesqa", "additional_dependencies": ["flake8==bar"]}],
+ "hooks": [
+ {
+ "id": "yesqa",
+ "additional_dependencies": [
+ "flake8==0.4.2",
+ "flake8-bugs==1.1.1",
+ ],
+ }
+ ],
},
]
}
- environment = {"dependencies": ["flake8=qux"]}
- result = get_revisions(precommit_config, environment)
- expected = Revisions("foo", "bar", "qux")
+ environment = {
+ "dependencies": [
+ "flake8=1.5.6",
+ "flake8-bugs=1.1.2",
+ ]
+ }
+ with pytest.raises(SystemExit, match=None):
+ get_revisions(precommit_config, environment)
+ result, _ = capsys.readouterr()
+ expected = (
+ "Mismatch of 'flake8-bugs' version between 'enviroment.yml' "
+ "and additional dependencies of 'flake8' in '.pre-commit-config.yaml'\n"
+ )
assert result == expected
+
+
+def test_get_revisions_no_failure(capsys):
+ precommit_config = {
+ "repos": [
+ {
+ "repo": "https://gitlab.com/pycqa/flake8",
+ "rev": "0.1.1",
+ "hooks": [
+ {
+ "id": "flake8",
+ "additional_dependencies": [
+ "pandas-dev-flaker==0.2.0",
+ "flake8-bugs==1.1.1",
+ ],
+ }
+ ],
+ },
+ {
+ "repo": "https://github.com/asottile/yesqa",
+ "rev": "v1.2.2",
+ "hooks": [
+ {
+ "id": "yesqa",
+ "additional_dependencies": [
+ "flake8==0.1.1",
+ "pandas-dev-flaker==0.2.0",
+ "flake8-bugs==1.1.1",
+ ],
+ }
+ ],
+ },
+ ]
+ }
+ environment = {
+ "dependencies": [
+ "flake8=0.1.1",
+ "flake8-bugs=1.1.1",
+ {
+ "pip": [
+ "git+https://github.com/pydata/pydata-sphinx-theme.git@master",
+ "pandas-dev-flaker==0.2.0",
+ ]
+ },
+ ]
+ }
+ # should not raise
+ get_revisions(precommit_config, environment)
diff --git a/scripts/tests/test_use_pd_array_in_core.py b/scripts/tests/test_use_pd_array_in_core.py
index 9c66199a82846..8f13a6e735899 100644
--- a/scripts/tests/test_use_pd_array_in_core.py
+++ b/scripts/tests/test_use_pd_array_in_core.py
@@ -14,7 +14,7 @@ def test_inconsistent_usage(content, capsys):
result_msg = (
"t.py:2:0: Don't use pd.array in core, import array as pd_array instead\n"
)
- with pytest.raises(SystemExit):
+ with pytest.raises(SystemExit, match=None):
use_pd_array(content, PATH)
expected_msg, _ = capsys.readouterr()
assert result_msg == expected_msg
diff --git a/scripts/tests/test_validate_unwanted_patterns.py b/scripts/tests/test_validate_unwanted_patterns.py
deleted file mode 100644
index ef93fd1d21981..0000000000000
--- a/scripts/tests/test_validate_unwanted_patterns.py
+++ /dev/null
@@ -1,419 +0,0 @@
-import io
-
-import pytest
-
-from .. import validate_unwanted_patterns
-
-
-class TestBarePytestRaises:
- @pytest.mark.parametrize(
- "data",
- [
- (
- """
- with pytest.raises(ValueError, match="foo"):
- pass
- """
- ),
- (
- """
- # with pytest.raises(ValueError, match="foo"):
- # pass
- """
- ),
- (
- """
- # with pytest.raises(ValueError):
- # pass
- """
- ),
- (
- """
- with pytest.raises(
- ValueError,
- match="foo"
- ):
- pass
- """
- ),
- ],
- )
- def test_pytest_raises(self, data):
- fd = io.StringIO(data.strip())
- result = list(validate_unwanted_patterns.bare_pytest_raises(fd))
- assert result == []
-
- @pytest.mark.parametrize(
- "data, expected",
- [
- (
- (
- """
- with pytest.raises(ValueError):
- pass
- """
- ),
- [
- (
- 1,
- (
- "Bare pytests raise have been found. "
- "Please pass in the argument 'match' "
- "as well the exception."
- ),
- ),
- ],
- ),
- (
- (
- """
- with pytest.raises(ValueError, match="foo"):
- with pytest.raises(ValueError):
- pass
- pass
- """
- ),
- [
- (
- 2,
- (
- "Bare pytests raise have been found. "
- "Please pass in the argument 'match' "
- "as well the exception."
- ),
- ),
- ],
- ),
- (
- (
- """
- with pytest.raises(ValueError):
- with pytest.raises(ValueError, match="foo"):
- pass
- pass
- """
- ),
- [
- (
- 1,
- (
- "Bare pytests raise have been found. "
- "Please pass in the argument 'match' "
- "as well the exception."
- ),
- ),
- ],
- ),
- (
- (
- """
- with pytest.raises(
- ValueError
- ):
- pass
- """
- ),
- [
- (
- 1,
- (
- "Bare pytests raise have been found. "
- "Please pass in the argument 'match' "
- "as well the exception."
- ),
- ),
- ],
- ),
- (
- (
- """
- with pytest.raises(
- ValueError,
- # match = "foo"
- ):
- pass
- """
- ),
- [
- (
- 1,
- (
- "Bare pytests raise have been found. "
- "Please pass in the argument 'match' "
- "as well the exception."
- ),
- ),
- ],
- ),
- ],
- )
- def test_pytest_raises_raises(self, data, expected):
- fd = io.StringIO(data.strip())
- result = list(validate_unwanted_patterns.bare_pytest_raises(fd))
- assert result == expected
-
-
-@pytest.mark.parametrize(
- "data, expected",
- [
- (
- 'msg = ("bar " "baz")',
- [
- (
- 1,
- (
- "String unnecessarily split in two by black. "
- "Please merge them manually."
- ),
- )
- ],
- ),
- (
- 'msg = ("foo " "bar " "baz")',
- [
- (
- 1,
- (
- "String unnecessarily split in two by black. "
- "Please merge them manually."
- ),
- ),
- (
- 1,
- (
- "String unnecessarily split in two by black. "
- "Please merge them manually."
- ),
- ),
- ],
- ),
- ],
-)
-def test_strings_to_concatenate(data, expected):
- fd = io.StringIO(data.strip())
- result = list(validate_unwanted_patterns.strings_to_concatenate(fd))
- assert result == expected
-
-
-class TestStringsWithWrongPlacedWhitespace:
- @pytest.mark.parametrize(
- "data",
- [
- (
- """
- msg = (
- "foo\n"
- " bar"
- )
- """
- ),
- (
- """
- msg = (
- "foo"
- " bar"
- "baz"
- )
- """
- ),
- (
- """
- msg = (
- f"foo"
- " bar"
- )
- """
- ),
- (
- """
- msg = (
- "foo"
- f" bar"
- )
- """
- ),
- (
- """
- msg = (
- "foo"
- rf" bar"
- )
- """
- ),
- ],
- )
- def test_strings_with_wrong_placed_whitespace(self, data):
- fd = io.StringIO(data.strip())
- result = list(
- validate_unwanted_patterns.strings_with_wrong_placed_whitespace(fd)
- )
- assert result == []
-
- @pytest.mark.parametrize(
- "data, expected",
- [
- (
- (
- """
- msg = (
- "foo"
- " bar"
- )
- """
- ),
- [
- (
- 3,
- (
- "String has a space at the beginning instead "
- "of the end of the previous string."
- ),
- )
- ],
- ),
- (
- (
- """
- msg = (
- f"foo"
- " bar"
- )
- """
- ),
- [
- (
- 3,
- (
- "String has a space at the beginning instead "
- "of the end of the previous string."
- ),
- )
- ],
- ),
- (
- (
- """
- msg = (
- "foo"
- f" bar"
- )
- """
- ),
- [
- (
- 3,
- (
- "String has a space at the beginning instead "
- "of the end of the previous string."
- ),
- )
- ],
- ),
- (
- (
- """
- msg = (
- f"foo"
- f" bar"
- )
- """
- ),
- [
- (
- 3,
- (
- "String has a space at the beginning instead "
- "of the end of the previous string."
- ),
- )
- ],
- ),
- (
- (
- """
- msg = (
- "foo"
- rf" bar"
- " baz"
- )
- """
- ),
- [
- (
- 3,
- (
- "String has a space at the beginning instead "
- "of the end of the previous string."
- ),
- ),
- (
- 4,
- (
- "String has a space at the beginning instead "
- "of the end of the previous string."
- ),
- ),
- ],
- ),
- (
- (
- """
- msg = (
- "foo"
- " bar"
- rf" baz"
- )
- """
- ),
- [
- (
- 3,
- (
- "String has a space at the beginning instead "
- "of the end of the previous string."
- ),
- ),
- (
- 4,
- (
- "String has a space at the beginning instead "
- "of the end of the previous string."
- ),
- ),
- ],
- ),
- (
- (
- """
- msg = (
- "foo"
- rf" bar"
- rf" baz"
- )
- """
- ),
- [
- (
- 3,
- (
- "String has a space at the beginning instead "
- "of the end of the previous string."
- ),
- ),
- (
- 4,
- (
- "String has a space at the beginning instead "
- "of the end of the previous string."
- ),
- ),
- ],
- ),
- ],
- )
- def test_strings_with_wrong_placed_whitespace_raises(self, data, expected):
- fd = io.StringIO(data.strip())
- result = list(
- validate_unwanted_patterns.strings_with_wrong_placed_whitespace(fd)
- )
- assert result == expected
diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py
deleted file mode 100755
index b6b038ae9dd17..0000000000000
--- a/scripts/validate_unwanted_patterns.py
+++ /dev/null
@@ -1,487 +0,0 @@
-#!/usr/bin/env python3
-"""
-Unwanted patterns test cases.
-
-The reason this file exist despite the fact we already have
-`ci/code_checks.sh`,
-(see https://github.com/pandas-dev/pandas/blob/master/ci/code_checks.sh)
-
-is that some of the test cases are more complex/imposible to validate via regex.
-So this file is somewhat an extensions to `ci/code_checks.sh`
-"""
-
-import argparse
-import ast
-import sys
-import token
-import tokenize
-from typing import (
- IO,
- Callable,
- Iterable,
- List,
- Set,
- Tuple,
-)
-
-PRIVATE_IMPORTS_TO_IGNORE: Set[str] = {
- "_extension_array_shared_docs",
- "_index_shared_docs",
- "_interval_shared_docs",
- "_merge_doc",
- "_shared_docs",
- "_apply_docs",
- "_new_Index",
- "_new_PeriodIndex",
- "_doc_template",
- "_agg_template",
- "_pipe_template",
- "__main__",
- "_transform_template",
- "_flex_comp_doc_FRAME",
- "_op_descriptions",
- "_IntegerDtype",
- "_use_inf_as_na",
- "_get_plot_backend",
- "_matplotlib",
- "_arrow_utils",
- "_registry",
- "_get_offset", # TODO: remove after get_offset deprecation enforced
- "_test_parse_iso8601",
- "_json_normalize", # TODO: remove after deprecation is enforced
- "_testing",
- "_test_decorators",
- "__version__", # check np.__version__ in compat.numpy.function
-}
-
-
-def _get_literal_string_prefix_len(token_string: str) -> int:
- """
- Getting the length of the literal string prefix.
-
- Parameters
- ----------
- token_string : str
- String to check.
-
- Returns
- -------
- int
- Length of the literal string prefix.
-
- Examples
- --------
- >>> example_string = "'Hello world'"
- >>> _get_literal_string_prefix_len(example_string)
- 0
- >>> example_string = "r'Hello world'"
- >>> _get_literal_string_prefix_len(example_string)
- 1
- """
- try:
- return min(
- token_string.find(quote)
- for quote in (r"'", r'"')
- if token_string.find(quote) >= 0
- )
- except ValueError:
- return 0
-
-
-def bare_pytest_raises(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
- """
- Test Case for bare pytest raises.
-
- For example, this is wrong:
-
- >>> with pytest.raise(ValueError):
- ... # Some code that raises ValueError
-
- And this is what we want instead:
-
- >>> with pytest.raise(ValueError, match="foo"):
- ... # Some code that raises ValueError
-
- Parameters
- ----------
- file_obj : IO
- File-like object containing the Python code to validate.
-
- Yields
- ------
- line_number : int
- Line number of unconcatenated string.
- msg : str
- Explenation of the error.
-
- Notes
- -----
- GH #23922
- """
- contents = file_obj.read()
- tree = ast.parse(contents)
-
- for node in ast.walk(tree):
- if not isinstance(node, ast.Call):
- continue
-
- try:
- if not (node.func.value.id == "pytest" and node.func.attr == "raises"):
- continue
- except AttributeError:
- continue
-
- if not node.keywords:
- yield (
- node.lineno,
- "Bare pytests raise have been found. "
- "Please pass in the argument 'match' as well the exception.",
- )
- else:
- # Means that there are arguments that are being passed in,
- # now we validate that `match` is one of the passed in arguments
- if not any(keyword.arg == "match" for keyword in node.keywords):
- yield (
- node.lineno,
- "Bare pytests raise have been found. "
- "Please pass in the argument 'match' as well the exception.",
- )
-
-
-PRIVATE_FUNCTIONS_ALLOWED = {"sys._getframe"} # no known alternative
-
-
-def private_function_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
- """
- Checking that a private function is not used across modules.
- Parameters
- ----------
- file_obj : IO
- File-like object containing the Python code to validate.
- Yields
- ------
- line_number : int
- Line number of the private function that is used across modules.
- msg : str
- Explenation of the error.
- """
- contents = file_obj.read()
- tree = ast.parse(contents)
-
- imported_modules: Set[str] = set()
-
- for node in ast.walk(tree):
- if isinstance(node, (ast.Import, ast.ImportFrom)):
- for module in node.names:
- module_fqdn = module.name if module.asname is None else module.asname
- imported_modules.add(module_fqdn)
-
- if not isinstance(node, ast.Call):
- continue
-
- try:
- module_name = node.func.value.id
- function_name = node.func.attr
- except AttributeError:
- continue
-
- # Exception section #
-
- # (Debatable) Class case
- if module_name[0].isupper():
- continue
- # (Debatable) Dunder methods case
- elif function_name.startswith("__") and function_name.endswith("__"):
- continue
- elif module_name + "." + function_name in PRIVATE_FUNCTIONS_ALLOWED:
- continue
-
- if module_name in imported_modules and function_name.startswith("_"):
- yield (node.lineno, f"Private function '{module_name}.{function_name}'")
-
-
-def private_import_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
- """
- Checking that a private function is not imported across modules.
- Parameters
- ----------
- file_obj : IO
- File-like object containing the Python code to validate.
- Yields
- ------
- line_number : int
- Line number of import statement, that imports the private function.
- msg : str
- Explenation of the error.
- """
- contents = file_obj.read()
- tree = ast.parse(contents)
-
- for node in ast.walk(tree):
- if not (isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom)):
- continue
-
- for module in node.names:
- module_name = module.name.split(".")[-1]
- if module_name in PRIVATE_IMPORTS_TO_IGNORE:
- continue
-
- if module_name.startswith("_"):
- yield (node.lineno, f"Import of internal function {repr(module_name)}")
-
-
-def strings_to_concatenate(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
- """
- This test case is necessary after 'Black' (https://github.com/psf/black),
- is formating strings over multiple lines.
-
- For example, when this:
-
- >>> foo = (
- ... "bar "
- ... "baz"
- ... )
-
- Is becoming this:
-
- >>> foo = ("bar " "baz")
-
- 'Black' is not considering this as an
- issue (see https://github.com/psf/black/issues/1051),
- so we are checking it here instead.
-
- Parameters
- ----------
- file_obj : IO
- File-like object containing the Python code to validate.
-
- Yields
- ------
- line_number : int
- Line number of unconcatenated string.
- msg : str
- Explenation of the error.
-
- Notes
- -----
- GH #30454
- """
- tokens: List = list(tokenize.generate_tokens(file_obj.readline))
-
- for current_token, next_token in zip(tokens, tokens[1:]):
- if current_token.type == next_token.type == token.STRING:
- yield (
- current_token.start[0],
- (
- "String unnecessarily split in two by black. "
- "Please merge them manually."
- ),
- )
-
-
-def strings_with_wrong_placed_whitespace(
- file_obj: IO[str],
-) -> Iterable[Tuple[int, str]]:
- """
- Test case for leading spaces in concated strings.
-
- For example:
-
- >>> rule = (
- ... "We want the space at the end of the line, "
- ... "not at the beginning"
- ... )
-
- Instead of:
-
- >>> rule = (
- ... "We want the space at the end of the line,"
- ... " not at the beginning"
- ... )
-
- Parameters
- ----------
- file_obj : IO
- File-like object containing the Python code to validate.
-
- Yields
- ------
- line_number : int
- Line number of unconcatenated string.
- msg : str
- Explenation of the error.
- """
-
- def has_wrong_whitespace(first_line: str, second_line: str) -> bool:
- """
- Checking if the two lines are mattching the unwanted pattern.
-
- Parameters
- ----------
- first_line : str
- First line to check.
- second_line : str
- Second line to check.
-
- Returns
- -------
- bool
- True if the two recived string match, an unwanted pattern.
-
- Notes
- -----
- The unwanted pattern that we are trying to catch is if the spaces in
- a string that is concatenated over multiple lines are placed at the
- end of each string, unless this string is ending with a
- newline character (\n).
-
- For example, this is bad:
-
- >>> rule = (
- ... "We want the space at the end of the line,"
- ... " not at the beginning"
- ... )
-
- And what we want is:
-
- >>> rule = (
- ... "We want the space at the end of the line, "
- ... "not at the beginning"
- ... )
-
- And if the string is ending with a new line character (\n) we
- do not want any trailing whitespaces after it.
-
- For example, this is bad:
-
- >>> rule = (
- ... "We want the space at the begging of "
- ... "the line if the previous line is ending with a \n "
- ... "not at the end, like always"
- ... )
-
- And what we do want is:
-
- >>> rule = (
- ... "We want the space at the begging of "
- ... "the line if the previous line is ending with a \n"
- ... " not at the end, like always"
- ... )
- """
- if first_line.endswith(r"\n"):
- return False
- elif first_line.startswith(" ") or second_line.startswith(" "):
- return False
- elif first_line.endswith(" ") or second_line.endswith(" "):
- return False
- elif (not first_line.endswith(" ")) and second_line.startswith(" "):
- return True
- return False
-
- tokens: List = list(tokenize.generate_tokens(file_obj.readline))
-
- for first_token, second_token, third_token in zip(tokens, tokens[1:], tokens[2:]):
- # Checking if we are in a block of concated string
- if (
- first_token.type == third_token.type == token.STRING
- and second_token.type == token.NL
- ):
- # Striping the quotes, with the string litteral prefix
- first_string: str = first_token.string[
- _get_literal_string_prefix_len(first_token.string) + 1 : -1
- ]
- second_string: str = third_token.string[
- _get_literal_string_prefix_len(third_token.string) + 1 : -1
- ]
-
- if has_wrong_whitespace(first_string, second_string):
- yield (
- third_token.start[0],
- (
- "String has a space at the beginning instead "
- "of the end of the previous string."
- ),
- )
-
-
-def main(
- function: Callable[[IO[str]], Iterable[Tuple[int, str]]],
- source_path: str,
- output_format: str,
-) -> bool:
- """
- Main entry point of the script.
-
- Parameters
- ----------
- function : Callable
- Function to execute for the specified validation type.
- source_path : str
- Source path representing path to a file/directory.
- output_format : str
- Output format of the error message.
- file_extensions_to_check : str
- Comma separated values of what file extensions to check.
- excluded_file_paths : str
- Comma separated values of what file paths to exclude during the check.
-
- Returns
- -------
- bool
- True if found any patterns are found related to the given function.
-
- Raises
- ------
- ValueError
- If the `source_path` is not pointing to existing file/directory.
- """
- is_failed: bool = False
-
- for file_path in source_path:
- with open(file_path, encoding="utf-8") as file_obj:
- for line_number, msg in function(file_obj):
- is_failed = True
- print(
- output_format.format(
- source_path=file_path, line_number=line_number, msg=msg
- )
- )
-
- return is_failed
-
-
-if __name__ == "__main__":
- available_validation_types: List[str] = [
- "bare_pytest_raises",
- "private_function_across_module",
- "private_import_across_module",
- "strings_to_concatenate",
- "strings_with_wrong_placed_whitespace",
- ]
-
- parser = argparse.ArgumentParser(description="Unwanted patterns checker.")
-
- parser.add_argument("paths", nargs="*", help="Source paths of files to check.")
- parser.add_argument(
- "--format",
- "-f",
- default="{source_path}:{line_number}:{msg}",
- help="Output format of the error message.",
- )
- parser.add_argument(
- "--validation-type",
- "-vt",
- choices=available_validation_types,
- required=True,
- help="Validation test case to check.",
- )
-
- args = parser.parse_args()
-
- sys.exit(
- main(
- function=globals().get(args.validation_type),
- source_path=args.paths,
- output_format=args.format,
- )
- )
diff --git a/setup.cfg b/setup.cfg
index 9e3deff4c7183..610b30e4422a9 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -93,6 +93,16 @@ exclude =
.eggs/*.py,
versioneer.py,
env # exclude asv benchmark environments from linting
+per-file-ignores =
+ # private import across modules
+ pandas/tests/*:PDF020
+ # pytest.raises without match=
+ pandas/tests/extension/*:PDF009
+ # os.remove
+ doc/make.py:PDF008
+ # import from pandas._testing
+ pandas/testing.py:PDF014
+
[flake8-rst]
max-line-length = 84
| - [ ] closes #40826
- [ ] closes #40873
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
----
This means that, just by running `flake8`, you'll be able to get all these custom linting errors alongside the regular `flake8` ones. E.g. if we make `pandas/tests/t.py` as follows:
```python
import pytest
import pandas as pd
from pandas import Categorical
def test_foo():
cat_0 = pd.Categorical([1])
with pytest.warns(FutureWarning):
cat_1 = pd.Categorical(1)
```
then we get:
```console
$ flake8 pandas/tests/t.py
pandas/tests/t.py:4:1: F401 'pandas.Categorical' imported but unused
pandas/tests/t.py:8:5: F841 local variable 'cat_0' is assigned to but never used
pandas/tests/t.py:8:13: PDF019 found both 'pd.Categorical' and 'Categorical' in the same file
pandas/tests/t.py:9:10: PDF011 found 'pytest.warns' (use 'pandas._testing.assert_produces_warning')
pandas/tests/t.py:10:9: F841 local variable 'cat_1' is assigned to but never used
pandas/tests/t.py:10:17: PDF019 found both 'pd.Categorical' and 'Categorical' in the same file
```
These can then easily be configured using `flake8`'s configuration options, like `# noqa` and `per-file-ignores` in `setup.cfg`.
This also brings down the number of pre-commit checks.
----
I this is something the team wants, and it's decided to keep it as a separate repo, then some outstanding things to do would be:
- transferring it to `pandas-dev`
- sorting out the license (it's currently in my name, and I presume that should change) | https://api.github.com/repos/pandas-dev/pandas/pulls/40906 | 2021-04-12T20:40:51Z | 2021-04-16T01:06:48Z | 2021-04-16T01:06:48Z | 2021-04-16T07:17:01Z |
Backport PR #40902 on branch 1.2.x (DOC: Start v1.2.5 release notes) | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 8739694c20e33..e546c8c8b80e3 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -16,6 +16,7 @@ Version 1.2
.. toctree::
:maxdepth: 2
+ v1.2.5
v1.2.4
v1.2.3
v1.2.2
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst
index dd74091b64014..433ee37508e66 100644
--- a/doc/source/whatsnew/v1.2.4.rst
+++ b/doc/source/whatsnew/v1.2.4.rst
@@ -30,4 +30,4 @@ Fixed regressions
Contributors
~~~~~~~~~~~~
-.. contributors:: v1.2.3..v1.2.4|HEAD
+.. contributors:: v1.2.3..v1.2.4
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst
new file mode 100644
index 0000000000000..cdfc2e5686b91
--- /dev/null
+++ b/doc/source/whatsnew/v1.2.5.rst
@@ -0,0 +1,48 @@
+.. _whatsnew_125:
+
+What's new in 1.2.5 (May ??, 2021)
+----------------------------------
+
+These are the changes in pandas 1.2.5. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_125.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_125.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_125.other:
+
+Other
+~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_125.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.2.4..v1.2.5|HEAD
| Backport PR #40902: DOC: Start v1.2.5 release notes | https://api.github.com/repos/pandas-dev/pandas/pulls/40905 | 2021-04-12T19:29:22Z | 2021-04-12T20:36:13Z | 2021-04-12T20:36:13Z | 2021-04-12T20:36:13Z |
DOC: Fixed documentation for few files | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index a2a108924a0f2..c178e9f7cecbe 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -110,10 +110,13 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
pytest -q --doctest-modules \
pandas/core/accessor.py \
pandas/core/aggregation.py \
+ pandas/core/algorithms.py \
pandas/core/base.py \
pandas/core/construction.py \
pandas/core/frame.py \
pandas/core/generic.py \
+ pandas/core/indexers.py \
+ pandas/core/nanops.py \
pandas/core/series.py \
pandas/io/sql.py
RET=$(($RET + $?)) ; echo $MSG "DONE"
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index c8389ae24f000..16ec2bb5f253c 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -375,46 +375,60 @@ def unique(values):
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
- >>> pd.unique(pd.Series([pd.Timestamp('20160101'),
- ... pd.Timestamp('20160101')]))
+ >>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
- >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
- ... pd.Timestamp('20160101', tz='US/Eastern')]))
- array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
- dtype=object)
-
- >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
- ... pd.Timestamp('20160101', tz='US/Eastern')]))
+ >>> pd.unique(
+ ... pd.Series(
+ ... [
+ ... pd.Timestamp("20160101", tz="US/Eastern"),
+ ... pd.Timestamp("20160101", tz="US/Eastern"),
+ ... ]
+ ... )
+ ... )
+ <DatetimeArray>
+ ['2016-01-01 00:00:00-05:00']
+ Length: 1, dtype: datetime64[ns, US/Eastern]
+
+ >>> pd.unique(
+ ... pd.Index(
+ ... [
+ ... pd.Timestamp("20160101", tz="US/Eastern"),
+ ... pd.Timestamp("20160101", tz="US/Eastern"),
+ ... ]
+ ... )
+ ... )
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
- ... dtype='datetime64[ns, US/Eastern]', freq=None)
+ dtype='datetime64[ns, US/Eastern]',
+ freq=None)
- >>> pd.unique(list('baabc'))
+ >>> pd.unique(list("baabc"))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
- >>> pd.unique(pd.Series(pd.Categorical(list('baabc'))))
- [b, a, c]
- Categories (3, object): [b, a, c]
+ >>> pd.unique(pd.Series(pd.Categorical(list("baabc"))))
+ ['b', 'a', 'c']
+ Categories (3, object): ['a', 'b', 'c']
- >>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
- ... categories=list('abc'))))
- [b, a, c]
- Categories (3, object): [b, a, c]
+ >>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc"))))
+ ['b', 'a', 'c']
+ Categories (3, object): ['a', 'b', 'c']
An ordered Categorical preserves the category ordering.
- >>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
- ... categories=list('abc'),
- ... ordered=True)))
- [b, a, c]
- Categories (3, object): [a < b < c]
+ >>> pd.unique(
+ ... pd.Series(
+ ... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
+ ... )
+ ... )
+ ['b', 'a', 'c']
+ Categories (3, object): ['a' < 'b' < 'c']
An array of tuples
- >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])
+ >>> pd.unique([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")])
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
"""
values = _ensure_arraylike(values)
diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py
index db28ad710989d..aa780787d58b6 100644
--- a/pandas/core/indexers.py
+++ b/pandas/core/indexers.py
@@ -209,16 +209,24 @@ def validate_indices(indices: np.ndarray, n: int) -> None:
Examples
--------
- >>> validate_indices([1, 2], 3)
- # OK
- >>> validate_indices([1, -2], 3)
- ValueError
- >>> validate_indices([1, 2, 3], 3)
- IndexError
- >>> validate_indices([-1, -1], 0)
- # OK
- >>> validate_indices([0, 1], 0)
- IndexError
+ >>> validate_indices(np.array([1, 2]), 3) # OK
+
+ >>> validate_indices(np.array([1, -2]), 3)
+ Traceback (most recent call last):
+ ...
+ ValueError: negative dimensions are not allowed
+
+ >>> validate_indices(np.array([1, 2, 3]), 3)
+ Traceback (most recent call last):
+ ...
+ IndexError: indices are out-of-bounds
+
+ >>> validate_indices(np.array([-1, -1]), 0) # OK
+
+ >>> validate_indices(np.array([0, 1]), 0)
+ Traceback (most recent call last):
+ ...
+ IndexError: indices are out-of-bounds
"""
if len(indices):
min_idx = indices.min()
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 54588eafc3fa0..92618605e47cc 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -1056,7 +1056,7 @@ def nanargmax(
[ 6., 7., nan],
[ 9., 10., nan]])
>>> nanops.nanargmax(arr, axis=1)
- array([2, 2, 1, 1], dtype=int64)
+ array([2, 2, 1, 1])
"""
values, mask, _, _, _ = _get_values(values, True, fill_value_typ="-inf", mask=mask)
# error: Need type annotation for 'result'
@@ -1102,7 +1102,7 @@ def nanargmin(
[nan, 7., 8.],
[nan, 10., 11.]])
>>> nanops.nanargmin(arr, axis=1)
- array([0, 0, 1, 1], dtype=int64)
+ array([0, 0, 1, 1])
"""
values, mask, _, _, _ = _get_values(values, True, fill_value_typ="+inf", mask=mask)
# error: Need type annotation for 'result'
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
| https://api.github.com/repos/pandas-dev/pandas/pulls/40903 | 2021-04-12T17:59:27Z | 2021-04-26T21:21:37Z | 2021-04-26T21:21:37Z | 2021-04-28T13:01:44Z |
DOC: Start v1.2.5 release notes | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 8697182f5ca6f..986cf43b80494 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -24,6 +24,7 @@ Version 1.2
.. toctree::
:maxdepth: 2
+ v1.2.5
v1.2.4
v1.2.3
v1.2.2
diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst
index dd74091b64014..433ee37508e66 100644
--- a/doc/source/whatsnew/v1.2.4.rst
+++ b/doc/source/whatsnew/v1.2.4.rst
@@ -30,4 +30,4 @@ Fixed regressions
Contributors
~~~~~~~~~~~~
-.. contributors:: v1.2.3..v1.2.4|HEAD
+.. contributors:: v1.2.3..v1.2.4
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst
new file mode 100644
index 0000000000000..cdfc2e5686b91
--- /dev/null
+++ b/doc/source/whatsnew/v1.2.5.rst
@@ -0,0 +1,48 @@
+.. _whatsnew_125:
+
+What's new in 1.2.5 (May ??, 2021)
+----------------------------------
+
+These are the changes in pandas 1.2.5. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_125.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_125.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_125.other:
+
+Other
+~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_125.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.2.4..v1.2.5|HEAD
| ~~do not merge yet. will merge to master after github release to trigger website update~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/40902 | 2021-04-12T17:30:14Z | 2021-04-12T19:29:00Z | 2021-04-12T19:29:00Z | 2021-04-12T19:29:04Z |
CLN: change jinja2 template name to `template_html` | diff --git a/doc/source/reference/style.rst b/doc/source/reference/style.rst
index 90ec5a2283f1e..6eacc90f4f62a 100644
--- a/doc/source/reference/style.rst
+++ b/doc/source/reference/style.rst
@@ -23,7 +23,7 @@ Styler properties
:toctree: api/
Styler.env
- Styler.template
+ Styler.template_html
Styler.loader
Style application
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb
index 8a10a6e4d4c2e..765b2929d3014 100644
--- a/doc/source/user_guide/style.ipynb
+++ b/doc/source/user_guide/style.ipynb
@@ -1710,7 +1710,7 @@
" Styler.loader, # the default\n",
" ])\n",
" )\n",
- " template = env.get_template(\"myhtml.tpl\")"
+ " template_html = env.get_template(\"myhtml.tpl\")"
]
},
{
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 7b5347ba2d9a9..91e4413e14e62 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -1485,7 +1485,7 @@ def from_custom_template(cls, searchpath, name):
# error: Invalid base class "cls"
class MyStyler(cls): # type:ignore[valid-type,misc]
env = jinja2.Environment(loader=loader)
- template = env.get_template(name)
+ template_html = env.get_template(name)
return MyStyler
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index 82f57b71caebf..9baa0542670e3 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -58,7 +58,7 @@ class StylerRenderer:
loader = jinja2.PackageLoader("pandas", "io/formats/templates")
env = jinja2.Environment(loader=loader, trim_blocks=True)
- template = env.get_template("html.tpl")
+ template_html = env.get_template("html.tpl")
def __init__(
self,
@@ -143,7 +143,7 @@ def render(self, **kwargs) -> str:
# TODO: namespace all the pandas keys
d = self._translate()
d.update(kwargs)
- return self.template.render(**d)
+ return self.template_html.render(**d)
def _compute(self):
"""
diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py
index 56e9581f8785a..1bb672e06291f 100644
--- a/pandas/tests/io/formats/style/test_style.py
+++ b/pandas/tests/io/formats/style/test_style.py
@@ -1367,7 +1367,7 @@ def test_block_names():
"tr",
"after_rows",
}
- result = set(Styler.template.blocks)
+ result = set(Styler.template_html.blocks)
assert result == expected
@@ -1386,6 +1386,6 @@ def test_from_custom_template(tmpdir):
result = Styler.from_custom_template(str(tmpdir.join("templates")), "myhtml.tpl")
assert issubclass(result, Styler)
assert result.env is not Styler.env
- assert result.template is not Styler.template
+ assert result.template_html is not Styler.template_html
styler = result(DataFrame({"A": [1, 2]}))
assert styler.render()
| prior to adding `template_latex` this changes the name of the html template to `template_html` including:
- updating docs on the new name for subclassing
- updating the custom method for subclassing
- updating tests which refer to it
| https://api.github.com/repos/pandas-dev/pandas/pulls/40901 | 2021-04-12T17:12:06Z | 2021-04-13T14:15:41Z | 2021-04-13T14:15:41Z | 2021-04-13T19:00:12Z |
DOC: whats new `other` section reduced | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 0ec9758477eba..0066eabf36c2b 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -117,6 +117,9 @@ We provided some focused development on :class:`.Styler`, including altering met
to accept more universal CSS language for arguments, such as ``'color:red;'`` instead of
``[('color', 'red')]`` (:issue:`39564`). This is also added to the built-in methods
to allow custom CSS highlighting instead of default background coloring (:issue:`40242`).
+Enhancements to other built-in methods include extending the :meth:`.Styler.background_gradient`
+method to shade elements based on a given gradient map and not be restricted only to
+values in the DataFrame (:issue:`39930` :issue:`22727` :issue:`28901`).
The :meth:`.Styler.apply` now consistently allows functions with ``ndarray`` output to
allow more flexible development of UDFs when ``axis`` is ``None`` ``0`` or ``1`` (:issue:`39393`).
@@ -818,24 +821,29 @@ ExtensionArray
- Fixed a bug where some properties of subclasses of :class:`PandasExtensionDtype` where improperly cached (:issue:`40329`)
-
+Styler
+^^^^^^
+
+- Bug in :class:`Styler` where ``subset`` arg in methods raised an error for some valid multiindex slices (:issue:`33562`)
+- :class:`Styler` rendered HTML output minor alterations to support w3 good code standard (:issue:`39626`)
+- Bug in :class:`Styler` where rendered HTML was missing a column class identifier for certain header cells (:issue:`39716`)
+- Bug in :meth:`Styler.background_gradient` where text-color was not determined correctly (:issue:`39888`)
+- Bug in :class:`Styler` where multiple elements in CSS-selectors were not correctly added to ``table_styles`` (:issue:`39942`)
+- Bug in :class:`.Styler` where copying from Jupyter dropped top left cell and misaligned headers (:issue:`12147`)
+- Bug in :class:`.Styler.where` where ``kwargs`` were not passed to the applicable callable (:issue:`40845`)
+- Bug in :class:`Styler` which caused CSS to duplicate on multiple renders. (:issue:`39395`, :issue:`40334`)
+
+
Other
^^^^^
- Bug in :class:`Index` constructor sometimes silently ignoring a specified ``dtype`` (:issue:`38879`)
- Bug in :func:`pandas.api.types.infer_dtype` not recognizing Series, Index or array with a period dtype (:issue:`23553`)
- Bug in :func:`pandas.api.types.infer_dtype` raising an error for general :class:`.ExtensionArray` objects. It will now return ``"unknown-array"`` instead of raising (:issue:`37367`)
- Bug in constructing a :class:`Series` from a list and a :class:`PandasDtype` (:issue:`39357`)
-- Bug in :class:`Styler` which caused CSS to duplicate on multiple renders. (:issue:`39395`, :issue:`40334`)
- ``inspect.getmembers(Series)`` no longer raises an ``AbstractMethodError`` (:issue:`38782`)
- Bug in :meth:`Series.where` with numeric dtype and ``other = None`` not casting to ``nan`` (:issue:`39761`)
- :meth:`Index.where` behavior now mirrors :meth:`Index.putmask` behavior, i.e. ``index.where(mask, other)`` matches ``index.putmask(~mask, other)`` (:issue:`39412`)
- Bug in :func:`pandas.testing.assert_series_equal`, :func:`pandas.testing.assert_frame_equal`, :func:`pandas.testing.assert_index_equal` and :func:`pandas.testing.assert_extension_array_equal` incorrectly raising when an attribute has an unrecognized NA type (:issue:`39461`)
-- Bug in :class:`Styler` where ``subset`` arg in methods raised an error for some valid multiindex slices (:issue:`33562`)
-- :class:`Styler` rendered HTML output minor alterations to support w3 good code standard (:issue:`39626`)
-- Bug in :class:`Styler` where rendered HTML was missing a column class identifier for certain header cells (:issue:`39716`)
-- Bug in :meth:`Styler.background_gradient` where text-color was not determined correctly (:issue:`39888`)
-- Bug in :class:`Styler` where multiple elements in CSS-selectors were not correctly added to ``table_styles`` (:issue:`39942`)
-- Bug in :class:`.Styler` where copying from Jupyter dropped top left cell and misaligned headers (:issue:`12147`)
-- Bug in :class:`.Styler.where` where ``kwargs`` were not passed to the applicable callable (:issue:`40845`)
- Bug in :meth:`DataFrame.equals`, :meth:`Series.equals`, :meth:`Index.equals` with object-dtype containing ``np.datetime64("NaT")`` or ``np.timedelta64("NaT")`` (:issue:`39650`)
- Bug in :func:`pandas.util.show_versions` where console JSON output was not proper JSON (:issue:`39701`)
- Bug in :meth:`DataFrame.convert_dtypes` incorrectly raised ValueError when called on an empty DataFrame (:issue:`40393`)
| - [x] closes #40897
| https://api.github.com/repos/pandas-dev/pandas/pulls/40900 | 2021-04-12T16:23:37Z | 2021-04-13T14:09:32Z | 2021-04-13T14:09:32Z | 2021-04-13T14:21:50Z |
t | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index bfb633ae55095..7543ea177d58c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -360,8 +360,8 @@ class DataFrame(NDFrame, OpsMixin):
Parameters
----------
- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
- Dict can contain Series, arrays, constants, dataclass or list-like objects. If
+ data : ndarray (structured or homogeneous), Iterable, dict, dataclass or DataFrame
+ Dict can contain Series, arrays, constants, or list-like objects. If
data is a dict, column order follows insertion-order.
.. versionchanged:: 0.25.0
@@ -426,8 +426,9 @@ class DataFrame(NDFrame, OpsMixin):
>>> from dataclasses import make_dataclass
>>> Point = make_dataclass("Point", [("x", int), ("y", int)])
- >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])
- x y
+ >>> df3 = pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])
+ df3
+ x y
0 0 0
1 0 3
2 2 3
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40899 | 2021-04-12T16:10:51Z | 2021-04-12T16:13:04Z | null | 2021-04-12T16:13:04Z |
CLN: refactor `Styler._translate` into composite translate functions | diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py
index 82f57b71caebf..0b7279d796464 100644
--- a/pandas/io/formats/style_render.py
+++ b/pandas/io/formats/style_render.py
@@ -173,15 +173,6 @@ def _translate(self):
BLANK_CLASS = "blank"
BLANK_VALUE = " "
- # mapping variables
- ctx = self.ctx # td css styles from apply() and applymap()
- cell_context = self.cell_context # td css classes from set_td_classes()
- cellstyle_map: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict(list)
-
- # copied attributes
- hidden_index = self.hidden_index
- hidden_columns = self.hidden_columns
-
# construct render dict
d = {
"uuid": self.uuid,
@@ -189,165 +180,185 @@ def _translate(self):
"caption": self.caption,
}
+ head = self._translate_header(
+ BLANK_CLASS, BLANK_VALUE, INDEX_NAME_CLASS, COL_HEADING_CLASS
+ )
+ d.update({"head": head})
+
+ self.cellstyle_map: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict(
+ list
+ )
+ body = self._translate_body(DATA_CLASS, ROW_HEADING_CLASS)
+ d.update({"body": body})
+
+ cellstyle: list[dict[str, CSSList | list[str]]] = [
+ {"props": list(props), "selectors": selectors}
+ for props, selectors in self.cellstyle_map.items()
+ ]
+ d.update({"cellstyle": cellstyle})
+
+ table_attr = self.table_attributes
+ use_mathjax = get_option("display.html.use_mathjax")
+ if not use_mathjax:
+ table_attr = table_attr or ""
+ if 'class="' in table_attr:
+ table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ')
+ else:
+ table_attr += ' class="tex2jax_ignore"'
+ d.update({"table_attributes": table_attr})
+
+ if self.tooltips:
+ d = self.tooltips._translate(self.data, self.uuid, d)
+
+ return d
+
+ def _translate_header(
+ self, blank_class, blank_value, index_name_class, col_heading_class
+ ):
+ """
+ Build each <tr> within table <head>, using the structure:
+ +----------------------------+---------------+---------------------------+
+ | index_blanks ... | column_name_0 | column_headers (level_0) |
+ 1) | .. | .. | .. |
+ | index_blanks ... | column_name_n | column_headers (level_n) |
+ +----------------------------+---------------+---------------------------+
+ 2) | index_names (level_0 to level_n) ... | column_blanks ... |
+ +----------------------------+---------------+---------------------------+
+ """
# for sparsifying a MultiIndex
- idx_lengths = _get_level_lengths(self.index)
- col_lengths = _get_level_lengths(self.columns, hidden_columns)
+ col_lengths = _get_level_lengths(self.columns, self.hidden_columns)
- n_rlvls = self.data.index.nlevels
- n_clvls = self.data.columns.nlevels
- rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
-
- if n_rlvls == 1:
- rlabels = [[x] for x in rlabels]
- if n_clvls == 1:
+ if self.data.columns.nlevels == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
head = []
- for r in range(n_clvls):
- # Blank for Index columns...
- row_es = [
- {
- "type": "th",
- "value": BLANK_VALUE,
- "display_value": BLANK_VALUE,
- "is_visible": not hidden_index,
- "class": " ".join([BLANK_CLASS]),
- }
- ] * (n_rlvls - 1)
-
- # ... except maybe the last for columns.names
+ # 1) column headers
+ for r in range(self.data.columns.nlevels):
+ index_blanks = [
+ _element("th", blank_class, blank_value, not self.hidden_index)
+ ] * (self.data.index.nlevels - 1)
+
name = self.data.columns.names[r]
- cs = [
- BLANK_CLASS if name is None else INDEX_NAME_CLASS,
- f"level{r}",
+ column_name = [
+ _element(
+ "th",
+ f"{blank_class if name is None else index_name_class} level{r}",
+ name if name is not None else blank_value,
+ not self.hidden_index,
+ )
]
- name = BLANK_VALUE if name is None else name
- row_es.append(
- {
- "type": "th",
- "value": name,
- "display_value": name,
- "class": " ".join(cs),
- "is_visible": not hidden_index,
- }
- )
if clabels:
- for c, value in enumerate(clabels[r]):
- es = {
- "type": "th",
- "value": value,
- "display_value": value,
- "class": f"{COL_HEADING_CLASS} level{r} col{c}",
- "is_visible": _is_visible(c, r, col_lengths),
- }
- colspan = col_lengths.get((r, c), 0)
- if colspan > 1:
- es["attributes"] = f'colspan="{colspan}"'
- row_es.append(es)
- head.append(row_es)
+ column_headers = [
+ _element(
+ "th",
+ f"{col_heading_class} level{r} col{c}",
+ value,
+ _is_visible(c, r, col_lengths),
+ attributes=(
+ f'colspan="{col_lengths.get((r, c), 0)}"'
+ if col_lengths.get((r, c), 0) > 1
+ else ""
+ ),
+ )
+ for c, value in enumerate(clabels[r])
+ ]
+ head.append(index_blanks + column_name + column_headers)
+ # 2) index names
if (
self.data.index.names
and com.any_not_none(*self.data.index.names)
- and not hidden_index
+ and not self.hidden_index
):
- index_header_row = []
+ index_names = [
+ _element(
+ "th",
+ f"{index_name_class} level{c}",
+ blank_value if name is None else name,
+ True,
+ )
+ for c, name in enumerate(self.data.index.names)
+ ]
- for c, name in enumerate(self.data.index.names):
- cs = [INDEX_NAME_CLASS, f"level{c}"]
- name = "" if name is None else name
- index_header_row.append(
- {"type": "th", "value": name, "class": " ".join(cs)}
+ column_blanks = [
+ _element(
+ "th",
+ f"{blank_class} col{c}",
+ blank_value,
+ c not in self.hidden_columns,
)
+ for c in range(len(clabels[0]))
+ ]
+ head.append(index_names + column_blanks)
- index_header_row.extend(
- [
- {
- "type": "th",
- "value": BLANK_VALUE,
- "class": " ".join([BLANK_CLASS, f"col{c}"]),
- }
- for c in range(len(clabels[0]))
- if c not in hidden_columns
- ]
- )
+ return head
- head.append(index_header_row)
- d.update({"head": head})
+ def _translate_body(self, data_class, row_heading_class):
+ """
+ Build each <tr> in table <body> in the following format:
+ +--------------------------------------------+---------------------------+
+ | index_header_0 ... index_header_n | data_by_column |
+ +--------------------------------------------+---------------------------+
+
+ Also add elements to the cellstyle_map for more efficient grouped elements in
+ <style></style> block
+ """
+ # for sparsifying a MultiIndex
+ idx_lengths = _get_level_lengths(self.index)
+
+ rlabels = self.data.index.tolist()
+ if self.data.index.nlevels == 1:
+ rlabels = [[x] for x in rlabels]
body = []
for r, row_tup in enumerate(self.data.itertuples()):
- row_es = []
- for c, value in enumerate(rlabels[r]):
- rid = [
- ROW_HEADING_CLASS,
- f"level{c}",
- f"row{r}",
- ]
- es = {
- "type": "th",
- "is_visible": (_is_visible(r, c, idx_lengths) and not hidden_index),
- "value": value,
- "display_value": value,
- "id": "_".join(rid[1:]),
- "class": " ".join(rid),
- }
- rowspan = idx_lengths.get((c, r), 0)
- if rowspan > 1:
- es["attributes"] = f'rowspan="{rowspan}"'
- row_es.append(es)
+ index_headers = [
+ _element(
+ "th",
+ f"{row_heading_class} level{c} row{r}",
+ value,
+ (_is_visible(r, c, idx_lengths) and not self.hidden_index),
+ id=f"level{c}_row{r}",
+ attributes=(
+ f'rowspan="{idx_lengths.get((c, r), 0)}"'
+ if idx_lengths.get((c, r), 0) > 1
+ else ""
+ ),
+ )
+ for c, value in enumerate(rlabels[r])
+ ]
+ data = []
for c, value in enumerate(row_tup[1:]):
- formatter = self._display_funcs[(r, c)]
- row_dict = {
- "type": "td",
- "value": value,
- "display_value": formatter(value),
- "is_visible": (c not in hidden_columns),
- "attributes": "",
- }
-
- # only add an id if the cell has a style
- props: CSSList = []
- if self.cell_ids or (r, c) in ctx:
- row_dict["id"] = f"row{r}_col{c}"
- props.extend(ctx[r, c])
-
# add custom classes from cell context
cls = ""
- if (r, c) in cell_context:
- cls = " " + cell_context[r, c]
- row_dict["class"] = f"{DATA_CLASS} row{r} col{c}{cls}"
-
- row_es.append(row_dict)
- if props: # (), [] won't be in cellstyle_map, cellstyle respectively
- cellstyle_map[tuple(props)].append(f"row{r}_col{c}")
- body.append(row_es)
- d.update({"body": body})
-
- cellstyle: list[dict[str, CSSList | list[str]]] = [
- {"props": list(props), "selectors": selectors}
- for props, selectors in cellstyle_map.items()
- ]
- d.update({"cellstyle": cellstyle})
+ if (r, c) in self.cell_context:
+ cls = " " + self.cell_context[r, c]
+
+ data_element = _element(
+ "td",
+ f"{data_class} row{r} col{c}{cls}",
+ value,
+ (c not in self.hidden_columns),
+ attributes="",
+ display_value=self._display_funcs[(r, c)](value),
+ )
- table_attr = self.table_attributes
- use_mathjax = get_option("display.html.use_mathjax")
- if not use_mathjax:
- table_attr = table_attr or ""
- if 'class="' in table_attr:
- table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ')
- else:
- table_attr += ' class="tex2jax_ignore"'
- d.update({"table_attributes": table_attr})
+ # only add an id if the cell has a style
+ if self.cell_ids or (r, c) in self.ctx:
+ data_element["id"] = f"row{r}_col{c}"
+ if (r, c) in self.ctx and self.ctx[r, c]: # only add if non-empty
+ self.cellstyle_map[tuple(self.ctx[r, c])].append(
+ f"row{r}_col{c}"
+ )
- if self.tooltips:
- d = self.tooltips._translate(self.data, self.uuid, d)
+ data.append(data_element)
- return d
+ body.append(index_headers + data)
+ return body
def format(
self,
@@ -502,6 +513,27 @@ def format(
return self
+def _element(
+ html_element: str,
+ html_class: str,
+ value: Any,
+ is_visible: bool,
+ **kwargs,
+) -> dict:
+ """
+ Template to return container with information for a <td></td> or <th></th> element.
+ """
+ if "display_value" not in kwargs:
+ kwargs["display_value"] = value
+ return {
+ "type": html_element,
+ "value": value,
+ "class": html_class,
+ "is_visible": is_visible,
+ **kwargs,
+ }
+
+
def _get_level_lengths(index, hidden_elements=None):
"""
Given an index, find the level length for each element.
diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py
index 56e9581f8785a..25a7eb36d6b48 100644
--- a/pandas/tests/io/formats/style/test_style.py
+++ b/pandas/tests/io/formats/style/test_style.py
@@ -273,6 +273,7 @@ def test_empty_index_name_doesnt_display(self):
"type": "th",
"value": "A",
"is_visible": True,
+ "attributes": "",
},
{
"class": "col_heading level0 col1",
@@ -280,6 +281,7 @@ def test_empty_index_name_doesnt_display(self):
"type": "th",
"value": "B",
"is_visible": True,
+ "attributes": "",
},
{
"class": "col_heading level0 col2",
@@ -287,6 +289,7 @@ def test_empty_index_name_doesnt_display(self):
"type": "th",
"value": "C",
"is_visible": True,
+ "attributes": "",
},
]
]
@@ -295,6 +298,7 @@ def test_empty_index_name_doesnt_display(self):
def test_index_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
+ # TODO: this test can be minimised to address the test more directly
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index("A").style._translate()
@@ -313,6 +317,7 @@ def test_index_name(self):
"value": "B",
"display_value": "B",
"is_visible": True,
+ "attributes": "",
},
{
"class": "col_heading level0 col1",
@@ -320,12 +325,31 @@ def test_index_name(self):
"value": "C",
"display_value": "C",
"is_visible": True,
+ "attributes": "",
},
],
[
- {"class": "index_name level0", "type": "th", "value": "A"},
- {"class": "blank col0", "type": "th", "value": self.blank_value},
- {"class": "blank col1", "type": "th", "value": self.blank_value},
+ {
+ "class": "index_name level0",
+ "type": "th",
+ "value": "A",
+ "is_visible": True,
+ "display_value": "A",
+ },
+ {
+ "class": "blank col0",
+ "type": "th",
+ "value": self.blank_value,
+ "is_visible": True,
+ "display_value": self.blank_value,
+ },
+ {
+ "class": "blank col1",
+ "type": "th",
+ "value": self.blank_value,
+ "is_visible": True,
+ "display_value": self.blank_value,
+ },
],
]
@@ -333,6 +357,7 @@ def test_index_name(self):
def test_multiindex_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
+ # TODO: this test can be minimised to address the test more directly
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index(["A", "B"]).style._translate()
@@ -358,12 +383,31 @@ def test_multiindex_name(self):
"value": "C",
"display_value": "C",
"is_visible": True,
+ "attributes": "",
},
],
[
- {"class": "index_name level0", "type": "th", "value": "A"},
- {"class": "index_name level1", "type": "th", "value": "B"},
- {"class": "blank col0", "type": "th", "value": self.blank_value},
+ {
+ "class": "index_name level0",
+ "type": "th",
+ "value": "A",
+ "is_visible": True,
+ "display_value": "A",
+ },
+ {
+ "class": "index_name level1",
+ "type": "th",
+ "value": "B",
+ "is_visible": True,
+ "display_value": "B",
+ },
+ {
+ "class": "blank col0",
+ "type": "th",
+ "value": self.blank_value,
+ "is_visible": True,
+ "display_value": self.blank_value,
+ },
],
]
@@ -838,7 +882,7 @@ def test_mi_sparse(self):
"class": "row_heading level0 row0",
"id": "level0_row0",
}
- tm.assert_dict_equal(body_0, expected_0)
+ assert body_0 == expected_0
body_1 = result["body"][0][1]
expected_1 = {
@@ -848,8 +892,9 @@ def test_mi_sparse(self):
"type": "th",
"class": "row_heading level1 row0",
"id": "level1_row0",
+ "attributes": "",
}
- tm.assert_dict_equal(body_1, expected_1)
+ assert body_1 == expected_1
body_10 = result["body"][1][0]
expected_10 = {
@@ -859,8 +904,9 @@ def test_mi_sparse(self):
"type": "th",
"class": "row_heading level0 row1",
"id": "level0_row1",
+ "attributes": "",
}
- tm.assert_dict_equal(body_10, expected_10)
+ assert body_10 == expected_10
head = result["head"][0]
expected = [
@@ -884,21 +930,26 @@ def test_mi_sparse(self):
"value": "A",
"is_visible": True,
"display_value": "A",
+ "attributes": "",
},
]
assert head == expected
def test_mi_sparse_disabled(self):
+ df = DataFrame(
+ {"A": [1, 2]}, index=pd.MultiIndex.from_arrays([["a", "a"], [0, 1]])
+ )
+ result = df.style._translate()["body"]
+ assert 'rowspan="2"' in result[0][0]["attributes"]
+ assert result[1][0]["is_visible"] is False
+
with pd.option_context("display.multi_sparse", False):
- df = DataFrame(
- {"A": [1, 2]}, index=pd.MultiIndex.from_arrays([["a", "a"], [0, 1]])
- )
- result = df.style._translate()
- body = result["body"]
- for row in body:
- assert "attributes" not in row[0]
+ result = df.style._translate()["body"]
+ assert 'rowspan="2"' not in result[0][0]["attributes"]
+ assert result[1][0]["is_visible"] is True
def test_mi_sparse_index_names(self):
+ # TODO this test is verbose can be minimised to more directly target test
df = DataFrame(
{"A": [1, 2]},
index=pd.MultiIndex.from_arrays(
@@ -908,14 +959,33 @@ def test_mi_sparse_index_names(self):
result = df.style._translate()
head = result["head"][1]
expected = [
- {"class": "index_name level0", "value": "idx_level_0", "type": "th"},
- {"class": "index_name level1", "value": "idx_level_1", "type": "th"},
- {"class": "blank col0", "value": self.blank_value, "type": "th"},
+ {
+ "class": "index_name level0",
+ "value": "idx_level_0",
+ "type": "th",
+ "is_visible": True,
+ "display_value": "idx_level_0",
+ },
+ {
+ "class": "index_name level1",
+ "value": "idx_level_1",
+ "type": "th",
+ "is_visible": True,
+ "display_value": "idx_level_1",
+ },
+ {
+ "class": "blank col0",
+ "value": self.blank_value,
+ "type": "th",
+ "is_visible": True,
+ "display_value": self.blank_value,
+ },
]
assert head == expected
def test_mi_sparse_column_names(self):
+ # TODO this test is verbose - could be minimised
df = DataFrame(
np.arange(16).reshape(4, 4),
index=pd.MultiIndex.from_arrays(
@@ -949,6 +1019,7 @@ def test_mi_sparse_column_names(self):
"is_visible": True,
"type": "th",
"value": 1,
+ "attributes": "",
},
{
"class": "col_heading level1 col1",
@@ -956,6 +1027,7 @@ def test_mi_sparse_column_names(self):
"is_visible": True,
"type": "th",
"value": 0,
+ "attributes": "",
},
{
"class": "col_heading level1 col2",
@@ -963,6 +1035,7 @@ def test_mi_sparse_column_names(self):
"is_visible": True,
"type": "th",
"value": 1,
+ "attributes": "",
},
{
"class": "col_heading level1 col3",
@@ -970,6 +1043,7 @@ def test_mi_sparse_column_names(self):
"is_visible": True,
"type": "th",
"value": 0,
+ "attributes": "",
},
]
assert head == expected
| This composites `Styler._translate()` into:
```
def _translate():
...
self._translate_header()
...
self._translate_body()
...
```
where the `_translate_header()` and `_translate_body()` methods are given some documentation explaining what they are building. The code is not fundamentally changed, but variable names are renamed to add clarity to match the documentation and loop comprehensions replace `for loops` where possible.
Some tests are minimally altered since the generic `_element` method now returns more dict keys for some elements. | https://api.github.com/repos/pandas-dev/pandas/pulls/40898 | 2021-04-12T16:08:10Z | 2021-04-13T15:26:21Z | 2021-04-13T15:26:21Z | 2021-04-13T15:26:25Z |
Backport PR #40878 on branch 1.2.x (REGR: ufunc with DataFrame input not passing all kwargs) | diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst
index 54652ecc4dceb..dd74091b64014 100644
--- a/doc/source/whatsnew/v1.2.4.rst
+++ b/doc/source/whatsnew/v1.2.4.rst
@@ -21,6 +21,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.where` not returning a copy in the case of an all True condition (:issue:`39595`)
- Fixed regression in :meth:`DataFrame.replace` raising ``IndexError`` when ``regex`` was a multi-key dictionary (:issue:`39338`)
- Fixed regression in repr of floats in an ``object`` column not respecting ``float_format`` when printed in the console or outputted through :meth:`DataFrame.to_string`, :meth:`DataFrame.to_html`, and :meth:`DataFrame.to_latex` (:issue:`40024`)
+- Fixed regression in NumPy ufuncs such as ``np.add`` not passing through all arguments for :class:`DataFrame` (:issue:`40662`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py
index cb185dcf78f63..8d02ddef29593 100644
--- a/pandas/core/arraylike.py
+++ b/pandas/core/arraylike.py
@@ -351,15 +351,17 @@ def reconstruct(result):
# * len(inputs) > 1 is doable when we know that we have
# aligned blocks / dtypes.
inputs = tuple(np.asarray(x) for x in inputs)
- result = getattr(ufunc, method)(*inputs)
+ result = getattr(ufunc, method)(*inputs, **kwargs)
elif self.ndim == 1:
# ufunc(series, ...)
inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
result = getattr(ufunc, method)(*inputs, **kwargs)
else:
# ufunc(dataframe)
- if method == "__call__":
+ if method == "__call__" and not kwargs:
# for np.<ufunc>(..) calls
+ # kwargs cannot necessarily be handled block-by-block, so only
+ # take this path if there are no kwargs
mgr = inputs[0]._mgr
result = mgr.apply(getattr(ufunc, method))
else:
diff --git a/pandas/tests/frame/test_ufunc.py b/pandas/tests/frame/test_ufunc.py
index 83fd3db72a90c..19ebae449ecc3 100644
--- a/pandas/tests/frame/test_ufunc.py
+++ b/pandas/tests/frame/test_ufunc.py
@@ -1,3 +1,5 @@
+from functools import partial
+
import numpy as np
import pytest
@@ -55,6 +57,42 @@ def test_binary_input_dispatch_binop(dtype):
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize(
+ "func,arg,expected",
+ [
+ (np.add, 1, [2, 3, 4, 5]),
+ (
+ partial(np.add, where=[[False, True], [True, False]]),
+ np.array([[1, 1], [1, 1]]),
+ [0, 3, 4, 0],
+ ),
+ (np.power, np.array([[1, 1], [2, 2]]), [1, 2, 9, 16]),
+ (np.subtract, 2, [-1, 0, 1, 2]),
+ (
+ partial(np.negative, where=np.array([[False, True], [True, False]])),
+ None,
+ [0, -2, -3, 0],
+ ),
+ ],
+)
+def test_ufunc_passes_args(func, arg, expected, request):
+ # GH#40662
+ arr = np.array([[1, 2], [3, 4]])
+ df = pd.DataFrame(arr)
+ result_inplace = np.zeros_like(arr)
+ # 1-argument ufunc
+ if arg is None:
+ result = func(df, out=result_inplace)
+ else:
+ result = func(df, arg, out=result_inplace)
+
+ expected = np.array(expected).reshape(2, 2)
+ tm.assert_numpy_array_equal(result_inplace, expected)
+
+ expected = pd.DataFrame(expected)
+ tm.assert_frame_equal(result, expected)
+
+
@pytest.mark.parametrize("dtype_a", dtypes)
@pytest.mark.parametrize("dtype_b", dtypes)
def test_binary_input_aligns_columns(dtype_a, dtype_b):
| Backport PR #40878: REGR: ufunc with DataFrame input not passing all kwargs | https://api.github.com/repos/pandas-dev/pandas/pulls/40895 | 2021-04-12T14:29:47Z | 2021-04-12T15:46:14Z | 2021-04-12T15:46:14Z | 2021-04-12T15:46:14Z |
Backport PR #40880 on branch 1.2.x (DOC: 1.2.4 release date) | diff --git a/doc/source/whatsnew/v1.2.4.rst b/doc/source/whatsnew/v1.2.4.rst
index fffdf333178fc..54652ecc4dceb 100644
--- a/doc/source/whatsnew/v1.2.4.rst
+++ b/doc/source/whatsnew/v1.2.4.rst
@@ -1,7 +1,7 @@
.. _whatsnew_124:
-What's new in 1.2.4 (April ??, 2021)
----------------------------------------
+What's new in 1.2.4 (April 12, 2021)
+------------------------------------
These are the changes in pandas 1.2.4. See :ref:`release` for a full changelog
including other versions of pandas.
@@ -24,26 +24,6 @@ Fixed regressions
.. ---------------------------------------------------------------------------
-.. _whatsnew_124.bug_fixes:
-
-Bug fixes
-~~~~~~~~~
-
--
--
-
-.. ---------------------------------------------------------------------------
-
-.. _whatsnew_124.other:
-
-Other
-~~~~~
-
--
--
-
-.. ---------------------------------------------------------------------------
-
.. _whatsnew_124.contributors:
Contributors
| Backport PR #40880: DOC: 1.2.4 release date | https://api.github.com/repos/pandas-dev/pandas/pulls/40894 | 2021-04-12T12:17:57Z | 2021-04-12T13:30:57Z | 2021-04-12T13:30:57Z | 2021-04-12T13:30:57Z |
⬆️ UPGRADE: Autoupdate pre-commit config | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b6df108a3166c..fa5bc10f979ed 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -51,7 +51,7 @@ repos:
hooks:
- id: isort
- repo: https://github.com/asottile/pyupgrade
- rev: v2.11.0
+ rev: v2.12.0
hooks:
- id: pyupgrade
args: [--py37-plus]
| <!-- START pr-commits -->
<!-- END pr-commits -->
## Base PullRequest
default branch (https://github.com/pandas-dev/pandas/tree/master)
## Command results
<details>
<summary>Details: </summary>
<details>
<summary><em>add path</em></summary>
```Shell
/home/runner/work/_actions/technote-space/create-pr-action/v2/node_modules/npm-check-updates/bin
```
</details>
<details>
<summary><em>pip install pre-commit</em></summary>
```Shell
Collecting pre-commit
Downloading pre_commit-2.12.0-py2.py3-none-any.whl (189 kB)
Collecting toml
Using cached toml-0.10.2-py2.py3-none-any.whl (16 kB)
Collecting nodeenv>=0.11.1
Downloading nodeenv-1.6.0-py2.py3-none-any.whl (21 kB)
Collecting virtualenv>=20.0.8
Downloading virtualenv-20.4.3-py2.py3-none-any.whl (7.2 MB)
Collecting cfgv>=2.0.0
Using cached cfgv-3.2.0-py2.py3-none-any.whl (7.3 kB)
Collecting identify>=1.0.0
Downloading identify-2.2.3-py2.py3-none-any.whl (98 kB)
Collecting pyyaml>=5.1
Downloading PyYAML-5.4.1-cp39-cp39-manylinux1_x86_64.whl (630 kB)
Collecting distlib<1,>=0.3.1
Using cached distlib-0.3.1-py2.py3-none-any.whl (335 kB)
Collecting six<2,>=1.9.0
Using cached six-1.15.0-py2.py3-none-any.whl (10 kB)
Collecting appdirs<2,>=1.4.3
Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB)
Collecting filelock<4,>=3.0.0
Using cached filelock-3.0.12-py3-none-any.whl (7.6 kB)
Installing collected packages: six, filelock, distlib, appdirs, virtualenv, toml, pyyaml, nodeenv, identify, cfgv, pre-commit
Successfully installed appdirs-1.4.4 cfgv-3.2.0 distlib-0.3.1 filelock-3.0.12 identify-2.2.3 nodeenv-1.6.0 pre-commit-2.12.0 pyyaml-5.4.1 six-1.15.0 toml-0.10.2 virtualenv-20.4.3
```
</details>
<details>
<summary><em>pre-commit autoupdate || (exit 0);</em></summary>
```Shell
Updating https://github.com/MarcoGorelli/absolufy-imports ... [INFO] Initializing environment for https://github.com/MarcoGorelli/absolufy-imports.
already up to date.
Updating https://github.com/python/black ... already up to date.
Updating https://github.com/codespell-project/codespell ... [INFO] Initializing environment for https://github.com/codespell-project/codespell.
already up to date.
Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks.
already up to date.
Updating https://github.com/cpplint/cpplint ... [INFO] Initializing environment for https://github.com/cpplint/cpplint.
=====> /home/runner/.cache/pre-commit/repo2ir9bkt3/.pre-commit-hooks.yaml does not exist
Updating https://gitlab.com/pycqa/flake8 ... [INFO] Initializing environment for https://gitlab.com/pycqa/flake8.
already up to date.
Updating https://github.com/PyCQA/isort ... [INFO] Initializing environment for https://github.com/PyCQA/isort.
already up to date.
Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade.
updating v2.11.0 -> v2.12.0.
Updating https://github.com/pre-commit/pygrep-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pygrep-hooks.
already up to date.
Updating https://github.com/asottile/yesqa ... already up to date.
```
</details>
<details>
<summary><em>pre-commit run -a || (exit 0);</em></summary>
```Shell
[INFO] Initializing environment for https://github.com/cpplint/cpplint.
[INFO] Initializing environment for https://gitlab.com/pycqa/flake8:flake8-bugbear>=21.3.2,flake8-comprehensions>=3.1.0.
[INFO] Initializing environment for https://github.com/asottile/yesqa:flake8==3.9.0.
[INFO] Installing environment for https://github.com/MarcoGorelli/absolufy-imports.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/python/black.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/codespell-project/codespell.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/cpplint/cpplint.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://gitlab.com/pycqa/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://gitlab.com/pycqa/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/PyCQA/isort.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/pyupgrade.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/yesqa.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
absolufy-imports....................................................................................Passed
black...............................................................................................Passed
codespell...........................................................................................Passed
Fix End of Files....................................................................................Passed
Trim Trailing Whitespace............................................................................Passed
cpplint.............................................................................................Passed
flake8..............................................................................................Passed
flake8 (cython).....................................................................................Passed
flake8 (cython template)............................................................................Passed
isort...............................................................................................Passed
pyupgrade...........................................................................................Passed
rst ``code`` is two backticks.......................................................................Passed
rst directives end with two colons..................................................................Passed
rst ``inline code`` next to normal text.............................................................Passed
Strip unnecessary `# noqa`s.........................................................................Passed
flake8-rst..........................................................................................Passed
Check for use of Union[Series, DataFrame] instead of FrameOrSeriesUnion alias.......................Passed
Check for inconsistent use of pandas namespace......................................................Passed
Check code for instances of os.remove...............................................................Passed
Unwanted patterns...................................................................................Passed
Unwanted patterns in tests..........................................................................Passed
Generate pip dependency from conda..................................................................Passed
Check flake8 version is synced across flake8, yesqa, and environment.yml............................Passed
Validate correct capitalization among titles in documentation.......................................Passed
Check for use of bare pytest raises.................................................................Passed
Check for use of private functions across modules...................................................Passed
Check for import of private attributes across modules...............................................Passed
Check for use of not concatenated strings...........................................................Passed
Check for strings with wrong placed spaces..........................................................Passed
Import pandas.array as pd_array in core.............................................................Passed
Use bool_t instead of bool in pandas/core/generic.py................................................Passed
```
</details>
</details>
## Changed files
<details>
<summary>Changed file: </summary>
- .pre-commit-config.yaml
</details>
<hr>
[:octocat: Repo](https://github.com/technote-space/create-pr-action) | [:memo: Issues](https://github.com/technote-space/create-pr-action/issues) | [:department_store: Marketplace](https://github.com/marketplace/actions/create-pr-action) | https://api.github.com/repos/pandas-dev/pandas/pulls/40890 | 2021-04-12T07:20:11Z | 2021-04-12T08:25:46Z | 2021-04-12T08:25:45Z | 2021-04-12T08:25:50Z |
Raising ValueError when xlim and ylim contain non-int and non-float dtype elements | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index c9267a756bef3..d4a179903e174 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -753,7 +753,7 @@ Plotting
- Prevent warnings when matplotlib's ``constrained_layout`` is enabled (:issue:`25261`)
- Bug in :func:`DataFrame.plot` was showing the wrong colors in the legend if the function was called repeatedly and some calls used ``yerr`` while others didn't (partial fix of :issue:`39522`)
- Bug in :func:`DataFrame.plot` was showing the wrong colors in the legend if the function was called repeatedly and some calls used ``secondary_y`` and others use ``legend=False`` (:issue:`40044`)
-
+- Bug in :func:`DataFrame.plot` was not raising for invalid ``xlim`` and ``ylim`` dtypes (:issue:`40781`)
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 7ddab91a24ec0..bd35e84d92e41 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -14,6 +14,7 @@
from pandas.core.dtypes.common import (
is_categorical_dtype,
+ is_datetime64_any_dtype,
is_extension_array_dtype,
is_float,
is_float_dtype,
@@ -489,6 +490,17 @@ def _post_plot_logic(self, ax, data):
"""Post process for each axes. Overridden in child classes"""
pass
+ def _has_valid_lim_dtype(self, list_iterable: list) -> bool:
+ """Check if an iterable has float, int or datetime datatype"""
+ # GH40781
+ if (
+ all([is_datetime64_any_dtype(lim) for lim in list_iterable])
+ or np.issubdtype(np.array(list_iterable).dtype, np.int)
+ or np.issubdtype(np.array(list_iterable).dtype, np.float)
+ ):
+ return True
+ return False
+
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
@@ -511,10 +523,24 @@ def _adorn_subplots(self):
if self.xticks is not None:
ax.set_xticks(self.xticks)
+ # Addressing issue #GH40781 raising ValueError
+ # if xlim or ylim contain non float or non int dtype
if self.ylim is not None:
+ if not self._has_valid_lim_dtype(self.ylim):
+ raise ValueError(
+ "`ylim` contains values"
+ " which are not of float, int or numpy.datetime64 dtype"
+ f" in {self.ylim}"
+ )
ax.set_ylim(self.ylim)
if self.xlim is not None:
+ if not self._has_valid_lim_dtype(self.xlim):
+ raise ValueError(
+ "`xlim` contains values"
+ " which are not of float, int or numpy.datetime64 dtype"
+ f" in {self.xlim}"
+ )
ax.set_xlim(self.xlim)
# GH9093, currently Pandas does not show ylabel, so if users provide
diff --git a/pandas/tests/plotting/test_xlim_ylim_dtype.py b/pandas/tests/plotting/test_xlim_ylim_dtype.py
new file mode 100644
index 0000000000000..1f85708253b2e
--- /dev/null
+++ b/pandas/tests/plotting/test_xlim_ylim_dtype.py
@@ -0,0 +1,54 @@
+from datetime import datetime as dt
+
+import numpy as np
+import pytest
+
+pytest.importorskip("matplotlib")
+from pandas import DataFrame
+
+pytestmark = pytest.mark.slow
+
+
+@pytest.mark.parametrize(
+ "df, lim",
+ [
+ (
+ DataFrame(
+ {
+ "A": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ "B": [dt.now() for i in range(0, 10)],
+ }
+ ),
+ [
+ ["1", "3"],
+ [dt.now(), dt.now()],
+ [1, "2"],
+ [0.1, "0.2"],
+ [np.datetime64(dt.now()), dt.now()],
+ ],
+ )
+ ],
+)
+def test_axis_lim_invalid(df, lim):
+ for elem in lim:
+ with pytest.raises(ValueError, match="`xlim` contains values"):
+ df.plot.line(x="A", xlim=elem)
+
+
+@pytest.mark.parametrize(
+ "df, lim",
+ [
+ (
+ DataFrame(
+ {
+ "A": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ "B": [dt.now() for i in range(0, 10)],
+ }
+ ),
+ [[1, 3], [0.1, 0.2], [np.datetime64(dt.now()), np.datetime64(dt.now())]],
+ )
+ ],
+)
+def test_axis_lim_valid(df, lim):
+ for elem in lim:
+ df.plot.line(x="A", xlim=elem)
| - [x] closes #40781
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40889 | 2021-04-11T23:55:09Z | 2021-10-08T18:24:10Z | null | 2021-10-08T18:25:23Z |
TST: add test for Index.where when needing to cast to object dtype | diff --git a/pandas/tests/indexes/numeric/test_indexing.py b/pandas/tests/indexes/numeric/test_indexing.py
index 5f2f8f75045bb..43dc1daf6f392 100644
--- a/pandas/tests/indexes/numeric/test_indexing.py
+++ b/pandas/tests/indexes/numeric/test_indexing.py
@@ -376,6 +376,18 @@ def test_where(self, klass, index):
result = index.where(klass(cond))
tm.assert_index_equal(result, expected)
+ def test_maybe_cast_with_dtype(self):
+ # https://github.com/pandas-dev/pandas/issues/32413
+ index = Index([1, np.nan])
+
+ cond = index.notna()
+ other = "a" + Index(range(2)).astype(str)
+
+ fixed_index = index.where(cond, other)
+
+ tm.assert_index_equal(other, Index(["a0", "a1"]))
+ tm.assert_index_equal(fixed_index, Index([1.0, "a1"]))
+
class TestTake:
@pytest.mark.parametrize("klass", [Float64Index, Int64Index, UInt64Index])
| - [x] closes #32413
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry: Adds a basic test for #32413.
Hey! This is my first PR with actual code: added a very basic test to validate #32413.
Questions:
* What is the best place for this test? (currently put it in `pandas/tests/indexes/test_indexing.py`)
* Should I try to create another test that uses the `index` input argument? | https://api.github.com/repos/pandas-dev/pandas/pulls/40888 | 2021-04-11T23:28:14Z | 2021-09-01T00:08:28Z | null | 2021-09-01T00:08:29Z |
TYP overload fillna #40737 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 045776c3f5c50..37fc5de95b3d2 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5007,6 +5007,121 @@ def rename(
errors=errors,
)
+ @overload
+ def fillna(
+ self,
+ value=...,
+ method: str | None = ...,
+ axis: Axis | None = ...,
+ inplace: Literal[False] = ...,
+ limit=...,
+ downcast=...,
+ ) -> DataFrame:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ value,
+ method: str | None,
+ axis: Axis | None,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ *,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ value,
+ *,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ *,
+ method: str | None,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ *,
+ axis: Axis | None,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ *,
+ method: str | None,
+ axis: Axis | None,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ value,
+ *,
+ axis: Axis | None,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ value,
+ method: str | None,
+ *,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ value=...,
+ method: str | None = ...,
+ axis: Axis | None = ...,
+ inplace: bool = ...,
+ limit=...,
+ downcast=...,
+ ) -> DataFrame | None:
+ ...
+
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
self,
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 4b36b846ef9c4..5c605a6b441c6 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4581,6 +4581,121 @@ def drop(
errors=errors,
)
+ @overload
+ def fillna(
+ self,
+ value=...,
+ method: str | None = ...,
+ axis: Axis | None = ...,
+ inplace: Literal[False] = ...,
+ limit=...,
+ downcast=...,
+ ) -> Series:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ value,
+ method: str | None,
+ axis: Axis | None,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ *,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ value,
+ *,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ *,
+ method: str | None,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ *,
+ axis: Axis | None,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ *,
+ method: str | None,
+ axis: Axis | None,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ value,
+ *,
+ axis: Axis | None,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ value,
+ method: str | None,
+ *,
+ inplace: Literal[True],
+ limit=...,
+ downcast=...,
+ ) -> None:
+ ...
+
+ @overload
+ def fillna(
+ self,
+ value=...,
+ method: str | None = ...,
+ axis: Axis | None = ...,
+ inplace: bool = ...,
+ limit=...,
+ downcast=...,
+ ) -> Series | None:
+ ...
+
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
self,
diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index f2027f2707a8b..dc4550484fa3b 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -5,7 +5,6 @@
Pattern,
Set,
Union,
- cast,
)
import unicodedata
import warnings
@@ -371,9 +370,7 @@ def _str_get_dummies(self, sep="|"):
try:
arr = sep + arr + sep
except TypeError:
- arr = cast(Series, arr)
arr = sep + arr.astype(str) + sep
- arr = cast(Series, arr)
tags: Set[str] = set()
for ts in Series(arr).str.split(sep):
| - [x] closes #40737
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them | https://api.github.com/repos/pandas-dev/pandas/pulls/40887 | 2021-04-11T23:15:42Z | 2021-04-15T21:21:57Z | 2021-04-15T21:21:56Z | 2021-04-15T21:21:57Z |
Addressing #40781 to raise ValueError as per discussion when a list with xlim or ylim is supplied, that contains elements in other format than integer or float type. | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index c9267a756bef3..5ac5d30fe49f8 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -753,7 +753,7 @@ Plotting
- Prevent warnings when matplotlib's ``constrained_layout`` is enabled (:issue:`25261`)
- Bug in :func:`DataFrame.plot` was showing the wrong colors in the legend if the function was called repeatedly and some calls used ``yerr`` while others didn't (partial fix of :issue:`39522`)
- Bug in :func:`DataFrame.plot` was showing the wrong colors in the legend if the function was called repeatedly and some calls used ``secondary_y`` and others use ``legend=False`` (:issue:`40044`)
-
+- Bug in :func:`DataFrame.plot.line` was accepting ``xlim`` and ``ylim`` kwargs containing string elements; now ``ValueError`` is raised if elements are not ``int`` or ``float`` dtype (:issue:`40781`)
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 7ddab91a24ec0..525a5f75e9b07 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -505,6 +505,7 @@ def _adorn_subplots(self):
)
for ax in self.axes:
+
if self.yticks is not None:
ax.set_yticks(self.yticks)
@@ -512,10 +513,22 @@ def _adorn_subplots(self):
ax.set_xticks(self.xticks)
if self.ylim is not None:
- ax.set_ylim(self.ylim)
+            for elem in self.ylim:  # Addressing issue #40781: raise ValueError if the provided dtype is not float or int.
+ if not is_float(elem) and not is_integer(elem):
+ raise ValueError(
+ "`ylim` should contain y-Axis plot range in either float or integer datatype.\n"
+ f"`ylim` had unsupported datatype of {type(elem)} with value {elem}. \n"
+ )
+ ax.set_ylim(self.ylim)
if self.xlim is not None:
- ax.set_xlim(self.xlim)
+            for elem in self.xlim:  # Addressing issue #40781: raise ValueError if the provided dtype is not float or int.
+                if not is_float(elem) and not is_integer(elem):
+                    raise ValueError(
+                        "`xlim` should contain x-Axis plot range in either float or integer datatype.\n"
+                        f"`xlim` had unsupported datatype of {type(elem)} with value {elem}.\n"
+                    )
+            ax.set_xlim(self.xlim)
+ ax.set_xlim(self.xlim)
# GH9093, currently Pandas does not show ylabel, so if users provide
# ylabel will set it as ylabel in the plot.
| - [x] closes #40781
- [x] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/40886 | 2021-04-11T22:11:41Z | 2021-04-11T23:26:21Z | null | 2021-04-11T23:26:21Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.