title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
TST: flesh out EA setitem tests | diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index f1fa74192d4df..cb9a19b438feb 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -8,6 +8,35 @@
class BaseSetitemTests(BaseExtensionTests):
+ @pytest.fixture(
+ params=[
+ lambda x: x.index,
+ lambda x: list(x.index),
+ lambda x: slice(None),
+ lambda x: slice(0, len(x)),
+ lambda x: range(len(x)),
+ lambda x: list(range(len(x))),
+ lambda x: np.ones(len(x), dtype=bool),
+ ],
+ ids=[
+ "index",
+ "list[index]",
+ "null_slice",
+ "full_slice",
+ "range",
+ "list(range)",
+ "mask",
+ ],
+ )
+ def full_indexer(self, request):
+ """
+ Fixture for an indexer to pass to obj.loc to get/set the full length of the
+ object.
+
+ In some cases, assumes that obj.index is the default RangeIndex.
+ """
+ return request.param
+
def test_setitem_scalar_series(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
@@ -305,30 +334,20 @@ def test_setitem_preserves_views(self, data):
assert view1[0] == data[1]
assert view2[0] == data[1]
- def test_setitem_dataframe_column_with_index(self, data):
+ def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
# https://github.com/pandas-dev/pandas/issues/32395
df = expected = pd.DataFrame({"data": pd.Series(data)})
result = pd.DataFrame(index=df.index)
- result.loc[df.index, "data"] = df["data"]
- self.assert_frame_equal(result, expected)
- def test_setitem_dataframe_column_without_index(self, data):
- # https://github.com/pandas-dev/pandas/issues/32395
- df = expected = pd.DataFrame({"data": pd.Series(data)})
- result = pd.DataFrame(index=df.index)
- result.loc[:, "data"] = df["data"]
+ key = full_indexer(df)
+ result.loc[key, "data"] = df["data"]
self.assert_frame_equal(result, expected)
- def test_setitem_series_with_index(self, data):
+ def test_setitem_series(self, data, full_indexer):
# https://github.com/pandas-dev/pandas/issues/32395
ser = expected = pd.Series(data, name="data")
result = pd.Series(index=ser.index, dtype=object, name="data")
- result.loc[ser.index] = ser
- self.assert_series_equal(result, expected)
- def test_setitem_series_without_index(self, data):
- # https://github.com/pandas-dev/pandas/issues/32395
- ser = expected = pd.Series(data, name="data")
- result = pd.Series(index=ser.index, dtype=object, name="data")
- result.loc[:] = ser
+ key = full_indexer(ser)
+ result.loc[key] = ser
self.assert_series_equal(result, expected)
| Preliminary to fixing some behavior that these hit | https://api.github.com/repos/pandas-dev/pandas/pulls/39034 | 2021-01-08T04:44:42Z | 2021-01-08T14:12:27Z | 2021-01-08T14:12:27Z | 2021-01-08T15:51:42Z |
Backport PR #39023 on branch 1.2.x (Fix regression in setitem when expanding DataFrame with specific column name format) | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index baeca87b8c4f8..4b7a4180ee9f9 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -20,6 +20,7 @@ Fixed regressions
- Fixed regression in repr of float-like strings of an ``object`` dtype having trailing 0's truncated after the decimal (:issue:`38708`)
- Fixed regression in :meth:`DataFrame.groupby()` with :class:`Categorical` grouping column not showing unused categories for ``grouped.indices`` (:issue:`38642`)
- Fixed regression in :meth:`DataFrame.any` and :meth:`DataFrame.all` not returning a result for tz-aware ``datetime64`` columns (:issue:`38723`)
+- Fixed regression in :meth:`DataFrame.__setitem__` raising ``ValueError`` when expanding :class:`DataFrame` and new column is from type ``"0 - name"`` (:issue:`39010`)
- Fixed regression in :meth:`.GroupBy.sem` where the presence of non-numeric columns would cause an error instead of being dropped (:issue:`38774`)
- Fixed regression in :func:`read_excel` with non-rawbyte file handles (:issue:`38788`)
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index d8b0ad739b056..73cf20979a8ad 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1382,7 +1382,7 @@ def is_bool_dtype(arr_or_dtype) -> bool:
return False
try:
dtype = get_dtype(arr_or_dtype)
- except TypeError:
+ except (TypeError, ValueError):
return False
if isinstance(arr_or_dtype, CategoricalDtype):
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 19d80b714a674..128f505402eff 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -557,6 +557,11 @@ def test_is_bool_dtype():
assert com.is_bool_dtype("boolean")
+def test_is_bool_dtype_numpy_error():
+ # GH39010
+ assert not com.is_bool_dtype("0 - Name")
+
+
@pytest.mark.filterwarnings("ignore:'is_extension_type' is deprecated:FutureWarning")
@pytest.mark.parametrize(
"check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)]
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 19d2f8301037a..cedef4784e4a1 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -356,6 +356,13 @@ def test_setitem_listlike_views(self):
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
+ def test_setitem_string_column_numpy_dtype_raising(self):
+ # GH#39010
+ df = DataFrame([[1, 2], [3, 4]])
+ df["0 - Name"] = [5, 6]
+ expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
+ tm.assert_frame_equal(df, expected)
+
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
| Backport PR #39023: Fix regression in setitem when expanding DataFrame with specific column name format | https://api.github.com/repos/pandas-dev/pandas/pulls/39032 | 2021-01-08T00:20:10Z | 2021-01-08T01:14:21Z | 2021-01-08T01:14:21Z | 2021-01-08T01:14:21Z |
CLN: add typing to dtype arg in core/internals, core/reshape and core (GH38808) | diff --git a/pandas/core/base.py b/pandas/core/base.py
index afc22a8446dce..b603ba31f51dd 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -19,7 +19,7 @@
import numpy as np
import pandas._libs.lib as lib
-from pandas._typing import DtypeObj, IndexLabel
+from pandas._typing import Dtype, DtypeObj, IndexLabel
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
@@ -500,7 +500,13 @@ def array(self) -> ExtensionArray:
"""
raise AbstractMethodError(self)
- def to_numpy(self, dtype=None, copy=False, na_value=lib.no_default, **kwargs):
+ def to_numpy(
+ self,
+ dtype: Optional[Dtype] = None,
+ copy: bool = False,
+ na_value=lib.no_default,
+ **kwargs,
+ ):
"""
A NumPy ndarray representing the values in this Series or Index.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fe86bf3f582ca..2f4340c17c5a7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -37,6 +37,8 @@
from pandas._typing import (
Axis,
CompressionOptions,
+ Dtype,
+ DtypeArg,
FilePathOrBuffer,
FrameOrSeries,
IndexKeyFunc,
@@ -44,6 +46,7 @@
JSONSerializable,
Label,
Level,
+ NpDtype,
Renamer,
StorageOptions,
TimedeltaConvertibleTypes,
@@ -210,7 +213,9 @@ def __init__(
object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True))
@classmethod
- def _init_mgr(cls, mgr, axes, dtype=None, copy: bool = False) -> BlockManager:
+ def _init_mgr(
+ cls, mgr, axes, dtype: Optional[Dtype] = None, copy: bool = False
+ ) -> BlockManager:
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
@@ -1901,7 +1906,7 @@ def empty(self) -> bool_t:
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
def __array_wrap__(
@@ -2642,7 +2647,7 @@ def to_sql(
index: bool_t = True,
index_label=None,
chunksize=None,
- dtype=None,
+ dtype: Optional[DtypeArg] = None,
method=None,
) -> None:
"""
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 94c7d325d0bc8..06ed64401a38f 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -15,7 +15,7 @@
)
from pandas._libs.internals import BlockPlacement
from pandas._libs.tslibs import conversion
-from pandas._typing import ArrayLike, DtypeObj, Scalar, Shape
+from pandas._typing import ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
@@ -241,7 +241,7 @@ def array_values(self) -> ExtensionArray:
"""
return PandasArray(self.values)
- def get_values(self, dtype=None):
+ def get_values(self, dtype: Optional[Dtype] = None):
"""
return an internal format, currently just the ndarray
this is often overridden to handle to_dense like operations
@@ -1669,7 +1669,7 @@ def setitem(self, indexer, value):
self.values[indexer] = value
return self
- def get_values(self, dtype=None):
+ def get_values(self, dtype: Optional[Dtype] = None):
# ExtensionArrays must be iterable, so this works.
# TODO(EA2D): reshape not needed with 2D EAs
return np.asarray(self.values).reshape(self.shape)
@@ -1990,7 +1990,7 @@ class DatetimeLikeBlockMixin(Block):
_can_hold_na = True
- def get_values(self, dtype=None):
+ def get_values(self, dtype: Optional[Dtype] = None):
"""
return object dtype as boxed values, such as Timestamps/Timedelta
"""
@@ -2168,7 +2168,7 @@ def is_view(self) -> bool:
# check the ndarray values of the DatetimeIndex values
return self.values._data.base is not None
- def get_values(self, dtype=None):
+ def get_values(self, dtype: Optional[Dtype] = None):
"""
Returns an ndarray of values.
@@ -2449,7 +2449,7 @@ def replace(
# Constructor Helpers
-def get_block_type(values, dtype=None):
+def get_block_type(values, dtype: Optional[Dtype] = None):
"""
Find the appropriate Block subclass to use for the given values and dtype.
@@ -2464,7 +2464,7 @@ def get_block_type(values, dtype=None):
"""
# We use vtype and kind checks because they are much more performant
# than is_foo_dtype
- dtype = dtype or values.dtype
+ dtype = cast(np.dtype, pandas_dtype(dtype) if dtype else values.dtype)
vtype = dtype.type
kind = dtype.kind
@@ -2500,7 +2500,7 @@ def get_block_type(values, dtype=None):
return cls
-def make_block(values, placement, klass=None, ndim=None, dtype=None):
+def make_block(values, placement, klass=None, ndim=None, dtype: Optional[Dtype] = None):
# Ensure that we don't allow PandasArray / PandasDtype in internals.
# For now, blocks should be backed by ndarrays when possible.
if isinstance(values, ABCPandasArray):
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index d44a3df45587a..d27efd98ab079 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -17,7 +17,7 @@
import numpy as np
from pandas._libs import internals as libinternals, lib
-from pandas._typing import ArrayLike, DtypeObj, Label, Shape
+from pandas._typing import ArrayLike, Dtype, DtypeObj, Label, Shape
from pandas.errors import PerformanceWarning
from pandas.util._validators import validate_bool_kwarg
@@ -816,7 +816,7 @@ def copy_func(ax):
def as_array(
self,
transpose: bool = False,
- dtype=None,
+ dtype: Optional[Dtype] = None,
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
@@ -872,7 +872,9 @@ def as_array(
return arr.transpose() if transpose else arr
- def _interleave(self, dtype=None, na_value=lib.no_default) -> np.ndarray:
+ def _interleave(
+ self, dtype: Optional[Dtype] = None, na_value=lib.no_default
+ ) -> np.ndarray:
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
@@ -1842,7 +1844,7 @@ def _simple_blockify(tuples, dtype) -> List[Block]:
return [block]
-def _multi_blockify(tuples, dtype=None):
+def _multi_blockify(tuples, dtype: Optional[Dtype] = None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index d855886fb725f..fc8d2aee1e6cd 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -6,6 +6,7 @@
import pandas._libs.algos as libalgos
import pandas._libs.reshape as libreshape
from pandas._libs.sparse import IntIndex
+from pandas._typing import Dtype
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import maybe_promote
@@ -732,11 +733,11 @@ def get_dummies(
data,
prefix=None,
prefix_sep="_",
- dummy_na=False,
+ dummy_na: bool = False,
columns=None,
- sparse=False,
- drop_first=False,
- dtype=None,
+ sparse: bool = False,
+ drop_first: bool = False,
+ dtype: Optional[Dtype] = None,
) -> "DataFrame":
"""
Convert categorical variable into dummy/indicator variables.
@@ -921,7 +922,7 @@ def _get_dummies_1d(
dummy_na=False,
sparse=False,
drop_first=False,
- dtype=None,
+ dtype: Optional[Dtype] = None,
):
from pandas.core.reshape.concat import concat
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f3f80677d0fe4..668cad4f64ac3 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -28,10 +28,12 @@
AggFuncType,
ArrayLike,
Axis,
+ Dtype,
DtypeObj,
FrameOrSeriesUnion,
IndexKeyFunc,
Label,
+ NpDtype,
StorageOptions,
ValueKeyFunc,
)
@@ -214,7 +216,13 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
# Constructors
def __init__(
- self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False
+ self,
+ data=None,
+ index=None,
+ dtype: Optional[Dtype] = None,
+ name=None,
+ copy: bool = False,
+ fastpath: bool = False,
):
if (
@@ -337,7 +345,7 @@ def __init__(
self.name = name
self._set_axis(0, index, fastpath=True)
- def _init_dict(self, data, index=None, dtype=None):
+ def _init_dict(self, data, index=None, dtype: Optional[Dtype] = None):
"""
Derive the "_mgr" and "index" attributes of a new Series from a
dictionary input.
@@ -612,7 +620,7 @@ def __len__(self) -> int:
"""
return len(self._mgr)
- def view(self, dtype=None) -> "Series":
+ def view(self, dtype: Optional[Dtype] = None) -> "Series":
"""
Create a new view of the Series.
@@ -686,7 +694,7 @@ def view(self, dtype=None) -> "Series":
# NDArray Compat
_HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
"""
Return the values as a NumPy array.
| incremental PR for issue #38808
- [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39030 | 2021-01-07T23:30:44Z | 2021-01-08T23:09:18Z | 2021-01-08T23:09:18Z | 2021-01-08T23:09:19Z |
BUG: read_csv does not close file during an error in _make_reader | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 849b599141c2b..1c8db4dd32393 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -40,7 +40,7 @@ Bug fixes
~~~~~~~~~
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
--
+- Bug in :func:`read_csv` not closing an opened file handle when a ``csv.Error`` or ``UnicodeDecodeError`` occurred while initializing (:issue:`39024`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index ca817be5d2ff6..e58e59a722b7a 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2297,7 +2297,11 @@ def __init__(self, f: Union[FilePathOrBuffer, List], **kwds):
self._open_handles(f, kwds)
assert self.handles is not None
assert hasattr(self.handles.handle, "readline")
- self._make_reader(self.handles.handle)
+ try:
+ self._make_reader(self.handles.handle)
+ except (csv.Error, UnicodeDecodeError):
+ self.close()
+ raise
# Get columns in two steps: infer from data, then
# infer column indices from self.usecols if it is specified.
diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py
index a2787ddad3683..57defb400b842 100644
--- a/pandas/tests/io/parser/common/test_read_errors.py
+++ b/pandas/tests/io/parser/common/test_read_errors.py
@@ -3,13 +3,17 @@
specific classification into the other test modules.
"""
import codecs
+import csv
from io import StringIO
import os
+from pathlib import Path
+import warnings
import numpy as np
import pytest
from pandas.errors import EmptyDataError, ParserError
+import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
@@ -208,3 +212,22 @@ def test_null_byte_char(all_parsers):
msg = "NULL byte detected"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), names=names)
+
+
+@td.check_file_leaks
+def test_open_file(all_parsers):
+ # GH 39024
+ parser = all_parsers
+ if parser.engine == "c":
+ pytest.skip()
+
+ with tm.ensure_clean() as path:
+ file = Path(path)
+ file.write_bytes(b"\xe4\na\n1")
+
+ # should not trigger a ResourceWarning
+ warnings.simplefilter("always", category=ResourceWarning)
+ with warnings.catch_warnings(record=True) as record:
+ with pytest.raises(csv.Error, match="Could not determine delimiter"):
+ parser.read_csv(file, sep=None)
+ assert len(record) == 0, record[0].message
| - [x] closes #39024
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
I don't understand why `td.check_file_leaks` doesn't complain about the left opened file (at least for me locally). I commented the close call on purpose to see whether the test fails at least for the CI.
@jbrockmendel I think you have debugged ResourceWarnings in the past. Do you know why the test doesn't fail? Even putting a `open("foo", mode="w")` in the test doesn't make it fail.
[The test case is different from #39024 but the symptoms are the same. Unless the except clause is narrowed down to specific exceptions, this PR will fix #39024] | https://api.github.com/repos/pandas-dev/pandas/pulls/39029 | 2021-01-07T22:14:19Z | 2021-01-13T18:03:45Z | 2021-01-13T18:03:45Z | 2021-01-14T11:05:23Z |
BUG: Resample.aggregate raising TypeError instead of SpecificationError with missing keys dtypes | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 886469837d184..61e747e1d5a53 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -306,6 +306,7 @@ Groupby/resample/rolling
- Bug in :meth:`.GroupBy.indices` would contain non-existent indices when null values were present in the groupby keys (:issue:`9304`)
- Fixed bug in :meth:`DataFrameGroupBy.sum` and :meth:`SeriesGroupBy.sum` causing loss of precision through using Kahan summation (:issue:`38778`)
- Fixed bug in :meth:`DataFrameGroupBy.cumsum`, :meth:`SeriesGroupBy.cumsum`, :meth:`DataFrameGroupBy.mean` and :meth:`SeriesGroupBy.mean` causing loss of precision through using Kahan summation (:issue:`38934`)
+- Bug in :meth:`.Resampler.aggregate` and :meth:`DataFrame.transform` raising ``TypeError`` instead of ``SpecificationError`` when missing keys having mixed dtypes (:issue:`39025`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
index c64f0bd71cf84..cd169a250b49b 100644
--- a/pandas/core/aggregation.py
+++ b/pandas/core/aggregation.py
@@ -35,6 +35,7 @@
from pandas.core.dtypes.common import is_dict_like, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCNDFrame, ABCSeries
+from pandas.core.algorithms import safe_sort
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.indexes.api import Index
@@ -482,9 +483,10 @@ def transform_dict_like(
if obj.ndim != 1:
# Check for missing columns on a frame
- cols = sorted(set(func.keys()) - set(obj.columns))
+ cols = set(func.keys()) - set(obj.columns)
if len(cols) > 0:
- raise SpecificationError(f"Column(s) {cols} do not exist")
+ cols_sorted = list(safe_sort(list(cols)))
+ raise SpecificationError(f"Column(s) {cols_sorted} do not exist")
# Can't use func.values(); wouldn't work for a Series
if any(is_dict_like(v) for _, v in func.items()):
@@ -738,7 +740,11 @@ def agg_dict_like(
if isinstance(selected_obj, ABCDataFrame) and len(
selected_obj.columns.intersection(keys)
) != len(keys):
- cols = sorted(set(keys) - set(selected_obj.columns.intersection(keys)))
+ cols = list(
+ safe_sort(
+ list(set(keys) - set(selected_obj.columns.intersection(keys))),
+ )
+ )
raise SpecificationError(f"Column(s) {cols} do not exist")
from pandas.core.reshape.concat import concat
diff --git a/pandas/tests/frame/apply/test_frame_transform.py b/pandas/tests/frame/apply/test_frame_transform.py
index db5b2f3d86dfe..bff0306a50ee6 100644
--- a/pandas/tests/frame/apply/test_frame_transform.py
+++ b/pandas/tests/frame/apply/test_frame_transform.py
@@ -253,8 +253,24 @@ def f(x, a, b, c):
def test_transform_missing_columns(axis):
- # GH 35964
+ # GH#35964
df = DataFrame({"A": [1, 2], "B": [3, 4]})
match = re.escape("Column(s) ['C'] do not exist")
with pytest.raises(SpecificationError, match=match):
df.transform({"C": "cumsum"})
+
+
+def test_transform_none_to_type():
+ # GH#34377
+ df = DataFrame({"a": [None]})
+ msg = "Transform function failed"
+ with pytest.raises(ValueError, match=msg):
+ df.transform({"a": int})
+
+
+def test_transform_mixed_column_name_dtypes():
+ # GH39025
+ df = DataFrame({"a": ["1"]})
+ msg = r"Column\(s\) \[1, 'b'\] do not exist"
+ with pytest.raises(SpecificationError, match=msg):
+ df.transform({"a": int, 1: str, "b": int})
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 2cd9bb70385bf..d217957cbe08a 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -297,6 +297,21 @@ def test_agg_consistency():
r.agg({"r1": "mean", "r2": "sum"})
+def test_agg_consistency_int_str_column_mix():
+ # GH#39025
+ df = DataFrame(
+ np.random.randn(1000, 2),
+ index=pd.date_range("1/1/2012", freq="S", periods=1000),
+ columns=[1, "a"],
+ )
+
+ r = df.resample("3T")
+
+ msg = r"Column\(s\) \[2, 'b'\] do not exist"
+ with pytest.raises(pd.core.base.SpecificationError, match=msg):
+ r.agg({2: "mean", "b": "sum"})
+
+
# TODO: once GH 14008 is fixed, move these tests into
# `Base` test class
diff --git a/pandas/tests/series/apply/test_series_transform.py b/pandas/tests/series/apply/test_series_transform.py
index 992aaa540a65f..73cc789c6eb3a 100644
--- a/pandas/tests/series/apply/test_series_transform.py
+++ b/pandas/tests/series/apply/test_series_transform.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Series, concat
+from pandas import Series, concat
import pandas._testing as tm
from pandas.core.base import SpecificationError
from pandas.core.groupby.base import transformation_kernels
@@ -65,14 +65,6 @@ def test_transform_wont_agg(string_series):
string_series.transform(["sqrt", "max"])
-def test_transform_none_to_type():
- # GH34377
- df = DataFrame({"a": [None]})
- msg = "Transform function failed"
- with pytest.raises(ValueError, match=msg):
- df.transform({"a": int})
-
-
def test_transform_axis_1_raises():
# GH 35964
msg = "No axis named 1 for object type Series"
| - [x] closes #39025
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39028 | 2021-01-07T21:58:05Z | 2021-01-08T23:17:59Z | 2021-01-08T23:17:59Z | 2021-01-08T23:18:38Z |
REF: make FreqGroup an Enum | diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx
index 70acb42712201..415bdf74db80a 100644
--- a/pandas/_libs/tslibs/dtypes.pyx
+++ b/pandas/_libs/tslibs/dtypes.pyx
@@ -23,7 +23,7 @@ cdef class PeriodDtypeBase:
return self._dtype_code == other._dtype_code
@property
- def freq_group(self) -> int:
+ def freq_group_code(self) -> int:
# See also: libperiod.get_freq_group
return (self._dtype_code // 1000) * 1000
@@ -37,7 +37,6 @@ cdef class PeriodDtypeBase:
from .offsets import to_offset
freqstr = _reverse_period_code_map.get(self._dtype_code)
- # equiv: freqstr = libfrequencies.get_freq_str(self._dtype_code)
return to_offset(freqstr)
@@ -134,7 +133,7 @@ cdef dict attrname_to_abbrevs = _attrname_to_abbrevs
cdef dict _abbrev_to_attrnames = {v: k for k, v in attrname_to_abbrevs.items()}
-class FreqGroup:
+class FreqGroup(Enum):
# Mirrors c_FreqGroup in the .pxd file
FR_ANN = 1000
FR_QTR = 2000
@@ -151,9 +150,10 @@ class FreqGroup:
FR_UND = -10000 # undefined
@staticmethod
- def get_freq_group(code: int) -> int:
- # See also: PeriodDtypeBase.freq_group
- return (code // 1000) * 1000
+ def get_freq_group(code: int) -> "FreqGroup":
+ # See also: PeriodDtypeBase.freq_group_code
+ code = (code // 1000) * 1000
+ return FreqGroup(code)
class Resolution(Enum):
@@ -178,8 +178,7 @@ class Resolution(Enum):
return self.value >= other.value
@property
- def freq_group(self):
- # TODO: annotate as returning FreqGroup once that is an enum
+ def freq_group(self) -> FreqGroup:
if self == Resolution.RESO_NS:
return FreqGroup.FR_NS
elif self == Resolution.RESO_US:
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index edcc1f29a5ec2..96a075dd21bf9 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -1068,11 +1068,11 @@ def _range_from_fields(
if quarter is not None:
if freq is None:
freq = to_offset("Q")
- base = FreqGroup.FR_QTR
+ base = FreqGroup.FR_QTR.value
else:
freq = to_offset(freq)
base = libperiod.freq_to_dtype_code(freq)
- if base != FreqGroup.FR_QTR:
+ if base != FreqGroup.FR_QTR.value:
raise AssertionError("base must equal FR_QTR")
freqstr = freq.freqstr
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 932868451058f..8609c61065327 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -571,7 +571,7 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
raise KeyError
grp = reso.freq_group
- per = Period(parsed, freq=grp)
+ per = Period(parsed, freq=grp.value)
start, end = per.start_time, per.end_time
# GH 24076
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 7762198246603..8fe92ed757401 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -506,8 +506,8 @@ def get_loc(self, key, method=None, tolerance=None):
raise KeyError(f"Cannot interpret '{key}' as period") from err
reso = Resolution.from_attrname(reso)
- grp = reso.freq_group
- freqn = self.dtype.freq_group
+ grp = reso.freq_group.value
+ freqn = self.dtype.freq_group_code
# _get_string_slice will handle cases where grp < freqn
assert grp >= freqn
@@ -580,15 +580,15 @@ def _maybe_cast_slice_bound(self, label, side: str, kind: str):
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
grp = reso.freq_group
- iv = Period(parsed, freq=grp)
+ iv = Period(parsed, freq=grp.value)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
grp = reso.freq_group
- freqn = self.dtype.freq_group
+ freqn = self.dtype.freq_group_code
- if not grp < freqn:
+ if not grp.value < freqn:
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 978010efd7ee5..3d2d69162c70a 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -510,28 +510,28 @@ def _daily_finder(vmin, vmax, freq: BaseOffset):
periodsperday = -1
- if dtype_code >= FreqGroup.FR_HR:
- if dtype_code == FreqGroup.FR_NS:
+ if dtype_code >= FreqGroup.FR_HR.value:
+ if dtype_code == FreqGroup.FR_NS.value:
periodsperday = 24 * 60 * 60 * 1000000000
- elif dtype_code == FreqGroup.FR_US:
+ elif dtype_code == FreqGroup.FR_US.value:
periodsperday = 24 * 60 * 60 * 1000000
- elif dtype_code == FreqGroup.FR_MS:
+ elif dtype_code == FreqGroup.FR_MS.value:
periodsperday = 24 * 60 * 60 * 1000
- elif dtype_code == FreqGroup.FR_SEC:
+ elif dtype_code == FreqGroup.FR_SEC.value:
periodsperday = 24 * 60 * 60
- elif dtype_code == FreqGroup.FR_MIN:
+ elif dtype_code == FreqGroup.FR_MIN.value:
periodsperday = 24 * 60
- elif dtype_code == FreqGroup.FR_HR:
+ elif dtype_code == FreqGroup.FR_HR.value:
periodsperday = 24
else: # pragma: no cover
raise ValueError(f"unexpected frequency: {dtype_code}")
periodsperyear = 365 * periodsperday
periodspermonth = 28 * periodsperday
- elif dtype_code == FreqGroup.FR_BUS:
+ elif dtype_code == FreqGroup.FR_BUS.value:
periodsperyear = 261
periodspermonth = 19
- elif dtype_code == FreqGroup.FR_DAY:
+ elif dtype_code == FreqGroup.FR_DAY.value:
periodsperyear = 365
periodspermonth = 28
elif FreqGroup.get_freq_group(dtype_code) == FreqGroup.FR_WK:
@@ -661,7 +661,7 @@ def _second_finder(label_interval):
elif span <= periodsperyear // 4:
month_start = period_break(dates_, "month")
info_maj[month_start] = True
- if dtype_code < FreqGroup.FR_HR:
+ if dtype_code < FreqGroup.FR_HR.value:
info["min"] = True
else:
day_start = period_break(dates_, "day")
@@ -872,14 +872,15 @@ def _annual_finder(vmin, vmax, freq):
def get_finder(freq: BaseOffset):
dtype_code = freq._period_dtype_code
fgroup = (dtype_code // 1000) * 1000
+ fgroup = FreqGroup(fgroup)
if fgroup == FreqGroup.FR_ANN:
return _annual_finder
elif fgroup == FreqGroup.FR_QTR:
return _quarterly_finder
- elif dtype_code == FreqGroup.FR_MTH:
+ elif dtype_code == FreqGroup.FR_MTH.value:
return _monthly_finder
- elif (dtype_code >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK:
+ elif (dtype_code >= FreqGroup.FR_BUS.value) or fgroup == FreqGroup.FR_WK:
return _daily_finder
else: # pragma: no cover
raise NotImplementedError(f"Unsupported frequency: {dtype_code}")
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index ae4fff7b495d0..e04b03e5b0420 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -215,7 +215,7 @@ def use_dynamic_x(ax: "Axes", data: FrameOrSeriesUnion) -> bool:
if isinstance(data.index, ABCDatetimeIndex):
base = to_offset(freq)._period_dtype_code
x = data.index
- if base <= FreqGroup.FR_DAY:
+ if base <= FreqGroup.FR_DAY.value:
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp().tz_localize(x.tz) == x[0]
return True
| https://api.github.com/repos/pandas-dev/pandas/pulls/39027 | 2021-01-07T21:44:54Z | 2021-01-08T14:11:22Z | 2021-01-08T14:11:22Z | 2021-01-08T15:53:46Z | |
TYP/CLN: Use futures annotations in apply | diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index edb6b97a73e7f..ac98f3736be6d 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import abc
import inspect
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple, Type, cast
@@ -33,7 +35,7 @@
def frame_apply(
- obj: "DataFrame",
+ obj: DataFrame,
how: str,
func: AggFuncType,
axis: Axis = 0,
@@ -69,22 +71,22 @@ class FrameApply(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
- def result_index(self) -> "Index":
+ def result_index(self) -> Index:
pass
@property
@abc.abstractmethod
- def result_columns(self) -> "Index":
+ def result_columns(self) -> Index:
pass
@property
@abc.abstractmethod
- def series_generator(self) -> Iterator["Series"]:
+ def series_generator(self) -> Iterator[Series]:
pass
@abc.abstractmethod
def wrap_results_for_axis(
- self, results: ResType, res_index: "Index"
+ self, results: ResType, res_index: Index
) -> FrameOrSeriesUnion:
pass
@@ -92,7 +94,7 @@ def wrap_results_for_axis(
def __init__(
self,
- obj: "DataFrame",
+ obj: DataFrame,
how: str,
func,
raw: bool,
@@ -131,15 +133,15 @@ def f(x):
self.f: AggFuncType = f
@property
- def res_columns(self) -> "Index":
+ def res_columns(self) -> Index:
return self.result_columns
@property
- def columns(self) -> "Index":
+ def columns(self) -> Index:
return self.obj.columns
@property
- def index(self) -> "Index":
+ def index(self) -> Index:
return self.obj.index
@cache_readonly
@@ -147,11 +149,11 @@ def values(self):
return self.obj.values
@cache_readonly
- def dtypes(self) -> "Series":
+ def dtypes(self) -> Series:
return self.obj.dtypes
@property
- def agg_axis(self) -> "Index":
+ def agg_axis(self) -> Index:
return self.obj._get_agg_axis(self.axis)
def get_result(self):
@@ -311,7 +313,7 @@ def wrapper(*args, **kwargs):
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
- def apply_broadcast(self, target: "DataFrame") -> "DataFrame":
+ def apply_broadcast(self, target: DataFrame) -> DataFrame:
assert callable(self.f)
result_values = np.empty_like(target.values)
@@ -346,7 +348,7 @@ def apply_standard(self):
# wrap results
return self.wrap_results(results, res_index)
- def apply_series_generator(self) -> Tuple[ResType, "Index"]:
+ def apply_series_generator(self) -> Tuple[ResType, Index]:
assert callable(self.f)
series_gen = self.series_generator
@@ -365,7 +367,7 @@ def apply_series_generator(self) -> Tuple[ResType, "Index"]:
return results, res_index
- def wrap_results(self, results: ResType, res_index: "Index") -> FrameOrSeriesUnion:
+ def wrap_results(self, results: ResType, res_index: Index) -> FrameOrSeriesUnion:
from pandas import Series
# see if we can infer the results
@@ -392,7 +394,7 @@ def wrap_results(self, results: ResType, res_index: "Index") -> FrameOrSeriesUni
class FrameRowApply(FrameApply):
axis = 0
- def apply_broadcast(self, target: "DataFrame") -> "DataFrame":
+ def apply_broadcast(self, target: DataFrame) -> DataFrame:
return super().apply_broadcast(target)
@property
@@ -400,15 +402,15 @@ def series_generator(self):
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@property
- def result_index(self) -> "Index":
+ def result_index(self) -> Index:
return self.columns
@property
- def result_columns(self) -> "Index":
+ def result_columns(self) -> Index:
return self.index
def wrap_results_for_axis(
- self, results: ResType, res_index: "Index"
+ self, results: ResType, res_index: Index
) -> FrameOrSeriesUnion:
""" return the results for the rows """
@@ -452,7 +454,7 @@ def wrap_results_for_axis(
class FrameColumnApply(FrameApply):
axis = 1
- def apply_broadcast(self, target: "DataFrame") -> "DataFrame":
+ def apply_broadcast(self, target: DataFrame) -> DataFrame:
result = super().apply_broadcast(target.T)
return result.T
@@ -483,15 +485,15 @@ def series_generator(self):
yield ser
@property
- def result_index(self) -> "Index":
+ def result_index(self) -> Index:
return self.index
@property
- def result_columns(self) -> "Index":
+ def result_columns(self) -> Index:
return self.columns
def wrap_results_for_axis(
- self, results: ResType, res_index: "Index"
+ self, results: ResType, res_index: Index
) -> FrameOrSeriesUnion:
""" return the results for the columns """
result: FrameOrSeriesUnion
@@ -511,7 +513,7 @@ def wrap_results_for_axis(
return result
- def infer_to_same_shape(self, results: ResType, res_index: "Index") -> "DataFrame":
+ def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:
""" infer the results to the same shape as the input object """
result = self.obj._constructor(data=results)
result = result.T
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39026 | 2021-01-07T21:39:52Z | 2021-01-08T13:09:00Z | 2021-01-08T13:09:00Z | 2021-01-09T01:57:48Z |
Fix regression in setitem when expanding DataFrame with specific column name format | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 5695c817b5a3a..39e5b67fbbc37 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -20,6 +20,7 @@ Fixed regressions
- Fixed regression in repr of float-like strings of an ``object`` dtype having trailing 0's truncated after the decimal (:issue:`38708`)
- Fixed regression in :meth:`DataFrame.groupby()` with :class:`Categorical` grouping column not showing unused categories for ``grouped.indices`` (:issue:`38642`)
- Fixed regression in :meth:`DataFrame.any` and :meth:`DataFrame.all` not returning a result for tz-aware ``datetime64`` columns (:issue:`38723`)
+- Fixed regression in :meth:`DataFrame.__setitem__` raising ``ValueError`` when expanding :class:`DataFrame` and new column is from type ``"0 - name"`` (:issue:`39010`)
- Fixed regression in :meth:`.GroupBy.sem` where the presence of non-numeric columns would cause an error instead of being dropped (:issue:`38774`)
- Fixed regression in :func:`read_excel` with non-rawbyte file handles (:issue:`38788`)
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 1993c41db03f8..9861a466b2d2f 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1395,7 +1395,7 @@ def is_bool_dtype(arr_or_dtype) -> bool:
return False
try:
dtype = get_dtype(arr_or_dtype)
- except TypeError:
+ except (TypeError, ValueError):
return False
if isinstance(arr_or_dtype, CategoricalDtype):
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 8df61394e8e7e..a5522e503c7f4 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -557,6 +557,11 @@ def test_is_bool_dtype():
assert com.is_bool_dtype("boolean")
+def test_is_bool_dtype_numpy_error():
+ # GH39010
+ assert not com.is_bool_dtype("0 - Name")
+
+
@pytest.mark.filterwarnings("ignore:'is_extension_type' is deprecated:FutureWarning")
@pytest.mark.parametrize(
"check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)]
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 28b1f02ff020c..a838b09b39be6 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -366,6 +366,13 @@ def test_setitem_listlike_views(self):
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
+ def test_setitem_string_column_numpy_dtype_raising(self):
+ # GH#39010
+ df = DataFrame([[1, 2], [3, 4]])
+ df["0 - Name"] = [5, 6]
+ expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
+ tm.assert_frame_equal(df, expected)
+
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
| - [x] closes #39010
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
We obviously want cast to object if value is string. | https://api.github.com/repos/pandas-dev/pandas/pulls/39023 | 2021-01-07T19:30:03Z | 2021-01-08T00:19:44Z | 2021-01-08T00:19:44Z | 2021-01-08T08:41:05Z |
Deprecate DataFrame indexer for iloc setitem and getitem | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 41db72612a66b..c5ff5265f6798 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -128,6 +128,7 @@ Other enhancements
- :meth:`.Rolling.sum`, :meth:`.Expanding.sum`, :meth:`.Rolling.mean`, :meth:`.Expanding.mean`, :meth:`.Rolling.median`, :meth:`.Expanding.median`, :meth:`.Rolling.max`, :meth:`.Expanding.max`, :meth:`.Rolling.min`, and :meth:`.Expanding.min` now support ``Numba`` execution with the ``engine`` keyword (:issue:`38895`)
- :meth:`DataFrame.apply` can now accept NumPy unary operators as strings, e.g. ``df.apply("sqrt")``, which was already the case for :meth:`Series.apply` (:issue:`39116`)
- :meth:`DataFrame.apply` can now accept non-callable DataFrame properties as strings, e.g. ``df.apply("size")``, which was already the case for :meth:`Series.apply` (:issue:`39116`)
+- Disallow :class:`DataFrame` indexer for ``iloc`` for :meth:`Series.__getitem__` and :meth:`DataFrame.__getitem__`, (:issue:`39004`)
- :meth:`Series.apply` can now accept list-like or dictionary-like arguments that aren't lists or dictionaries, e.g. ``ser.apply(np.array(["sum", "mean"]))``, which was already the case for :meth:`DataFrame.apply` (:issue:`39140`)
- :meth:`DataFrame.plot.scatter` can now accept a categorical column as the argument to ``c`` (:issue:`12380`, :issue:`31357`)
- :meth:`.Styler.set_tooltips` allows on hover tooltips to be added to styled HTML dataframes (:issue:`35643`, :issue:`21266`, :issue:`39317`)
@@ -318,6 +319,7 @@ Deprecations
- Deprecated comparison of :class:`Timestamp` object with ``datetime.date`` objects. Instead of e.g. ``ts <= mydate`` use ``ts <= pd.Timestamp(mydate)`` or ``ts.date() <= mydate`` (:issue:`36131`)
- Deprecated :attr:`Rolling.win_type` returning ``"freq"`` (:issue:`38963`)
- Deprecated :attr:`Rolling.is_datetimelike` (:issue:`38963`)
+- Deprecated :class:`DataFrame` indexer for :meth:`Series.__setitem__` and :meth:`DataFrame.__setitem__` (:issue:`39004`)
- Deprecated :meth:`core.window.ewm.ExponentialMovingWindow.vol` (:issue:`39220`)
- Using ``.astype`` to convert between ``datetime64[ns]`` dtype and :class:`DatetimeTZDtype` is deprecated and will raise in a future version, use ``obj.tz_localize`` or ``obj.dt.tz_localize`` instead (:issue:`38622`)
- Deprecated casting ``datetime.date`` objects to ``datetime64`` when used as ``fill_value`` in :meth:`DataFrame.unstack`, :meth:`DataFrame.shift`, :meth:`Series.shift`, and :meth:`DataFrame.reindex`, pass ``pd.Timestamp(dateobj)`` instead (:issue:`39767`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index cfe16627d5c64..e322cb23eba95 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1421,6 +1421,15 @@ def _has_valid_setitem_indexer(self, indexer) -> bool:
if isinstance(indexer, dict):
raise IndexError("iloc cannot enlarge its target object")
+ if isinstance(indexer, ABCDataFrame):
+ warnings.warn(
+ "DataFrame indexer for .iloc is deprecated and will be removed in"
+ "a future version.\n"
+ "consider using .loc with a DataFrame indexer for automatic alignment.",
+ FutureWarning,
+ stacklevel=3,
+ )
+
if not isinstance(indexer, tuple):
indexer = _tuplify(self.ndim, indexer)
@@ -1508,6 +1517,12 @@ def _get_list_axis(self, key, axis: int):
raise IndexError("positional indexers are out-of-bounds") from err
def _getitem_axis(self, key, axis: int):
+ if isinstance(key, ABCDataFrame):
+ raise IndexError(
+ "DataFrame indexer is not allowed for .iloc\n"
+ "Consider using .loc for automatic alignment."
+ )
+
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 43ffc9e8eaedd..d0fdf81121c71 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -1090,6 +1090,20 @@ def test_iloc_getitem_setitem_fancy_exceptions(self, float_frame):
# GH#32257 we let numpy do validation, get their exception
float_frame.iloc[:, :, :] = 1
+ def test_iloc_frame_indexer(self):
+ # GH#39004
+ df = DataFrame({"a": [1, 2, 3]})
+ indexer = DataFrame({"a": [True, False, True]})
+ with tm.assert_produces_warning(FutureWarning):
+ df.iloc[indexer] = 1
+
+ msg = (
+ "DataFrame indexer is not allowed for .iloc\n"
+ "Consider using .loc for automatic alignment."
+ )
+ with pytest.raises(IndexError, match=msg):
+ df.iloc[indexer]
+
class TestILocSetItemDuplicateColumns:
def test_iloc_setitem_scalar_duplicate_columns(self):
| - [x] closes #39004
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39022 | 2021-01-07T19:05:34Z | 2021-03-02T23:22:00Z | 2021-03-02T23:21:59Z | 2021-03-04T21:59:03Z |
Backport PR #38997 on branch 1.2.x (REGR: errors='replace' when encoding/errors are not specified) | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 5695c817b5a3a..baeca87b8c4f8 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -24,6 +24,7 @@ Fixed regressions
- Fixed regression in :func:`read_excel` with non-rawbyte file handles (:issue:`38788`)
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
- Fixed regression in :meth:`Rolling.skew` and :meth:`Rolling.kurt` modifying the object inplace (:issue:`38908`)
+- Fixed regression in :meth:`read_csv` and other read functions were the encoding error policy (``errors``) did not default to ``"replace"`` when no encoding was specified (:issue:`38989`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/common.py b/pandas/io/common.py
index c189c3046b4f3..e838e10a27d21 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -547,8 +547,7 @@ def get_handle(
Returns the dataclass IOHandles
"""
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
- if encoding is None:
- encoding = "utf-8"
+ encoding_passed, encoding = encoding, encoding or "utf-8"
# read_csv does not know whether the buffer is opened in binary/text mode
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
@@ -635,6 +634,9 @@ def get_handle(
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
if ioargs.encoding and "b" not in ioargs.mode:
+ if errors is None and encoding_passed is None:
+ # ignore errors when no encoding is specified
+ errors = "replace"
# Encoding
handle = open(
handle,
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 34cb00e89ea0c..2ea944d9502b3 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -418,3 +418,11 @@ def test_is_fsspec_url():
assert not icom.is_fsspec_url("random:pandas/somethingelse.com")
assert not icom.is_fsspec_url("/local/path")
assert not icom.is_fsspec_url("relative/local/path")
+
+
+def test_default_errors():
+ # GH 38989
+ with tm.ensure_clean() as path:
+ file = Path(path)
+ file.write_bytes(b"\xe4\na\n1")
+ tm.assert_frame_equal(pd.read_csv(file, skiprows=[0]), pd.DataFrame({"a": [1]}))
| Backport PR #38997: REGR: errors='replace' when encoding/errors are not specified | https://api.github.com/repos/pandas-dev/pandas/pulls/39021 | 2021-01-07T18:49:07Z | 2021-01-07T21:23:20Z | 2021-01-07T21:23:20Z | 2021-01-07T21:23:21Z |
Backport PR #39019 on branch 1.2.x (DOC: np.bool -> np.bool_) | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 717334bfe1299..90d65327ea980 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -144,6 +144,11 @@ repos:
\#\ type:\s?ignore(?!\[)
language: pygrep
types: [python]
+ - id: np-bool
+ name: Check for use of np.bool instead of np.bool_
+ entry: np\.bool[^_8]
+ language: pygrep
+ types_or: [python, cython, rst]
- id: no-os-remove
name: Check code for instances of os.remove
entry: os\.remove
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 6ce63ff8badca..6cc8e15786795 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -625,7 +625,7 @@ class TransformBools:
def setup(self):
N = 120000
transition_points = np.sort(np.random.choice(np.arange(N), 1400))
- transitions = np.zeros(N, dtype=np.bool)
+ transitions = np.zeros(N, dtype=np.bool_)
transitions[transition_points] = True
self.g = transitions.cumsum()
self.df = DataFrame({"signal": np.random.rand(N)})
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index ffecaa222e1f9..8d38c12252df4 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -2229,7 +2229,7 @@ Convert certain columns to a specific dtype by passing a dict to :meth:`~DataFra
.. ipython:: python
dft1 = pd.DataFrame({"a": [1, 0, 1], "b": [4, 5, 6], "c": [7, 8, 9]})
- dft1 = dft1.astype({"a": np.bool, "c": np.float64})
+ dft1 = dft1.astype({"a": np.bool_, "c": np.float64})
dft1
dft1.dtypes
diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index 5a6f56388dee5..77791b4b7e491 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -1406,7 +1406,7 @@ Often it's useful to obtain the lower (or upper) triangular form of a correlatio
df = pd.DataFrame(np.random.random(size=(100, 5)))
corr_mat = df.corr()
- mask = np.tril(np.ones_like(corr_mat, dtype=np.bool), k=-1)
+ mask = np.tril(np.ones_like(corr_mat, dtype=np.bool_), k=-1)
corr_mat.where(mask)
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 197738330efe1..e67769bc774b0 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -492,7 +492,7 @@ def test_float_types(self, np_type, path):
@pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
def test_bool_types(self, np_type, path):
- # Test np.bool values read come back as float.
+ # Test np.bool8 and np.bool_ values read come back as float.
df = DataFrame([1, 0, True, False], dtype=np_type)
df.to_excel(path, "test1")
| Backport PR #39019: DOC: np.bool -> np.bool_ | https://api.github.com/repos/pandas-dev/pandas/pulls/39020 | 2021-01-07T18:48:43Z | 2021-01-07T21:23:37Z | 2021-01-07T21:23:37Z | 2021-01-07T21:23:37Z |
DOC: np.bool -> np.bool_ | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 52f923c41cbd4..9601be40fdebb 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -152,6 +152,11 @@ repos:
\#\ type:\s?ignore(?!\[)
language: pygrep
types: [python]
+ - id: np-bool
+ name: Check for use of np.bool instead of np.bool_
+ entry: np\.bool[^_8]
+ language: pygrep
+ types_or: [python, cython, rst]
- id: no-os-remove
name: Check code for instances of os.remove
entry: os\.remove
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index b4d9db95af163..806cf38ad90b6 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -627,7 +627,7 @@ class TransformBools:
def setup(self):
N = 120000
transition_points = np.sort(np.random.choice(np.arange(N), 1400))
- transitions = np.zeros(N, dtype=np.bool)
+ transitions = np.zeros(N, dtype=np.bool_)
transitions[transition_points] = True
self.g = transitions.cumsum()
self.df = DataFrame({"signal": np.random.rand(N)})
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index ffecaa222e1f9..8d38c12252df4 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -2229,7 +2229,7 @@ Convert certain columns to a specific dtype by passing a dict to :meth:`~DataFra
.. ipython:: python
dft1 = pd.DataFrame({"a": [1, 0, 1], "b": [4, 5, 6], "c": [7, 8, 9]})
- dft1 = dft1.astype({"a": np.bool, "c": np.float64})
+ dft1 = dft1.astype({"a": np.bool_, "c": np.float64})
dft1
dft1.dtypes
diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index 92905836b763c..66b564838e5e2 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -1406,7 +1406,7 @@ Often it's useful to obtain the lower (or upper) triangular form of a correlatio
df = pd.DataFrame(np.random.random(size=(100, 5)))
corr_mat = df.corr()
- mask = np.tril(np.ones_like(corr_mat, dtype=np.bool), k=-1)
+ mask = np.tril(np.ones_like(corr_mat, dtype=np.bool_), k=-1)
corr_mat.where(mask)
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index c930acd179330..b12413fbb56c6 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -492,7 +492,7 @@ def test_float_types(self, np_type, path):
@pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
def test_bool_types(self, np_type, path):
- # Test np.bool values read come back as float.
+ # Test np.bool8 and np.bool_ values read come back as float.
df = DataFrame([1, 0, True, False], dtype=np_type)
df.to_excel(path, "test1")
| xref #34848, #34835
a couple missed that are causing doc build failures with numpy-1.20.0rc2, https://github.com/pandas-dev/pandas/pull/36092/checks?check_run_id=1662818738
probably want to backport this to prevent possible future ci failures | https://api.github.com/repos/pandas-dev/pandas/pulls/39019 | 2021-01-07T15:52:25Z | 2021-01-07T18:47:08Z | 2021-01-07T18:47:08Z | 2021-01-07T18:59:32Z |
CLN: add typing to dtype arg in core/common.py (GH38808) | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 622d903b03579..a6514b5167460 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -8,13 +8,13 @@
import contextlib
from functools import partial
import inspect
-from typing import Any, Collection, Iterable, Iterator, List, Union, cast
+from typing import Any, Collection, Iterable, Iterator, List, Optional, Union, cast
import warnings
import numpy as np
from pandas._libs import lib
-from pandas._typing import AnyArrayLike, Scalar, T
+from pandas._typing import AnyArrayLike, NpDtype, Scalar, T
from pandas.compat.numpy import np_version_under1p18
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
@@ -195,7 +195,7 @@ def count_not_none(*args) -> int:
return sum(x is not None for x in args)
-def asarray_tuplesafe(values, dtype=None):
+def asarray_tuplesafe(values, dtype: Optional[NpDtype] = None) -> np.ndarray:
if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")):
values = list(values)
@@ -218,7 +218,7 @@ def asarray_tuplesafe(values, dtype=None):
return result
-def index_labels_to_array(labels, dtype=None):
+def index_labels_to_array(labels, dtype: Optional[NpDtype] = None) -> np.ndarray:
"""
Transform label or iterable of labels to array, for use in Index.
| Follow the issue - https://github.com/pandas-dev/pandas/issues/38808
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39018 | 2021-01-07T12:33:52Z | 2021-01-07T14:07:40Z | 2021-01-07T14:07:40Z | 2021-01-07T14:07:44Z |
DOC: Add whatsnew | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 9e557a0020f1e..3efb620f6ca65 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -201,7 +201,7 @@ Datetimelike
- Bug in constructing a :class:`Series` or :class:`DataFrame` with a ``datetime`` object out of bounds for ``datetime64[ns]`` dtype or a ``timedelta`` object ouf of bounds for ``timedelta64[ns]`` dtype (:issue:`38792`, :issue:`38965`)
- Bug in :meth:`DatetimeIndex.intersection`, :meth:`DatetimeIndex.symmetric_difference`, :meth:`PeriodIndex.intersection`, :meth:`PeriodIndex.symmetric_difference` always returning object-dtype when operating with :class:`CategoricalIndex` (:issue:`38741`)
- Bug in :meth:`Series.where` incorrectly casting ``datetime64`` values to ``int64`` (:issue:`37682`)
--
+- Bug in :class:`Categorical` incorrectly typecasting ``datetime`` object to ``Timestamp`` (:issue:`38878`)
Timedelta
^^^^^^^^^
| - [x] closes #38878
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/39014 | 2021-01-07T08:58:40Z | 2021-01-08T14:17:28Z | 2021-01-08T14:17:27Z | 2021-01-10T11:00:28Z |
BUG: read_csv raising ValueError for tru_values/false_values and boolean dtype | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 886469837d184..6f6b6743d8289 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -284,6 +284,7 @@ I/O
- Bug in :func:`json_normalize` resulting in the first element of a generator object not being included in the returned ``DataFrame`` (:issue:`35923`)
- Bug in :func:`read_excel` forward filling :class:`MultiIndex` names with multiple header and index columns specified (:issue:`34673`)
- :func:`pandas.read_excel` now respects :func:``pandas.set_option`` (:issue:`34252`)
+- Bug in :func:`read_csv` not switching ``true_values`` and ``false_values`` for nullable ``boolean`` dtype (:issue:`34655`)
- Bug in :func:``read_json`` when ``orient="split"`` does not maintan numeric string index (:issue:`28556`)
Period
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 4995252d7aafd..a72a2ff8eaf28 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -1084,11 +1084,18 @@ cdef class TextReader:
elif is_extension_array_dtype(dtype):
result, na_count = self._string_convert(i, start, end, na_filter,
na_hashset)
+
array_type = dtype.construct_array_type()
try:
# use _from_sequence_of_strings if the class defines it
- result = array_type._from_sequence_of_strings(result,
- dtype=dtype)
+ if is_bool_dtype(dtype):
+ true_values = [x.decode() for x in self.true_values]
+ false_values = [x.decode() for x in self.false_values]
+ result = array_type._from_sequence_of_strings(
+ result, dtype=dtype, true_values=true_values,
+ false_values=false_values)
+ else:
+ result = array_type._from_sequence_of_strings(result, dtype=dtype)
except NotImplementedError:
raise NotImplementedError(
f"Extension Array: {array_type} must implement "
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index bbbc0911b4846..2bc908186f7f4 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -257,6 +257,8 @@ class BooleanArray(BaseMaskedArray):
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = False
+ _TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
+ _FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
@@ -282,14 +284,23 @@ def _from_sequence(
@classmethod
def _from_sequence_of_strings(
- cls, strings: List[str], *, dtype: Optional[Dtype] = None, copy: bool = False
+ cls,
+ strings: List[str],
+ *,
+ dtype: Optional[Dtype] = None,
+ copy: bool = False,
+ true_values: Optional[List[str]] = None,
+ false_values: Optional[List[str]] = None,
) -> "BooleanArray":
+ true_values_union = cls._TRUE_VALUES.union(true_values or [])
+ false_values_union = cls._FALSE_VALUES.union(false_values or [])
+
def map_string(s):
if isna(s):
return s
- elif s in ["True", "TRUE", "true", "1", "1.0"]:
+ elif s in true_values_union:
return True
- elif s in ["False", "FALSE", "false", "0", "0.0"]:
+ elif s in false_values_union:
return False
else:
raise ValueError(f"{s} cannot be cast to bool")
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 6e9cc18358153..ca817be5d2ff6 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1812,7 +1812,15 @@ def _cast_types(self, values, cast_type, column):
cast_type = pandas_dtype(cast_type)
array_type = cast_type.construct_array_type()
try:
- return array_type._from_sequence_of_strings(values, dtype=cast_type)
+ if is_bool_dtype(cast_type):
+ return array_type._from_sequence_of_strings(
+ values,
+ dtype=cast_type,
+ true_values=self.true_values,
+ false_values=self.false_values,
+ )
+ else:
+ return array_type._from_sequence_of_strings(values, dtype=cast_type)
except NotImplementedError as err:
raise NotImplementedError(
f"Extension Array: {array_type} must implement "
diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
index ec1ccf009b8de..5ffd909d316bf 100644
--- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
+++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
@@ -213,3 +213,25 @@ def decimal_number_check(parser, numeric_decimal, thousands, float_precision):
)
val = df.iloc[0, 0]
assert val == numeric_decimal[1]
+
+
+def test_true_values_cast_to_bool(all_parsers):
+ # GH#34655
+ text = """a,b
+yes,xxx
+no,yyy
+1,zzz
+0,aaa
+ """
+ parser = all_parsers
+ result = parser.read_csv(
+ StringIO(text),
+ true_values=["yes"],
+ false_values=["no"],
+ dtype={"a": "boolean"},
+ )
+ expected = DataFrame(
+ {"a": [True, False, True, False], "b": ["xxx", "yyy", "zzz", "aaa"]}
+ )
+ expected["a"] = expected["a"].astype("boolean")
+ tm.assert_frame_equal(result, expected)
| - [x] closes #34655
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
I am not really experienced with cython, so I would appreciate feedback on the switiching function. This was not done previously in case of ea boolean dtype, hence why this was failing before. | https://api.github.com/repos/pandas-dev/pandas/pulls/39012 | 2021-01-07T00:06:13Z | 2021-01-09T22:18:23Z | 2021-01-09T22:18:22Z | 2021-01-09T22:22:25Z |
ENH: making value_counts stable/keeping original ordering | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index ff11ebc022ffb..2234b870ac9c0 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -356,6 +356,7 @@ Reshaping
- Bug in :func:`join` over :class:`MultiIndex` returned wrong result, when one of both indexes had only one level (:issue:`36909`)
- :meth:`merge_asof` raises ``ValueError`` instead of cryptic ``TypeError`` in case of non-numerical merge columns (:issue:`29130`)
- Bug in :meth:`DataFrame.join` not assigning values correctly when having :class:`MultiIndex` where at least one dimension is from dtype ``Categorical`` with non-alphabetically sorted categories (:issue:`38502`)
+- :meth:`Series.value_counts` returns keys in original order (:issue:`12679`, :issue:`11227`)
- Bug in :meth:`DataFrame.apply` would give incorrect results when used with a string argument and ``axis=1`` when the axis argument was not supported and now raises a ``ValueError`` instead (:issue:`39211`)
-
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index 276f162545399..a3e72ed858392 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -19,13 +19,6 @@ cdef kh{{name}}_t to_kh{{name}}_t({{name}}_t val) nogil:
res.imag = val.imag
return res
-
-cdef {{name}}_t to_{{name}}(kh{{name}}_t val) nogil:
- cdef {{name}}_t res
- res.real = val.real
- res.imag = val.imag
- return res
-
{{endfor}}
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index f8f541235dcb7..b4da5a3c7fb09 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -6,26 +6,26 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
{{py:
-# dtype, ttype, c_type, to_c_type, to_dtype
-dtypes = [('complex128', 'complex128', 'khcomplex128_t',
- 'to_khcomplex128_t', 'to_complex128'),
- ('complex64', 'complex64', 'khcomplex64_t',
- 'to_khcomplex64_t', 'to_complex64'),
- ('float64', 'float64', 'float64_t', '', ''),
- ('float32', 'float32', 'float32_t', '', ''),
- ('uint64', 'uint64', 'uint64_t', '', ''),
- ('uint32', 'uint32', 'uint32_t', '', ''),
- ('uint16', 'uint16', 'uint16_t', '', ''),
- ('uint8', 'uint8', 'uint8_t', '', ''),
- ('object', 'pymap', 'object', '', ''),
- ('int64', 'int64', 'int64_t', '', ''),
- ('int32', 'int32', 'int32_t', '', ''),
- ('int16', 'int16', 'int16_t', '', ''),
- ('int8', 'int8', 'int8_t', '', '')]
+# name, dtype, ttype, c_type, to_c_type
+dtypes = [('Complex128', 'complex128', 'complex128',
+ 'khcomplex128_t', 'to_khcomplex128_t'),
+ ('Complex64', 'complex64', 'complex64',
+ 'khcomplex64_t', 'to_khcomplex64_t'),
+ ('Float64', 'float64', 'float64', 'float64_t', ''),
+ ('Float32', 'float32', 'float32', 'float32_t', ''),
+ ('UInt64', 'uint64', 'uint64', 'uint64_t', ''),
+ ('UInt32', 'uint32', 'uint32', 'uint32_t', ''),
+ ('UInt16', 'uint16', 'uint16', 'uint16_t', ''),
+ ('UInt8', 'uint8', 'uint8', 'uint8_t', ''),
+ ('Object', 'object', 'pymap', 'object', ''),
+ ('Int64', 'int64', 'int64', 'int64_t', ''),
+ ('Int32', 'int32', 'int32', 'int32_t', ''),
+ ('Int16', 'int16', 'int16', 'int16_t', ''),
+ ('Int8', 'int8', 'int8', 'int8_t', '')]
}}
-{{for dtype, ttype, c_type, to_c_type, to_dtype in dtypes}}
+{{for name, dtype, ttype, c_type, to_c_type in dtypes}}
@cython.wraparound(False)
@@ -77,54 +77,77 @@ cdef build_count_table_{{dtype}}(const {{dtype}}_t[:] values,
@cython.wraparound(False)
@cython.boundscheck(False)
{{if dtype == 'object'}}
-cpdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna):
+cpdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna, navalue=np.NaN):
{{else}}
cpdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna):
{{endif}}
cdef:
Py_ssize_t i = 0
+ Py_ssize_t n = len(values)
+ size_t unique_key_index = 0
+ size_t unique_key_count = 0
kh_{{ttype}}_t *table
- {{if dtype != 'object'}}
- {{dtype}}_t[:] result_keys
- int64_t[:] result_counts
- {{endif}}
-
# Don't use Py_ssize_t, since table.n_buckets is unsigned
khiter_t k
+ bint is_null
+
+ {{c_type}} val
+
+ int ret = 0
+
+ # we track the order in which keys are first seen (GH39009),
+ # khash-map isn't insertion-ordered, thus:
+ # table maps key to index_of_appearence
+ # result_keys maps index_of_appearence to key
+ # result_counts maps index_of_appearence to number of elements
+ result_keys = {{name}}Vector()
+ result_counts = Int64Vector()
table = kh_init_{{ttype}}()
+
{{if dtype == 'object'}}
- build_count_table_{{dtype}}(values, table, 1)
+ kh_resize_{{ttype}}(table, n // 10)
+
+ for i in range(n):
+ val = values[i]
+ is_null = checknull(val)
+ if not is_null or not dropna:
+ # all nas become the same representative:
+ if is_null:
+ val = navalue
+ k = kh_get_{{ttype}}(table, <PyObject*>val)
+ if k != table.n_buckets:
+ unique_key_index = table.vals[k]
+ result_counts.data.data[unique_key_index] += 1
+ else:
+ k = kh_put_{{ttype}}(table, <PyObject*>val, &ret)
+ table.vals[k] = unique_key_count
+ result_keys.append(val)
+ result_counts.append(1)
+ unique_key_count+=1
{{else}}
- build_count_table_{{dtype}}(values, table, dropna)
- {{endif}}
+ kh_resize_{{ttype}}(table, n)
- result_keys = np.empty(table.n_occupied, '{{dtype}}')
- result_counts = np.zeros(table.n_occupied, dtype=np.int64)
+ for i in range(n):
+ val = {{to_c_type}}(values[i])
- {{if dtype == 'object'}}
- for k in range(table.n_buckets):
- if kh_exist_{{ttype}}(table, k):
- result_keys[i] = <{{dtype}}>table.keys[k]
- result_counts[i] = table.vals[k]
- i += 1
- {{else}}
- with nogil:
- for k in range(table.n_buckets):
- if kh_exist_{{ttype}}(table, k):
- result_keys[i] = {{to_dtype}}(table.keys[k])
- result_counts[i] = table.vals[k]
- i += 1
+ if not is_nan_{{c_type}}(val) or not dropna:
+ k = kh_get_{{ttype}}(table, val)
+ if k != table.n_buckets:
+ unique_key_index = table.vals[k]
+ result_counts.data.data[unique_key_index] += 1
+ else:
+ k = kh_put_{{ttype}}(table, val, &ret)
+ table.vals[k] = unique_key_count
+ result_keys.append(val)
+ result_counts.append(1)
+ unique_key_count+=1
{{endif}}
kh_destroy_{{ttype}}(table)
- {{if dtype == 'object'}}
- return result_keys, result_counts
- {{else}}
- return np.asarray(result_keys), np.asarray(result_counts)
- {{endif}}
+ return result_keys.to_array(), result_counts.to_array()
@cython.wraparound(False)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index ed7ae75117c5c..968b39088e684 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -866,11 +866,6 @@ def value_counts_arraylike(values, dropna: bool):
f = getattr(htable, f"value_count_{ndtype}")
keys, counts = f(values, dropna)
- mask = isna(values)
- if not dropna and mask.any() and not isna(keys).any():
- keys = np.insert(keys, 0, np.NaN)
- counts = np.insert(counts, 0, mask.sum())
-
keys = _reconstruct_data(keys, original.dtype, original)
return keys, counts
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 631f67ced77dd..09e2e80f45b3d 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1004,9 +1004,9 @@ def value_counts(
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
+ 1.0 1
2.0 1
4.0 1
- 1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
@@ -1015,9 +1015,9 @@ def value_counts(
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
+ 1.0 0.2
2.0 0.2
4.0 0.2
- 1.0 0.2
dtype: float64
**bins**
@@ -1039,10 +1039,10 @@ def value_counts(
>>> s.value_counts(dropna=False)
3.0 2
+ 1.0 1
2.0 1
- NaN 1
4.0 1
- 1.0 1
+ NaN 1
dtype: int64
"""
return value_counts(
diff --git a/pandas/tests/arrays/boolean/test_function.py b/pandas/tests/arrays/boolean/test_function.py
index 0f8743489b412..d90655b6e2820 100644
--- a/pandas/tests/arrays/boolean/test_function.py
+++ b/pandas/tests/arrays/boolean/test_function.py
@@ -77,18 +77,18 @@ def test_ufunc_reduce_raises(values):
def test_value_counts_na():
arr = pd.array([True, False, pd.NA], dtype="boolean")
result = arr.value_counts(dropna=False)
- expected = pd.Series([1, 1, 1], index=[False, True, pd.NA], dtype="Int64")
+ expected = pd.Series([1, 1, 1], index=[True, False, pd.NA], dtype="Int64")
tm.assert_series_equal(result, expected)
result = arr.value_counts(dropna=True)
- expected = pd.Series([1, 1], index=[False, True], dtype="Int64")
+ expected = pd.Series([1, 1], index=[True, False], dtype="Int64")
tm.assert_series_equal(result, expected)
def test_value_counts_with_normalize():
s = pd.Series([True, False, pd.NA], dtype="boolean")
result = s.value_counts(normalize=True)
- expected = pd.Series([1, 1], index=[False, True], dtype="Float64") / 2
+ expected = pd.Series([1, 1], index=[True, False], dtype="Float64") / 2
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index 5365929213503..d14de990d8268 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -497,7 +497,7 @@ def test_value_counts_na(dtype, request):
arr = pd.array(["a", "b", "a", pd.NA], dtype=dtype)
result = arr.value_counts(dropna=False)
- expected = pd.Series([2, 1, 1], index=["a", pd.NA, "b"], dtype="Int64")
+ expected = pd.Series([2, 1, 1], index=["a", "b", pd.NA], dtype="Int64")
tm.assert_series_equal(result, expected)
result = arr.value_counts(dropna=True)
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index ea44e5d477fc6..587d3c466c631 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -288,7 +288,7 @@ def test_value_counts_preserves_tz(self):
arr[-2] = pd.NaT
result = arr.value_counts()
- expected = pd.Series([1, 4, 2], index=[pd.NaT, dti[0], dti[1]])
+ expected = pd.Series([4, 2, 1], index=[dti[0], dti[1], pd.NaT])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method", ["pad", "backfill"])
diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py
index ed1c3fcce378c..15bafb7a835ba 100644
--- a/pandas/tests/frame/methods/test_describe.py
+++ b/pandas/tests/frame/methods/test_describe.py
@@ -371,7 +371,7 @@ def test_describe_does_not_raise_error_for_dictlike_elements(self):
# GH#32409
df = DataFrame([{"test": {"a": "1"}}, {"test": {"a": "2"}}])
expected = DataFrame(
- {"test": [2, 2, {"a": "2"}, 1]}, index=["count", "unique", "top", "freq"]
+ {"test": [2, 2, {"a": "1"}, 1]}, index=["count", "unique", "top", "freq"]
)
result = df.describe()
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py
index 4f650807afd30..5bf652c206a5f 100644
--- a/pandas/tests/libs/test_hashtable.py
+++ b/pandas/tests/libs/test_hashtable.py
@@ -272,6 +272,15 @@ def test_value_count(self, dtype, type_suffix, writable):
tm.assert_numpy_array_equal(np.sort(keys), expected)
assert np.all(counts == 5)
+ def test_value_count_stable(self, dtype, type_suffix, writable):
+ # GH12679
+ value_count = get_ht_function("value_count", type_suffix)
+ values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)
+ values.flags.writeable = writable
+ keys, counts = value_count(values, False)
+ tm.assert_numpy_array_equal(keys, values)
+ assert np.all(counts == 1)
+
def test_duplicated_first(self, dtype, type_suffix, writable):
N = 100
duplicated = get_ht_function("duplicated", type_suffix)
diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py
index f22b1be672190..505b879660ff1 100644
--- a/pandas/tests/series/methods/test_value_counts.py
+++ b/pandas/tests/series/methods/test_value_counts.py
@@ -185,7 +185,7 @@ def test_value_counts_categorical_with_nan(self):
(
Series([False, True, True, pd.NA]),
False,
- Series([2, 1, 1], index=[True, pd.NA, False]),
+ Series([2, 1, 1], index=[True, False, pd.NA]),
),
(
Series([False, True, True, pd.NA]),
@@ -195,7 +195,7 @@ def test_value_counts_categorical_with_nan(self):
(
Series(range(3), index=[True, False, np.nan]).index,
False,
- Series([1, 1, 1], index=[pd.NA, False, True]),
+ Series([1, 1, 1], index=[True, False, np.nan]),
),
],
)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index fb982c02acd99..88757b96085aa 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -6,7 +6,7 @@
import pytest
from pandas._libs import algos as libalgos, hashtable as ht
-from pandas.compat import IS64, np_array_datetime64_compat
+from pandas.compat import np_array_datetime64_compat
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
@@ -1272,12 +1272,10 @@ def test_value_counts_uint64(self):
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2 ** 63], dtype=object)
- expected = Series([1, 1], index=[2 ** 63, -1])
+ expected = Series([1, 1], index=[-1, 2 ** 63])
result = algos.value_counts(arr)
- # 32-bit linux has a different ordering
- if IS64:
- tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(result, expected)
class TestDuplicated:
| closes #12679
closes #11227
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
The order of the returned keys for `value_counts` aren't arbitrary (i.e. depending on the used hash function) but are original ordering (when sorted this applies for the keys with the same number of values).
| https://api.github.com/repos/pandas-dev/pandas/pulls/39009 | 2021-01-06T21:07:00Z | 2021-01-22T21:42:34Z | 2021-01-22T21:42:34Z | 2021-01-27T19:05:04Z |
CLN: inspect_excel_format | diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index b0ec8a1082a0e..8911696230c03 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -2,10 +2,10 @@
import datetime
from distutils.version import LooseVersion
import inspect
-from io import BufferedIOBase, BytesIO, RawIOBase
+from io import BytesIO
import os
from textwrap import fill
-from typing import IO, Any, Dict, Mapping, Optional, Union, cast
+from typing import Any, Dict, Mapping, Optional, Union, cast
import warnings
import zipfile
@@ -906,24 +906,18 @@ def close(self):
@doc(storage_options=_shared_docs["storage_options"])
def inspect_excel_format(
- path: Optional[str] = None,
- content: Union[None, BufferedIOBase, RawIOBase, bytes] = None,
+ content_or_path: FilePathOrBuffer,
storage_options: StorageOptions = None,
) -> str:
"""
Inspect the path or content of an excel file and get its format.
- At least one of path or content must be not None. If both are not None,
- content will take precedence.
-
Adopted from xlrd: https://github.com/python-excel/xlrd.
Parameters
----------
- path : str, optional
- Path to file to inspect. May be a URL.
- content : file-like object, optional
- Content of file to inspect.
+ content_or_path : str or file-like object
+ Path to file or content of file to inspect. May be a URL.
{storage_options}
Returns
@@ -938,12 +932,8 @@ def inspect_excel_format(
BadZipFile
If resulting stream does not have an XLS signature and is not a valid zipfile.
"""
- content_or_path: Union[None, str, BufferedIOBase, RawIOBase, IO[bytes]]
- if isinstance(content, bytes):
- content_or_path = BytesIO(content)
- else:
- content_or_path = content or path
- assert content_or_path is not None
+ if isinstance(content_or_path, bytes):
+ content_or_path = BytesIO(content_or_path)
with get_handle(
content_or_path, "rb", storage_options=storage_options, is_text=False
@@ -1069,7 +1059,7 @@ def __init__(
ext = "xls"
else:
ext = inspect_excel_format(
- content=path_or_buffer, storage_options=storage_options
+ content_or_path=path_or_buffer, storage_options=storage_options
)
if engine is None:
| - [x] closes #38823
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
cc @rhshadrach | https://api.github.com/repos/pandas-dev/pandas/pulls/39008 | 2021-01-06T21:05:55Z | 2021-01-08T21:39:23Z | 2021-01-08T21:39:23Z | 2021-01-08T22:11:48Z |
DOC: Clarify index_col behavior for read_csv | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 0c09ea3e0e2fc..8c7e01dd999d3 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -112,8 +112,9 @@ index_col : int, str, sequence of int / str, or False, default ``None``
The default value of ``None`` instructs pandas to guess. If the number of
fields in the column header row is equal to the number of fields in the body
- of the data file, then a default index is used. If it is one larger, then
- the first field is used as an index.
+ of the data file, then a default index is used. If it is larger, then
+ the first columns are used as index so that the remaining number of fields in
+ the body are equal to the number of fields in the header.
usecols : list-like or callable, default ``None``
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
| - [x] closes #38830
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
| https://api.github.com/repos/pandas-dev/pandas/pulls/39006 | 2021-01-06T19:53:22Z | 2021-01-06T22:56:26Z | 2021-01-06T22:56:26Z | 2021-01-06T23:01:11Z |
TST: add note about scope of base extension tests to all files | diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index 0fde1e8a2fdb8..281bbc21e3106 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -1,3 +1,18 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
import numpy as np
import pytest
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 29790d14f93cc..1f0181eec8830 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -1,3 +1,18 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
import numpy as np
import pytest
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index 817881e00fa99..30dd6193846a4 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -1,3 +1,18 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
import numpy as np
import pytest
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index ffd56b9c23bc8..86f9080571459 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -1,3 +1,18 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
import numpy as np
import pytest
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index d49c4c5cf4889..d0a3ef17afdbc 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -1,3 +1,18 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overwriting a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+
+"""
import string
import numpy as np
| We already had this note in about half of the files in this directory, copied it to include in the other files as well. | https://api.github.com/repos/pandas-dev/pandas/pulls/39003 | 2021-01-06T15:47:35Z | 2021-01-06T18:34:18Z | 2021-01-06T18:34:18Z | 2021-01-12T08:05:04Z |
Remove Scatter and Hexbin from Series plot documentation | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index e891017b37bc1..795239ab78c6e 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -630,8 +630,8 @@ class PlotAccessor(PandasObject):
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- - 'scatter' : scatter plot
- - 'hexbin' : hexbin plot.
+ - 'scatter' : scatter plot (DataFrame only)
+ - 'hexbin' : hexbin plot (DataFrame only)
ax : matplotlib axes object, default None
An axes of the current figure.
subplots : bool, default False
| - [x] closes #38976
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] Remove scatter and hexbin entry from https://github.com/pandas-dev/pandas/blob/v1.2.0/pandas/plotting/_core.py#L603-L1708
| https://api.github.com/repos/pandas-dev/pandas/pulls/39000 | 2021-01-06T11:56:45Z | 2021-01-06T18:36:17Z | 2021-01-06T18:36:16Z | 2021-01-06T18:36:21Z |
DOC: Update contributing.rst | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index b810c71e3daa6..90ecee8cf9312 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -698,6 +698,12 @@ to run its checks with::
without needing to have done ``pre-commit install`` beforehand.
+If you want to run checks on all recently commited files on upstream/master you can use::
+
+ pre-commit run --from-ref=upstream/master --to-ref=HEAD --all-files
+
+without needing to have done ``pre-commit install`` beforehand.
+
.. note::
If you have conflicting installations of ``virtualenv``, then you may get an
| - [x] closes #38938
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38998 | 2021-01-06T10:52:38Z | 2021-01-06T15:02:41Z | 2021-01-06T15:02:41Z | 2021-01-10T11:03:10Z |
REGR: errors='replace' when encoding/errors are not specified | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 5695c817b5a3a..baeca87b8c4f8 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -24,6 +24,7 @@ Fixed regressions
- Fixed regression in :func:`read_excel` with non-rawbyte file handles (:issue:`38788`)
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
- Fixed regression in :meth:`Rolling.skew` and :meth:`Rolling.kurt` modifying the object inplace (:issue:`38908`)
+- Fixed regression in :meth:`read_csv` and other read functions were the encoding error policy (``errors``) did not default to ``"replace"`` when no encoding was specified (:issue:`38989`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 642684ca61480..8f04724773a8a 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -553,8 +553,7 @@ def get_handle(
Returns the dataclass IOHandles
"""
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
- if encoding is None:
- encoding = "utf-8"
+ encoding_passed, encoding = encoding, encoding or "utf-8"
# read_csv does not know whether the buffer is opened in binary/text mode
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
@@ -641,6 +640,9 @@ def get_handle(
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
if ioargs.encoding and "b" not in ioargs.mode:
+ if errors is None and encoding_passed is None:
+ # ignore errors when no encoding is specified
+ errors = "replace"
# Encoding
handle = open(
handle,
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index d445bece593d1..725c14f410357 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -419,3 +419,11 @@ def test_is_fsspec_url():
assert not icom.is_fsspec_url("random:pandas/somethingelse.com")
assert not icom.is_fsspec_url("/local/path")
assert not icom.is_fsspec_url("relative/local/path")
+
+
+def test_default_errors():
+ # GH 38989
+ with tm.ensure_clean() as path:
+ file = Path(path)
+ file.write_bytes(b"\xe4\na\n1")
+ tm.assert_frame_equal(pd.read_csv(file, skiprows=[0]), pd.DataFrame({"a": [1]}))
| - [x] closes #38989
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
Should 1.3 use `errors='replace'` when no `encoding/errors` are specified or use `errors=None` (strict)? | https://api.github.com/repos/pandas-dev/pandas/pulls/38997 | 2021-01-06T06:02:23Z | 2021-01-07T18:48:20Z | 2021-01-07T18:48:20Z | 2021-01-07T19:07:02Z |
ENH: Add table-wise numba rolling to other agg funcions | diff --git a/ci/deps/azure-37-slow.yaml b/ci/deps/azure-37-slow.yaml
index 05b33fa351ac9..5d097e397992c 100644
--- a/ci/deps/azure-37-slow.yaml
+++ b/ci/deps/azure-37-slow.yaml
@@ -36,3 +36,4 @@ dependencies:
- xlwt
- moto
- flask
+ - numba
diff --git a/ci/deps/azure-38-slow.yaml b/ci/deps/azure-38-slow.yaml
index fd40f40294b7f..0a4107917f01a 100644
--- a/ci/deps/azure-38-slow.yaml
+++ b/ci/deps/azure-38-slow.yaml
@@ -34,3 +34,4 @@ dependencies:
- xlwt
- moto
- flask
+ - numba
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 886469837d184..9e557a0020f1e 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -37,7 +37,7 @@ For example:
:class:`Rolling` and :class:`Expanding` now support a ``method`` argument with a
``'table'`` option that performs the windowing operation over an entire :class:`DataFrame`.
-See ref:`window.overview` for performance and functional benefits. (:issue:`15095`)
+See ref:`window.overview` for performance and functional benefits. (:issue:`15095`, :issue:`38995`)
.. _whatsnew_130.enhancements.other:
diff --git a/pandas/core/window/numba_.py b/pandas/core/window/numba_.py
index 46b47b7e988c4..aa69d4fa675cd 100644
--- a/pandas/core/window/numba_.py
+++ b/pandas/core/window/numba_.py
@@ -1,3 +1,4 @@
+import functools
from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
@@ -220,3 +221,21 @@ def roll_table(
return result
return roll_table
+
+
+# This function will no longer be needed once numba supports
+# axis for all np.nan* agg functions
+# https://github.com/numba/numba/issues/1269
+@functools.lru_cache(maxsize=None)
+def generate_manual_numpy_nan_agg_with_axis(nan_func):
+ numba = import_optional_dependency("numba")
+
+ @numba.jit(nopython=True, nogil=True, parallel=True)
+ def nan_agg_with_axis(table):
+ result = np.empty(table.shape[1])
+ for i in numba.prange(table.shape[1]):
+ partition = table[:, i]
+ result[i] = nan_func(partition)
+ return result
+
+ return nan_agg_with_axis
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index a4612a4c8ed5d..393c517a63660 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -65,6 +65,7 @@
VariableWindowIndexer,
)
from pandas.core.window.numba_ import (
+ generate_manual_numpy_nan_agg_with_axis,
generate_numba_apply_func,
generate_numba_table_func,
)
@@ -1378,16 +1379,15 @@ def sum(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("sum", args, kwargs)
if maybe_use_numba(engine):
if self.method == "table":
- raise NotImplementedError("method='table' is not supported.")
- # Once numba supports np.nansum with axis, args will be relevant.
- # https://github.com/numba/numba/issues/6610
- args = () if self.method == "single" else (0,)
+ func = generate_manual_numpy_nan_agg_with_axis(np.nansum)
+ else:
+ func = np.nansum
+
return self.apply(
- np.nansum,
+ func,
raw=True,
engine=engine,
engine_kwargs=engine_kwargs,
- args=args,
)
window_func = window_aggregations.roll_sum
return self._apply(window_func, name="sum", **kwargs)
@@ -1424,16 +1424,15 @@ def max(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("max", args, kwargs)
if maybe_use_numba(engine):
if self.method == "table":
- raise NotImplementedError("method='table' is not supported.")
- # Once numba supports np.nanmax with axis, args will be relevant.
- # https://github.com/numba/numba/issues/6610
- args = () if self.method == "single" else (0,)
+ func = generate_manual_numpy_nan_agg_with_axis(np.nanmax)
+ else:
+ func = np.nanmax
+
return self.apply(
- np.nanmax,
+ func,
raw=True,
engine=engine,
engine_kwargs=engine_kwargs,
- args=args,
)
window_func = window_aggregations.roll_max
return self._apply(window_func, name="max", **kwargs)
@@ -1496,16 +1495,15 @@ def min(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("min", args, kwargs)
if maybe_use_numba(engine):
if self.method == "table":
- raise NotImplementedError("method='table' is not supported.")
- # Once numba supports np.nanmin with axis, args will be relevant.
- # https://github.com/numba/numba/issues/6610
- args = () if self.method == "single" else (0,)
+ func = generate_manual_numpy_nan_agg_with_axis(np.nanmin)
+ else:
+ func = np.nanmin
+
return self.apply(
- np.nanmin,
+ func,
raw=True,
engine=engine,
engine_kwargs=engine_kwargs,
- args=args,
)
window_func = window_aggregations.roll_min
return self._apply(window_func, name="min", **kwargs)
@@ -1514,16 +1512,15 @@ def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("mean", args, kwargs)
if maybe_use_numba(engine):
if self.method == "table":
- raise NotImplementedError("method='table' is not supported.")
- # Once numba supports np.nanmean with axis, args will be relevant.
- # https://github.com/numba/numba/issues/6610
- args = () if self.method == "single" else (0,)
+ func = generate_manual_numpy_nan_agg_with_axis(np.nanmean)
+ else:
+ func = np.nanmean
+
return self.apply(
- np.nanmean,
+ func,
raw=True,
engine=engine,
engine_kwargs=engine_kwargs,
- args=args,
)
window_func = window_aggregations.roll_mean
return self._apply(window_func, name="mean", **kwargs)
@@ -1584,16 +1581,15 @@ def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
def median(self, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
if self.method == "table":
- raise NotImplementedError("method='table' is not supported.")
- # Once numba supports np.nanmedian with axis, args will be relevant.
- # https://github.com/numba/numba/issues/6610
- args = () if self.method == "single" else (0,)
+ func = generate_manual_numpy_nan_agg_with_axis(np.nanmedian)
+ else:
+ func = np.nanmedian
+
return self.apply(
- np.nanmedian,
+ func,
raw=True,
engine=engine,
engine_kwargs=engine_kwargs,
- args=args,
)
window_func = window_aggregations.roll_median_c
return self._apply(window_func, name="median", **kwargs)
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index 9d9c216801d73..173e39ef42908 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -163,6 +163,7 @@ def test_invalid_kwargs_nopython():
@td.skip_if_no("numba", "0.46.0")
+@pytest.mark.slow
@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
class TestTableMethod:
@@ -177,9 +178,6 @@ def f(x):
f, engine="numba", raw=True
)
- @pytest.mark.xfail(
- raises=NotImplementedError, reason="method='table' is not supported."
- )
def test_table_method_rolling_methods(
self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
@@ -247,9 +245,6 @@ def f(x):
)
tm.assert_frame_equal(result, expected)
- @pytest.mark.xfail(
- raises=NotImplementedError, reason="method='table' is not supported."
- )
def test_table_method_expanding_methods(
self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
Timings for a wide table
```
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.rand(10, 10**5))
roll_single = df.rolling(2, method="single")
roll_table = df.rolling(2, method="table")
%timeit roll_single.mean()
4.92 s ± 463 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
roll_single.mean(engine="numba", engine_kwargs={"nopython": True, "nogil": True, "parallel": True})
%timeit roll_single.mean(engine="numba", engine_kwargs={"nopython": True, "nogil": True, "parallel": True})
5.72 s ± 430 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
roll_table.mean(engine="numba", engine_kwargs={"nopython": True, "nogil": True, "parallel": True})
%timeit roll_table.mean(engine="numba", engine_kwargs={"nopython": True, "nogil": True, "parallel": True})
10.3 ms ± 1.23 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/38995 | 2021-01-06T05:44:52Z | 2021-01-07T21:24:06Z | 2021-01-07T21:24:06Z | 2021-05-26T15:54:22Z |
DOC: elaborate on copies vs in place operations in comparison docs | diff --git a/doc/source/getting_started/comparison/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst
index 2b316cccb7fc9..54b45dc20db20 100644
--- a/doc/source/getting_started/comparison/comparison_with_sas.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sas.rst
@@ -62,6 +62,12 @@ see the :ref:`indexing documentation<indexing>` for much more on how to use an
``Index`` effectively.
+Copies vs. in place operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. include:: includes/copies.rst
+
+
Data input / output
-------------------
diff --git a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
index e9d687bc07999..c92d2a660d753 100644
--- a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
+++ b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
@@ -65,6 +65,13 @@ particular row don't change.
See the :ref:`indexing documentation<indexing>` for much more on how to use an ``Index``
effectively.
+
+Copies vs. in place operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. include:: includes/copies.rst
+
+
Data input / output
-------------------
diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst
index 890f0cbe50424..fcfa03a8bce5f 100644
--- a/doc/source/getting_started/comparison/comparison_with_sql.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sql.rst
@@ -23,6 +23,13 @@ structure.
tips = pd.read_csv(url)
tips
+
+Copies vs. in place operations
+------------------------------
+
+.. include:: includes/copies.rst
+
+
SELECT
------
In SQL, selection is done using a comma-separated list of columns you'd like to select (or a ``*``
diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst
index 43cb775b5461d..94c45adcccc82 100644
--- a/doc/source/getting_started/comparison/comparison_with_stata.rst
+++ b/doc/source/getting_started/comparison/comparison_with_stata.rst
@@ -61,6 +61,12 @@ see the :ref:`indexing documentation<indexing>` for much more on how to use an
``Index`` effectively.
+Copies vs. in place operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. include:: includes/copies.rst
+
+
Data input / output
-------------------
diff --git a/doc/source/getting_started/comparison/includes/column_selection.rst b/doc/source/getting_started/comparison/includes/column_selection.rst
index b925af1294f54..071645c9718cb 100644
--- a/doc/source/getting_started/comparison/includes/column_selection.rst
+++ b/doc/source/getting_started/comparison/includes/column_selection.rst
@@ -1,5 +1,4 @@
-The same operations are expressed in pandas below. Note that these operations do not happen in
-place. To make these changes persist, assign the operation back to a variable.
+The same operations are expressed in pandas below.
Keep certain columns
''''''''''''''''''''
diff --git a/doc/source/getting_started/comparison/includes/copies.rst b/doc/source/getting_started/comparison/includes/copies.rst
new file mode 100644
index 0000000000000..08ccd47624932
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/copies.rst
@@ -0,0 +1,23 @@
+Most pandas operations return copies of the ``Series``/``DataFrame``. To make the changes "stick",
+you'll need to either assign to a new variable:
+
+ .. code-block:: python
+
+ sorted_df = df.sort_values("col1")
+
+
+or overwrite the original one:
+
+ .. code-block:: python
+
+ df = df.sort_values("col1")
+
+.. note::
+
+ You will see an ``inplace=True`` keyword argument available for some methods:
+
+ .. code-block:: python
+
+ df.sort_values("col1", inplace=True)
+
+ Its use is discouraged. :ref:`More information. <indexing.view_versus_copy>`
| <img width="782" alt="Screen Shot 2021-01-06 at 12 15 05 AM" src="https://user-images.githubusercontent.com/86842/103731907-437e7280-4fb4-11eb-9e6d-e702f117656c.png">
- [ ] ~~closes #xxxx~~
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] ~~whatsnew entry~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/38994 | 2021-01-06T05:20:24Z | 2021-01-08T14:14:30Z | 2021-01-08T14:14:29Z | 2021-01-10T01:13:26Z |
DOC: add more sections to spreadsheet comparison | diff --git a/doc/source/_static/spreadsheets/conditional.png b/doc/source/_static/spreadsheets/conditional.png
new file mode 100644
index 0000000000000..d518ff19dc760
Binary files /dev/null and b/doc/source/_static/spreadsheets/conditional.png differ
diff --git a/doc/source/_static/spreadsheets/filter.png b/doc/source/_static/spreadsheets/filter.png
new file mode 100644
index 0000000000000..b4c929793ca44
Binary files /dev/null and b/doc/source/_static/spreadsheets/filter.png differ
diff --git a/doc/source/_static/spreadsheets/find.png b/doc/source/_static/spreadsheets/find.png
new file mode 100644
index 0000000000000..223b2e6fc762f
Binary files /dev/null and b/doc/source/_static/spreadsheets/find.png differ
diff --git a/doc/source/_static/logo_excel.svg b/doc/source/_static/spreadsheets/logo_excel.svg
similarity index 100%
rename from doc/source/_static/logo_excel.svg
rename to doc/source/_static/spreadsheets/logo_excel.svg
diff --git a/doc/source/_static/excel_pivot.png b/doc/source/_static/spreadsheets/pivot.png
similarity index 100%
rename from doc/source/_static/excel_pivot.png
rename to doc/source/_static/spreadsheets/pivot.png
diff --git a/doc/source/_static/spreadsheets/sort.png b/doc/source/_static/spreadsheets/sort.png
new file mode 100644
index 0000000000000..253f2f3bfb9ba
Binary files /dev/null and b/doc/source/_static/spreadsheets/sort.png differ
diff --git a/doc/source/_static/spreadsheets/vlookup.png b/doc/source/_static/spreadsheets/vlookup.png
new file mode 100644
index 0000000000000..e96da01da1eeb
Binary files /dev/null and b/doc/source/_static/spreadsheets/vlookup.png differ
diff --git a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
index 7b779b02e20f8..e9d687bc07999 100644
--- a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
+++ b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
@@ -52,9 +52,12 @@ pandas, if no index is specified, a :class:`~pandas.RangeIndex` is used by defau
second row = 1, and so on), analogous to row headings/numbers in spreadsheets.
In pandas, indexes can be set to one (or multiple) unique values, which is like having a column that
-use use as the row identifier in a worksheet. Unlike spreadsheets, these ``Index`` values can actually be
-used to reference the rows. For example, in spreadsheets, you would reference the first row as ``A1:Z1``,
-while in pandas you could use ``populations.loc['Chicago']``.
+is used as the row identifier in a worksheet. Unlike most spreadsheets, these ``Index`` values can
+actually be used to reference the rows. (Note that `this can be done in Excel with structured
+references
+<https://support.microsoft.com/en-us/office/using-structured-references-with-excel-tables-f5ed2452-2337-4f71-bed3-c8ae6d2b276e>`_.)
+For example, in spreadsheets, you would reference the first row as ``A1:Z1``, while in pandas you
+could use ``populations.loc['Chicago']``.
Index values are also persistent, so if you re-order the rows in a ``DataFrame``, the label for a
particular row don't change.
@@ -62,11 +65,18 @@ particular row don't change.
See the :ref:`indexing documentation<indexing>` for much more on how to use an ``Index``
effectively.
-Commonly used spreadsheet functionalities
------------------------------------------
+Data input / output
+-------------------
-Importing data
-~~~~~~~~~~~~~~
+Constructing a DataFrame from values
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In a spreadsheet, `values can be typed directly into cells <https://support.microsoft.com/en-us/office/enter-data-manually-in-worksheet-cells-c798181d-d75a-41b1-92ad-6c0800f80038>`_.
+
+.. include:: includes/construct_dataframe.rst
+
+Reading external data
+~~~~~~~~~~~~~~~~~~~~~
Both `Excel <https://support.microsoft.com/en-us/office/import-data-from-external-data-sources-power-query-be4330b3-5356-486c-a168-b68e9e616f5a>`__
and :ref:`pandas <10min_tut_02_read_write>` can import data from various sources in various
@@ -96,6 +106,248 @@ In pandas, you pass the URL or local path of the CSV file to :func:`~pandas.read
tips = pd.read_csv(url)
tips
+Like `Excel's Text Import Wizard <https://support.microsoft.com/en-us/office/text-import-wizard-c5b02af6-fda1-4440-899f-f78bafe41857>`_,
+``read_csv`` can take a number of parameters to specify how the data should be parsed. For
+example, if the data was instead tab delimited, and did not have column names, the pandas command
+would be:
+
+.. code-block:: python
+
+ tips = pd.read_csv("tips.csv", sep="\t", header=None)
+
+ # alternatively, read_table is an alias to read_csv with tab delimiter
+ tips = pd.read_table("tips.csv", header=None)
+
+
+Limiting output
+~~~~~~~~~~~~~~~
+
+Spreadsheet programs will only show one screenful of data at a time and then allow you to scroll, so
+there isn't really a need to limit output. In pandas, you'll need to put a little more thought into
+controlling how your ``DataFrame``\s are displayed.
+
+.. include:: includes/limit.rst
+
+
+Exporting data
+~~~~~~~~~~~~~~
+
+By default, desktop spreadsheet software will save to its respective file format (``.xlsx``, ``.ods``, etc). You can, however, `save to other file formats <https://support.microsoft.com/en-us/office/save-a-workbook-in-another-file-format-6a16c862-4a36-48f9-a300-c2ca0065286e>`_.
+
+:ref:`pandas can create Excel files <io.excel_writer>`, :ref:`CSV <io.store_in_csv>`, or :ref:`a number of other formats <io>`.
+
+Data operations
+---------------
+
+Operations on columns
+~~~~~~~~~~~~~~~~~~~~~
+
+In spreadsheets, `formulas
+<https://support.microsoft.com/en-us/office/overview-of-formulas-in-excel-ecfdc708-9162-49e8-b993-c311f47ca173>`_
+are often created in individual cells and then `dragged
+<https://support.microsoft.com/en-us/office/copy-a-formula-by-dragging-the-fill-handle-in-excel-for-mac-dd928259-622b-473f-9a33-83aa1a63e218>`_
+into other cells to compute them for other columns. In pandas, you're able to do operations on whole
+columns directly.
+
+.. include:: includes/column_operations.rst
+
+Note that we aren't having to tell it to do that subtraction cell-by-cell — pandas handles that for
+us. See :ref:`how to create new columns derived from existing columns <10min_tut_05_columns>`.
+
+
+Filtering
+~~~~~~~~~
+
+`In Excel, filtering is done through a graphical menu. <https://support.microsoft.com/en-us/office/filter-data-in-a-range-or-table-01832226-31b5-4568-8806-38c37dcc180e>`_
+
+.. image:: ../../_static/spreadsheets/filter.png
+ :alt: Screenshot showing filtering of the total_bill column to values greater than 10
+ :align: center
+
+.. include:: includes/filtering.rst
+
+If/then logic
+~~~~~~~~~~~~~
+
+Let's say we want to make a ``bucket`` column with values of ``low`` and ``high``, based on whether
+the ``total_bill`` is less or more than $10.
+
+In spreadsheets, logical comparison can be done with `conditional formulas
+<https://support.microsoft.com/en-us/office/create-conditional-formulas-ca916c57-abd8-4b44-997c-c309b7307831>`_.
+We'd use a formula of ``=IF(A2 < 10, "low", "high")``, dragged to all cells in a new ``bucket``
+column.
+
+.. image:: ../../_static/spreadsheets/conditional.png
+ :alt: Screenshot showing the formula from above in a bucket column of the tips spreadsheet
+ :align: center
+
+.. include:: includes/if_then.rst
+
+Date functionality
+~~~~~~~~~~~~~~~~~~
+
+*This section will refer to "dates", but timestamps are handled similarly.*
+
+We can think of date functionality in two parts: parsing, and output. In spreadsheets, date values
+are generally parsed automatically, though there is a `DATEVALUE
+<https://support.microsoft.com/en-us/office/datevalue-function-df8b07d4-7761-4a93-bc33-b7471bbff252>`_
+function if you need it. In pandas, you need to explicitly convert plain text to datetime objects,
+either :ref:`while reading from a CSV <io.read_csv_table.datetime>` or :ref:`once in a DataFrame
+<10min_tut_09_timeseries.properties>`.
+
+Once parsed, spreadsheets display the dates in a default format, though `the format can be changed
+<https://support.microsoft.com/en-us/office/format-a-date-the-way-you-want-8e10019e-d5d8-47a1-ba95-db95123d273e>`_.
+In pandas, you'll generally want to keep dates as ``datetime`` objects while you're doing
+calculations with them. Outputting *parts* of dates (such as the year) is done through `date
+functions
+<https://support.microsoft.com/en-us/office/date-and-time-functions-reference-fd1b5961-c1ae-4677-be58-074152f97b81>`_
+in spreadsheets, and :ref:`datetime properties <10min_tut_09_timeseries.properties>` in pandas.
+
+Given ``date1`` and ``date2`` in columns ``A`` and ``B`` of a spreadsheet, you might have these
+formulas:
+
+.. list-table::
+ :header-rows: 1
+ :widths: auto
+
+ * - column
+ - formula
+ * - ``date1_year``
+ - ``=YEAR(A2)``
+ * - ``date2_month``
+ - ``=MONTH(B2)``
+ * - ``date1_next``
+ - ``=DATE(YEAR(A2),MONTH(A2)+1,1)``
+ * - ``months_between``
+ - ``=DATEDIF(A2,B2,"M")``
+
+The equivalent pandas operations are shown below.
+
+.. include:: includes/time_date.rst
+
+See :ref:`timeseries` for more details.
+
+
+Selection of columns
+~~~~~~~~~~~~~~~~~~~~
+
+In spreadsheets, you can select columns you want by:
+
+- `Hiding columns <https://support.microsoft.com/en-us/office/hide-or-show-rows-or-columns-659c2cad-802e-44ee-a614-dde8443579f8>`_
+- `Deleting columns <https://support.microsoft.com/en-us/office/insert-or-delete-rows-and-columns-6f40e6e4-85af-45e0-b39d-65dd504a3246>`_
+- `Referencing a range <https://support.microsoft.com/en-us/office/create-or-change-a-cell-reference-c7b8b95d-c594-4488-947e-c835903cebaa>`_ from one worksheet into another
+
+Since spreadsheet columns are typically `named in a header row
+<https://support.microsoft.com/en-us/office/turn-excel-table-headers-on-or-off-c91d1742-312c-4480-820f-cf4b534c8b3b>`_,
+renaming a column is simply a matter of changing the text in that first cell.
+
+.. include:: includes/column_selection.rst
+
+
+Sorting by values
+~~~~~~~~~~~~~~~~~
+
+Sorting in spreadsheets is accomplished via `the sort dialog <https://support.microsoft.com/en-us/office/sort-data-in-a-range-or-table-62d0b95d-2a90-4610-a6ae-2e545c4a4654>`_.
+
+.. image:: ../../_static/spreadsheets/sort.png
+ :alt: Screenshot of dialog from Excel showing sorting by the sex then total_bill columns
+ :align: center
+
+.. include:: includes/sorting.rst
+
+String processing
+-----------------
+
+Finding length of string
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+In spreadsheets, the number of characters in text can be found with the `LEN
+<https://support.microsoft.com/en-us/office/len-lenb-functions-29236f94-cedc-429d-affd-b5e33d2c67cb>`_
+function. This can be used with the `TRIM
+<https://support.microsoft.com/en-us/office/trim-function-410388fa-c5df-49c6-b16c-9e5630b479f9>`_
+function to remove extra whitespace.
+
+::
+
+ =LEN(TRIM(A2))
+
+.. include:: includes/length.rst
+
+Note this will still include multiple spaces within the string, so isn't 100% equivalent.
+
+
+Finding position of substring
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The `FIND
+<https://support.microsoft.com/en-us/office/find-findb-functions-c7912941-af2a-4bdf-a553-d0d89b0a0628>`_
+spreadsheet function returns the position of a substring, with the first character being ``1``.
+
+.. image:: ../../_static/spreadsheets/sort.png
+ :alt: Screenshot of FIND formula being used in Excel
+ :align: center
+
+.. include:: includes/find_substring.rst
+
+
+Extracting substring by position
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Spreadsheets have a `MID
+<https://support.microsoft.com/en-us/office/mid-midb-functions-d5f9e25c-d7d6-472e-b568-4ecb12433028>`_
+formula for extracting a substring from a given position. To get the first character::
+
+ =MID(A2,1,1)
+
+.. include:: includes/extract_substring.rst
+
+
+Extracting nth word
+~~~~~~~~~~~~~~~~~~~
+
+In Excel, you might use the `Text to Columns Wizard
+<https://support.microsoft.com/en-us/office/split-text-into-different-columns-with-the-convert-text-to-columns-wizard-30b14928-5550-41f5-97ca-7a3e9c363ed7>`_
+for splitting text and retrieving a specific column. (Note `it's possible to do so through a formula
+as well <https://exceljet.net/formula/extract-nth-word-from-text-string>`_.)
+
+.. include:: includes/nth_word.rst
+
+
+Changing case
+~~~~~~~~~~~~~
+
+Spreadsheets provide `UPPER, LOWER, and PROPER functions
+<https://support.microsoft.com/en-us/office/change-the-case-of-text-01481046-0fa7-4f3b-a693-496795a7a44d>`_
+for converting text to upper, lower, and title case, respectively.
+
+.. include:: includes/case.rst
+
+
+Merging
+-------
+
+.. include:: includes/merge_setup.rst
+
+In Excel, there are `merging of tables can be done through a VLOOKUP
+<https://support.microsoft.com/en-us/office/how-can-i-merge-two-or-more-tables-c80a9fce-c1ab-4425-bb96-497dd906d656>`_.
+
+.. image:: ../../_static/spreadsheets/vlookup.png
+ :alt: Screenshot showing a VLOOKUP formula between two tables in Excel, with some values being filled in and others with "#N/A"
+ :align: center
+
+.. include:: includes/merge.rst
+
+``merge`` has a number of advantages over ``VLOOKUP``:
+
+* The lookup value doesn't need to be the first column of the lookup table
+* If multiple rows are matched, there will be one row for each match, instead of just the first
+* It will include all columns from the lookup table, instead of just a single specified column
+* It supports :ref:`more complex join operations <merging.join>`
+
+
+Other considerations
+--------------------
+
Fill Handle
~~~~~~~~~~~
@@ -117,21 +369,6 @@ This can be achieved by creating a series and assigning it to the desired cells.
df
-Filters
-~~~~~~~
-
-Filters can be achieved by using slicing.
-
-The examples filter by 0 on column AAA, and also show how to filter by multiple
-values.
-
-.. ipython:: python
-
- df[df.AAA == 0]
-
- df[(df.AAA == 0) | (df.AAA == 2)]
-
-
Drop Duplicates
~~~~~~~~~~~~~~~
@@ -152,7 +389,6 @@ This is supported in pandas via :meth:`~DataFrame.drop_duplicates`.
df.drop_duplicates(["class", "student_count"])
-
Pivot Tables
~~~~~~~~~~~~
@@ -162,7 +398,8 @@ let's find the average gratuity by size of the party and sex of the server.
In Excel, we use the following configuration for the PivotTable:
-.. image:: ../../_static/excel_pivot.png
+.. image:: ../../_static/spreadsheets/pivot.png
+ :alt: Screenshot showing a PivotTable in Excel, using sex as the column, size as the rows, then average tip as the values
:align: center
The equivalent in pandas:
@@ -173,81 +410,34 @@ The equivalent in pandas:
tips, values="tip", index=["size"], columns=["sex"], aggfunc=np.average
)
-Formulas
-~~~~~~~~
-In spreadsheets, `formulas <https://support.microsoft.com/en-us/office/overview-of-formulas-in-excel-ecfdc708-9162-49e8-b993-c311f47ca173>`_
-are often created in individual cells and then `dragged <https://support.microsoft.com/en-us/office/copy-a-formula-by-dragging-the-fill-handle-in-excel-for-mac-dd928259-622b-473f-9a33-83aa1a63e218>`_
-into other cells to compute them for other columns. In pandas, you'll be doing more operations on
-full columns.
+Adding a row
+~~~~~~~~~~~~
-As an example, let's create a new column "girls_count" and try to compute the number of boys in
-each class.
+Assuming we are using a :class:`~pandas.RangeIndex` (numbered ``0``, ``1``, etc.), we can use :meth:`DataFrame.append` to add a row to the bottom of a ``DataFrame``.
.. ipython:: python
- df["girls_count"] = [21, 12, 21, 31, 23, 17]
- df
- df["boys_count"] = df["student_count"] - df["girls_count"]
df
+ new_row = {"class": "E", "student_count": 51, "all_pass": True}
+ df.append(new_row, ignore_index=True)
-Note that we aren't having to tell it to do that subtraction cell-by-cell — pandas handles that for
-us. See :ref:`how to create new columns derived from existing columns <10min_tut_05_columns>`.
-VLOOKUP
-~~~~~~~
-
-.. ipython:: python
+Find and Replace
+~~~~~~~~~~~~~~~~
- import random
-
- first_names = [
- "harry",
- "ron",
- "hermione",
- "rubius",
- "albus",
- "severus",
- "luna",
- ]
- keys = [1, 2, 3, 4, 5, 6, 7]
- df1 = pd.DataFrame({"keys": keys, "first_names": first_names})
- df1
-
- surnames = [
- "hadrid",
- "malfoy",
- "lovegood",
- "dumbledore",
- "grindelwald",
- "granger",
- "weasly",
- "riddle",
- "longbottom",
- "snape",
- ]
- keys = [random.randint(1, 7) for x in range(0, 10)]
- random_names = pd.DataFrame({"surnames": surnames, "keys": keys})
-
- random_names
-
- random_names.merge(df1, on="keys", how="left")
-
-Adding a row
-~~~~~~~~~~~~
-
-To appended a row, we can just assign values to an index using :meth:`~DataFrame.loc`.
-
-NOTE: If the index already exists, the values in that index will be over written.
+`Excel's Find dialog <https://support.microsoft.com/en-us/office/find-or-replace-text-and-numbers-on-a-worksheet-0e304ca5-ecef-4808-b90f-fdb42f892e90>`_
+takes you to cells that match, one by one. In pandas, this operation is generally done for an
+entire column or ``DataFrame`` at once through :ref:`conditional expressions <10min_tut_03_subset.rows_and_columns>`.
.. ipython:: python
- df1.loc[7] = [8, "tonks"]
- df1
+ tips
+ tips == "Sun"
+ tips["day"].str.contains("S")
+pandas' :meth:`~DataFrame.replace` is comparable to Excel's ``Replace All``.
-Search and Replace
-~~~~~~~~~~~~~~~~~~
+.. ipython:: python
-The ``replace`` method that comes associated with the ``DataFrame`` object can perform
-this function. Please see `pandas.DataFrame.replace <https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.replace.html>`__ for examples.
+ tips.replace("Thur", "Thu")
diff --git a/doc/source/getting_started/comparison/includes/column_operations.rst b/doc/source/getting_started/comparison/includes/column_operations.rst
index bc5db8e6b8038..b23b931ed2db1 100644
--- a/doc/source/getting_started/comparison/includes/column_operations.rst
+++ b/doc/source/getting_started/comparison/includes/column_operations.rst
@@ -1,4 +1,4 @@
-pandas provides similar vectorized operations by specifying the individual ``Series`` in the
+pandas provides vectorized operations by specifying the individual ``Series`` in the
``DataFrame``. New columns can be assigned in the same way. The :meth:`DataFrame.drop` method drops
a column from the ``DataFrame``.
diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst
index de47bd5b72148..cd5dfb84fee31 100644
--- a/doc/source/getting_started/index.rst
+++ b/doc/source/getting_started/index.rst
@@ -626,7 +626,7 @@ the pandas-equivalent operations compared to software you already know:
</div>
<div class="col-lg-6 col-md-6 col-sm-6 col-xs-12 d-flex">
<div class="card text-center intro-card shadow">
- <img src="../_static/logo_excel.svg" class="card-img-top" alt="Excel logo" height="52">
+ <img src="../_static/spreadsheets/logo_excel.svg" class="card-img-top" alt="Excel logo" height="52">
<div class="card-body flex-fill">
<p class="card-text">Users of <a href="https://en.wikipedia.org/wiki/Microsoft_Excel">Excel</a>
or other spreadsheet programs will find that many of the concepts are transferrable to pandas.</p>
diff --git a/doc/source/getting_started/intro_tutorials/03_subset_data.rst b/doc/source/getting_started/intro_tutorials/03_subset_data.rst
index fe3eae6c42959..4106b0e064823 100644
--- a/doc/source/getting_started/intro_tutorials/03_subset_data.rst
+++ b/doc/source/getting_started/intro_tutorials/03_subset_data.rst
@@ -268,6 +268,8 @@ For more dedicated functions on missing values, see the user guide section about
</div>
+.. _10min_tut_03_subset.rows_and_columns:
+
How do I select specific rows and columns from a ``DataFrame``?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/getting_started/intro_tutorials/09_timeseries.rst b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
index 598d3514baa15..b9cab0747196e 100644
--- a/doc/source/getting_started/intro_tutorials/09_timeseries.rst
+++ b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
@@ -58,6 +58,8 @@ Westminster* in respectively Paris, Antwerp and London.
How to handle time series data with ease?
-----------------------------------------
+.. _10min_tut_09_timeseries.properties:
+
Using pandas datetime properties
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 9c9ad9538f488..1156ddd6da410 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -232,6 +232,8 @@ verbose : boolean, default ``False``
skip_blank_lines : boolean, default ``True``
If ``True``, skip over blank lines rather than interpreting as NaN values.
+.. _io.read_csv_table.datetime:
+
Datetime handling
+++++++++++++++++
| [Preview (link to PDF on Google Drive)](https://drive.google.com/file/d/1uUyTQyEAX3F6h4EJqKz7Mz4KehGBnAig/view?usp=sharing)
This pull request gets closer to full parity with [SAS](https://pandas.pydata.org/pandas-docs/stable/getting_started/comparison/comparison_with_sas.html)/[STATA](https://pandas.pydata.org/pandas-docs/stable/getting_started/comparison/comparison_with_stata.html) comparison pages by adding the Data Input/Output through Merging sections. It still needs Missing Data and GroupBy, but wanted to get this in while I was at a good stopping place. Each section was done in its own commit, if it's easier to review that way.
---
- [x] ~~closes~~ part of https://github.com/pandas-dev/pandas/issues/38990
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] ~~whatsnew entry~~ | https://api.github.com/repos/pandas-dev/pandas/pulls/38993 | 2021-01-06T03:27:45Z | 2021-01-06T14:53:11Z | 2021-01-06T14:53:10Z | 2021-01-06T14:53:19Z |
ENH: 2D support for MaskedArray | diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 82f9280870d59..bf78a3cdefbdd 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -637,7 +637,7 @@ def pad_inplace(numeric_object_t[:] values, uint8_t[:] mask, limit=None):
@cython.boundscheck(False)
@cython.wraparound(False)
-def pad_2d_inplace(numeric_object_t[:, :] values, const uint8_t[:, :] mask, limit=None):
+def pad_2d_inplace(numeric_object_t[:, :] values, uint8_t[:, :] mask, limit=None):
cdef:
Py_ssize_t i, j, N, K
numeric_object_t val
@@ -656,10 +656,11 @@ def pad_2d_inplace(numeric_object_t[:, :] values, const uint8_t[:, :] mask, limi
val = values[j, 0]
for i in range(N):
if mask[j, i]:
- if fill_count >= lim:
+ if fill_count >= lim or i == 0:
continue
fill_count += 1
values[j, i] = val
+ mask[j, i] = False
else:
fill_count = 0
val = values[j, i]
@@ -759,7 +760,7 @@ def backfill_inplace(numeric_object_t[:] values, uint8_t[:] mask, limit=None):
def backfill_2d_inplace(numeric_object_t[:, :] values,
- const uint8_t[:, :] mask,
+ uint8_t[:, :] mask,
limit=None):
pad_2d_inplace(values[:, ::-1], mask[:, ::-1], limit)
diff --git a/pandas/core/array_algos/masked_reductions.py b/pandas/core/array_algos/masked_reductions.py
index 01bb3d50c0da7..66a3152de1499 100644
--- a/pandas/core/array_algos/masked_reductions.py
+++ b/pandas/core/array_algos/masked_reductions.py
@@ -3,7 +3,10 @@
for missing values.
"""
-from typing import Callable
+from typing import (
+ Callable,
+ Optional,
+)
import numpy as np
@@ -19,6 +22,7 @@ def _sumprod(
*,
skipna: bool = True,
min_count: int = 0,
+ axis: Optional[int] = None,
):
"""
Sum or product for 1D masked array.
@@ -36,36 +40,55 @@ def _sumprod(
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
+ axis : int, optional, default None
"""
if not skipna:
- if mask.any() or check_below_min_count(values.shape, None, min_count):
+ if mask.any(axis=axis) or check_below_min_count(values.shape, None, min_count):
return libmissing.NA
else:
- return func(values)
+ return func(values, axis=axis)
else:
- if check_below_min_count(values.shape, mask, min_count):
+ if check_below_min_count(values.shape, mask, min_count) and (
+ axis is None or values.ndim == 1
+ ):
return libmissing.NA
- return func(values, where=~mask)
+
+ return func(values, where=~mask, axis=axis)
def sum(
- values: np.ndarray, mask: np.ndarray, *, skipna: bool = True, min_count: int = 0
+ values: np.ndarray,
+ mask: np.ndarray,
+ *,
+ skipna: bool = True,
+ min_count: int = 0,
+ axis: Optional[int] = None,
):
return _sumprod(
- np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count
+ np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
)
def prod(
- values: np.ndarray, mask: np.ndarray, *, skipna: bool = True, min_count: int = 0
+ values: np.ndarray,
+ mask: np.ndarray,
+ *,
+ skipna: bool = True,
+ min_count: int = 0,
+ axis: Optional[int] = None,
):
return _sumprod(
- np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count
+ np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
)
def _minmax(
- func: Callable, values: np.ndarray, mask: np.ndarray, *, skipna: bool = True
+ func: Callable,
+ values: np.ndarray,
+ mask: np.ndarray,
+ *,
+ skipna: bool = True,
+ axis: Optional[int] = None,
):
"""
Reduction for 1D masked array.
@@ -80,6 +103,7 @@ def _minmax(
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
+ axis : int, optional, default None
"""
if not skipna:
if mask.any() or not values.size:
@@ -96,14 +120,27 @@ def _minmax(
return libmissing.NA
-def min(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
- return _minmax(np.min, values=values, mask=mask, skipna=skipna)
+def min(
+ values: np.ndarray,
+ mask: np.ndarray,
+ *,
+ skipna: bool = True,
+ axis: Optional[int] = None,
+):
+ return _minmax(np.min, values=values, mask=mask, skipna=skipna, axis=axis)
-def max(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
- return _minmax(np.max, values=values, mask=mask, skipna=skipna)
+def max(
+ values: np.ndarray,
+ mask: np.ndarray,
+ *,
+ skipna: bool = True,
+ axis: Optional[int] = None,
+):
+ return _minmax(np.max, values=values, mask=mask, skipna=skipna, axis=axis)
+# TODO: axis kwarg
def mean(values: np.ndarray, mask: np.ndarray, skipna: bool = True):
if not values.size or mask.all():
return libmissing.NA
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py
index e43e66fed8957..3769c686da029 100644
--- a/pandas/core/arrays/_mixins.py
+++ b/pandas/core/arrays/_mixins.py
@@ -298,27 +298,6 @@ def _wrap_reduction_result(self, axis: int | None, result):
return self._box_func(result)
return self._from_backing_data(result)
- # ------------------------------------------------------------------------
-
- def __repr__(self) -> str:
- if self.ndim == 1:
- return super().__repr__()
-
- from pandas.io.formats.printing import format_object_summary
-
- # the short repr has no trailing newline, while the truncated
- # repr does. So we include a newline in our template, and strip
- # any trailing newlines from format_object_summary
- lines = [
- format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
- ", \n"
- )
- for x in self
- ]
- data = ",\n".join(lines)
- class_name = f"<{type(self).__name__}>"
- return f"{class_name}\n[\n{data}\n]\nShape: {self.shape}, dtype: {self.dtype}"
-
# ------------------------------------------------------------------------
# __array_function__ methods
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 99c4944a1cfa7..bf54f7166e14d 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -1209,6 +1209,9 @@ def view(self, dtype: Dtype | None = None) -> ArrayLike:
# ------------------------------------------------------------------------
def __repr__(self) -> str:
+ if self.ndim > 1:
+ return self._repr_2d()
+
from pandas.io.formats.printing import format_object_summary
# the short repr has no trailing newline, while the truncated
@@ -1220,6 +1223,22 @@ def __repr__(self) -> str:
class_name = f"<{type(self).__name__}>\n"
return f"{class_name}{data}\nLength: {len(self)}, dtype: {self.dtype}"
+ def _repr_2d(self) -> str:
+ from pandas.io.formats.printing import format_object_summary
+
+ # the short repr has no trailing newline, while the truncated
+ # repr does. So we include a newline in our template, and strip
+ # any trailing newlines from format_object_summary
+ lines = [
+ format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
+ ", \n"
+ )
+ for x in self
+ ]
+ data = ",\n".join(lines)
+ class_name = f"<{type(self).__name__}>"
+ return f"{class_name}\n[\n{data}\n]\nShape: {self.shape}, dtype: {self.dtype}"
+
def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
"""
Formatting function for scalar values.
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index 1df7c191bdb68..58e7abbbe1ddd 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -21,6 +21,7 @@
npt,
type_t,
)
+from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_bool_dtype,
@@ -245,10 +246,8 @@ def coerce_to_array(
if mask_values is not None:
mask = mask | mask_values
- if values.ndim != 1:
- raise ValueError("values must be a 1D list-like")
- if mask.ndim != 1:
- raise ValueError("mask must be a 1D list-like")
+ if values.shape != mask.shape:
+ raise ValueError("values.shape and mask.shape must match")
return values, mask
@@ -447,6 +446,144 @@ def _values_for_argsort(self) -> np.ndarray:
data[self._mask] = -1
return data
+ def any(self, *, skipna: bool = True, axis: int | None = 0, **kwargs):
+ """
+ Return whether any element is True.
+
+ Returns False unless there is at least one element that is True.
+ By default, NAs are skipped. If ``skipna=False`` is specified and
+ missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
+ is used as for logical operations.
+
+ Parameters
+ ----------
+ skipna : bool, default True
+ Exclude NA values. If the entire array is NA and `skipna` is
+ True, then the result will be False, as for an empty array.
+ If `skipna` is False, the result will still be True if there is
+ at least one element that is True, otherwise NA will be returned
+ if there are NA's present.
+ axis : int or None, default 0
+ **kwargs : any, default None
+ Additional keywords have no effect but might be accepted for
+ compatibility with NumPy.
+
+ Returns
+ -------
+ bool or :attr:`pandas.NA`
+
+ See Also
+ --------
+ numpy.any : Numpy version of this method.
+ BooleanArray.all : Return whether all elements are True.
+
+ Examples
+ --------
+ The result indicates whether any element is True (and by default
+ skips NAs):
+
+ >>> pd.array([True, False, True]).any()
+ True
+ >>> pd.array([True, False, pd.NA]).any()
+ True
+ >>> pd.array([False, False, pd.NA]).any()
+ False
+ >>> pd.array([], dtype="boolean").any()
+ False
+ >>> pd.array([pd.NA], dtype="boolean").any()
+ False
+
+ With ``skipna=False``, the result can be NA if this is logically
+ required (whether ``pd.NA`` is True or False influences the result):
+
+ >>> pd.array([True, False, pd.NA]).any(skipna=False)
+ True
+ >>> pd.array([False, False, pd.NA]).any(skipna=False)
+ <NA>
+ """
+ kwargs.pop("axis", None)
+ nv.validate_any((), kwargs)
+
+ values = self._data.copy()
+ np.putmask(values, self._mask, False)
+ result = values.any(axis=axis)
+
+ if skipna:
+ return result
+ else:
+ if result or self.size == 0 or not self._mask.any():
+ return result
+ else:
+ return self.dtype.na_value
+
+ def all(self, *, skipna: bool = True, axis: int | None = 0, **kwargs):
+ """
+ Return whether all elements are True.
+
+ Returns True unless there is at least one element that is False.
+ By default, NAs are skipped. If ``skipna=False`` is specified and
+ missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
+ is used as for logical operations.
+
+ Parameters
+ ----------
+ skipna : bool, default True
+ Exclude NA values. If the entire array is NA and `skipna` is
+ True, then the result will be True, as for an empty array.
+ If `skipna` is False, the result will still be False if there is
+ at least one element that is False, otherwise NA will be returned
+ if there are NA's present.
+ axis : int or None, default 0
+ **kwargs : any, default None
+ Additional keywords have no effect but might be accepted for
+ compatibility with NumPy.
+
+ Returns
+ -------
+ bool or :attr:`pandas.NA`
+
+ See Also
+ --------
+ numpy.all : Numpy version of this method.
+ BooleanArray.any : Return whether any element is True.
+
+ Examples
+ --------
+ The result indicates whether any element is True (and by default
+ skips NAs):
+
+ >>> pd.array([True, True, pd.NA]).all()
+ True
+ >>> pd.array([True, False, pd.NA]).all()
+ False
+ >>> pd.array([], dtype="boolean").all()
+ True
+ >>> pd.array([pd.NA], dtype="boolean").all()
+ True
+
+ With ``skipna=False``, the result can be NA if this is logically
+ required (whether ``pd.NA`` is True or False influences the result):
+
+ >>> pd.array([True, True, pd.NA]).all(skipna=False)
+ <NA>
+ >>> pd.array([True, False, pd.NA]).all(skipna=False)
+ False
+ """
+ kwargs.pop("axis", None)
+ nv.validate_all((), kwargs)
+
+ values = self._data.copy()
+ np.putmask(values, self._mask, True)
+ result = values.all(axis=axis)
+
+ if skipna:
+ return result
+ else:
+ if not result or self.size == 0 or not self._mask.any():
+ return result
+ else:
+ return self.dtype.na_value
+
def _logical_method(self, other, op):
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py
index 066f6ebdfcaa6..6d6cc03a1c83e 100644
--- a/pandas/core/arrays/floating.py
+++ b/pandas/core/arrays/floating.py
@@ -385,21 +385,21 @@ def _cmp_method(self, other, op):
return BooleanArray(result, mask)
- def sum(self, *, skipna=True, min_count=0, **kwargs):
+ def sum(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_sum((), kwargs)
- return super()._reduce("sum", skipna=skipna, min_count=min_count)
+ return super()._reduce("sum", skipna=skipna, min_count=min_count, axis=axis)
- def prod(self, *, skipna=True, min_count=0, **kwargs):
+ def prod(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_prod((), kwargs)
- return super()._reduce("prod", skipna=skipna, min_count=min_count)
+ return super()._reduce("prod", skipna=skipna, min_count=min_count, axis=axis)
- def min(self, *, skipna=True, **kwargs):
+ def min(self, *, skipna=True, axis: int | None = 0, **kwargs):
nv.validate_min((), kwargs)
- return super()._reduce("min", skipna=skipna)
+ return super()._reduce("min", skipna=skipna, axis=axis)
- def max(self, *, skipna=True, **kwargs):
+ def max(self, *, skipna=True, axis: int | None = 0, **kwargs):
nv.validate_max((), kwargs)
- return super()._reduce("max", skipna=skipna)
+ return super()._reduce("max", skipna=skipna, axis=axis)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 078adeb11d3fb..4d59832655162 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -458,21 +458,21 @@ def _cmp_method(self, other, op):
return BooleanArray(result, mask)
- def sum(self, *, skipna=True, min_count=0, **kwargs):
+ def sum(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_sum((), kwargs)
- return super()._reduce("sum", skipna=skipna, min_count=min_count)
+ return super()._reduce("sum", skipna=skipna, min_count=min_count, axis=axis)
- def prod(self, *, skipna=True, min_count=0, **kwargs):
+ def prod(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_prod((), kwargs)
- return super()._reduce("prod", skipna=skipna, min_count=min_count)
+ return super()._reduce("prod", skipna=skipna, min_count=min_count, axis=axis)
- def min(self, *, skipna=True, **kwargs):
+ def min(self, *, skipna=True, axis: int | None = 0, **kwargs):
nv.validate_min((), kwargs)
- return super()._reduce("min", skipna=skipna)
+ return super()._reduce("min", skipna=skipna, axis=axis)
- def max(self, *, skipna=True, **kwargs):
+ def max(self, *, skipna=True, axis: int | None = 0, **kwargs):
nv.validate_max((), kwargs)
- return super()._reduce("max", skipna=skipna)
+ return super()._reduce("max", skipna=skipna, axis=axis)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 6a03456673604..0247cd717edec 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -22,6 +22,7 @@
Scalar,
ScalarIndexer,
SequenceIndexer,
+ Shape,
npt,
type_t,
)
@@ -34,10 +35,10 @@
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
+ is_bool,
is_bool_dtype,
is_dtype_equal,
is_float_dtype,
- is_integer,
is_integer_dtype,
is_object_dtype,
is_scalar,
@@ -120,6 +121,10 @@ class BaseMaskedArray(OpsMixin, ExtensionArray):
# The value used to fill '_data' to avoid upcasting
_internal_fill_value: Scalar
+ # our underlying data and mask are each ndarrays
+ _data: np.ndarray
+ _mask: np.ndarray
+
# Fill values used for any/all
_truthy_value = Scalar # bool(_truthy_value) = True
_falsey_value = Scalar # bool(_falsey_value) = False
@@ -131,12 +136,8 @@ def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
"mask should be boolean numpy array. Use "
"the 'pd.array' function instead"
)
- if values.ndim != 1:
- raise ValueError("values must be a 1D array")
- if mask.ndim != 1:
- raise ValueError("mask must be a 1D array")
if values.shape != mask.shape:
- raise ValueError("values and mask must have same shape")
+ raise ValueError("values.shape must match mask.shape")
if copy:
values = values.copy()
@@ -160,14 +161,16 @@ def __getitem__(self: BaseMaskedArrayT, item: SequenceIndexer) -> BaseMaskedArra
def __getitem__(
self: BaseMaskedArrayT, item: PositionalIndexer
) -> BaseMaskedArrayT | Any:
- if is_integer(item):
- if self._mask[item]:
+ item = check_array_indexer(self, item)
+
+ newmask = self._mask[item]
+ if is_bool(newmask):
+ # This is a scalar indexing
+ if newmask:
return self.dtype.na_value
return self._data[item]
- item = check_array_indexer(self, item)
-
- return type(self)(self._data[item], self._mask[item])
+ return type(self)(self._data[item], newmask)
@doc(ExtensionArray.fillna)
def fillna(
@@ -187,13 +190,13 @@ def fillna(
if mask.any():
if method is not None:
- func = missing.get_fill_func(method)
+ func = missing.get_fill_func(method, ndim=self.ndim)
new_values, new_mask = func(
- self._data.copy(),
+ self._data.copy().T,
limit=limit,
- mask=mask.copy(),
+ mask=mask.copy().T,
)
- return type(self)(new_values, new_mask.view(np.bool_))
+ return type(self)(new_values.T, new_mask.view(np.bool_).T)
else:
# fill with value
new_values = self.copy()
@@ -220,15 +223,52 @@ def __setitem__(self, key, value) -> None:
self._mask[key] = mask
def __iter__(self):
- for i in range(len(self)):
- if self._mask[i]:
- yield self.dtype.na_value
- else:
- yield self._data[i]
+ if self.ndim == 1:
+ for i in range(len(self)):
+ if self._mask[i]:
+ yield self.dtype.na_value
+ else:
+ yield self._data[i]
+ else:
+ for i in range(len(self)):
+ yield self[i]
def __len__(self) -> int:
return len(self._data)
+ @property
+ def shape(self) -> Shape:
+ return self._data.shape
+
+ @property
+ def ndim(self) -> int:
+ return self._data.ndim
+
+ def swapaxes(self: BaseMaskedArrayT, axis1, axis2) -> BaseMaskedArrayT:
+ data = self._data.swapaxes(axis1, axis2)
+ mask = self._mask.swapaxes(axis1, axis2)
+ return type(self)(data, mask)
+
+ def delete(self: BaseMaskedArrayT, loc, axis: int = 0) -> BaseMaskedArrayT:
+ data = np.delete(self._data, loc, axis=axis)
+ mask = np.delete(self._mask, loc, axis=axis)
+ return type(self)(data, mask)
+
+ def reshape(self: BaseMaskedArrayT, *args, **kwargs) -> BaseMaskedArrayT:
+ data = self._data.reshape(*args, **kwargs)
+ mask = self._mask.reshape(*args, **kwargs)
+ return type(self)(data, mask)
+
+ def ravel(self: BaseMaskedArrayT, *args, **kwargs) -> BaseMaskedArrayT:
+ # TODO: need to make sure we have the same order for data/mask
+ data = self._data.ravel(*args, **kwargs)
+ mask = self._mask.ravel(*args, **kwargs)
+ return type(self)(data, mask)
+
+ @property
+ def T(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
+ return type(self)(self._data.T, self._mask.T)
+
def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
return type(self)(~self._data, self._mask.copy())
@@ -454,10 +494,12 @@ def nbytes(self) -> int:
@classmethod
def _concat_same_type(
- cls: type[BaseMaskedArrayT], to_concat: Sequence[BaseMaskedArrayT]
+ cls: type[BaseMaskedArrayT],
+ to_concat: Sequence[BaseMaskedArrayT],
+ axis: int = 0,
) -> BaseMaskedArrayT:
- data = np.concatenate([x._data for x in to_concat])
- mask = np.concatenate([x._mask for x in to_concat])
+ data = np.concatenate([x._data for x in to_concat], axis=axis)
+ mask = np.concatenate([x._mask for x in to_concat], axis=axis)
return cls(data, mask)
def take(
@@ -466,15 +508,22 @@ def take(
*,
allow_fill: bool = False,
fill_value: Scalar | None = None,
+ axis: int = 0,
) -> BaseMaskedArrayT:
# we always fill with 1 internally
# to avoid upcasting
data_fill_value = self._internal_fill_value if isna(fill_value) else fill_value
result = take(
- self._data, indexer, fill_value=data_fill_value, allow_fill=allow_fill
+ self._data,
+ indexer,
+ fill_value=data_fill_value,
+ allow_fill=allow_fill,
+ axis=axis,
)
- mask = take(self._mask, indexer, fill_value=True, allow_fill=allow_fill)
+ mask = take(
+ self._mask, indexer, fill_value=True, allow_fill=allow_fill, axis=axis
+ )
# if we are filling
# we only fill where the indexer is null
@@ -593,7 +642,8 @@ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if name in {"sum", "prod", "min", "max", "mean"}:
op = getattr(masked_reductions, name)
- return op(data, mask, skipna=skipna, **kwargs)
+ result = op(data, mask, skipna=skipna, **kwargs)
+ return result
# coerce to a nan-aware float if needed
# (we explicitly use NaN within reductions)
diff --git a/pandas/core/arrays/numeric.py b/pandas/core/arrays/numeric.py
index c5301a3bd3683..e1990dc064a84 100644
--- a/pandas/core/arrays/numeric.py
+++ b/pandas/core/arrays/numeric.py
@@ -152,6 +152,18 @@ def _arith_method(self, other, op):
_HANDLED_TYPES = (np.ndarray, numbers.Number)
+ def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
+ result = super()._reduce(name, skipna=skipna, **kwargs)
+ if isinstance(result, np.ndarray):
+ axis = kwargs["axis"]
+ if skipna:
+ # we only retain mask for all-NA rows/columns
+ mask = self._mask.all(axis=axis)
+ else:
+ mask = self._mask.any(axis=axis)
+ return type(self)(result, mask=mask)
+ return result
+
def __neg__(self):
return type(self)(-self._data, self._mask.copy())
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 8d150c8f6ad3d..d93fa4bbdd7fc 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -319,7 +319,9 @@ def __init__(self, values, copy=False):
def _validate(self):
"""Validate that we only store NA or strings."""
- if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True):
+ if len(self._ndarray) and not lib.is_string_array(
+ self._ndarray.ravel("K"), skipna=True
+ ):
raise ValueError("StringArray requires a sequence of strings or pandas.NA")
if self._ndarray.dtype != "object":
raise ValueError(
@@ -447,9 +449,11 @@ def astype(self, dtype, copy=True):
return super().astype(dtype, copy)
- def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
+ def _reduce(
+ self, name: str, *, skipna: bool = True, axis: int | None = 0, **kwargs
+ ):
if name in ["min", "max"]:
- return getattr(self, name)(skipna=skipna)
+ return getattr(self, name)(skipna=skipna, axis=axis)
raise TypeError(f"Cannot perform reduction '{name}' with string dtype")
diff --git a/pandas/tests/arrays/boolean/test_construction.py b/pandas/tests/arrays/boolean/test_construction.py
index c9e96c437964f..f080bf7e03412 100644
--- a/pandas/tests/arrays/boolean/test_construction.py
+++ b/pandas/tests/arrays/boolean/test_construction.py
@@ -27,10 +27,10 @@ def test_boolean_array_constructor():
with pytest.raises(TypeError, match="mask should be boolean numpy array"):
BooleanArray(values, None)
- with pytest.raises(ValueError, match="values must be a 1D array"):
+ with pytest.raises(ValueError, match="values.shape must match mask.shape"):
BooleanArray(values.reshape(1, -1), mask)
- with pytest.raises(ValueError, match="mask must be a 1D array"):
+ with pytest.raises(ValueError, match="values.shape must match mask.shape"):
BooleanArray(values, mask.reshape(1, -1))
@@ -183,10 +183,10 @@ def test_coerce_to_array():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
- with pytest.raises(ValueError, match="values must be a 1D list-like"):
+ with pytest.raises(ValueError, match="values.shape and mask.shape must match"):
coerce_to_array(values.reshape(1, -1))
- with pytest.raises(ValueError, match="mask must be a 1D list-like"):
+ with pytest.raises(ValueError, match="values.shape and mask.shape must match"):
coerce_to_array(values, mask=mask.reshape(1, -1))
diff --git a/pandas/tests/extension/base/dim2.py b/pandas/tests/extension/base/dim2.py
index b80d2a3586b3b..b4a817cbc37ec 100644
--- a/pandas/tests/extension/base/dim2.py
+++ b/pandas/tests/extension/base/dim2.py
@@ -4,6 +4,11 @@
import numpy as np
import pytest
+from pandas.compat import (
+ IS64,
+ is_platform_windows,
+)
+
import pandas as pd
from pandas.tests.extension.base.base import BaseExtensionTests
@@ -194,9 +199,23 @@ def test_reductions_2d_axis0(self, data, method, request):
if method in ["sum", "prod"] and data.dtype.kind in ["i", "u"]:
# FIXME: kludge
if data.dtype.kind == "i":
- dtype = pd.Int64Dtype()
+ if is_platform_windows() or not IS64:
+ # FIXME: kludge for 32bit builds
+ if result.dtype.itemsize == 4:
+ dtype = pd.Int32Dtype()
+ else:
+ dtype = pd.Int64Dtype()
+ else:
+ dtype = pd.Int64Dtype()
else:
- dtype = pd.UInt64Dtype()
+ if is_platform_windows() or not IS64:
+ # FIXME: kludge for 32bit builds
+ if result.dtype.itemsize == 4:
+ dtype = pd.UInt32Dtype()
+ else:
+ dtype = pd.UInt64Dtype()
+ else:
+ dtype = pd.UInt64Dtype()
expected = data.astype(dtype)
assert type(expected) == type(data), type(expected)
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index 9260c342caa6b..9c4bf76b27c14 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -393,3 +393,7 @@ class TestUnaryOps(base.BaseUnaryOpsTests):
class TestParsing(base.BaseParsingTests):
pass
+
+
+class Test2DCompat(base.Dim2CompatTests):
+ pass
diff --git a/pandas/tests/extension/test_floating.py b/pandas/tests/extension/test_floating.py
index 173bc2d05af2f..500c2fbb74d17 100644
--- a/pandas/tests/extension/test_floating.py
+++ b/pandas/tests/extension/test_floating.py
@@ -223,3 +223,7 @@ class TestPrinting(base.BasePrintingTests):
class TestParsing(base.BaseParsingTests):
pass
+
+
+class Test2DCompat(base.Dim2CompatTests):
+ pass
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index 2cf4f8e415770..344b0be20fc7b 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -254,3 +254,7 @@ class TestPrinting(base.BasePrintingTests):
class TestParsing(base.BaseParsingTests):
pass
+
+
+class Test2DCompat(base.Dim2CompatTests):
+ pass
diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py
index 3d0edb70d1ced..af86c359c4c00 100644
--- a/pandas/tests/extension/test_string.py
+++ b/pandas/tests/extension/test_string.py
@@ -19,6 +19,7 @@
import pytest
import pandas as pd
+from pandas.core.arrays import ArrowStringArray
from pandas.core.arrays.string_ import StringDtype
from pandas.tests.extension import base
@@ -186,3 +187,13 @@ class TestPrinting(base.BasePrintingTests):
class TestGroupBy(base.BaseGroupbyTests):
pass
+
+
+class Test2DCompat(base.Dim2CompatTests):
+ @pytest.fixture(autouse=True)
+ def arrow_not_supported(self, data, request):
+ if isinstance(data, ArrowStringArray):
+ mark = pytest.mark.xfail(
+ reason="2D support not implemented for ArrowStringArray"
+ )
+ request.node.add_marker(mark)
| This doesn't in any way _use_ the 2D support, but opens up the option of incrementally fleshing out the tests. | https://api.github.com/repos/pandas-dev/pandas/pulls/38992 | 2021-01-06T02:55:49Z | 2021-10-16T17:58:42Z | 2021-10-16T17:58:42Z | 2022-04-15T20:39:23Z |
Backport PR #38987 on branch 1.2.x (Fix bug on master) | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index eba097cd8c345..a78af82ba4db8 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -2430,16 +2430,14 @@ Read a URL with no options:
.. ipython:: python
- url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
+ url = (
+ "https://raw.githubusercontent.com/pandas-dev/pandas/master/"
+ "pandas/tests/io/data/html/spam.html"
+ )
dfs = pd.read_html(url)
dfs
-.. note::
-
- The data from the above URL changes every Monday so the resulting data above
- and the data below may be slightly different.
-
-Read in the content of the file from the above URL and pass it to ``read_html``
+Read in the content of the "banklist.html" file and pass it to ``read_html``
as a string:
.. ipython:: python
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index ba8b1a8a0679d..aed1aaedf2fa3 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -129,6 +129,7 @@ def test_to_html_compat(self):
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
+ @pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url_positional_match(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
@@ -142,6 +143,7 @@ def test_banklist_url_positional_match(self):
assert_framelist_equal(df1, df2)
+ @pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
| Backport PR #38987: Fix bug on master | https://api.github.com/repos/pandas-dev/pandas/pulls/38991 | 2021-01-06T02:53:25Z | 2021-01-06T12:53:02Z | 2021-01-06T12:53:02Z | 2021-01-06T12:53:03Z |
Fix bug on master | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 9c9ad9538f488..01235958c5b22 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -2444,16 +2444,14 @@ Read a URL with no options:
.. ipython:: python
- url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
+ url = (
+ "https://raw.githubusercontent.com/pandas-dev/pandas/master/"
+ "pandas/tests/io/data/html/spam.html"
+ )
dfs = pd.read_html(url)
dfs
-.. note::
-
- The data from the above URL changes every Monday so the resulting data above
- and the data below may be slightly different.
-
-Read in the content of the file from the above URL and pass it to ``read_html``
+Read in the content of the "banklist.html" file and pass it to ``read_html``
as a string:
.. ipython:: python
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index e10cb10ca66c4..7b762e4891c14 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -129,6 +129,7 @@ def test_to_html_compat(self):
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
+ @pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url_positional_match(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
@@ -142,6 +143,7 @@ def test_banklist_url_positional_match(self):
assert_framelist_equal(df1, df2)
+ @pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
| - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
The banklist file was removed and replaced with banklist.csv. This causes failures on master.
This is just a temporary fix for the user guide.
cc @jreback
| https://api.github.com/repos/pandas-dev/pandas/pulls/38987 | 2021-01-06T02:02:54Z | 2021-01-06T02:52:33Z | 2021-01-06T02:52:33Z | 2021-01-06T02:53:30Z |
BUG: Datetimelike equality comparisons with Categorical | diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 7c093ebe00959..81bcff410a4d3 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -85,6 +85,20 @@ def test_compare_len1_raises(self):
with pytest.raises(ValueError, match="Lengths must match"):
idx <= idx[[0]]
+ @pytest.mark.parametrize(
+ "result",
+ [
+ pd.date_range("2020", periods=3),
+ pd.date_range("2020", periods=3, tz="UTC"),
+ pd.timedelta_range("0 days", periods=3),
+ pd.period_range("2020Q1", periods=3, freq="Q"),
+ ],
+ )
+ def test_compare_with_Categorical(self, result):
+ expected = pd.Categorical(result)
+ assert all(result == expected)
+ assert not any(result != expected)
+
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("as_index", [True, False])
def test_compare_categorical_dtype(self, arr1d, as_index, reverse, ordered):
| - [ ] closes #30699
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38986 | 2021-01-06T01:37:23Z | 2021-01-08T14:10:03Z | 2021-01-08T14:10:02Z | 2021-01-08T14:10:07Z |
REF: de-duplication in libperiod | diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index f0d21a3a7a957..5d3ad559ea718 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -357,18 +357,15 @@ cdef int64_t asfreq_QtoDT(int64_t ordinal, asfreq_info *af_info) nogil:
return upsample_daytime(unix_date, af_info)
-cdef void MtoD_ym(int64_t ordinal, int *year, int *month) nogil:
- year[0] = ordinal // 12 + 1970
- month[0] = ordinal % 12 + 1
-
-
cdef int64_t asfreq_MtoDT(int64_t ordinal, asfreq_info *af_info) nogil:
cdef:
int64_t unix_date
int year, month
ordinal += af_info.is_end
- MtoD_ym(ordinal, &year, &month)
+
+ year = ordinal // 12 + 1970
+ month = ordinal % 12 + 1
unix_date = unix_date_from_ymd(year, month, 1)
unix_date -= af_info.is_end
@@ -449,10 +446,7 @@ cdef int64_t asfreq_DTtoA(int64_t ordinal, asfreq_info *af_info) nogil:
ordinal = downsample_daytime(ordinal, af_info)
pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts)
- if dts.month > af_info.to_end:
- return <int64_t>(dts.year + 1 - 1970)
- else:
- return <int64_t>(dts.year - 1970)
+ return dts_to_year_ordinal(&dts, af_info.to_end)
cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, npy_datetimestruct* dts) nogil:
@@ -483,7 +477,7 @@ cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info) nogil:
ordinal = downsample_daytime(ordinal, af_info)
pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts)
- return <int64_t>((dts.year - 1970) * 12 + dts.month - 1)
+ return dts_to_month_ordinal(&dts)
cdef int64_t asfreq_DTtoW(int64_t ordinal, asfreq_info *af_info) nogil:
@@ -716,6 +710,40 @@ cdef int64_t unix_date_from_ymd(int year, int month, int day) nogil:
return unix_date
+cdef inline int64_t dts_to_month_ordinal(npy_datetimestruct* dts) nogil:
+ # AKA: use npy_datetimestruct_to_datetime(NPY_FR_M, &dts)
+ return <int64_t>((dts.year - 1970) * 12 + dts.month - 1)
+
+
+cdef inline int64_t dts_to_year_ordinal(npy_datetimestruct *dts, int to_end) nogil:
+ cdef:
+ int64_t result
+
+ result = npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT.NPY_FR_Y, dts)
+ if dts.month > to_end:
+ return result + 1
+ else:
+ return result
+
+
+cdef inline int64_t dts_to_qtr_ordinal(npy_datetimestruct* dts, int to_end) nogil:
+ cdef:
+ int quarter
+
+ adjust_dts_for_qtr(dts, to_end)
+ quarter = month_to_quarter(dts.month)
+ return <int64_t>((dts.year - 1970) * 4 + quarter - 1)
+
+
+cdef inline int get_anchor_month(int freq, int freq_group) nogil:
+ cdef:
+ int fmonth
+ fmonth = freq - freq_group
+ if fmonth == 0:
+ fmonth = 12
+ return fmonth
+
+
# specifically _dont_ use cdvision or else ordinals near -1 are assigned to
# incorrect dates GH#19643
@cython.cdivision(False)
@@ -740,23 +768,12 @@ cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil:
freq_group = get_freq_group(freq)
if freq_group == FR_ANN:
- fmonth = freq - FR_ANN
- if fmonth == 0:
- fmonth = 12
-
- mdiff = dts.month - fmonth
- if mdiff <= 0:
- return dts.year - 1970
- else:
- return dts.year - 1970 + 1
+ fmonth = get_anchor_month(freq, freq_group)
+ return dts_to_year_ordinal(dts, fmonth)
elif freq_group == FR_QTR:
- fmonth = freq - FR_QTR
- if fmonth == 0:
- fmonth = 12
-
- mdiff = dts.month - fmonth + 12
- return (dts.year - 1970) * 4 + (mdiff - 1) // 3
+ fmonth = get_anchor_month(freq, freq_group)
+ return dts_to_qtr_ordinal(dts, fmonth)
elif freq_group == FR_WK:
unix_date = npy_datetimestruct_to_datetime(NPY_FR_D, dts)
| https://api.github.com/repos/pandas-dev/pandas/pulls/38985 | 2021-01-06T01:01:31Z | 2021-01-06T18:34:47Z | 2021-01-06T18:34:47Z | 2021-01-06T18:44:48Z | |
BUG: MultiIndex.intersection duplicating nans in result | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 38b7a1d13c253..cbf0d4a4d708b 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -260,6 +260,7 @@ MultiIndex
^^^^^^^^^^
- Bug in :meth:`DataFrame.drop` raising ``TypeError`` when :class:`MultiIndex` is non-unique and no level is provided (:issue:`36293`)
+- Bug in :meth:`MultiIndex.intersection` duplicating ``NaN`` in result (:issue:`38623`)
- Bug in :meth:`MultiIndex.equals` incorrectly returning ``True`` when :class:`MultiIndex` containing ``NaN`` even when they are differntly ordered (:issue:`38439`)
- Bug in :meth:`MultiIndex.intersection` always returning empty when intersecting with :class:`CategoricalIndex` (:issue:`38653`)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 61b6b7ff19edc..f058645c4abda 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3578,16 +3578,9 @@ def _intersection(self, other, sort=False):
uniq_tuples = algos.unique(inner_tuples)
if uniq_tuples is None:
- other_uniq = set(rvals)
- seen = set()
- # pandas\core\indexes\multi.py:3503: error: "add" of "set" does not
- # return a value [func-returns-value]
- uniq_tuples = [
- x
- for x in lvals
- if x in other_uniq
- and not (x in seen or seen.add(x)) # type: ignore[func-returns-value]
- ]
+ left_unique = self.drop_duplicates()
+ indexer = left_unique.get_indexer(other.drop_duplicates())
+ uniq_tuples = left_unique.take(np.sort(indexer[indexer != -1]))
if sort is None:
uniq_tuples = sorted(uniq_tuples)
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index d5b29527ee08e..f872315374174 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -483,3 +483,12 @@ def test_intersection_different_names():
mi2 = MultiIndex.from_arrays([[1], [3]])
result = mi.intersection(mi2)
tm.assert_index_equal(result, mi2)
+
+
+def test_intersection_with_missing_values_on_both_sides(nulls_fixture):
+ # GH#38623
+ mi1 = MultiIndex.from_arrays([[3, nulls_fixture, 4, nulls_fixture], [1, 2, 4, 2]])
+ mi2 = MultiIndex.from_arrays([[3, nulls_fixture, 3], [1, 2, 4]])
+ result = mi1.intersection(mi2)
+ expected = MultiIndex.from_arrays([[3.0, nulls_fixture], [1, 2]])
+ tm.assert_index_equal(result, expected)
| - [x] xref #38623
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
This also aligns the multiindex implementation with the base implementation a bit more. | https://api.github.com/repos/pandas-dev/pandas/pulls/38984 | 2021-01-06T00:34:12Z | 2021-01-06T14:44:46Z | 2021-01-06T14:44:46Z | 2021-01-06T15:24:33Z |
REGR: Bug fix for ExtensionArray groupby aggregation on non-numeric types | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 36b4b4fa77c4a..849b599141c2b 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -28,6 +28,7 @@ Fixed regressions
- Fixed regression in :meth:`DataFrame.replace` raising ``ValueError`` when :class:`DataFrame` has dtype ``bytes`` (:issue:`38900`)
- Fixed regression in :meth:`DataFrameGroupBy.diff` raising for ``int8`` and ``int16`` columns (:issue:`39050`)
- Fixed regression that raised ``AttributeError`` with PyArrow versions [0.16.0, 1.0.0) (:issue:`38801`)
+- Fixed regression in :meth:`DataFrame.groupby` when aggregating an :class:`ExtensionDType` that could fail for non-numeric values (:issue:`38980`)
-
-
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 45897666b6ccf..2c0ba5b05c19b 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -540,7 +540,9 @@ def _ea_wrap_cython_operation(
result = type(orig_values)._from_sequence(res_values)
return result
- raise NotImplementedError(values.dtype)
+ raise NotImplementedError(
+ f"function is not implemented for this dtype: {values.dtype}"
+ )
@final
def _cython_operation(
diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py
index 94d0ef7bbea84..c81304695f353 100644
--- a/pandas/tests/extension/base/groupby.py
+++ b/pandas/tests/extension/base/groupby.py
@@ -33,6 +33,22 @@ def test_groupby_extension_agg(self, as_index, data_for_grouping):
expected = expected.reset_index()
self.assert_frame_equal(result, expected)
+ def test_groupby_agg_extension(self, data_for_grouping):
+ # GH#38980 groupby agg on extension type fails for non-numeric types
+ df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
+
+ expected = df.iloc[[0, 2, 4, 7]]
+ expected = expected.set_index("A")
+
+ result = df.groupby("A").agg({"B": "first"})
+ self.assert_frame_equal(result, expected)
+
+ result = df.groupby("A").agg("first")
+ self.assert_frame_equal(result, expected)
+
+ result = df.groupby("A").first()
+ self.assert_frame_equal(result, expected)
+
def test_groupby_extension_no_sort(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
result = df.groupby("B", sort=False).A.mean()
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 233b658d29782..08768bda312ba 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -197,6 +197,10 @@ class TestGroupby(BaseDecimal, base.BaseGroupbyTests):
def test_groupby_apply_identity(self, data_for_grouping):
super().test_groupby_apply_identity(data_for_grouping)
+ @pytest.mark.xfail(reason="GH#39098: Converts agg result to object")
+ def test_groupby_agg_extension(self, data_for_grouping):
+ super().test_groupby_agg_extension(data_for_grouping)
+
class TestSetitem(BaseDecimal, base.BaseSetitemTests):
pass
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 3a5e49796c53b..164a39498ec73 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -313,6 +313,10 @@ def test_groupby_extension_apply(self):
def test_groupby_extension_agg(self, as_index, data_for_grouping):
super().test_groupby_extension_agg(as_index, data_for_grouping)
+ @pytest.mark.xfail(reason="GH#39098: Converts agg result to object")
+ def test_groupby_agg_extension(self, data_for_grouping):
+ super().test_groupby_agg_extension(data_for_grouping)
+
class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests):
def test_error(self, data, all_arithmetic_operators):
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index ced7ea9261310..86a0bc9213256 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -291,6 +291,22 @@ def test_groupby_extension_agg(self, as_index, data_for_grouping):
expected = expected.reset_index()
self.assert_frame_equal(result, expected)
+ def test_groupby_agg_extension(self, data_for_grouping):
+ # GH#38980 groupby agg on extension type fails for non-numeric types
+ df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
+
+ expected = df.iloc[[0, 2, 4]]
+ expected = expected.set_index("A")
+
+ result = df.groupby("A").agg({"B": "first"})
+ self.assert_frame_equal(result, expected)
+
+ result = df.groupby("A").agg("first")
+ self.assert_frame_equal(result, expected)
+
+ result = df.groupby("A").first()
+ self.assert_frame_equal(result, expected)
+
def test_groupby_extension_no_sort(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
result = df.groupby("B", sort=False).A.mean()
| - [X] closes #38980
- [x] tests added / passed
- [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38982 | 2021-01-05T23:02:23Z | 2021-01-13T13:18:40Z | 2021-01-13T13:18:40Z | 2021-01-14T19:08:56Z |
CLN: Multiindex tests | diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py
index 7666e9670e6a6..11687b535d2b7 100644
--- a/pandas/tests/indexes/multi/test_constructors.py
+++ b/pandas/tests/indexes/multi/test_constructors.py
@@ -189,37 +189,24 @@ def test_from_arrays_tuples(idx):
tm.assert_index_equal(result, idx)
-def test_from_arrays_index_series_datetimetz():
- idx1 = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
- idx2 = date_range("2015-01-01 10:00", freq="H", periods=3, tz="Asia/Tokyo")
- result = MultiIndex.from_arrays([idx1, idx2])
- tm.assert_index_equal(result.get_level_values(0), idx1)
- tm.assert_index_equal(result.get_level_values(1), idx2)
-
- result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)])
- tm.assert_index_equal(result2.get_level_values(0), idx1)
- tm.assert_index_equal(result2.get_level_values(1), idx2)
-
- tm.assert_index_equal(result, result2)
-
-
-def test_from_arrays_index_series_timedelta():
- idx1 = pd.timedelta_range("1 days", freq="D", periods=3)
- idx2 = pd.timedelta_range("2 hours", freq="H", periods=3)
- result = MultiIndex.from_arrays([idx1, idx2])
- tm.assert_index_equal(result.get_level_values(0), idx1)
- tm.assert_index_equal(result.get_level_values(1), idx2)
-
- result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)])
- tm.assert_index_equal(result2.get_level_values(0), idx1)
- tm.assert_index_equal(result2.get_level_values(1), idx2)
-
- tm.assert_index_equal(result, result2)
-
-
-def test_from_arrays_index_series_period():
- idx1 = pd.period_range("2011-01-01", freq="D", periods=3)
- idx2 = pd.period_range("2015-01-01", freq="H", periods=3)
+@pytest.mark.parametrize(
+ ("idx1", "idx2"),
+ [
+ (
+ pd.period_range("2011-01-01", freq="D", periods=3),
+ pd.period_range("2015-01-01", freq="H", periods=3),
+ ),
+ (
+ date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"),
+ date_range("2015-01-01 10:00", freq="H", periods=3, tz="Asia/Tokyo"),
+ ),
+ (
+ pd.timedelta_range("1 days", freq="D", periods=3),
+ pd.timedelta_range("2 hours", freq="H", periods=3),
+ ),
+ ],
+)
+def test_from_arrays_index_series_period_datetimetz_and_timedelta(idx1, idx2):
result = MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
diff --git a/pandas/tests/indexes/multi/test_copy.py b/pandas/tests/indexes/multi/test_copy.py
index 8dc8572493444..7ec3df9fee0e5 100644
--- a/pandas/tests/indexes/multi/test_copy.py
+++ b/pandas/tests/indexes/multi/test_copy.py
@@ -79,10 +79,7 @@ def test_copy_method_kwargs(deep, kwarg, value):
names=["first", "second"],
)
idx_copy = idx.copy(**{kwarg: value, "deep": deep})
- if kwarg == "names":
- assert getattr(idx_copy, kwarg) == value
- else:
- assert [list(i) for i in getattr(idx_copy, kwarg)] == value
+ assert getattr(idx_copy, kwarg) == value
@pytest.mark.parametrize("deep", [True, False])
diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py
index aa2f37dad152c..26017dd976f73 100644
--- a/pandas/tests/indexes/multi/test_duplicates.py
+++ b/pandas/tests/indexes/multi/test_duplicates.py
@@ -68,6 +68,7 @@ def test_unique_level(idx, level):
mi = MultiIndex.from_arrays([[], []], names=["first", "second"])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
+ tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py
index f9ab0b3aceec4..2fdf6d1913a0f 100644
--- a/pandas/tests/indexes/multi/test_integrity.py
+++ b/pandas/tests/indexes/multi/test_integrity.py
@@ -137,7 +137,7 @@ def test_dims():
pass
-def take_invalid_kwargs():
+def test_take_invalid_kwargs():
vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]]
idx = MultiIndex.from_product(vals, names=["str", "dt"])
indices = [1, 2]
| - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
Found a few wrong tests and parametrized the similar ones
| https://api.github.com/repos/pandas-dev/pandas/pulls/38978 | 2021-01-05T19:59:44Z | 2021-01-06T14:43:23Z | 2021-01-06T14:43:23Z | 2021-01-06T15:24:58Z |
BUG: MultiIndex.union dropping duplicates from result | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index a72f78f3ca30d..badbc88302d6b 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -843,7 +843,7 @@ Interval
Indexing
^^^^^^^^
-- Bug in :meth:`Index.union` dropping duplicate ``Index`` values when ``Index`` was not monotonic or ``sort`` was set to ``False`` (:issue:`36289`, :issue:`31326`, :issue:`40862`)
+- Bug in :meth:`Index.union` and :meth:`MultiIndex.union` dropping duplicate ``Index`` values when ``Index`` was not monotonic or ``sort`` was set to ``False`` (:issue:`36289`, :issue:`31326`, :issue:`40862`)
- Bug in :meth:`CategoricalIndex.get_indexer` failing to raise ``InvalidIndexError`` when non-unique (:issue:`38372`)
- Bug in inserting many new columns into a :class:`DataFrame` causing incorrect subsequent indexing behavior (:issue:`38380`)
- Bug in :meth:`DataFrame.__setitem__` raising ``ValueError`` when setting multiple values to duplicate columns (:issue:`15695`)
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index cbef4ed44dc06..d7e15bb2ad197 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -291,7 +291,7 @@ def item_from_zerodim(val: object) -> object:
@cython.wraparound(False)
@cython.boundscheck(False)
-def fast_unique_multiple(list arrays, sort: bool = True) -> list:
+def fast_unique_multiple(list arrays, sort: bool = True):
"""
Generate a list of unique values from a list of arrays.
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 1a3719233a1da..eb72355fce583 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3574,14 +3574,20 @@ def equal_levels(self, other: MultiIndex) -> bool:
def _union(self, other, sort) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
+ if (
+ any(-1 in code for code in self.codes)
+ and any(-1 in code for code in self.codes)
+ or self.has_duplicates
+ or other.has_duplicates
+ ):
+ # This is only necessary if both sides have nans or one has dups,
+ # fast_unique_multiple is faster
+ result = super()._union(other, sort)
+ else:
+ rvals = other._values.astype(object, copy=False)
+ result = lib.fast_unique_multiple([self._values, rvals], sort=sort)
- # We could get here with CategoricalIndex other
- rvals = other._values.astype(object, copy=False)
- uniq_tuples = lib.fast_unique_multiple([self._values, rvals], sort=sort)
-
- return MultiIndex.from_arrays(
- zip(*uniq_tuples), sortorder=0, names=result_names
- )
+ return MultiIndex.from_arrays(zip(*result), sortorder=0, names=result_names)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
return is_object_dtype(dtype)
diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py
index 0b59e832ce3a8..eb456bee39dbf 100644
--- a/pandas/tests/indexes/multi/test_setops.py
+++ b/pandas/tests/indexes/multi/test_setops.py
@@ -3,7 +3,9 @@
import pandas as pd
from pandas import (
+ CategoricalIndex,
Index,
+ IntervalIndex,
MultiIndex,
Series,
)
@@ -508,3 +510,26 @@ def test_intersection_with_missing_values_on_both_sides(nulls_fixture):
result = mi1.intersection(mi2)
expected = MultiIndex.from_arrays([[3.0, nulls_fixture], [1, 2]])
tm.assert_index_equal(result, expected)
+
+
+def test_union_nan_got_duplicated():
+ # GH#38977
+ mi1 = MultiIndex.from_arrays([[1.0, np.nan], [2, 3]])
+ mi2 = MultiIndex.from_arrays([[1.0, np.nan, 3.0], [2, 3, 4]])
+ result = mi1.union(mi2)
+ tm.assert_index_equal(result, mi2)
+
+
+def test_union_duplicates(index):
+ # GH#38977
+ if index.empty or isinstance(index, (IntervalIndex, CategoricalIndex)):
+ # No duplicates in empty indexes
+ return
+ values = index.unique().values.tolist()
+ mi1 = MultiIndex.from_arrays([values, [1] * len(values)])
+ mi2 = MultiIndex.from_arrays([[values[0]] + values, [1] * (len(values) + 1)])
+ result = mi1.union(mi2)
+ tm.assert_index_equal(result, mi2.sort_values())
+
+ result = mi2.union(mi1)
+ tm.assert_index_equal(result, mi2.sort_values())
diff --git a/pandas/tests/libs/test_lib.py b/pandas/tests/libs/test_lib.py
index 67bd5b309b634..5b7e90fe16d8f 100644
--- a/pandas/tests/libs/test_lib.py
+++ b/pandas/tests/libs/test_lib.py
@@ -2,7 +2,6 @@
import pytest
from pandas._libs import (
- Timestamp,
lib,
writers as libwriters,
)
@@ -43,11 +42,6 @@ def test_fast_unique_multiple_list_gen_sort(self):
out = lib.fast_unique_multiple_list_gen(gen, sort=False)
tm.assert_numpy_array_equal(np.array(out), expected)
- def test_fast_unique_multiple_unsortable_runtimewarning(self):
- arr = [np.array(["foo", Timestamp("2000")])]
- with tm.assert_produces_warning(RuntimeWarning):
- lib.fast_unique_multiple(arr, sort=None)
-
class TestIndexing:
def test_maybe_indices_to_slice_left_edge(self):
| - [x] xref #38745
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
This more or less sits on top of #36299. The current base-_union implementation works only for sorted indexes correctly if indexes containing duplicates. Hence I've only added tests for sorted indexes, | https://api.github.com/repos/pandas-dev/pandas/pulls/38977 | 2021-01-05T19:42:42Z | 2021-05-26T02:02:27Z | 2021-05-26T02:02:27Z | 2021-05-27T10:26:30Z |
DOC: remove is_lexsorted from MultiIndex docstring | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 1da355c31987e..61b6b7ff19edc 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -216,7 +216,6 @@ class MultiIndex(Index):
set_codes
to_frame
to_flat_index
- is_lexsorted
sortlevel
droplevel
swaplevel
| `is_lexsorted` has been deprecated (https://github.com/pandas-dev/pandas/pull/38701)
- [x] closes https://github.com/pandas-dev/pandas/issues/38953
- [ ] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38975 | 2021-01-05T17:31:37Z | 2021-01-06T00:15:27Z | 2021-01-06T00:15:27Z | 2021-01-06T00:15:31Z |
TST: update pre-commit config to only exclude extension from bare pytest.raises check | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f5d8503041ccd..52f923c41cbd4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -126,7 +126,7 @@ repos:
entry: python scripts/validate_unwanted_patterns.py --validation-type="bare_pytest_raises"
types: [python]
files: ^pandas/tests/
- exclude: ^pandas/tests/(computation|extension|io)/
+ exclude: ^pandas/tests/extension/
- id: inconsistent-namespace-usage
name: 'Check for inconsistent use of pandas namespace in tests'
entry: python scripts/check_for_inconsistent_pandas_namespace.py
| With #38920 I eliminated all instances of `pytest.raise` without `match=msg` in pandas/tests/computation and pandas/tests/io. #38799 was happening around the same time and missed that they were fixed. So this closes the loop and now only pandas/tests/extension needs to be excluded from the linting check.
I don't think the bare `pytest.raise`s in pandas/tests/extensions will be removed. They are in a pretty complex inheritance hierarchy and reused for many different types of errors and error messages. So I propose that this PR closes #30999.
- [x] closes #30999
- [ ] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38973 | 2021-01-05T16:56:16Z | 2021-01-05T18:47:08Z | 2021-01-05T18:47:07Z | 2021-01-05T19:10:57Z |
Backport PR #38803 on branch 1.2.x (BUG: avoid attribute error with pyarrow >=0.16.0 and <1.0.0) | diff --git a/ci/deps/actions-37-locale.yaml b/ci/deps/actions-37-locale.yaml
index 4f9918ca2f0c0..b18ce37d05ca0 100644
--- a/ci/deps/actions-37-locale.yaml
+++ b/ci/deps/actions-37-locale.yaml
@@ -30,7 +30,7 @@ dependencies:
- openpyxl
- pandas-gbq
- google-cloud-bigquery>=1.27.2 # GH 36436
- - pyarrow>=0.17
+ - pyarrow=0.17 # GH 38803
- pytables>=3.5.1
- scipy
- xarray=0.12.3
diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index e9602bbe1cee1..5695c817b5a3a 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -37,6 +37,7 @@ I/O
- Bumped minimum fastparquet version to 0.4.0 to avoid ``AttributeError`` from numba (:issue:`38344`)
- Bumped minimum pymysql version to 0.8.1 to avoid test failures (:issue:`38344`)
+- Fixed ``AttributeError`` with PyArrow versions [0.16.0, 1.0.0) (:issue:`38801`)
-
-
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 184fbc050036b..7d3806fe11bd2 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -29,13 +29,12 @@
except ImportError:
pa = None
else:
- # our min supported version of pyarrow, 0.15.1, does not have a compute
- # module
- try:
+ # PyArrow backed StringArrays are available starting at 1.0.0, but this
+ # file is imported from even if pyarrow is < 1.0.0, before pyarrow.compute
+ # and its compute functions existed. GH38801
+ if LooseVersion(pa.__version__) >= "1.0.0":
import pyarrow.compute as pc
- except ImportError:
- pass
- else:
+
ARROW_CMP_FUNCS = {
"eq": pc.equal,
"ne": pc.not_equal,
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 99e7c3061d670..a9357ef89de92 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -880,7 +880,7 @@ def test_timezone_aware_index(self, pa, timezone_aware_date_list):
# this use-case sets the resolution to 1 minute
check_round_trip(df, pa, check_dtype=False)
- @td.skip_if_no("pyarrow", min_version="0.17")
+ @td.skip_if_no("pyarrow", min_version="1.0.0")
def test_filter_row_groups(self, pa):
# https://github.com/pandas-dev/pandas/issues/26551
df = pd.DataFrame({"a": list(range(0, 3))})
| Backport PR #38803: BUG: avoid attribute error with pyarrow >=0.16.0 and <1.0.0 | https://api.github.com/repos/pandas-dev/pandas/pulls/38971 | 2021-01-05T13:05:41Z | 2021-01-05T14:12:45Z | 2021-01-05T14:12:45Z | 2021-01-05T14:12:45Z |
Backport PR #38841 on branch 1.2.x (Update conf.py to execute imports during pdf building) | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 951a6d4043786..8ab1c8c2f3428 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -427,7 +427,7 @@
ipython_warning_is_error = False
-ipython_exec_lines = [
+ipython_execlines = [
"import numpy as np",
"import pandas as pd",
# This ensures correct rendering on system with console encoding != utf8
| Backport PR #38841: Update conf.py to execute imports during pdf building | https://api.github.com/repos/pandas-dev/pandas/pulls/38970 | 2021-01-05T12:58:45Z | 2021-01-05T14:12:29Z | 2021-01-05T14:12:29Z | 2021-01-05T14:12:30Z |
ENH: Add typing for pandas.core.frame.dropna | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 23c61773daf5a..84ab4fd5c60a1 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5537,14 +5537,38 @@ def notna(self) -> DataFrame:
def notnull(self) -> DataFrame:
return ~self.isna()
+ @overload
+ # https://github.com/python/mypy/issues/6580
+ # Overloaded function signatures 1 and 2 overlap with incompatible return types
+ def dropna( # type: ignore[misc]
+ self,
+ axis: Axis = ...,
+ how: str = ...,
+ thresh: Optional[int] = ...,
+ subset: Optional[Union[Hashable, Sequence[Hashable]]] = ...,
+ inplace: Literal[False] = ...,
+ ) -> DataFrame:
+ ...
+
+ @overload
+ def dropna(
+ self,
+ axis: Axis = ...,
+ how: str = ...,
+ thresh: Optional[int] = ...,
+ subset: Optional[Union[Hashable, Sequence[Hashable]]] = ...,
+ inplace: Literal[True] = ...,
+ ) -> None:
+ ...
+
def dropna(
self,
axis: Axis = 0,
how: str = "any",
- thresh=None,
- subset=None,
+ thresh: Optional[int] = None,
+ subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
inplace: bool = False,
- ):
+ ) -> Optional[DataFrame]:
"""
Remove missing values.
@@ -5683,6 +5707,7 @@ def dropna(
if inplace:
self._update_inplace(result)
+ return None
else:
return result
| This PR adds type hinting for pandas.core.frame.dropna. Additionally, an explicit `return None` was added to the if/else block for in-place changes to meet mypy standards and make the types explicit.
I haven't yet run the tests, because I didn't modify any source code. Once/if the CI builds pass, I'll check it off the list.
- [x] closes #38948
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them | https://api.github.com/repos/pandas-dev/pandas/pulls/38968 | 2021-01-05T10:02:32Z | 2021-04-24T13:31:51Z | null | 2021-04-24T13:31:51Z |
TST: add missing iloc label indexing tests | diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index bfc6b820c0fc0..24721a370241f 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -61,8 +61,8 @@ def test_iloc_getitem_list_int(self):
# the correct type
-class TestiLoc2:
- # TODO: better name, just separating out things that dont rely on base class
+class TestiLocBaseIndependent:
+ """Tests Independent Of Base Class"""
def test_is_scalar_access(self):
# GH#32085 index with duplicates doesnt matter for _is_scalar_access
@@ -262,12 +262,42 @@ def test_iloc_getitem_dups(self):
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
- # TODO: test something here?
- pass
+ df = DataFrame(
+ [
+ {"A": 1, "B": 2, "C": 3},
+ {"A": 100, "B": 200, "C": 300},
+ {"A": 1000, "B": 2000, "C": 3000},
+ ]
+ )
+
+ expected = DataFrame([{"A": 1, "B": 2, "C": 3}])
+ tm.assert_frame_equal(df.iloc[[0]], expected)
+
+ expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
+ tm.assert_frame_equal(df.iloc[[0, 1]], expected)
+
+ expected = DataFrame([{"B": 2, "C": 3}, {"B": 2000, "C": 3000}], index=[0, 2])
+ result = df.iloc[[0, 2], [1, 2]]
+ tm.assert_frame_equal(result, expected)
def test_iloc_getitem_bool(self):
- # TODO: test something here?
- pass
+ df = DataFrame(
+ [
+ {"A": 1, "B": 2, "C": 3},
+ {"A": 100, "B": 200, "C": 300},
+ {"A": 1000, "B": 2000, "C": 3000},
+ ]
+ )
+
+ expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
+ result = df.iloc[[True, True, False]]
+ tm.assert_frame_equal(result, expected)
+
+ expected = DataFrame(
+ [{"A": 1, "B": 2, "C": 3}, {"A": 1000, "B": 2000, "C": 3000}], index=[0, 2]
+ )
+ result = df.iloc[lambda x: x.index % 2 == 0]
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index", [[True, False], [True, False, True, False]])
def test_iloc_getitem_bool_diff_len(self, index):
@@ -278,8 +308,27 @@ def test_iloc_getitem_bool_diff_len(self, index):
_ = s.iloc[index]
def test_iloc_getitem_slice(self):
- # TODO: test something here?
- pass
+ df = DataFrame(
+ [
+ {"A": 1, "B": 2, "C": 3},
+ {"A": 100, "B": 200, "C": 300},
+ {"A": 1000, "B": 2000, "C": 3000},
+ ]
+ )
+
+ expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}])
+ result = df.iloc[:2]
+ tm.assert_frame_equal(result, expected)
+
+ expected = DataFrame([{"A": 100, "B": 200}], index=[1])
+ result = df.iloc[1:2, 0:2]
+ tm.assert_frame_equal(result, expected)
+
+ expected = DataFrame(
+ [{"A": 1, "C": 3}, {"A": 100, "C": 300}, {"A": 1000, "C": 3000}]
+ )
+ result = df.iloc[:, lambda df: [0, 2]]
+ tm.assert_frame_equal(result, expected)
def test_iloc_getitem_slice_dups(self):
| - [x] closes #38967
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38967 | 2021-01-05T09:00:38Z | 2021-01-05T14:12:11Z | 2021-01-05T14:12:10Z | 2021-01-10T11:05:52Z |
BUG: Timedelta(td64_out_of_bounds) silently overflowing | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 974de36cc736d..886469837d184 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -198,14 +198,14 @@ Datetimelike
- Bug in :class:`DataFrame` and :class:`Series` constructors sometimes dropping nanoseconds from :class:`Timestamp` (resp. :class:`Timedelta`) ``data``, with ``dtype=datetime64[ns]`` (resp. ``timedelta64[ns]``) (:issue:`38032`)
- Bug in :meth:`DataFrame.first` and :meth:`Series.first` returning two months for offset one month when first day is last calendar day (:issue:`29623`)
- Bug in constructing a :class:`DataFrame` or :class:`Series` with mismatched ``datetime64`` data and ``timedelta64`` dtype, or vice-versa, failing to raise ``TypeError`` (:issue:`38575`, :issue:`38764`, :issue:`38792`)
-- Bug in constructing a :class:`Series` or :class:`DataFrame` with a ``datetime`` object out of bounds for ``datetime64[ns]`` dtype (:issue:`38792`)
+- Bug in constructing a :class:`Series` or :class:`DataFrame` with a ``datetime`` object out of bounds for ``datetime64[ns]`` dtype or a ``timedelta`` object ouf of bounds for ``timedelta64[ns]`` dtype (:issue:`38792`, :issue:`38965`)
- Bug in :meth:`DatetimeIndex.intersection`, :meth:`DatetimeIndex.symmetric_difference`, :meth:`PeriodIndex.intersection`, :meth:`PeriodIndex.symmetric_difference` always returning object-dtype when operating with :class:`CategoricalIndex` (:issue:`38741`)
- Bug in :meth:`Series.where` incorrectly casting ``datetime64`` values to ``int64`` (:issue:`37682`)
-
Timedelta
^^^^^^^^^
-
+- Bug in constructing :class:`Timedelta` from ``np.timedelta64`` objects with non-nanosecond units that are out of bounds for ``timedelta64[ns]`` (:issue:`38965`)
-
-
diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd
index b2524c6bc6c0d..026fa719d1cc1 100644
--- a/pandas/_libs/tslibs/np_datetime.pxd
+++ b/pandas/_libs/tslibs/np_datetime.pxd
@@ -42,6 +42,7 @@ cdef extern from "numpy/ndarraytypes.h":
NPY_FR_ps
NPY_FR_fs
NPY_FR_as
+ NPY_FR_GENERIC
cdef extern from "src/datetime/np_datetime.h":
ctypedef struct pandas_timedeltastruct:
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index bc7def817c973..f3bf45f681b1f 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -24,7 +24,7 @@ PyDateTime_IMPORT
cimport pandas._libs.tslibs.util as util
from pandas._libs.tslibs.base cimport ABCTimestamp
-from pandas._libs.tslibs.conversion cimport cast_from_unit
+from pandas._libs.tslibs.conversion cimport cast_from_unit, precision_from_unit
from pandas._libs.tslibs.nattype cimport (
NPY_NAT,
c_NaT as NaT,
@@ -32,7 +32,10 @@ from pandas._libs.tslibs.nattype cimport (
checknull_with_nat,
)
from pandas._libs.tslibs.np_datetime cimport (
+ NPY_DATETIMEUNIT,
cmp_scalar,
+ get_datetime64_unit,
+ get_timedelta64_value,
pandas_timedeltastruct,
td64_to_tdstruct,
)
@@ -156,7 +159,7 @@ cpdef int64_t delta_to_nanoseconds(delta) except? -1:
if isinstance(delta, _Timedelta):
delta = delta.value
if is_timedelta64_object(delta):
- return delta.astype("timedelta64[ns]").item()
+ return get_timedelta64_value(ensure_td64ns(delta))
if is_integer_object(delta):
return delta
if PyDelta_Check(delta):
@@ -169,6 +172,72 @@ cpdef int64_t delta_to_nanoseconds(delta) except? -1:
raise TypeError(type(delta))
+cdef str npy_unit_to_abbrev(NPY_DATETIMEUNIT unit):
+ if unit == NPY_DATETIMEUNIT.NPY_FR_ns or unit == NPY_DATETIMEUNIT.NPY_FR_GENERIC:
+ # generic -> default to nanoseconds
+ return "ns"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_us:
+ return "us"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_ms:
+ return "ms"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_s:
+ return "s"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_m:
+ return "m"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_h:
+ return "h"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_D:
+ return "D"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_W:
+ return "W"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_M:
+ return "M"
+ elif unit == NPY_DATETIMEUNIT.NPY_FR_Y:
+ return "Y"
+ else:
+ raise NotImplementedError(unit)
+
+
+@cython.overflowcheck(True)
+cdef object ensure_td64ns(object ts):
+ """
+ Overflow-safe implementation of td64.astype("m8[ns]")
+
+ Parameters
+ ----------
+ ts : np.timedelta64
+
+ Returns
+ -------
+ np.timedelta64[ns]
+ """
+ cdef:
+ NPY_DATETIMEUNIT td64_unit
+ int64_t td64_value, mult
+ str unitstr
+
+ td64_unit = get_datetime64_unit(ts)
+ if (
+ td64_unit != NPY_DATETIMEUNIT.NPY_FR_ns
+ and td64_unit != NPY_DATETIMEUNIT.NPY_FR_GENERIC
+ ):
+ unitstr = npy_unit_to_abbrev(td64_unit)
+
+ td64_value = get_timedelta64_value(ts)
+
+ mult = precision_from_unit(unitstr)[0]
+ try:
+ # NB: cython#1381 this cannot be *=
+ td64_value = td64_value * mult
+ except OverflowError as err:
+ from pandas._libs.tslibs.conversion import OutOfBoundsTimedelta
+ raise OutOfBoundsTimedelta(ts)
+
+ return np.timedelta64(td64_value, "ns")
+
+ return ts
+
+
cdef convert_to_timedelta64(object ts, str unit):
"""
Convert an incoming object to a timedelta64 if possible.
@@ -184,37 +253,37 @@ cdef convert_to_timedelta64(object ts, str unit):
Return an ns based int64
"""
if checknull_with_nat(ts):
- return np.timedelta64(NPY_NAT)
+ return np.timedelta64(NPY_NAT, "ns")
elif isinstance(ts, _Timedelta):
# already in the proper format
- ts = np.timedelta64(ts.value)
+ ts = np.timedelta64(ts.value, "ns")
elif is_datetime64_object(ts):
# only accept a NaT here
if ts.astype('int64') == NPY_NAT:
return np.timedelta64(NPY_NAT)
elif is_timedelta64_object(ts):
- ts = ts.astype(f"m8[{unit.lower()}]")
+ ts = ensure_td64ns(ts)
elif is_integer_object(ts):
if ts == NPY_NAT:
- return np.timedelta64(NPY_NAT)
+ return np.timedelta64(NPY_NAT, "ns")
else:
if unit in ['Y', 'M', 'W']:
ts = np.timedelta64(ts, unit)
else:
ts = cast_from_unit(ts, unit)
- ts = np.timedelta64(ts)
+ ts = np.timedelta64(ts, "ns")
elif is_float_object(ts):
if unit in ['Y', 'M', 'W']:
ts = np.timedelta64(int(ts), unit)
else:
ts = cast_from_unit(ts, unit)
- ts = np.timedelta64(ts)
+ ts = np.timedelta64(ts, "ns")
elif isinstance(ts, str):
if len(ts) > 0 and ts[0] == 'P':
ts = parse_iso_format_string(ts)
else:
ts = parse_timedelta_string(ts)
- ts = np.timedelta64(ts)
+ ts = np.timedelta64(ts, "ns")
elif is_tick_object(ts):
ts = np.timedelta64(ts.nanos, 'ns')
@@ -1196,7 +1265,7 @@ class Timedelta(_Timedelta):
elif is_timedelta64_object(value):
if unit is not None:
value = value.astype(f'timedelta64[{unit}]')
- value = value.astype('timedelta64[ns]')
+ value = ensure_td64ns(value)
elif is_tick_object(value):
value = np.timedelta64(value.nanos, 'ns')
elif is_integer_object(value) or is_float_object(value):
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 87f6e73e09d66..8065f85548f8c 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -28,6 +28,7 @@
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
+ OutOfBoundsTimedelta,
Period,
Timedelta,
Timestamp,
@@ -743,8 +744,12 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj,
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
- val = Timedelta(val).value
- dtype = np.dtype("m8[ns]")
+ try:
+ val = Timedelta(val).value
+ except (OutOfBoundsTimedelta, OverflowError):
+ dtype = np.dtype(object)
+ else:
+ dtype = np.dtype("m8[ns]")
elif is_bool(val):
dtype = np.dtype(np.bool_)
@@ -1386,7 +1391,7 @@ def try_timedelta(v):
try:
td_values = to_timedelta(v)
- except ValueError:
+ except (ValueError, OverflowError):
return v.reshape(shape)
else:
return np.asarray(td_values).reshape(shape)
@@ -1618,8 +1623,16 @@ def construct_2d_arraylike_from_scalar(
value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool
) -> np.ndarray:
+ shape = (length, width)
+
if dtype.kind in ["m", "M"]:
value = maybe_unbox_datetimelike(value, dtype)
+ elif dtype == object:
+ if isinstance(value, (np.timedelta64, np.datetime64)):
+ # calling np.array below would cast to pytimedelta/pydatetime
+ out = np.empty(shape, dtype=object)
+ out.fill(value)
+ return out
# Attempt to coerce to a numpy array
try:
@@ -1632,7 +1645,6 @@ def construct_2d_arraylike_from_scalar(
if arr.ndim != 0:
raise ValueError("DataFrame constructor not properly called!")
- shape = (length, width)
return np.full(shape, arr)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index f408a3ddde04e..889bd98d6d85a 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -10,7 +10,7 @@
import pytest
import pytz
-from pandas.compat.numpy import _np_version_under1p19, _np_version_under1p20
+from pandas.compat.numpy import _np_version_under1p19
from pandas.core.dtypes.common import is_integer_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype, PeriodDtype
@@ -2371,16 +2371,10 @@ def test_from_timedelta_scalar_preserves_nanos(self, constructor):
def test_from_timestamp_scalar_preserves_nanos(self, constructor):
ts = Timestamp.now() + Timedelta(1)
- obj = Series(ts, index=range(1), dtype="M8[ns]")
+ obj = constructor(ts, dtype="M8[ns]")
assert get1(obj) == ts
- def test_from_timedelta64_scalar_object(self, constructor, request):
- if getattr(constructor, "func", None) is DataFrame and _np_version_under1p20:
- # getattr check means we only xfail when box is None
- mark = pytest.mark.xfail(
- reason="np.array(td64, dtype=object) converts to int"
- )
- request.node.add_marker(mark)
+ def test_from_timedelta64_scalar_object(self, constructor):
td = Timedelta(1)
td64 = td.to_timedelta64()
@@ -2407,8 +2401,20 @@ def test_from_scalar_datetimelike_mismatched(self, constructor, cls, request):
with pytest.raises(TypeError, match="Cannot cast"):
constructor(scalar, dtype=dtype)
- def test_from_out_of_bounds_datetime(self, constructor):
+ @pytest.mark.parametrize("cls", [datetime, np.datetime64])
+ def test_from_out_of_bounds_datetime(self, constructor, cls):
scalar = datetime(9999, 1, 1)
+ if cls is np.datetime64:
+ scalar = np.datetime64(scalar, "D")
+ result = constructor(scalar)
+
+ assert type(get1(result)) is cls
+
+ @pytest.mark.parametrize("cls", [timedelta, np.timedelta64])
+ def test_from_out_of_bounds_timedelta(self, constructor, cls):
+ scalar = datetime(9999, 1, 1) - datetime(1970, 1, 1)
+ if cls is np.timedelta64:
+ scalar = np.timedelta64(scalar, "D")
result = constructor(scalar)
- assert type(get1(result)) is datetime
+ assert type(get1(result)) is cls
diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py
index 06bdb8a6cf0a2..64d5a5e9b3fff 100644
--- a/pandas/tests/scalar/timedelta/test_constructors.py
+++ b/pandas/tests/scalar/timedelta/test_constructors.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+from pandas._libs.tslibs import OutOfBoundsTimedelta
+
from pandas import Timedelta, offsets, to_timedelta
@@ -197,6 +199,31 @@ def test_overflow_on_construction():
Timedelta(timedelta(days=13 * 19999))
+def test_construction_out_of_bounds_td64():
+ # TODO: parametrize over units just above/below the implementation bounds
+ # once GH#38964 is resolved
+
+ # Timedelta.max is just under 106752 days
+ td64 = np.timedelta64(106752, "D")
+ assert td64.astype("m8[ns]").view("i8") < 0 # i.e. naive astype will be wrong
+
+ msg = "106752 days"
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
+ Timedelta(td64)
+
+ # But just back in bounds and we are OK
+ assert Timedelta(td64 - 1) == td64 - 1
+
+ td64 *= -1
+ assert td64.astype("m8[ns]").view("i8") > 0 # i.e. naive astype will be wrong
+
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
+ Timedelta(td64)
+
+ # But just back in bounds and we are OK
+ assert Timedelta(td64 + 1) == td64 + 1
+
+
@pytest.mark.parametrize(
"fmt,exp",
[
diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py
index 0723a37b1ba82..54bfcbddfc3dd 100644
--- a/pandas/util/_exceptions.py
+++ b/pandas/util/_exceptions.py
@@ -10,7 +10,7 @@ def rewrite_exception(old_name: str, new_name: str):
try:
yield
except Exception as err:
- msg = err.args[0]
+ msg = str(err.args[0])
msg = msg.replace(old_name, new_name)
args: Tuple[str, ...] = (msg,)
if len(err.args) > 1:
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38965 | 2021-01-05T05:58:46Z | 2021-01-06T18:35:30Z | 2021-01-06T18:35:30Z | 2021-01-06T18:42:34Z |
DEPR: Rolling.win_type returning freq & is_datetimelike | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 38b7a1d13c253..d35965ead1a1c 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -161,6 +161,8 @@ Deprecations
- Deprecated :meth:`MultiIndex.is_lexsorted` and :meth:`MultiIndex.lexsort_depth` as a public methods, users should use :meth:`MultiIndex.is_monotonic_increasing` instead (:issue:`32259`)
- Deprecated keyword ``try_cast`` in :meth:`Series.where`, :meth:`Series.mask`, :meth:`DataFrame.where`, :meth:`DataFrame.mask`; cast results manually if desired (:issue:`38836`)
- Deprecated comparison of :class:`Timestamp` object with ``datetime.date`` objects. Instead of e.g. ``ts <= mydate`` use ``ts <= pd.Timestamp(mydate)`` or ``ts.date() <= mydate`` (:issue:`36131`)
+- Deprecated :attr:`Rolling.win_type` returning ``"freq"`` (:issue:`38963`)
+- Deprecated :attr:`Rolling.is_datetimelike` (:issue:`38963`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 99426c55da29b..594c5899209df 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -5,6 +5,7 @@
"""
import collections
from typing import List
+import warnings
from pandas._typing import final
@@ -27,7 +28,10 @@ def _shallow_copy(self, obj, **kwargs):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
- kwargs[attr] = getattr(self, attr)
+ # TODO: Remove once win_type deprecation is enforced
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "win_type", FutureWarning)
+ kwargs[attr] = getattr(self, attr)
return self._constructor(obj, **kwargs)
@@ -59,7 +63,10 @@ def _gotitem(self, key, ndim, subset=None):
# we need to make a shallow copy of ourselves
# with the same groupby
- kwargs = {attr: getattr(self, attr) for attr in self._attributes}
+ # TODO: Remove once win_type deprecation is enforced
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "win_type", FutureWarning)
+ kwargs = {attr: getattr(self, attr) for attr in self._attributes}
# Try to select from a DataFrame, falling back to a Series
try:
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 7ae1e61d426b9..a4612a4c8ed5d 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -110,7 +110,8 @@ def __init__(
self.window = window
self.min_periods = min_periods
self.center = center
- self.win_type = win_type
+ # TODO: Change this back to self.win_type once deprecation is enforced
+ self._win_type = win_type
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.method = method
self._win_freq_i8 = None
@@ -131,6 +132,27 @@ def __init__(
)
self.validate()
+ @property
+ def win_type(self):
+ if self._win_freq_i8 is not None:
+ warnings.warn(
+ "win_type will no longer return 'freq' in a future version. "
+ "Check the type of self.window instead.",
+ FutureWarning,
+ stacklevel=2,
+ )
+ return "freq"
+ return self._win_type
+
+ @property
+ def is_datetimelike(self):
+ warnings.warn(
+ "is_datetimelike is deprecated and will be removed in a future version.",
+ FutureWarning,
+ stacklevel=2,
+ )
+ return self._win_freq_i8 is not None
+
def validate(self) -> None:
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py
index 52c629f96b713..7d3c29dc60be0 100644
--- a/pandas/tests/window/test_api.py
+++ b/pandas/tests/window/test_api.py
@@ -319,3 +319,9 @@ def test_multiple_agg_funcs(func, window_size, expected_vals):
result = window.agg({"low": ["mean", "max"], "high": ["mean", "min"]})
tm.assert_frame_equal(result, expected)
+
+
+def test_is_datetimelike_deprecated():
+ s = Series(range(1)).rolling(1)
+ with tm.assert_produces_warning(FutureWarning):
+ assert not s.is_datetimelike
diff --git a/pandas/tests/window/test_win_type.py b/pandas/tests/window/test_win_type.py
index 4b1028e165c80..1cfba6f020018 100644
--- a/pandas/tests/window/test_win_type.py
+++ b/pandas/tests/window/test_win_type.py
@@ -4,7 +4,7 @@
from pandas.errors import UnsupportedFunctionCall
import pandas.util._test_decorators as td
-from pandas import DataFrame, Series, Timedelta, concat
+from pandas import DataFrame, Series, Timedelta, concat, date_range
import pandas._testing as tm
from pandas.api.indexers import BaseIndexer
@@ -137,6 +137,12 @@ def test_consistent_win_type_freq(arg):
s.rolling(arg, win_type="freq")
+def test_win_type_freq_return_deprecation():
+ freq_roll = Series(range(2), index=date_range("2020", periods=2)).rolling("2s")
+ with tm.assert_produces_warning(FutureWarning):
+ assert freq_roll.win_type == "freq"
+
+
@td.skip_if_no_scipy
def test_win_type_not_implemented():
class CustomIndexer(BaseIndexer):
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
xref: https://github.com/pandas-dev/pandas/pull/38641#issuecomment-754118989
xref: https://github.com/pandas-dev/pandas/pull/38664/files#r551419130 | https://api.github.com/repos/pandas-dev/pandas/pulls/38963 | 2021-01-05T05:17:07Z | 2021-01-06T14:27:58Z | 2021-01-06T14:27:58Z | 2021-01-08T17:26:42Z |
REF: move functions out of _testing/__init__ | diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 0591fc6afd633..c51ceb750c338 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -1,27 +1,12 @@
-import bz2
from collections import Counter
-from contextlib import contextmanager
from datetime import datetime
from functools import wraps
-import gzip
import operator
import os
import re
import string
-from typing import (
- Any,
- Callable,
- ContextManager,
- List,
- Optional,
- Sequence,
- Tuple,
- Type,
- Union,
- cast,
-)
+from typing import Callable, ContextManager, List, Type
import warnings
-import zipfile
import numpy as np
@@ -31,8 +16,7 @@
set_locale,
)
-from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
-from pandas.compat import get_lzma_file, import_lzma
+from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_datetime64_dtype,
@@ -55,6 +39,22 @@
Series,
bdate_range,
)
+from pandas._testing._io import ( # noqa:F401
+ close,
+ network,
+ round_trip_localpath,
+ round_trip_pathlib,
+ round_trip_pickle,
+ with_connectivity_check,
+ write_to_compressed,
+)
+from pandas._testing._random import ( # noqa:F401
+ randbool,
+ rands,
+ rands_array,
+ randu_array,
+)
+from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
@@ -79,6 +79,7 @@
raise_assert_detail,
)
from pandas._testing.contexts import ( # noqa:F401
+ RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
@@ -89,13 +90,8 @@
)
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray, period_array
-from pandas.io.common import urlopen
-
-lzma = import_lzma()
-
_N = 30
_K = 4
-_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
@@ -170,187 +166,6 @@ def reset_display_options():
pd.reset_option("^display.", silent=True)
-def round_trip_pickle(
- obj: Any, path: Optional[FilePathOrBuffer] = None
-) -> FrameOrSeries:
- """
- Pickle an object and then read it again.
-
- Parameters
- ----------
- obj : any object
- The object to pickle and then re-read.
- path : str, path object or file-like object, default None
- The path where the pickled object is written and then read.
-
- Returns
- -------
- pandas object
- The original object that was pickled and then re-read.
- """
- _path = path
- if _path is None:
- _path = f"__{rands(10)}__.pickle"
- with ensure_clean(_path) as temp_path:
- pd.to_pickle(obj, temp_path)
- return pd.read_pickle(temp_path)
-
-
-def round_trip_pathlib(writer, reader, path: Optional[str] = None):
- """
- Write an object to file specified by a pathlib.Path and read it back
-
- Parameters
- ----------
- writer : callable bound to pandas object
- IO writing function (e.g. DataFrame.to_csv )
- reader : callable
- IO reading function (e.g. pd.read_csv )
- path : str, default None
- The path where the object is written and then read.
-
- Returns
- -------
- pandas object
- The original object that was serialized and then re-read.
- """
- import pytest
-
- Path = pytest.importorskip("pathlib").Path
- if path is None:
- path = "___pathlib___"
- with ensure_clean(path) as path:
- writer(Path(path))
- obj = reader(Path(path))
- return obj
-
-
-def round_trip_localpath(writer, reader, path: Optional[str] = None):
- """
- Write an object to file specified by a py.path LocalPath and read it back.
-
- Parameters
- ----------
- writer : callable bound to pandas object
- IO writing function (e.g. DataFrame.to_csv )
- reader : callable
- IO reading function (e.g. pd.read_csv )
- path : str, default None
- The path where the object is written and then read.
-
- Returns
- -------
- pandas object
- The original object that was serialized and then re-read.
- """
- import pytest
-
- LocalPath = pytest.importorskip("py.path").local
- if path is None:
- path = "___localpath___"
- with ensure_clean(path) as path:
- writer(LocalPath(path))
- obj = reader(LocalPath(path))
- return obj
-
-
-def write_to_compressed(compression, path, data, dest="test"):
- """
- Write data to a compressed file.
-
- Parameters
- ----------
- compression : {'gzip', 'bz2', 'zip', 'xz'}
- The compression type to use.
- path : str
- The file path to write the data.
- data : str
- The data to write.
- dest : str, default "test"
- The destination file (for ZIP only)
-
- Raises
- ------
- ValueError : An invalid compression value was passed in.
- """
- args: Tuple[Any, ...] = (data,)
- mode = "wb"
- method = "write"
- compress_method: Callable
-
- if compression == "zip":
- compress_method = zipfile.ZipFile
- mode = "w"
- args = (dest, data)
- method = "writestr"
- elif compression == "gzip":
- compress_method = gzip.GzipFile
- elif compression == "bz2":
- compress_method = bz2.BZ2File
- elif compression == "xz":
- compress_method = get_lzma_file(lzma)
- else:
- raise ValueError(f"Unrecognized compression type: {compression}")
-
- with compress_method(path, mode=mode) as f:
- getattr(f, method)(*args)
-
-
-def randbool(size=(), p: float = 0.5):
- return np.random.rand(*size) <= p
-
-
-RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
-RANDU_CHARS = np.array(
- list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
- dtype=(np.unicode_, 1),
-)
-
-
-def rands_array(nchars, size, dtype="O"):
- """
- Generate an array of byte strings.
- """
- retval = (
- np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
- .view((np.str_, nchars))
- .reshape(size)
- )
- return retval.astype(dtype)
-
-
-def randu_array(nchars, size, dtype="O"):
- """
- Generate an array of unicode strings.
- """
- retval = (
- np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
- .view((np.unicode_, nchars))
- .reshape(size)
- )
- return retval.astype(dtype)
-
-
-def rands(nchars):
- """
- Generate one random byte string.
-
- See `rands_array` if you want to create an array of random strings.
-
- """
- return "".join(np.random.choice(RANDS_CHARS, nchars))
-
-
-def close(fignum=None):
- from matplotlib.pyplot import close as _close, get_fignums
-
- if fignum is None:
- for fignum in get_fignums():
- _close(fignum)
- else:
- _close(fignum)
-
-
# -----------------------------------------------------------------------------
# Comparators
@@ -935,449 +750,6 @@ def makeMissingDataframe(density=0.9, random_state=None):
return df
-def optional_args(decorator):
- """
- allows a decorator to take optional positional and keyword arguments.
- Assumes that taking a single, callable, positional argument means that
- it is decorating a function, i.e. something like this::
-
- @my_decorator
- def function(): pass
-
- Calls decorator with decorator(f, *args, **kwargs)
- """
-
- @wraps(decorator)
- def wrapper(*args, **kwargs):
- def dec(f):
- return decorator(f, *args, **kwargs)
-
- is_decorating = not kwargs and len(args) == 1 and callable(args[0])
- if is_decorating:
- f = args[0]
- # pandas\_testing.py:2331: error: Incompatible types in assignment
- # (expression has type "List[<nothing>]", variable has type
- # "Tuple[Any, ...]")
- args = [] # type: ignore[assignment]
- return dec(f)
- else:
- return dec
-
- return wrapper
-
-
-# skip tests on exceptions with this message
-_network_error_messages = (
- # 'urlopen error timed out',
- # 'timeout: timed out',
- # 'socket.timeout: timed out',
- "timed out",
- "Server Hangup",
- "HTTP Error 503: Service Unavailable",
- "502: Proxy Error",
- "HTTP Error 502: internal error",
- "HTTP Error 502",
- "HTTP Error 503",
- "HTTP Error 403",
- "HTTP Error 400",
- "Temporary failure in name resolution",
- "Name or service not known",
- "Connection refused",
- "certificate verify",
-)
-
-# or this e.errno/e.reason.errno
-_network_errno_vals = (
- 101, # Network is unreachable
- 111, # Connection refused
- 110, # Connection timed out
- 104, # Connection reset Error
- 54, # Connection reset by peer
- 60, # urllib.error.URLError: [Errno 60] Connection timed out
-)
-
-# Both of the above shouldn't mask real issues such as 404's
-# or refused connections (changed DNS).
-# But some tests (test_data yahoo) contact incredibly flakey
-# servers.
-
-# and conditionally raise on exception types in _get_default_network_errors
-
-
-def _get_default_network_errors():
- # Lazy import for http.client because it imports many things from the stdlib
- import http.client
-
- return (IOError, http.client.HTTPException, TimeoutError)
-
-
-def can_connect(url, error_classes=None):
- """
- Try to connect to the given url. True if succeeds, False if IOError
- raised
-
- Parameters
- ----------
- url : basestring
- The URL to try to connect to
-
- Returns
- -------
- connectable : bool
- Return True if no IOError (unable to connect) or URLError (bad url) was
- raised
- """
- if error_classes is None:
- error_classes = _get_default_network_errors()
-
- try:
- with urlopen(url):
- pass
- except error_classes:
- return False
- else:
- return True
-
-
-@optional_args
-def network(
- t,
- url="https://www.google.com",
- raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
- check_before_test=False,
- error_classes=None,
- skip_errnos=_network_errno_vals,
- _skip_on_messages=_network_error_messages,
-):
- """
- Label a test as requiring network connection and, if an error is
- encountered, only raise if it does not find a network connection.
-
- In comparison to ``network``, this assumes an added contract to your test:
- you must assert that, under normal conditions, your test will ONLY fail if
- it does not have network connectivity.
-
- You can call this in 3 ways: as a standard decorator, with keyword
- arguments, or with a positional argument that is the url to check.
-
- Parameters
- ----------
- t : callable
- The test requiring network connectivity.
- url : path
- The url to test via ``pandas.io.common.urlopen`` to check
- for connectivity. Defaults to 'https://www.google.com'.
- raise_on_error : bool
- If True, never catches errors.
- check_before_test : bool
- If True, checks connectivity before running the test case.
- error_classes : tuple or Exception
- error classes to ignore. If not in ``error_classes``, raises the error.
- defaults to IOError. Be careful about changing the error classes here.
- skip_errnos : iterable of int
- Any exception that has .errno or .reason.erno set to one
- of these values will be skipped with an appropriate
- message.
- _skip_on_messages: iterable of string
- any exception e for which one of the strings is
- a substring of str(e) will be skipped with an appropriate
- message. Intended to suppress errors where an errno isn't available.
-
- Notes
- -----
- * ``raise_on_error`` supersedes ``check_before_test``
-
- Returns
- -------
- t : callable
- The decorated test ``t``, with checks for connectivity errors.
-
- Example
- -------
-
- Tests decorated with @network will fail if it's possible to make a network
- connection to another URL (defaults to google.com)::
-
- >>> from pandas._testing import network
- >>> from pandas.io.common import urlopen
- >>> @network
- ... def test_network():
- ... with urlopen("rabbit://bonanza.com"):
- ... pass
- Traceback
- ...
- URLError: <urlopen error unknown url type: rabit>
-
- You can specify alternative URLs::
-
- >>> @network("https://www.yahoo.com")
- ... def test_something_with_yahoo():
- ... raise IOError("Failure Message")
- >>> test_something_with_yahoo()
- Traceback (most recent call last):
- ...
- IOError: Failure Message
-
- If you set check_before_test, it will check the url first and not run the
- test on failure::
-
- >>> @network("failing://url.blaher", check_before_test=True)
- ... def test_something():
- ... print("I ran!")
- ... raise ValueError("Failure")
- >>> test_something()
- Traceback (most recent call last):
- ...
-
- Errors not related to networking will always be raised.
- """
- from pytest import skip
-
- if error_classes is None:
- error_classes = _get_default_network_errors()
-
- t.network = True
-
- @wraps(t)
- def wrapper(*args, **kwargs):
- if (
- check_before_test
- and not raise_on_error
- and not can_connect(url, error_classes)
- ):
- skip()
- try:
- return t(*args, **kwargs)
- except Exception as err:
- errno = getattr(err, "errno", None)
- if not errno and hasattr(errno, "reason"):
- # pandas\_testing.py:2521: error: "Exception" has no attribute
- # "reason"
- errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
-
- if errno in skip_errnos:
- skip(f"Skipping test due to known errno and error {err}")
-
- e_str = str(err)
-
- if any(m.lower() in e_str.lower() for m in _skip_on_messages):
- skip(
- f"Skipping test because exception message is known and error {err}"
- )
-
- if not isinstance(err, error_classes):
- raise
-
- if raise_on_error or can_connect(url, error_classes):
- raise
- else:
- skip(f"Skipping test due to lack of connectivity and error {err}")
-
- return wrapper
-
-
-with_connectivity_check = network
-
-
-@contextmanager
-def assert_produces_warning(
- expected_warning: Optional[Union[Type[Warning], bool]] = Warning,
- filter_level="always",
- check_stacklevel: bool = True,
- raise_on_extra_warnings: bool = True,
- match: Optional[str] = None,
-):
- """
- Context manager for running code expected to either raise a specific
- warning, or not raise any warnings. Verifies that the code raises the
- expected warning, and that it does not raise any other unexpected
- warnings. It is basically a wrapper around ``warnings.catch_warnings``.
-
- Parameters
- ----------
- expected_warning : {Warning, False, None}, default Warning
- The type of Exception raised. ``exception.Warning`` is the base
- class for all warnings. To check that no warning is returned,
- specify ``False`` or ``None``.
- filter_level : str or None, default "always"
- Specifies whether warnings are ignored, displayed, or turned
- into errors.
- Valid values are:
-
- * "error" - turns matching warnings into exceptions
- * "ignore" - discard the warning
- * "always" - always emit a warning
- * "default" - print the warning the first time it is generated
- from each location
- * "module" - print the warning the first time it is generated
- from each module
- * "once" - print the warning the first time it is generated
-
- check_stacklevel : bool, default True
- If True, displays the line that called the function containing
- the warning to show were the function is called. Otherwise, the
- line that implements the function is displayed.
- raise_on_extra_warnings : bool, default True
- Whether extra warnings not of the type `expected_warning` should
- cause the test to fail.
- match : str, optional
- Match warning message.
-
- Examples
- --------
- >>> import warnings
- >>> with assert_produces_warning():
- ... warnings.warn(UserWarning())
- ...
- >>> with assert_produces_warning(False):
- ... warnings.warn(RuntimeWarning())
- ...
- Traceback (most recent call last):
- ...
- AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
- >>> with assert_produces_warning(UserWarning):
- ... warnings.warn(RuntimeWarning())
- Traceback (most recent call last):
- ...
- AssertionError: Did not see expected warning of class 'UserWarning'.
-
- ..warn:: This is *not* thread-safe.
- """
- __tracebackhide__ = True
-
- with warnings.catch_warnings(record=True) as w:
- warnings.simplefilter(filter_level)
- yield w
-
- if expected_warning:
- expected_warning = cast(Type[Warning], expected_warning)
- _assert_caught_expected_warning(
- caught_warnings=w,
- expected_warning=expected_warning,
- match=match,
- check_stacklevel=check_stacklevel,
- )
-
- if raise_on_extra_warnings:
- _assert_caught_no_extra_warnings(
- caught_warnings=w,
- expected_warning=expected_warning,
- )
-
-
-def _assert_caught_expected_warning(
- *,
- caught_warnings: Sequence[warnings.WarningMessage],
- expected_warning: Type[Warning],
- match: Optional[str],
- check_stacklevel: bool,
-) -> None:
- """Assert that there was the expected warning among the caught warnings."""
- saw_warning = False
- matched_message = False
-
- for actual_warning in caught_warnings:
- if issubclass(actual_warning.category, expected_warning):
- saw_warning = True
-
- if check_stacklevel and issubclass(
- actual_warning.category, (FutureWarning, DeprecationWarning)
- ):
- _assert_raised_with_correct_stacklevel(actual_warning)
-
- if match is not None and re.search(match, str(actual_warning.message)):
- matched_message = True
-
- if not saw_warning:
- raise AssertionError(
- f"Did not see expected warning of class "
- f"{repr(expected_warning.__name__)}"
- )
-
- if match and not matched_message:
- raise AssertionError(
- f"Did not see warning {repr(expected_warning.__name__)} "
- f"matching {match}"
- )
-
-
-def _assert_caught_no_extra_warnings(
- *,
- caught_warnings: Sequence[warnings.WarningMessage],
- expected_warning: Optional[Union[Type[Warning], bool]],
-) -> None:
- """Assert that no extra warnings apart from the expected ones are caught."""
- extra_warnings = []
-
- for actual_warning in caught_warnings:
- if _is_unexpected_warning(actual_warning, expected_warning):
- extra_warnings.append(
- (
- actual_warning.category.__name__,
- actual_warning.message,
- actual_warning.filename,
- actual_warning.lineno,
- )
- )
-
- if extra_warnings:
- raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}")
-
-
-def _is_unexpected_warning(
- actual_warning: warnings.WarningMessage,
- expected_warning: Optional[Union[Type[Warning], bool]],
-) -> bool:
- """Check if the actual warning issued is unexpected."""
- if actual_warning and not expected_warning:
- return True
- expected_warning = cast(Type[Warning], expected_warning)
- return bool(not issubclass(actual_warning.category, expected_warning))
-
-
-def _assert_raised_with_correct_stacklevel(
- actual_warning: warnings.WarningMessage,
-) -> None:
- from inspect import getframeinfo, stack
-
- caller = getframeinfo(stack()[4][0])
- msg = (
- "Warning not set with correct stacklevel. "
- f"File where warning is raised: {actual_warning.filename} != "
- f"{caller.filename}. Warning message: {actual_warning.message}"
- )
- assert actual_warning.filename == caller.filename, msg
-
-
-class RNGContext:
- """
- Context manager to set the numpy random number generator speed. Returns
- to the original value upon exiting the context manager.
-
- Parameters
- ----------
- seed : int
- Seed for numpy.random.seed
-
- Examples
- --------
- with RNGContext(42):
- np.random.randn()
- """
-
- def __init__(self, seed):
- self.seed = seed
-
- def __enter__(self):
-
- self.start_state = np.random.get_state()
- np.random.seed(self.seed)
-
- def __exit__(self, exc_type, exc_value, traceback):
-
- np.random.set_state(self.start_state)
-
-
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
diff --git a/pandas/_testing/_io.py b/pandas/_testing/_io.py
new file mode 100644
index 0000000000000..5f27b016b68a2
--- /dev/null
+++ b/pandas/_testing/_io.py
@@ -0,0 +1,405 @@
+import bz2
+from functools import wraps
+import gzip
+from typing import Any, Callable, Optional, Tuple
+import zipfile
+
+from pandas._typing import FilePathOrBuffer, FrameOrSeries
+from pandas.compat import get_lzma_file, import_lzma
+
+import pandas as pd
+from pandas._testing._random import rands
+from pandas._testing.contexts import ensure_clean
+
+from pandas.io.common import urlopen
+
+_RAISE_NETWORK_ERROR_DEFAULT = False
+
+lzma = import_lzma()
+
+# skip tests on exceptions with these messages
+_network_error_messages = (
+ # 'urlopen error timed out',
+ # 'timeout: timed out',
+ # 'socket.timeout: timed out',
+ "timed out",
+ "Server Hangup",
+ "HTTP Error 503: Service Unavailable",
+ "502: Proxy Error",
+ "HTTP Error 502: internal error",
+ "HTTP Error 502",
+ "HTTP Error 503",
+ "HTTP Error 403",
+ "HTTP Error 400",
+ "Temporary failure in name resolution",
+ "Name or service not known",
+ "Connection refused",
+ "certificate verify",
+)
+
+# or this e.errno/e.reason.errno
+_network_errno_vals = (
+ 101, # Network is unreachable
+ 111, # Connection refused
+ 110, # Connection timed out
+ 104, # Connection reset Error
+ 54, # Connection reset by peer
+ 60, # urllib.error.URLError: [Errno 60] Connection timed out
+)
+
+# Both of the above shouldn't mask real issues such as 404's
+# or refused connections (changed DNS).
+# But some tests (test_data yahoo) contact incredibly flakey
+# servers.
+
+# and conditionally raise on exception types in _get_default_network_errors
+
+
+def _get_default_network_errors():
+ # Lazy import for http.client because it imports many things from the stdlib
+ import http.client
+
+ return (IOError, http.client.HTTPException, TimeoutError)
+
+
+def optional_args(decorator):
+ """
+ allows a decorator to take optional positional and keyword arguments.
+ Assumes that taking a single, callable, positional argument means that
+ it is decorating a function, i.e. something like this::
+
+ @my_decorator
+ def function(): pass
+
+ Calls decorator with decorator(f, *args, **kwargs)
+ """
+
+ @wraps(decorator)
+ def wrapper(*args, **kwargs):
+ def dec(f):
+ return decorator(f, *args, **kwargs)
+
+ is_decorating = not kwargs and len(args) == 1 and callable(args[0])
+ if is_decorating:
+ f = args[0]
+ # pandas\_testing.py:2331: error: Incompatible types in assignment
+ # (expression has type "List[<nothing>]", variable has type
+ # "Tuple[Any, ...]")
+ args = [] # type: ignore[assignment]
+ return dec(f)
+ else:
+ return dec
+
+ return wrapper
+
+
+@optional_args
+def network(
+ t,
+ url="https://www.google.com",
+ raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
+ check_before_test=False,
+ error_classes=None,
+ skip_errnos=_network_errno_vals,
+ _skip_on_messages=_network_error_messages,
+):
+ """
+ Label a test as requiring network connection and, if an error is
+ encountered, only raise if it does not find a network connection.
+
+ In comparison to ``network``, this assumes an added contract to your test:
+ you must assert that, under normal conditions, your test will ONLY fail if
+ it does not have network connectivity.
+
+ You can call this in 3 ways: as a standard decorator, with keyword
+ arguments, or with a positional argument that is the url to check.
+
+ Parameters
+ ----------
+ t : callable
+ The test requiring network connectivity.
+ url : path
+ The url to test via ``pandas.io.common.urlopen`` to check
+ for connectivity. Defaults to 'https://www.google.com'.
+ raise_on_error : bool
+ If True, never catches errors.
+ check_before_test : bool
+ If True, checks connectivity before running the test case.
+ error_classes : tuple or Exception
+ error classes to ignore. If not in ``error_classes``, raises the error.
+ defaults to IOError. Be careful about changing the error classes here.
+ skip_errnos : iterable of int
+ Any exception that has .errno or .reason.erno set to one
+ of these values will be skipped with an appropriate
+ message.
+ _skip_on_messages: iterable of string
+ any exception e for which one of the strings is
+ a substring of str(e) will be skipped with an appropriate
+ message. Intended to suppress errors where an errno isn't available.
+
+ Notes
+ -----
+ * ``raise_on_error`` supersedes ``check_before_test``
+
+ Returns
+ -------
+ t : callable
+ The decorated test ``t``, with checks for connectivity errors.
+
+ Example
+ -------
+
+ Tests decorated with @network will fail if it's possible to make a network
+ connection to another URL (defaults to google.com)::
+
+ >>> from pandas._testing import network
+ >>> from pandas.io.common import urlopen
+ >>> @network
+ ... def test_network():
+ ... with urlopen("rabbit://bonanza.com"):
+ ... pass
+ Traceback
+ ...
+ URLError: <urlopen error unknown url type: rabit>
+
+ You can specify alternative URLs::
+
+ >>> @network("https://www.yahoo.com")
+ ... def test_something_with_yahoo():
+ ... raise IOError("Failure Message")
+ >>> test_something_with_yahoo()
+ Traceback (most recent call last):
+ ...
+ IOError: Failure Message
+
+ If you set check_before_test, it will check the url first and not run the
+ test on failure::
+
+ >>> @network("failing://url.blaher", check_before_test=True)
+ ... def test_something():
+ ... print("I ran!")
+ ... raise ValueError("Failure")
+ >>> test_something()
+ Traceback (most recent call last):
+ ...
+
+ Errors not related to networking will always be raised.
+ """
+ from pytest import skip
+
+ if error_classes is None:
+ error_classes = _get_default_network_errors()
+
+ t.network = True
+
+ @wraps(t)
+ def wrapper(*args, **kwargs):
+ if (
+ check_before_test
+ and not raise_on_error
+ and not can_connect(url, error_classes)
+ ):
+ skip()
+ try:
+ return t(*args, **kwargs)
+ except Exception as err:
+ errno = getattr(err, "errno", None)
+ if not errno and hasattr(errno, "reason"):
+ # pandas\_testing.py:2521: error: "Exception" has no attribute
+ # "reason"
+ errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
+
+ if errno in skip_errnos:
+ skip(f"Skipping test due to known errno and error {err}")
+
+ e_str = str(err)
+
+ if any(m.lower() in e_str.lower() for m in _skip_on_messages):
+ skip(
+ f"Skipping test because exception message is known and error {err}"
+ )
+
+ if not isinstance(err, error_classes):
+ raise
+
+ if raise_on_error or can_connect(url, error_classes):
+ raise
+ else:
+ skip(f"Skipping test due to lack of connectivity and error {err}")
+
+ return wrapper
+
+
+with_connectivity_check = network
+
+
+def can_connect(url, error_classes=None):
+ """
+ Try to connect to the given url. True if succeeds, False if IOError
+ raised
+
+ Parameters
+ ----------
+ url : basestring
+ The URL to try to connect to
+
+ Returns
+ -------
+ connectable : bool
+ Return True if no IOError (unable to connect) or URLError (bad url) was
+ raised
+ """
+ if error_classes is None:
+ error_classes = _get_default_network_errors()
+
+ try:
+ with urlopen(url):
+ pass
+ except error_classes:
+ return False
+ else:
+ return True
+
+
+# ------------------------------------------------------------------
+# File-IO
+
+
+def round_trip_pickle(
+ obj: Any, path: Optional[FilePathOrBuffer] = None
+) -> FrameOrSeries:
+ """
+ Pickle an object and then read it again.
+
+ Parameters
+ ----------
+ obj : any object
+ The object to pickle and then re-read.
+ path : str, path object or file-like object, default None
+ The path where the pickled object is written and then read.
+
+ Returns
+ -------
+ pandas object
+ The original object that was pickled and then re-read.
+ """
+ _path = path
+ if _path is None:
+ _path = f"__{rands(10)}__.pickle"
+ with ensure_clean(_path) as temp_path:
+ pd.to_pickle(obj, temp_path)
+ return pd.read_pickle(temp_path)
+
+
+def round_trip_pathlib(writer, reader, path: Optional[str] = None):
+ """
+ Write an object to file specified by a pathlib.Path and read it back
+
+ Parameters
+ ----------
+ writer : callable bound to pandas object
+ IO writing function (e.g. DataFrame.to_csv )
+ reader : callable
+ IO reading function (e.g. pd.read_csv )
+ path : str, default None
+ The path where the object is written and then read.
+
+ Returns
+ -------
+ pandas object
+ The original object that was serialized and then re-read.
+ """
+ import pytest
+
+ Path = pytest.importorskip("pathlib").Path
+ if path is None:
+ path = "___pathlib___"
+ with ensure_clean(path) as path:
+ writer(Path(path))
+ obj = reader(Path(path))
+ return obj
+
+
+def round_trip_localpath(writer, reader, path: Optional[str] = None):
+ """
+ Write an object to file specified by a py.path LocalPath and read it back.
+
+ Parameters
+ ----------
+ writer : callable bound to pandas object
+ IO writing function (e.g. DataFrame.to_csv )
+ reader : callable
+ IO reading function (e.g. pd.read_csv )
+ path : str, default None
+ The path where the object is written and then read.
+
+ Returns
+ -------
+ pandas object
+ The original object that was serialized and then re-read.
+ """
+ import pytest
+
+ LocalPath = pytest.importorskip("py.path").local
+ if path is None:
+ path = "___localpath___"
+ with ensure_clean(path) as path:
+ writer(LocalPath(path))
+ obj = reader(LocalPath(path))
+ return obj
+
+
+def write_to_compressed(compression, path, data, dest="test"):
+ """
+ Write data to a compressed file.
+
+ Parameters
+ ----------
+ compression : {'gzip', 'bz2', 'zip', 'xz'}
+ The compression type to use.
+ path : str
+ The file path to write the data.
+ data : str
+ The data to write.
+ dest : str, default "test"
+ The destination file (for ZIP only)
+
+ Raises
+ ------
+ ValueError : An invalid compression value was passed in.
+ """
+ args: Tuple[Any, ...] = (data,)
+ mode = "wb"
+ method = "write"
+ compress_method: Callable
+
+ if compression == "zip":
+ compress_method = zipfile.ZipFile
+ mode = "w"
+ args = (dest, data)
+ method = "writestr"
+ elif compression == "gzip":
+ compress_method = gzip.GzipFile
+ elif compression == "bz2":
+ compress_method = bz2.BZ2File
+ elif compression == "xz":
+ compress_method = get_lzma_file(lzma)
+ else:
+ raise ValueError(f"Unrecognized compression type: {compression}")
+
+ with compress_method(path, mode=mode) as f:
+ getattr(f, method)(*args)
+
+
+# ------------------------------------------------------------------
+# Plotting
+
+
+def close(fignum=None):
+ from matplotlib.pyplot import close as _close, get_fignums
+
+ if fignum is None:
+ for fignum in get_fignums():
+ _close(fignum)
+ else:
+ _close(fignum)
diff --git a/pandas/_testing/_random.py b/pandas/_testing/_random.py
new file mode 100644
index 0000000000000..a646d7639a4e6
--- /dev/null
+++ b/pandas/_testing/_random.py
@@ -0,0 +1,48 @@
+import string
+
+import numpy as np
+
+
+def randbool(size=(), p: float = 0.5):
+ return np.random.rand(*size) <= p
+
+
+RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
+RANDU_CHARS = np.array(
+ list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
+ dtype=(np.unicode_, 1),
+)
+
+
+def rands_array(nchars, size, dtype="O"):
+ """
+ Generate an array of byte strings.
+ """
+ retval = (
+ np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
+ .view((np.str_, nchars))
+ .reshape(size)
+ )
+ return retval.astype(dtype)
+
+
+def randu_array(nchars, size, dtype="O"):
+ """
+ Generate an array of unicode strings.
+ """
+ retval = (
+ np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
+ .view((np.unicode_, nchars))
+ .reshape(size)
+ )
+ return retval.astype(dtype)
+
+
+def rands(nchars):
+ """
+ Generate one random byte string.
+
+ See `rands_array` if you want to create an array of random strings.
+
+ """
+ return "".join(np.random.choice(RANDS_CHARS, nchars))
diff --git a/pandas/_testing/_warnings.py b/pandas/_testing/_warnings.py
new file mode 100644
index 0000000000000..6429f74637f01
--- /dev/null
+++ b/pandas/_testing/_warnings.py
@@ -0,0 +1,174 @@
+from contextlib import contextmanager
+import re
+from typing import Optional, Sequence, Type, Union, cast
+import warnings
+
+
+@contextmanager
+def assert_produces_warning(
+ expected_warning: Optional[Union[Type[Warning], bool]] = Warning,
+ filter_level="always",
+ check_stacklevel: bool = True,
+ raise_on_extra_warnings: bool = True,
+ match: Optional[str] = None,
+):
+ """
+ Context manager for running code expected to either raise a specific
+ warning, or not raise any warnings. Verifies that the code raises the
+ expected warning, and that it does not raise any other unexpected
+ warnings. It is basically a wrapper around ``warnings.catch_warnings``.
+
+ Parameters
+ ----------
+ expected_warning : {Warning, False, None}, default Warning
+ The type of Exception raised. ``exception.Warning`` is the base
+ class for all warnings. To check that no warning is returned,
+ specify ``False`` or ``None``.
+ filter_level : str or None, default "always"
+ Specifies whether warnings are ignored, displayed, or turned
+ into errors.
+ Valid values are:
+
+ * "error" - turns matching warnings into exceptions
+ * "ignore" - discard the warning
+ * "always" - always emit a warning
+ * "default" - print the warning the first time it is generated
+ from each location
+ * "module" - print the warning the first time it is generated
+ from each module
+ * "once" - print the warning the first time it is generated
+
+ check_stacklevel : bool, default True
+ If True, displays the line that called the function containing
+ the warning to show were the function is called. Otherwise, the
+ line that implements the function is displayed.
+ raise_on_extra_warnings : bool, default True
+ Whether extra warnings not of the type `expected_warning` should
+ cause the test to fail.
+ match : str, optional
+ Match warning message.
+
+ Examples
+ --------
+ >>> import warnings
+ >>> with assert_produces_warning():
+ ... warnings.warn(UserWarning())
+ ...
+ >>> with assert_produces_warning(False):
+ ... warnings.warn(RuntimeWarning())
+ ...
+ Traceback (most recent call last):
+ ...
+ AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
+ >>> with assert_produces_warning(UserWarning):
+ ... warnings.warn(RuntimeWarning())
+ Traceback (most recent call last):
+ ...
+ AssertionError: Did not see expected warning of class 'UserWarning'.
+
+ ..warn:: This is *not* thread-safe.
+ """
+ __tracebackhide__ = True
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter(filter_level)
+ yield w
+
+ if expected_warning:
+ expected_warning = cast(Type[Warning], expected_warning)
+ _assert_caught_expected_warning(
+ caught_warnings=w,
+ expected_warning=expected_warning,
+ match=match,
+ check_stacklevel=check_stacklevel,
+ )
+
+ if raise_on_extra_warnings:
+ _assert_caught_no_extra_warnings(
+ caught_warnings=w,
+ expected_warning=expected_warning,
+ )
+
+
+def _assert_caught_expected_warning(
+ *,
+ caught_warnings: Sequence[warnings.WarningMessage],
+ expected_warning: Type[Warning],
+ match: Optional[str],
+ check_stacklevel: bool,
+) -> None:
+ """Assert that there was the expected warning among the caught warnings."""
+ saw_warning = False
+ matched_message = False
+
+ for actual_warning in caught_warnings:
+ if issubclass(actual_warning.category, expected_warning):
+ saw_warning = True
+
+ if check_stacklevel and issubclass(
+ actual_warning.category, (FutureWarning, DeprecationWarning)
+ ):
+ _assert_raised_with_correct_stacklevel(actual_warning)
+
+ if match is not None and re.search(match, str(actual_warning.message)):
+ matched_message = True
+
+ if not saw_warning:
+ raise AssertionError(
+ f"Did not see expected warning of class "
+ f"{repr(expected_warning.__name__)}"
+ )
+
+ if match and not matched_message:
+ raise AssertionError(
+ f"Did not see warning {repr(expected_warning.__name__)} "
+ f"matching {match}"
+ )
+
+
+def _assert_caught_no_extra_warnings(
+ *,
+ caught_warnings: Sequence[warnings.WarningMessage],
+ expected_warning: Optional[Union[Type[Warning], bool]],
+) -> None:
+ """Assert that no extra warnings apart from the expected ones are caught."""
+ extra_warnings = []
+
+ for actual_warning in caught_warnings:
+ if _is_unexpected_warning(actual_warning, expected_warning):
+ extra_warnings.append(
+ (
+ actual_warning.category.__name__,
+ actual_warning.message,
+ actual_warning.filename,
+ actual_warning.lineno,
+ )
+ )
+
+ if extra_warnings:
+ raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}")
+
+
+def _is_unexpected_warning(
+ actual_warning: warnings.WarningMessage,
+ expected_warning: Optional[Union[Type[Warning], bool]],
+) -> bool:
+ """Check if the actual warning issued is unexpected."""
+ if actual_warning and not expected_warning:
+ return True
+ expected_warning = cast(Type[Warning], expected_warning)
+ return bool(not issubclass(actual_warning.category, expected_warning))
+
+
+def _assert_raised_with_correct_stacklevel(
+ actual_warning: warnings.WarningMessage,
+) -> None:
+ from inspect import getframeinfo, stack
+
+ caller = getframeinfo(stack()[4][0])
+ msg = (
+ "Warning not set with correct stacklevel. "
+ f"File where warning is raised: {actual_warning.filename} != "
+ f"{caller.filename}. Warning message: {actual_warning.message}"
+ )
+ assert actual_warning.filename == caller.filename, msg
diff --git a/pandas/_testing/contexts.py b/pandas/_testing/contexts.py
index d6a4b47571653..d72dc8c3af104 100644
--- a/pandas/_testing/contexts.py
+++ b/pandas/_testing/contexts.py
@@ -3,6 +3,8 @@
from shutil import rmtree
import tempfile
+import numpy as np
+
from pandas.io.common import get_handle
@@ -214,3 +216,32 @@ def use_numexpr(use, min_elements=None):
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
+
+
+class RNGContext:
+ """
+ Context manager to set the numpy random number generator speed. Returns
+ to the original value upon exiting the context manager.
+
+ Parameters
+ ----------
+ seed : int
+ Seed for numpy.random.seed
+
+ Examples
+ --------
+ with RNGContext(42):
+ np.random.randn()
+ """
+
+ def __init__(self, seed):
+ self.seed = seed
+
+ def __enter__(self):
+
+ self.start_state = np.random.get_state()
+ np.random.seed(self.seed)
+
+ def __exit__(self, exc_type, exc_value, traceback):
+
+ np.random.set_state(self.start_state)
| https://api.github.com/repos/pandas-dev/pandas/pulls/38961 | 2021-01-04T23:52:11Z | 2021-01-05T02:18:14Z | 2021-01-05T02:18:14Z | 2021-01-05T02:26:32Z | |
TST: strict xfail | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 2862f7c957abc..bf5e632374b59 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -473,7 +473,7 @@ def index_with_missing(request):
Fixture for indices with missing values
"""
if request.param in ["int", "uint", "range", "empty", "repeats"]:
- pytest.xfail("missing values not supported")
+ pytest.skip("missing values not supported")
# GH 35538. Use deep copy to avoid illusive bug on np-dev
# Azure pipeline that writes into indices_dict despite copy
ind = indices_dict[request.param].copy(deep=True)
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 8735e2a09920d..ac2e300f9f8d6 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -436,7 +436,20 @@ def test_insert_index_float64(self, insert, coerced_val, coerced_dtype):
],
ids=["datetime64", "datetime64tz"],
)
- def test_insert_index_datetimes(self, fill_val, exp_dtype):
+ @pytest.mark.parametrize(
+ "insert_value",
+ [pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), 1],
+ )
+ def test_insert_index_datetimes(self, request, fill_val, exp_dtype, insert_value):
+ if not hasattr(insert_value, "tz"):
+ request.node.add_marker(
+ pytest.mark.xfail(reason="ToDo: must coerce to object")
+ )
+ elif fill_val.tz != insert_value.tz:
+ request.node.add_marker(
+ pytest.mark.xfail(reason="GH 37605 - require tz equality?")
+ )
+
obj = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], tz=fill_val.tz
)
@@ -448,25 +461,7 @@ def test_insert_index_datetimes(self, fill_val, exp_dtype):
)
self._assert_insert_conversion(obj, fill_val, exp, exp_dtype)
- if fill_val.tz:
- msg = "Cannot compare tz-naive and tz-aware"
- with pytest.raises(TypeError, match=msg):
- obj.insert(1, pd.Timestamp("2012-01-01"))
-
- msg = "Timezones don't match"
- with pytest.raises(ValueError, match=msg):
- obj.insert(1, pd.Timestamp("2012-01-01", tz="Asia/Tokyo"))
-
- else:
- msg = "Cannot compare tz-naive and tz-aware"
- with pytest.raises(TypeError, match=msg):
- obj.insert(1, pd.Timestamp("2012-01-01", tz="Asia/Tokyo"))
-
- msg = "value should be a 'Timestamp' or 'NaT'. Got 'int' instead."
- with pytest.raises(TypeError, match=msg):
- obj.insert(1, 1)
-
- pytest.xfail("ToDo: must coerce to object")
+ obj.insert(1, insert_value)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(["1 day", "2 day", "3 day", "4 day"])
diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py
index b2ac28e1865d6..5edef896be537 100644
--- a/pandas/tests/tseries/offsets/common.py
+++ b/pandas/tests/tseries/offsets/common.py
@@ -98,12 +98,10 @@ def _get_offset(self, klass, value=1, normalize=False):
klass = klass(value, normalize=normalize)
return klass
- def test_apply_out_of_range(self, tz_naive_fixture):
+ def test_apply_out_of_range(self, request, tz_naive_fixture):
tz = tz_naive_fixture
if self._offset is None:
return
- if isinstance(tz, tzlocal) and not IS64:
- pytest.xfail(reason="OverflowError inside tzlocal past 2038")
# try to create an out-of-bounds result timestamp; if we can't create
# the offset skip
@@ -123,6 +121,13 @@ def test_apply_out_of_range(self, tz_naive_fixture):
t = Timestamp("20080101", tz=tz)
result = t + offset
assert isinstance(result, datetime)
+
+ if isinstance(tz, tzlocal) and not IS64:
+ # If we hit OutOfBoundsDatetime on non-64 bit machines
+ # we'll drop out of the try clause before the next test
+ request.node.add_marker(
+ pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038")
+ )
assert t.tzinfo == result.tzinfo
except OutOfBoundsDatetime:
| - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
Part of #38902. | https://api.github.com/repos/pandas-dev/pandas/pulls/38960 | 2021-01-04T23:37:53Z | 2021-01-09T21:46:29Z | 2021-01-09T21:46:29Z | 2021-02-03T02:48:45Z |
TYP/CLN: assorted cleanups | diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 1339dee954603..4dc14397a30f4 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -3565,7 +3565,7 @@ cpdef to_offset(freq):
f"to_offset does not support tuples {freq}, pass as a string instead"
)
- elif isinstance(freq, timedelta):
+ elif PyDelta_Check(freq):
return delta_to_tick(freq)
elif isinstance(freq, str):
diff --git a/pandas/conftest.py b/pandas/conftest.py
index 2862f7c957abc..9fc1f0509d232 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1002,14 +1002,6 @@ def tz_aware_fixture(request):
tz_aware_fixture2 = tz_aware_fixture
-@pytest.fixture(scope="module")
-def datetime_tz_utc():
- """
- Yields the UTC timezone object from the datetime module.
- """
- return timezone.utc
-
-
@pytest.fixture(params=["utc", "dateutil/UTC", utc, tzutc(), timezone.utc])
def utc_fixture(request):
"""
@@ -1189,7 +1181,7 @@ def any_nullable_int_dtype(request):
@pytest.fixture(params=tm.ALL_EA_INT_DTYPES + tm.FLOAT_EA_DTYPES)
-def any_numeric_dtype(request):
+def any_nullable_numeric_dtype(request):
"""
Parameterized fixture for any nullable integer dtype and
any float ea dtypes.
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 7f4e16dc236ac..94c7d325d0bc8 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -658,7 +658,7 @@ def _astype(self, dtype: DtypeObj, copy: bool) -> ArrayLike:
values = values.astype(dtype, copy=copy)
else:
- values = astype_nansafe(values, dtype, copy=True)
+ values = astype_nansafe(values, dtype, copy=copy)
return values
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 013e52248f5c4..f97077954f8bf 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -413,7 +413,7 @@ def _get_empty_dtype_and_na(join_units: Sequence[JoinUnit]) -> Tuple[DtypeObj, A
return np.dtype("M8[ns]"), np.datetime64("NaT", "ns")
elif "timedelta" in upcast_classes:
return np.dtype("m8[ns]"), np.timedelta64("NaT", "ns")
- else: # pragma
+ else:
try:
common_dtype = np.find_common_type(upcast_classes, [])
except TypeError:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 7f2039c998f53..3f22f14766a07 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4494,7 +4494,7 @@ def replace(
method=method,
)
- def _replace_single(self, to_replace, method, inplace, limit):
+ def _replace_single(self, to_replace, method: str, inplace: bool, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 341a8a9f90b96..ceaf6e1ac21e5 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -80,6 +80,8 @@
if TYPE_CHECKING:
from tables import Col, File, Node
+ from pandas.core.internals import Block
+
# versioning attribute
_version = "0.15.2"
@@ -3860,9 +3862,6 @@ def _create_axes(
for a in new_non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
- def get_blk_items(mgr, blocks):
- return [mgr.items.take(blk.mgr_locs) for blk in blocks]
-
transposed = new_index.axis == 1
# figure out data_columns and get out blocks
@@ -3870,10 +3869,10 @@ def get_blk_items(mgr, blocks):
data_columns, min_itemsize, new_non_index_axes
)
- block_obj = self.get_object(obj, transposed)._consolidate()
+ frame = self.get_object(obj, transposed)._consolidate()
blocks, blk_items = self._get_blocks_and_items(
- block_obj, table_exists, new_non_index_axes, self.values_axes, data_columns
+ frame, table_exists, new_non_index_axes, self.values_axes, data_columns
)
# add my values
@@ -3978,27 +3977,31 @@ def get_blk_items(mgr, blocks):
@staticmethod
def _get_blocks_and_items(
- block_obj, table_exists, new_non_index_axes, values_axes, data_columns
+ frame: DataFrame,
+ table_exists: bool,
+ new_non_index_axes,
+ values_axes,
+ data_columns,
):
# Helper to clarify non-state-altering parts of _create_axes
- def get_blk_items(mgr, blocks):
- return [mgr.items.take(blk.mgr_locs) for blk in blocks]
+ def get_blk_items(mgr):
+ return [mgr.items.take(blk.mgr_locs) for blk in mgr.blocks]
- blocks = block_obj._mgr.blocks
- blk_items = get_blk_items(block_obj._mgr, blocks)
+ blocks: List["Block"] = list(frame._mgr.blocks)
+ blk_items: List[Index] = get_blk_items(frame._mgr)
if len(data_columns):
axis, axis_labels = new_non_index_axes[0]
new_labels = Index(axis_labels).difference(Index(data_columns))
- mgr = block_obj.reindex(new_labels, axis=axis)._mgr
+ mgr = frame.reindex(new_labels, axis=axis)._mgr
blocks = list(mgr.blocks)
- blk_items = get_blk_items(mgr, blocks)
+ blk_items = get_blk_items(mgr)
for c in data_columns:
- mgr = block_obj.reindex([c], axis=axis)._mgr
+ mgr = frame.reindex([c], axis=axis)._mgr
blocks.extend(mgr.blocks)
- blk_items.extend(get_blk_items(mgr, mgr.blocks))
+ blk_items.extend(get_blk_items(mgr))
# reorder the blocks in the same order as the existing table if we can
if table_exists:
@@ -4006,7 +4009,7 @@ def get_blk_items(mgr, blocks):
tuple(b_items.tolist()): (b, b_items)
for b, b_items in zip(blocks, blk_items)
}
- new_blocks = []
+ new_blocks: List["Block"] = []
new_blk_items = []
for ea in values_axes:
items = tuple(ea.values)
@@ -4875,7 +4878,7 @@ def _unconvert_index(
def _maybe_convert_for_string_atom(
- name: str, block, existing_col, min_itemsize, nan_rep, encoding, errors
+ name: str, block: "Block", existing_col, min_itemsize, nan_rep, encoding, errors
):
if not block.is_object:
return block.values
@@ -4895,11 +4898,12 @@ def _maybe_convert_for_string_atom(
elif not (inferred_type == "string" or dtype_name == "object"):
return block.values
- block = block.fillna(nan_rep, downcast=False)
- if isinstance(block, list):
- # Note: because block is always object dtype, fillna goes
- # through a path such that the result is always a 1-element list
- block = block[0]
+ blocks: List["Block"] = block.fillna(nan_rep, downcast=False)
+ # Note: because block is always object dtype, fillna goes
+ # through a path such that the result is always a 1-element list
+ assert len(blocks) == 1
+ block = blocks[0]
+
data = block.values
# see if we have a valid string type
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index add1bd4bb3972..e448cf0b578ae 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -1159,7 +1159,6 @@ def test_dti_union_mixed(self):
@pytest.mark.parametrize(
"tz", [None, "UTC", "US/Central", dateutil.tz.tzoffset(None, -28800)]
)
- @pytest.mark.usefixtures("datetime_tz_utc")
def test_iteration_preserves_nanoseconds(self, tz):
# GH 19603
index = DatetimeIndex(
diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
index e5f509acf4734..6d53fe4563e41 100644
--- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py
+++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
@@ -1,5 +1,4 @@
import numpy as np
-import pytest
from pandas import Series, timedelta_range
import pandas._testing as tm
@@ -22,13 +21,6 @@ def test_partial_slice(self):
expected = s.iloc[:134]
tm.assert_series_equal(result, expected)
- result = s["6 days, 23:11:12"]
- assert result == s.iloc[133]
-
- msg = r"^Timedelta\('50 days 00:00:00'\)$"
- with pytest.raises(KeyError, match=msg):
- s["50 days"]
-
def test_partial_slice_high_reso(self):
# higher reso
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 2022bca514540..a7a60f37bcd00 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -18,6 +18,7 @@
Timestamp,
date_range,
period_range,
+ timedelta_range,
)
import pandas._testing as tm
from pandas.core.indexing import IndexingError
@@ -121,6 +122,23 @@ def test_getitem_scalar_categorical_index(self):
result = ser[cats[0]]
assert result == expected
+ def test_getitem_str_with_timedeltaindex(self):
+ rng = timedelta_range("1 day 10:11:12", freq="h", periods=500)
+ ser = Series(np.arange(len(rng)), index=rng)
+
+ key = "6 days, 23:11:12"
+ indexer = rng.get_loc(key)
+ assert indexer == 133
+
+ result = ser[key]
+ assert result == ser.iloc[133]
+
+ msg = r"^Timedelta\('50 days 00:00:00'\)$"
+ with pytest.raises(KeyError, match=msg):
+ rng.get_loc("50 days")
+ with pytest.raises(KeyError, match=msg):
+ ser["50 days"]
+
class TestSeriesGetitemSlices:
def test_getitem_partial_str_slice_with_datetimeindex(self):
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index d6d0723bee0e8..47641d49c7a09 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -167,19 +167,19 @@ def test_setitem_boolean_td64_values_cast_na(self, value):
expected = Series([NaT, 1, 2], dtype="timedelta64[ns]")
tm.assert_series_equal(series, expected)
- def test_setitem_boolean_nullable_int_types(self, any_numeric_dtype):
+ def test_setitem_boolean_nullable_int_types(self, any_nullable_numeric_dtype):
# GH: 26468
- ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
- ser[ser > 6] = Series(range(4), dtype=any_numeric_dtype)
- expected = Series([5, 6, 2, 3], dtype=any_numeric_dtype)
+ ser = Series([5, 6, 7, 8], dtype=any_nullable_numeric_dtype)
+ ser[ser > 6] = Series(range(4), dtype=any_nullable_numeric_dtype)
+ expected = Series([5, 6, 2, 3], dtype=any_nullable_numeric_dtype)
tm.assert_series_equal(ser, expected)
- ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
- ser.loc[ser > 6] = Series(range(4), dtype=any_numeric_dtype)
+ ser = Series([5, 6, 7, 8], dtype=any_nullable_numeric_dtype)
+ ser.loc[ser > 6] = Series(range(4), dtype=any_nullable_numeric_dtype)
tm.assert_series_equal(ser, expected)
- ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)
- loc_ser = Series(range(4), dtype=any_numeric_dtype)
+ ser = Series([5, 6, 7, 8], dtype=any_nullable_numeric_dtype)
+ loc_ser = Series(range(4), dtype=any_nullable_numeric_dtype)
ser.loc[ser > 6] = loc_ser.loc[loc_ser > 1]
tm.assert_series_equal(ser, expected)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index c7bd38bbd00b9..d7cd92c8e3362 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -272,8 +272,8 @@ def test_constructor_index_dtype(self, dtype):
[
([1, 2]),
(["1", "2"]),
- (list(pd.date_range("1/1/2011", periods=2, freq="H"))),
- (list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
+ (list(date_range("1/1/2011", periods=2, freq="H"))),
+ (list(date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([Interval(left=0, right=5)]),
],
)
@@ -628,10 +628,10 @@ def test_constructor_copy(self):
@pytest.mark.parametrize(
"index",
[
- pd.date_range("20170101", periods=3, tz="US/Eastern"),
- pd.date_range("20170101", periods=3),
- pd.timedelta_range("1 day", periods=3),
- pd.period_range("2012Q1", periods=3, freq="Q"),
+ date_range("20170101", periods=3, tz="US/Eastern"),
+ date_range("20170101", periods=3),
+ timedelta_range("1 day", periods=3),
+ period_range("2012Q1", periods=3, freq="Q"),
Index(list("abc")),
pd.Int64Index([1, 2, 3]),
RangeIndex(0, 3),
@@ -1038,16 +1038,16 @@ def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
- s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
+ ser = Series(date_range("20130101", periods=3, tz="US/Eastern"))
- result = Series(s, dtype=s.dtype)
- tm.assert_series_equal(result, s)
+ result = Series(ser, dtype=ser.dtype)
+ tm.assert_series_equal(result, ser)
- result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
- tm.assert_series_equal(result, s)
+ result = Series(ser.dt.tz_convert("UTC"), dtype=ser.dtype)
+ tm.assert_series_equal(result, ser)
- result = Series(s.values, dtype=s.dtype)
- tm.assert_series_equal(result, s)
+ result = Series(ser.values, dtype=ser.dtype)
+ tm.assert_series_equal(result, ser)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
@@ -1374,7 +1374,7 @@ def test_convert_non_ns(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype="timedelta64[s]")
s = Series(arr)
- expected = Series(pd.timedelta_range("00:00:01", periods=3, freq="s"))
+ expected = Series(timedelta_range("00:00:01", periods=3, freq="s"))
tm.assert_series_equal(s, expected)
# convert from a numpy array of non-ns datetime64
| https://api.github.com/repos/pandas-dev/pandas/pulls/38959 | 2021-01-04T22:45:54Z | 2021-01-05T00:14:05Z | 2021-01-05T00:14:05Z | 2021-01-05T01:18:03Z | |
Backport PR #38957 on branch 1.2.x (DOC: move API breaking "check_freq" section from v1.2.1rst to v1.1.0.rst) | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index e054ac830ce41..64552b104c053 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -716,6 +716,19 @@ apply and applymap on ``DataFrame`` evaluates first row/column only once
df.apply(func, axis=1)
+.. _whatsnew_110.api_breaking:
+
+Backwards incompatible API changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _whatsnew_110.api_breaking.testing.check_freq:
+
+Added ``check_freq`` argument to ``testing.assert_frame_equal`` and ``testing.assert_series_equal``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` now raise ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked.
+
+
Increased minimum versions for dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 37298d12a12c4..e9602bbe1cee1 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -10,20 +10,6 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
-.. _whatsnew_121.api_breaking:
-
-Backwards incompatible API changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. _whatsnew_121.api_breaking.testing.check_freq:
-
-Added ``check_freq`` argument to ``testing.assert_frame_equal`` and ``testing.assert_series_equal``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` now raise ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked.
-
-.. ---------------------------------------------------------------------------
-
.. _whatsnew_121.regressions:
Fixed regressions
@@ -62,7 +48,7 @@ I/O
Other
~~~~~
- Fixed build failure on MacOS 11 in Python 3.9.1 (:issue:`38766`)
--
+- Added reference to backwards incompatible ``check_freq`` arg of :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in :ref:`pandas 1.1.0 whats new <whatsnew_110.api_breaking.testing.check_freq>` (:issue:`34050`)
.. ---------------------------------------------------------------------------
| Backport PR #38957: DOC: move API breaking "check_freq" section from v1.2.1rst to v1.1.0.rst | https://api.github.com/repos/pandas-dev/pandas/pulls/38958 | 2021-01-04T22:45:26Z | 2021-01-05T00:11:03Z | 2021-01-05T00:11:03Z | 2021-01-05T00:11:03Z |
DOC: move API breaking "check_freq" section from v1.2.1rst to v1.1.0.rst | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index e054ac830ce41..64552b104c053 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -716,6 +716,19 @@ apply and applymap on ``DataFrame`` evaluates first row/column only once
df.apply(func, axis=1)
+.. _whatsnew_110.api_breaking:
+
+Backwards incompatible API changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _whatsnew_110.api_breaking.testing.check_freq:
+
+Added ``check_freq`` argument to ``testing.assert_frame_equal`` and ``testing.assert_series_equal``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` now raise ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked.
+
+
Increased minimum versions for dependencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 37298d12a12c4..e9602bbe1cee1 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -10,20 +10,6 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
-.. _whatsnew_121.api_breaking:
-
-Backwards incompatible API changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. _whatsnew_121.api_breaking.testing.check_freq:
-
-Added ``check_freq`` argument to ``testing.assert_frame_equal`` and ``testing.assert_series_equal``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` now raise ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked.
-
-.. ---------------------------------------------------------------------------
-
.. _whatsnew_121.regressions:
Fixed regressions
@@ -62,7 +48,7 @@ I/O
Other
~~~~~
- Fixed build failure on MacOS 11 in Python 3.9.1 (:issue:`38766`)
--
+- Added reference to backwards incompatible ``check_freq`` arg of :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in :ref:`pandas 1.1.0 whats new <whatsnew_110.api_breaking.testing.check_freq>` (:issue:`34050`)
.. ---------------------------------------------------------------------------
| and add reference to 1.1.0 whats new update in v1.2.1.rst
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
This moves the doc notes for `assert_series_equal` and `assert_frame_equal` from the 1.2.1 whats new to the 1.1.0 whats new and adds a small reference to the section in the 1.1.0 page in the 1.2.1 page. This came up in #38471 and makes more sense to me than the current approach, but happy to close if current state is preferred.
| https://api.github.com/repos/pandas-dev/pandas/pulls/38957 | 2021-01-04T20:42:11Z | 2021-01-04T22:45:12Z | 2021-01-04T22:45:11Z | 2021-01-05T15:37:43Z |
CLN: Unify number recognition tests for all parsers | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 3058d1eed22b9..6e9cc18358153 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2349,12 +2349,12 @@ def __init__(self, f: Union[FilePathOrBuffer, List], **kwds):
decimal = re.escape(self.decimal)
if self.thousands is None:
- regex = fr"^\-?[0-9]*({decimal}[0-9]*)?([0-9](E|e)\-?[0-9]*)?$"
+ regex = fr"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$"
else:
thousands = re.escape(self.thousands)
regex = (
- fr"^\-?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
- fr"([0-9](E|e)\-?[0-9]*)?$"
+ fr"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
+ fr"([0-9]?(E|e)\-?[0-9]+)?$"
)
self.num = re.compile(regex)
diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py
index ec098353960d7..321678c36943a 100644
--- a/pandas/tests/io/parser/conftest.py
+++ b/pandas/tests/io/parser/conftest.py
@@ -148,3 +148,58 @@ def encoding_fmt(request):
Fixture for all possible string formats of a UTF encoding.
"""
return request.param
+
+
+@pytest.fixture(
+ params=[
+ ("-1,0", -1.0),
+ ("-1,2e0", -1.2),
+ ("-1e0", -1.0),
+ ("+1e0", 1.0),
+ ("+1e+0", 1.0),
+ ("+1e-1", 0.1),
+ ("+,1e1", 1.0),
+ ("+1,e0", 1.0),
+ ("-,1e1", -1.0),
+ ("-1,e0", -1.0),
+ ("0,1", 0.1),
+ ("1,", 1.0),
+ (",1", 0.1),
+ ("-,1", -0.1),
+ ("1_,", 1.0),
+ ("1_234,56", 1234.56),
+ ("1_234,56e0", 1234.56),
+ # negative cases; must not parse as float
+ ("_", "_"),
+ ("-_", "-_"),
+ ("-_1", "-_1"),
+ ("-_1e0", "-_1e0"),
+ ("_1", "_1"),
+ ("_1,", "_1,"),
+ ("_1,_", "_1,_"),
+ ("_1e0", "_1e0"),
+ ("1,2e_1", "1,2e_1"),
+ ("1,2e1_0", "1,2e1_0"),
+ ("1,_2", "1,_2"),
+ (",1__2", ",1__2"),
+ (",1e", ",1e"),
+ ("-,1e", "-,1e"),
+ ("1_000,000_000", "1_000,000_000"),
+ ("1,e1_2", "1,e1_2"),
+ ("e11,2", "e11,2"),
+ ("1e11,2", "1e11,2"),
+ ("1,2,2", "1,2,2"),
+ ("1,2_1", "1,2_1"),
+ ("1,2e-10e1", "1,2e-10e1"),
+ ("--1,2", "--1,2"),
+ ("1a_2,1", "1a_2,1"),
+ ("1,2E-1", 0.12),
+ ("1,2E1", 12.0),
+ ]
+)
+def numeric_decimal(request):
+ """
+ Fixture for all numeric formats which should get recognized. The first entry
+ represents the value to read while the second represents the expected result.
+ """
+ return request.param
diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
index fc34d65fdad52..ec1ccf009b8de 100644
--- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
+++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
@@ -181,3 +181,35 @@ def test_delimiter_with_usecols_and_parse_dates(all_parsers):
{"col1": [-9.1], "col2": [-9.1], "col3": [Timestamp("2010-10-10")]}
)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("thousands", ["_", None])
+def test_decimal_and_exponential(python_parser_only, numeric_decimal, thousands):
+ # GH#31920
+ decimal_number_check(python_parser_only, numeric_decimal, thousands, None)
+
+
+@pytest.mark.parametrize("thousands", ["_", None])
+@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
+def test_1000_sep_decimal_float_precision(
+ c_parser_only, numeric_decimal, float_precision, thousands
+):
+ # test decimal and thousand sep handling in across 'float_precision'
+ # parsers
+ decimal_number_check(c_parser_only, numeric_decimal, thousands, float_precision)
+
+
+def decimal_number_check(parser, numeric_decimal, thousands, float_precision):
+ # GH#31920
+ value = numeric_decimal[0]
+ if thousands is None and "_" in value:
+ pytest.skip("Skip test if no thousands sep is defined and sep is in value")
+ df = parser.read_csv(
+ StringIO(value),
+ sep="|",
+ thousands=thousands,
+ decimal=",",
+ header=None,
+ )
+ val = df.iloc[0, 0]
+ assert val == numeric_decimal[1]
diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py
index 15e7569ea9014..da778093237b0 100644
--- a/pandas/tests/io/parser/test_c_parser_only.py
+++ b/pandas/tests/io/parser/test_c_parser_only.py
@@ -653,64 +653,6 @@ def test_1000_sep_with_decimal(
tm.assert_frame_equal(result, expected)
-@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
-@pytest.mark.parametrize(
- "value,expected",
- [
- ("-1,0", -1.0),
- ("-1,2e0", -1.2),
- ("-1e0", -1.0),
- ("+1e0", 1.0),
- ("+1e+0", 1.0),
- ("+1e-1", 0.1),
- ("+,1e1", 1.0),
- ("+1,e0", 1.0),
- ("-,1e1", -1.0),
- ("-1,e0", -1.0),
- ("0,1", 0.1),
- ("1,", 1.0),
- (",1", 0.1),
- ("-,1", -0.1),
- ("1_,", 1.0),
- ("1_234,56", 1234.56),
- ("1_234,56e0", 1234.56),
- # negative cases; must not parse as float
- ("_", "_"),
- ("-_", "-_"),
- ("-_1", "-_1"),
- ("-_1e0", "-_1e0"),
- ("_1", "_1"),
- ("_1,", "_1,"),
- ("_1,_", "_1,_"),
- ("_1e0", "_1e0"),
- ("1,2e_1", "1,2e_1"),
- ("1,2e1_0", "1,2e1_0"),
- ("1,_2", "1,_2"),
- (",1__2", ",1__2"),
- (",1e", ",1e"),
- ("-,1e", "-,1e"),
- ("1_000,000_000", "1_000,000_000"),
- ("1,e1_2", "1,e1_2"),
- ],
-)
-def test_1000_sep_decimal_float_precision(
- c_parser_only, value, expected, float_precision
-):
- # test decimal and thousand sep handling in across 'float_precision'
- # parsers
- parser = c_parser_only
- df = parser.read_csv(
- StringIO(value),
- sep="|",
- thousands="_",
- decimal=",",
- header=None,
- float_precision=float_precision,
- )
- val = df.iloc[0, 0]
- assert val == expected
-
-
def test_float_precision_options(c_parser_only):
# GH 17154, 36228
parser = c_parser_only
diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py
index 04d5413abfafc..d55a6361fc8d2 100644
--- a/pandas/tests/io/parser/test_python_parser_only.py
+++ b/pandas/tests/io/parser/test_python_parser_only.py
@@ -305,49 +305,3 @@ def test_malformed_skipfooter(python_parser_only):
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#", skipfooter=1)
-
-
-@pytest.mark.parametrize("thousands", [None, "."])
-@pytest.mark.parametrize(
- "value, result_value",
- [
- ("1,2", 1.2),
- ("1,2e-1", 0.12),
- ("1,2E-1", 0.12),
- ("1,2e-10", 0.0000000012),
- ("1,2e1", 12.0),
- ("1,2E1", 12.0),
- ("-1,2e-1", -0.12),
- ("0,2", 0.2),
- (",2", 0.2),
- ],
-)
-def test_decimal_and_exponential(python_parser_only, thousands, value, result_value):
- # GH#31920
- data = StringIO(
- f"""a b
- 1,1 {value}
- """
- )
- result = python_parser_only.read_csv(
- data, "\t", decimal=",", engine="python", thousands=thousands
- )
- expected = DataFrame({"a": [1.1], "b": [result_value]})
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("thousands", [None, "."])
-@pytest.mark.parametrize(
- "value",
- ["e11,2", "1e11,2", "1,2,2", "1,2.1", "1,2e-10e1", "--1,2", "1a.2,1", "1..2,3"],
-)
-def test_decimal_and_exponential_erroneous(python_parser_only, thousands, value):
- # GH#31920
- data = StringIO(
- f"""a b
- 1,1 {value}
- """
- )
- result = python_parser_only.read_csv(data, "\t", decimal=",", thousands=thousands)
- expected = DataFrame({"a": [1.1], "b": [value]})
- tm.assert_frame_equal(result, expected)
| - [x] closes #38926
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
Minor regex improvements.
Is a fixture the right thing to do here?
| https://api.github.com/repos/pandas-dev/pandas/pulls/38954 | 2021-01-04T19:03:38Z | 2021-01-04T23:15:17Z | 2021-01-04T23:15:17Z | 2021-01-04T23:23:01Z |
DOC: clarify and spellcheck indexing documentation | diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index 0a11344d575f1..dc66303a44f53 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -429,7 +429,7 @@ For the rationale behind this behavior, see
s = pd.Series(list('abcdef'), index=[0, 3, 2, 5, 4, 2])
s.loc[3:5]
-Also, if the index has duplicate labels *and* either the start or the stop label is dupulicated,
+Also, if the index has duplicate labels *and* either the start or the stop label is duplicated,
an error will be raised. For instance, in the above example, ``s.loc[2:5]`` would raise a ``KeyError``.
For more information about duplicate labels, see
@@ -1138,10 +1138,10 @@ Setting with enlargement conditionally using :func:`numpy`
----------------------------------------------------------
An alternative to :meth:`~pandas.DataFrame.where` is to use :func:`numpy.where`.
-Combined with setting a new column, you can use it to enlarge a dataframe where the
+Combined with setting a new column, you can use it to enlarge a DataFrame where the
values are determined conditionally.
-Consider you have two choices to choose from in the following dataframe. And you want to
+Consider you have two choices to choose from in the following DataFrame. And you want to
set a new column color to 'green' when the second column has 'Z'. You can do the
following:
@@ -1293,8 +1293,8 @@ Full numpy-like syntax:
df.query('(a < b) & (b < c)')
df[(df['a'] < df['b']) & (df['b'] < df['c'])]
-Slightly nicer by removing the parentheses (by binding making comparison
-operators bind tighter than ``&`` and ``|``).
+Slightly nicer by removing the parentheses (comparison operators bind tighter
+than ``&`` and ``|``):
.. ipython:: python
| https://api.github.com/repos/pandas-dev/pandas/pulls/38951 | 2021-01-04T18:42:54Z | 2021-01-04T22:34:53Z | 2021-01-04T22:34:53Z | 2021-01-05T17:09:34Z | |
REF: de-duplicate tslibs.fields | diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 16fa05c3801c6..57404b99c7628 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -174,6 +174,18 @@ def get_date_name_field(const int64_t[:] dtindex, str field, object locale=None)
return out
+cdef inline bint _is_on_month(int month, int compare_month, int modby) nogil:
+ """
+ Analogous to DateOffset.is_on_offset checking for the month part of a date.
+ """
+ if modby == 1:
+ return True
+ elif modby == 3:
+ return (month - compare_month) % 3 == 0
+ else:
+ return month == compare_month
+
+
@cython.wraparound(False)
@cython.boundscheck(False)
def get_start_end_field(const int64_t[:] dtindex, str field,
@@ -191,6 +203,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
int start_month = 1
ndarray[int8_t] out
npy_datetimestruct dts
+ int compare_month, modby
out = np.zeros(count, dtype='int8')
@@ -215,102 +228,15 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
end_month = 12
start_month = 1
- if field == 'is_month_start':
- if is_business:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if dts.day == get_firstbday(dts.year, dts.month):
- out[i] = 1
-
- else:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if dts.day == 1:
- out[i] = 1
-
- elif field == 'is_month_end':
- if is_business:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if dts.day == get_lastbday(dts.year, dts.month):
- out[i] = 1
-
- else:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if dts.day == get_days_in_month(dts.year, dts.month):
- out[i] = 1
-
- elif field == 'is_quarter_start':
- if is_business:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if ((dts.month - start_month) % 3 == 0) and (
- dts.day == get_firstbday(dts.year, dts.month)):
- out[i] = 1
-
- else:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if ((dts.month - start_month) % 3 == 0) and dts.day == 1:
- out[i] = 1
-
- elif field == 'is_quarter_end':
- if is_business:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if ((dts.month - end_month) % 3 == 0) and (
- dts.day == get_lastbday(dts.year, dts.month)):
- out[i] = 1
-
- else:
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = 0
- continue
-
- dt64_to_dtstruct(dtindex[i], &dts)
-
- if ((dts.month - end_month) % 3 == 0) and (
- dts.day == get_days_in_month(dts.year, dts.month)):
- out[i] = 1
+ compare_month = start_month if "start" in field else end_month
+ if "month" in field:
+ modby = 1
+ elif "quarter" in field:
+ modby = 3
+ else:
+ modby = 12
- elif field == 'is_year_start':
+ if field in ["is_month_start", "is_quarter_start", "is_year_start"]:
if is_business:
for i in range(count):
if dtindex[i] == NPY_NAT:
@@ -319,7 +245,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
dt64_to_dtstruct(dtindex[i], &dts)
- if (dts.month == start_month) and (
+ if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_firstbday(dts.year, dts.month)):
out[i] = 1
@@ -331,10 +257,10 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
dt64_to_dtstruct(dtindex[i], &dts)
- if (dts.month == start_month) and dts.day == 1:
+ if _is_on_month(dts.month, compare_month, modby) and dts.day == 1:
out[i] = 1
- elif field == 'is_year_end':
+ elif field in ["is_month_end", "is_quarter_end", "is_year_end"]:
if is_business:
for i in range(count):
if dtindex[i] == NPY_NAT:
@@ -343,7 +269,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
dt64_to_dtstruct(dtindex[i], &dts)
- if (dts.month == end_month) and (
+ if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_lastbday(dts.year, dts.month)):
out[i] = 1
@@ -355,7 +281,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field,
dt64_to_dtstruct(dtindex[i], &dts)
- if (dts.month == end_month) and (
+ if _is_on_month(dts.month, compare_month, modby) and (
dts.day == get_days_in_month(dts.year, dts.month)):
out[i] = 1
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38950 | 2021-01-04T18:19:52Z | 2021-01-04T19:20:16Z | 2021-01-04T19:20:16Z | 2021-01-04T19:30:14Z |
Backport PR #38909 on branch 1.2.x (BUG: Fixed regression in rolling.skew and rolling.kurt modifying object) | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index 4102bdd07aa8f..37298d12a12c4 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -37,7 +37,7 @@ Fixed regressions
- Fixed regression in :meth:`.GroupBy.sem` where the presence of non-numeric columns would cause an error instead of being dropped (:issue:`38774`)
- Fixed regression in :func:`read_excel` with non-rawbyte file handles (:issue:`38788`)
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
--
+- Fixed regression in :meth:`Rolling.skew` and :meth:`Rolling.kurt` modifying the object inplace (:issue:`38908`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 54a09a6d2ede7..882674a5c5c92 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -523,7 +523,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
float64_t x = 0, xx = 0, xxx = 0
int64_t nobs = 0, i, j, N = len(values), nobs_mean = 0
int64_t s, e
- ndarray[float64_t] output, mean_array
+ ndarray[float64_t] output, mean_array, values_copy
bint is_monotonic_increasing_bounds
minp = max(minp, 3)
@@ -532,10 +532,11 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
)
output = np.empty(N, dtype=float)
min_val = np.nanmin(values)
+ values_copy = np.copy(values)
with nogil:
for i in range(0, N):
- val = values[i]
+ val = values_copy[i]
if notnan(val):
nobs_mean += 1
sum_val += val
@@ -544,7 +545,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
if min_val - mean_val > -1e5:
mean_val = round(mean_val)
for i in range(0, N):
- values[i] = values[i] - mean_val
+ values_copy[i] = values_copy[i] - mean_val
for i in range(0, N):
@@ -556,7 +557,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
if i == 0 or not is_monotonic_increasing_bounds:
for j in range(s, e):
- val = values[j]
+ val = values_copy[j]
add_skew(val, &nobs, &x, &xx, &xxx, &compensation_x_add,
&compensation_xx_add, &compensation_xxx_add)
@@ -566,13 +567,13 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
# and removed
# calculate deletes
for j in range(start[i - 1], s):
- val = values[j]
+ val = values_copy[j]
remove_skew(val, &nobs, &x, &xx, &xxx, &compensation_x_remove,
&compensation_xx_remove, &compensation_xxx_remove)
# calculate adds
for j in range(end[i - 1], e):
- val = values[j]
+ val = values_copy[j]
add_skew(val, &nobs, &x, &xx, &xxx, &compensation_x_add,
&compensation_xx_add, &compensation_xxx_add)
@@ -703,7 +704,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
float64_t compensation_x_remove = 0, compensation_x_add = 0
float64_t x = 0, xx = 0, xxx = 0, xxxx = 0
int64_t nobs = 0, i, j, s, e, N = len(values), nobs_mean = 0
- ndarray[float64_t] output
+ ndarray[float64_t] output, values_copy
bint is_monotonic_increasing_bounds
minp = max(minp, 4)
@@ -711,11 +712,12 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
start, end
)
output = np.empty(N, dtype=float)
+ values_copy = np.copy(values)
min_val = np.nanmin(values)
with nogil:
for i in range(0, N):
- val = values[i]
+ val = values_copy[i]
if notnan(val):
nobs_mean += 1
sum_val += val
@@ -724,7 +726,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
if min_val - mean_val > -1e4:
mean_val = round(mean_val)
for i in range(0, N):
- values[i] = values[i] - mean_val
+ values_copy[i] = values_copy[i] - mean_val
for i in range(0, N):
@@ -736,7 +738,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
if i == 0 or not is_monotonic_increasing_bounds:
for j in range(s, e):
- add_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx,
+ add_kurt(values_copy[j], &nobs, &x, &xx, &xxx, &xxxx,
&compensation_x_add, &compensation_xx_add,
&compensation_xxx_add, &compensation_xxxx_add)
@@ -746,13 +748,13 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
# and removed
# calculate deletes
for j in range(start[i - 1], s):
- remove_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx,
+ remove_kurt(values_copy[j], &nobs, &x, &xx, &xxx, &xxxx,
&compensation_x_remove, &compensation_xx_remove,
&compensation_xxx_remove, &compensation_xxxx_remove)
# calculate adds
for j in range(end[i - 1], e):
- add_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx,
+ add_kurt(values_copy[j], &nobs, &x, &xx, &xxx, &xxxx,
&compensation_x_add, &compensation_xx_add,
&compensation_xxx_add, &compensation_xxxx_add)
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 10b23cadfe279..e2cdf76d038ec 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -1102,11 +1102,13 @@ def test_groupby_rolling_nan_included():
@pytest.mark.parametrize("method", ["skew", "kurt"])
def test_rolling_skew_kurt_numerical_stability(method):
- # GH: 6929
- s = Series(np.random.rand(10))
- expected = getattr(s.rolling(3), method)()
- s = s + 50000
- result = getattr(s.rolling(3), method)()
+ # GH#6929
+ ser = Series(np.random.rand(10))
+ ser_copy = ser.copy()
+ expected = getattr(ser.rolling(3), method)()
+ tm.assert_series_equal(ser, ser_copy)
+ ser = ser + 50000
+ result = getattr(ser.rolling(3), method)()
tm.assert_series_equal(result, expected)
| Backport PR #38909: BUG: Fixed regression in rolling.skew and rolling.kurt modifying object | https://api.github.com/repos/pandas-dev/pandas/pulls/38945 | 2021-01-04T13:39:05Z | 2021-01-04T15:23:01Z | 2021-01-04T15:23:01Z | 2021-01-04T15:23:01Z |
Backport PR #38893: doc fix for testing.assert_series_equal check_freq arg | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index b1f8389420cd9..4102bdd07aa8f 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -15,12 +15,12 @@ including other versions of pandas.
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. _whatsnew_121.api_breaking.testing.assert_frame_equal:
+.. _whatsnew_121.api_breaking.testing.check_freq:
-Added ``check_freq`` argument to ``testing.assert_frame_equal``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Added ``check_freq`` argument to ``testing.assert_frame_equal`` and ``testing.assert_series_equal``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` now raises ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked by :func:`testing.assert_frame_equal`.
+The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` now raise ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked.
.. ---------------------------------------------------------------------------
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 0b0778f3d3e5c..90840033ca099 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -1334,6 +1334,8 @@ def assert_series_equal(
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
+
+ .. versionadded:: 1.1.0
check_flags : bool, default True
Whether to check the `flags` attribute.
| Backport PR #38893 | https://api.github.com/repos/pandas-dev/pandas/pulls/38942 | 2021-01-04T11:47:48Z | 2021-01-04T13:19:25Z | 2021-01-04T13:19:25Z | 2021-01-04T13:47:10Z |
DOC: minor tweaks to formatting on SQL comparison page | diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst
index 52799442d6118..75d26354ddfa5 100644
--- a/doc/source/getting_started/comparison/comparison_with_sql.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sql.rst
@@ -69,31 +69,31 @@ Filtering in SQL is done via a WHERE clause.
.. include:: includes/filtering.rst
-Just like SQL's OR and AND, multiple conditions can be passed to a DataFrame using | (OR) and &
-(AND).
+Just like SQL's ``OR`` and ``AND``, multiple conditions can be passed to a DataFrame using ``|``
+(``OR``) and ``&`` (``AND``).
+
+Tips of more than $5 at Dinner meals:
.. code-block:: sql
- -- tips of more than $5.00 at Dinner meals
SELECT *
FROM tips
WHERE time = 'Dinner' AND tip > 5.00;
.. ipython:: python
- # tips of more than $5.00 at Dinner meals
tips[(tips["time"] == "Dinner") & (tips["tip"] > 5.00)]
+Tips by parties of at least 5 diners OR bill total was more than $45:
+
.. code-block:: sql
- -- tips by parties of at least 5 diners OR bill total was more than $45
SELECT *
FROM tips
WHERE size >= 5 OR total_bill > 45;
.. ipython:: python
- # tips by parties of at least 5 diners OR bill total was more than $45
tips[(tips["size"] >= 5) | (tips["total_bill"] > 45)]
NULL checking is done using the :meth:`~pandas.Series.notna` and :meth:`~pandas.Series.isna`
@@ -134,7 +134,7 @@ Getting items where ``col1`` IS NOT NULL can be done with :meth:`~pandas.Series.
GROUP BY
--------
-In pandas, SQL's GROUP BY operations are performed using the similarly named
+In pandas, SQL's ``GROUP BY`` operations are performed using the similarly named
:meth:`~pandas.DataFrame.groupby` method. :meth:`~pandas.DataFrame.groupby` typically refers to a
process where we'd like to split a dataset into groups, apply some function (typically aggregation)
, and then combine the groups together.
@@ -162,7 +162,7 @@ The pandas equivalent would be:
Notice that in the pandas code we used :meth:`~pandas.core.groupby.DataFrameGroupBy.size` and not
:meth:`~pandas.core.groupby.DataFrameGroupBy.count`. This is because
:meth:`~pandas.core.groupby.DataFrameGroupBy.count` applies the function to each column, returning
-the number of ``not null`` records within each.
+the number of ``NOT NULL`` records within each.
.. ipython:: python
@@ -223,10 +223,10 @@ Grouping by more than one column is done by passing a list of columns to the
JOIN
----
-JOINs can be performed with :meth:`~pandas.DataFrame.join` or :meth:`~pandas.merge`. By default,
-:meth:`~pandas.DataFrame.join` will join the DataFrames on their indices. Each method has
-parameters allowing you to specify the type of join to perform (LEFT, RIGHT, INNER, FULL) or the
-columns to join on (column names or indices).
+``JOIN``\s can be performed with :meth:`~pandas.DataFrame.join` or :meth:`~pandas.merge`. By
+default, :meth:`~pandas.DataFrame.join` will join the DataFrames on their indices. Each method has
+parameters allowing you to specify the type of join to perform (``LEFT``, ``RIGHT``, ``INNER``,
+``FULL``) or the columns to join on (column names or indices).
.. ipython:: python
@@ -235,7 +235,7 @@ columns to join on (column names or indices).
Assume we have two database tables of the same name and structure as our DataFrames.
-Now let's go over the various types of JOINs.
+Now let's go over the various types of ``JOIN``\s.
INNER JOIN
~~~~~~~~~~
@@ -261,9 +261,11 @@ column with another DataFrame's index.
LEFT OUTER JOIN
~~~~~~~~~~~~~~~
+
+Show all records from ``df1``.
+
.. code-block:: sql
- -- show all records from df1
SELECT *
FROM df1
LEFT OUTER JOIN df2
@@ -271,14 +273,15 @@ LEFT OUTER JOIN
.. ipython:: python
- # show all records from df1
pd.merge(df1, df2, on="key", how="left")
RIGHT JOIN
~~~~~~~~~~
+
+Show all records from ``df2``.
+
.. code-block:: sql
- -- show all records from df2
SELECT *
FROM df1
RIGHT OUTER JOIN df2
@@ -286,17 +289,17 @@ RIGHT JOIN
.. ipython:: python
- # show all records from df2
pd.merge(df1, df2, on="key", how="right")
FULL JOIN
~~~~~~~~~
-pandas also allows for FULL JOINs, which display both sides of the dataset, whether or not the
-joined columns find a match. As of writing, FULL JOINs are not supported in all RDBMS (MySQL).
+pandas also allows for ``FULL JOIN``\s, which display both sides of the dataset, whether or not the
+joined columns find a match. As of writing, ``FULL JOIN``\s are not supported in all RDBMS (MySQL).
+
+Show all records from both tables.
.. code-block:: sql
- -- show all records from both tables
SELECT *
FROM df1
FULL OUTER JOIN df2
@@ -304,13 +307,13 @@ joined columns find a match. As of writing, FULL JOINs are not supported in all
.. ipython:: python
- # show all records from both frames
pd.merge(df1, df2, on="key", how="outer")
UNION
-----
-UNION ALL can be performed using :meth:`~pandas.concat`.
+
+``UNION ALL`` can be performed using :meth:`~pandas.concat`.
.. ipython:: python
@@ -342,7 +345,7 @@ UNION ALL can be performed using :meth:`~pandas.concat`.
pd.concat([df1, df2])
-SQL's UNION is similar to UNION ALL, however UNION will remove duplicate rows.
+SQL's ``UNION`` is similar to ``UNION ALL``, however ``UNION`` will remove duplicate rows.
.. code-block:: sql
@@ -444,7 +447,7 @@ the same using ``rank(method='first')`` function
Let's find tips with (rank < 3) per gender group for (tips < 2).
Notice that when using ``rank(method='min')`` function
``rnk_min`` remains the same for the same ``tip``
-(as Oracle's RANK() function)
+(as Oracle's ``RANK()`` function)
.. ipython:: python
@@ -477,7 +480,7 @@ DELETE
DELETE FROM tips
WHERE tip > 9;
-In pandas we select the rows that should remain, instead of deleting them
+In pandas we select the rows that should remain instead of deleting them:
.. ipython:: python
diff --git a/doc/source/getting_started/comparison/includes/filtering.rst b/doc/source/getting_started/comparison/includes/filtering.rst
index 861a93d92c2c2..8ddf7c0d2fa39 100644
--- a/doc/source/getting_started/comparison/includes/filtering.rst
+++ b/doc/source/getting_started/comparison/includes/filtering.rst
@@ -1,5 +1,5 @@
DataFrames can be filtered in multiple ways; the most intuitive of which is using
-:ref:`boolean indexing <indexing.boolean>`
+:ref:`boolean indexing <indexing.boolean>`.
.. ipython:: python
| Adding code formatting, missing punctuation, etc. No changes of substance.
- [ ] ~~closes #xxxx~~
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] ~~whatsnew entry~~ | https://api.github.com/repos/pandas-dev/pandas/pulls/38941 | 2021-01-04T08:41:33Z | 2021-01-04T13:33:13Z | 2021-01-04T13:33:13Z | 2021-01-04T13:33:16Z |
⬆️ UPGRADE: Autoupdate pre-commit config | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2dade8afbf91f..f5d8503041ccd 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,11 +20,9 @@ repos:
types: [text]
args: [--append-config=flake8/cython-template.cfg]
- repo: https://github.com/PyCQA/isort
- rev: 5.6.4
+ rev: 5.7.0
hooks:
- id: isort
- types: [text] # overwrite upstream `types: [python]`
- types_or: [python, cython]
- repo: https://github.com/asottile/pyupgrade
rev: v2.7.4
hooks:
| <!-- START pr-commits -->
<!-- END pr-commits -->
## Base PullRequest
default branch (https://github.com/pandas-dev/pandas/tree/master)
## Command results
<details>
<summary>Details: </summary>
<details>
<summary><em>add path</em></summary>
```Shell
/home/runner/work/_actions/technote-space/create-pr-action/v2/node_modules/npm-check-updates/bin
```
</details>
<details>
<summary><em>pip install pre-commit</em></summary>
```Shell
Collecting pre-commit
Downloading pre_commit-2.9.3-py2.py3-none-any.whl (184 kB)
Collecting cfgv>=2.0.0
Using cached cfgv-3.2.0-py2.py3-none-any.whl (7.3 kB)
Collecting identify>=1.0.0
Downloading identify-1.5.11-py2.py3-none-any.whl (97 kB)
Collecting nodeenv>=0.11.1
Using cached nodeenv-1.5.0-py2.py3-none-any.whl (21 kB)
Collecting pyyaml>=5.1
Using cached PyYAML-5.3.1-cp39-cp39-linux_x86_64.whl
Collecting toml
Using cached toml-0.10.2-py2.py3-none-any.whl (16 kB)
Collecting virtualenv>=20.0.8
Downloading virtualenv-20.2.2-py2.py3-none-any.whl (5.7 MB)
Collecting appdirs<2,>=1.4.3
Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB)
Collecting distlib<1,>=0.3.1
Using cached distlib-0.3.1-py2.py3-none-any.whl (335 kB)
Collecting filelock<4,>=3.0.0
Using cached filelock-3.0.12-py3-none-any.whl (7.6 kB)
Collecting six<2,>=1.9.0
Using cached six-1.15.0-py2.py3-none-any.whl (10 kB)
Installing collected packages: six, filelock, distlib, appdirs, virtualenv, toml, pyyaml, nodeenv, identify, cfgv, pre-commit
Successfully installed appdirs-1.4.4 cfgv-3.2.0 distlib-0.3.1 filelock-3.0.12 identify-1.5.11 nodeenv-1.5.0 pre-commit-2.9.3 pyyaml-5.3.1 six-1.15.0 toml-0.10.2 virtualenv-20.2.2
```
### stderr:
```Shell
WARNING: You are using pip version 20.3.1; however, version 20.3.3 is available.
You should consider upgrading via the '/opt/hostedtoolcache/Python/3.9.1/x64/bin/python -m pip install --upgrade pip' command.
```
</details>
<details>
<summary><em>pre-commit autoupdate || (exit 0);</em></summary>
```Shell
Updating https://github.com/python/black ... already up to date.
Updating https://gitlab.com/pycqa/flake8 ... already up to date.
Updating https://github.com/PyCQA/isort ... [INFO] Initializing environment for https://github.com/PyCQA/isort.
updating 5.6.4 -> 5.7.0.
Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade.
already up to date.
Updating https://github.com/pre-commit/pygrep-hooks ... already up to date.
Updating https://github.com/asottile/yesqa ... already up to date.
Updating https://github.com/pre-commit/pre-commit-hooks ... [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks.
already up to date.
Updating https://github.com/codespell-project/codespell ... [INFO] Initializing environment for https://github.com/codespell-project/codespell.
already up to date.
```
</details>
<details>
<summary><em>pre-commit run -a || (exit 0);</em></summary>
```Shell
[INFO] Installing environment for https://github.com/python/black.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://gitlab.com/pycqa/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://gitlab.com/pycqa/flake8.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/PyCQA/isort.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/pyupgrade.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/asottile/yesqa.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
[INFO] Installing environment for https://github.com/codespell-project/codespell.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
black..................................................................................................Passed
flake8.................................................................................................Passed
flake8 (cython)........................................................................................Passed
flake8 (cython template)...............................................................................Passed
isort..................................................................................................Passed
pyupgrade..............................................................................................Passed
rst ``code`` is two backticks..........................................................................Passed
rst directives end with two colons.....................................................................Passed
rst ``inline code`` next to normal text................................................................Passed
Generate pip dependency from conda.....................................................................Passed
flake8-rst.............................................................................................Passed
Check for non-standard imports.........................................................................Passed
Check for non-standard numpy.random-related imports excluding pandas/_testing.py.......................Passed
Check for non-standard imports in test suite...........................................................Passed
Check for incorrect code block or IPython directives...................................................Passed
Check for use of not concatenated strings..............................................................Passed
Check for strings with wrong placed spaces.............................................................Passed
Check for import of private attributes across modules..................................................Passed
Check for use of private functions across modules......................................................Passed
Check for use of bare pytest raises....................................................................Passed
Check for inconsistent use of pandas namespace in tests................................................Passed
Check for use of Union[Series, DataFrame] instead of FrameOrSeriesUnion alias..........................Passed
Check for use of foo.__class__ instead of type(foo)....................................................Passed
Check for use of comment-based annotation syntax and missing error codes...............................Passed
Check code for instances of os.remove..................................................................Passed
Strip unnecessary `# noqa`s............................................................................Passed
Fix End of Files.......................................................................................Passed
Trim Trailing Whitespace...............................................................................Passed
codespell..............................................................................................Passed
```
</details>
</details>
## Changed files
<details>
<summary>Changed file: </summary>
- .pre-commit-config.yaml
</details>
<hr>
[:octocat: Repo](https://github.com/technote-space/create-pr-action) | [:memo: Issues](https://github.com/technote-space/create-pr-action/issues) | [:department_store: Marketplace](https://github.com/marketplace/actions/create-pr-action) | https://api.github.com/repos/pandas-dev/pandas/pulls/38940 | 2021-01-04T07:40:09Z | 2021-01-05T09:41:28Z | 2021-01-05T09:41:28Z | 2021-01-05T09:41:33Z |
API: honor copy=True when passing dict to DataFrame | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 1e723493a4cc8..63902b53ea36d 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -110,6 +110,30 @@ both XPath 1.0 and XSLT 1.0 is available. (:issue:`27554`)
For more, see :ref:`io.xml` in the user guide on IO tools.
+.. _whatsnew_130.dataframe_honors_copy_with_dict:
+
+DataFrame constructor honors ``copy=False`` with dict
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When passing a dictionary to :class:`DataFrame` with ``copy=False``,
+a copy will no longer be made (:issue:`32960`)
+
+.. ipython:: python
+
+ arr = np.array([1, 2, 3])
+ df = pd.DataFrame({"A": arr, "B": arr.copy()}, copy=False)
+ df
+
+``df["A"]`` remains a view on ``arr``:
+
+.. ipython:: python
+
+ arr[0] = 0
+ assert df.iloc[0, 0] == 0
+
+The default behavior when not passing ``copy`` will remain unchanged, i.e.
+a copy will be made.
+
.. _whatsnew_130.enhancements.other:
Other enhancements
@@ -546,6 +570,8 @@ Conversion
- Bug in creating a :class:`DataFrame` from an empty ``np.recarray`` not retaining the original dtypes (:issue:`40121`)
- Bug in :class:`DataFrame` failing to raise ``TypeError`` when constructing from a ``frozenset`` (:issue:`40163`)
- Bug in :class:`Index` construction silently ignoring a passed ``dtype`` when the data cannot be cast to that dtype (:issue:`21311`)
+- Bug in :class:`DataFrame` construction with a dictionary containing an arraylike with ``ExtensionDtype`` and ``copy=True`` failing to make a copy (:issue:`38939`)
+-
Strings
^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 510bdfcb0079f..6f2edaa300c93 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -476,8 +476,12 @@ class DataFrame(NDFrame, OpsMixin):
RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
- copy : bool, default False
- Copy data from inputs. Only affects DataFrame / 2d ndarray input.
+ copy : bool or None, default None
+ Copy data from inputs.
+ For dict data, the default of None behaves like ``copy=True``. For DataFrame
+ or 2d ndarray input, the default of None behaves like ``copy=False``.
+
+ .. versionchanged:: 1.3.0
See Also
--------
@@ -555,8 +559,16 @@ def __init__(
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
- copy: bool = False,
+ copy: Optional[bool] = None,
):
+
+ if copy is None:
+ if isinstance(data, dict) or data is None:
+ # retain pre-GH#38939 default behavior
+ copy = True
+ else:
+ copy = False
+
if data is None:
data = {}
if dtype is not None:
@@ -565,18 +577,13 @@ def __init__(
if isinstance(data, DataFrame):
data = data._mgr
- # first check if a Manager is passed without any other arguments
- # -> use fastpath (without checking Manager type)
- if (
- index is None
- and columns is None
- and dtype is None
- and copy is False
- and isinstance(data, (BlockManager, ArrayManager))
- ):
- # GH#33357 fastpath
- NDFrame.__init__(self, data)
- return
+ if isinstance(data, (BlockManager, ArrayManager)):
+ # first check if a Manager is passed without any other arguments
+ # -> use fastpath (without checking Manager type)
+ if index is None and columns is None and dtype is None and not copy:
+ # GH#33357 fastpath
+ NDFrame.__init__(self, data)
+ return
manager = get_option("mode.data_manager")
@@ -586,7 +593,8 @@ def __init__(
)
elif isinstance(data, dict):
- mgr = dict_to_mgr(data, index, columns, dtype=dtype, typ=manager)
+ # GH#38939 de facto copy defaults to False only in non-dict cases
+ mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 1ee38834c5758..0ecd798986c53 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1807,7 +1807,9 @@ def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
- return result.unstack()
+ # FIXME: not being consolidated breaks
+ # test_describe_with_duplicate_output_column_names
+ return result._consolidate().unstack()
@final
def resample(self, rule, *args, **kwargs):
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 9959174373034..5b4b710838ef8 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -101,9 +101,11 @@ def arrays_to_mgr(
arr_names,
index,
columns,
+ *,
dtype: Optional[DtypeObj] = None,
verify_integrity: bool = True,
typ: Optional[str] = None,
+ consolidate: bool = True,
) -> Manager:
"""
Segregate Series based on type and coerce into matrices.
@@ -131,7 +133,9 @@ def arrays_to_mgr(
axes = [columns, index]
if typ == "block":
- return create_block_manager_from_arrays(arrays, arr_names, axes)
+ return create_block_manager_from_arrays(
+ arrays, arr_names, axes, consolidate=consolidate
+ )
elif typ == "array":
if len(columns) != len(arrays):
assert len(arrays) == 0
@@ -181,7 +185,7 @@ def rec_array_to_mgr(
if columns is None:
columns = arr_columns
- mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype, typ=typ)
+ mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype=dtype, typ=typ)
if copy:
mgr = mgr.copy()
@@ -376,7 +380,13 @@ def maybe_squeeze_dt64tz(dta: ArrayLike) -> ArrayLike:
def dict_to_mgr(
- data: Dict, index, columns, dtype: Optional[DtypeObj], typ: str
+ data: Dict,
+ index,
+ columns,
+ *,
+ dtype: Optional[DtypeObj] = None,
+ typ: str = "block",
+ copy: bool = True,
) -> Manager:
"""
Segregate Series based on type and coerce into matrices.
@@ -414,6 +424,8 @@ def dict_to_mgr(
val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
arrays.loc[missing] = [val] * missing.sum()
+ arrays = list(arrays)
+
else:
keys = list(data.keys())
columns = data_names = Index(keys)
@@ -424,7 +436,21 @@ def dict_to_mgr(
arrays = [
arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays
]
- return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype, typ=typ)
+
+ if copy:
+ # arrays_to_mgr (via form_blocks) won't make copies for EAs
+ # dtype attr check to exclude EADtype-castable strs
+ arrays = [
+ x
+ if not hasattr(x, "dtype") or not isinstance(x.dtype, ExtensionDtype)
+ else x.copy()
+ for x in arrays
+ ]
+ # TODO: can we get rid of the dt64tz special case above?
+
+ return arrays_to_mgr(
+ arrays, data_names, index, columns, dtype=dtype, typ=typ, consolidate=copy
+ )
def nested_data_to_arrays(
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 69338abcd7d58..6681015856d6b 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -53,7 +53,10 @@
import pandas.core.algorithms as algos
from pandas.core.arrays.sparse import SparseDtype
-from pandas.core.construction import extract_array
+from pandas.core.construction import (
+ ensure_wrapped_if_datetimelike,
+ extract_array,
+)
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.api import (
Float64Index,
@@ -991,6 +994,8 @@ def fast_xs(self, loc: int) -> ArrayLike:
# Any]]"
result = np.empty(n, dtype=dtype) # type: ignore[arg-type]
+ result = ensure_wrapped_if_datetimelike(result)
+
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
@@ -1693,7 +1698,7 @@ def set_values(self, values: ArrayLike):
def create_block_manager_from_blocks(
- blocks: List[Block], axes: List[Index]
+ blocks: List[Block], axes: List[Index], consolidate: bool = True
) -> BlockManager:
try:
mgr = BlockManager(blocks, axes)
@@ -1703,7 +1708,8 @@ def create_block_manager_from_blocks(
tot_items = sum(arr.shape[0] for arr in arrays)
raise construction_error(tot_items, arrays[0].shape[1:], axes, err)
- mgr._consolidate_inplace()
+ if consolidate:
+ mgr._consolidate_inplace()
return mgr
@@ -1713,7 +1719,10 @@ def _extract_array(obj):
def create_block_manager_from_arrays(
- arrays, names: Index, axes: List[Index]
+ arrays,
+ names: Index,
+ axes: List[Index],
+ consolidate: bool = True,
) -> BlockManager:
assert isinstance(names, Index)
assert isinstance(axes, list)
@@ -1722,12 +1731,13 @@ def create_block_manager_from_arrays(
arrays = [_extract_array(x) for x in arrays]
try:
- blocks = _form_blocks(arrays, names, axes)
+ blocks = _form_blocks(arrays, names, axes, consolidate)
mgr = BlockManager(blocks, axes)
- mgr._consolidate_inplace()
- return mgr
except ValueError as e:
raise construction_error(len(arrays), arrays[0].shape, axes, e)
+ if consolidate:
+ mgr._consolidate_inplace()
+ return mgr
def construction_error(
@@ -1760,7 +1770,7 @@ def construction_error(
def _form_blocks(
- arrays: List[ArrayLike], names: Index, axes: List[Index]
+ arrays: List[ArrayLike], names: Index, axes: List[Index], consolidate: bool
) -> List[Block]:
# put "leftover" items in float bucket, where else?
# generalize?
@@ -1786,15 +1796,21 @@ def _form_blocks(
blocks: List[Block] = []
if len(items_dict["NumericBlock"]):
- numeric_blocks = _multi_blockify(items_dict["NumericBlock"])
+ numeric_blocks = _multi_blockify(
+ items_dict["NumericBlock"], consolidate=consolidate
+ )
blocks.extend(numeric_blocks)
if len(items_dict["TimeDeltaBlock"]):
- timedelta_blocks = _multi_blockify(items_dict["TimeDeltaBlock"])
+ timedelta_blocks = _multi_blockify(
+ items_dict["TimeDeltaBlock"], consolidate=consolidate
+ )
blocks.extend(timedelta_blocks)
if len(items_dict["DatetimeBlock"]):
- datetime_blocks = _simple_blockify(items_dict["DatetimeBlock"], DT64NS_DTYPE)
+ datetime_blocks = _simple_blockify(
+ items_dict["DatetimeBlock"], DT64NS_DTYPE, consolidate=consolidate
+ )
blocks.extend(datetime_blocks)
if len(items_dict["DatetimeTZBlock"]):
@@ -1805,7 +1821,9 @@ def _form_blocks(
blocks.extend(dttz_blocks)
if len(items_dict["ObjectBlock"]) > 0:
- object_blocks = _simple_blockify(items_dict["ObjectBlock"], np.object_)
+ object_blocks = _simple_blockify(
+ items_dict["ObjectBlock"], np.object_, consolidate=consolidate
+ )
blocks.extend(object_blocks)
if len(items_dict["CategoricalBlock"]) > 0:
@@ -1844,11 +1862,14 @@ def _form_blocks(
return blocks
-def _simple_blockify(tuples, dtype) -> List[Block]:
+def _simple_blockify(tuples, dtype, consolidate: bool) -> List[Block]:
"""
return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
+ if not consolidate:
+ return _tuples_to_blocks_no_consolidate(tuples, dtype=dtype)
+
values, placement = _stack_arrays(tuples, dtype)
# TODO: CHECK DTYPE?
@@ -1859,8 +1880,12 @@ def _simple_blockify(tuples, dtype) -> List[Block]:
return [block]
-def _multi_blockify(tuples, dtype: Optional[Dtype] = None):
+def _multi_blockify(tuples, dtype: Optional[DtypeObj] = None, consolidate: bool = True):
""" return an array of blocks that potentially have different dtypes """
+
+ if not consolidate:
+ return _tuples_to_blocks_no_consolidate(tuples, dtype=dtype)
+
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[1].dtype)
@@ -1880,6 +1905,18 @@ def _multi_blockify(tuples, dtype: Optional[Dtype] = None):
return new_blocks
+def _tuples_to_blocks_no_consolidate(tuples, dtype: Optional[DtypeObj]) -> List[Block]:
+ # tuples produced within _form_blocks are of the form (placement, whatever, array)
+ if dtype is not None:
+ return [
+ new_block(
+ np.atleast_2d(x[1].astype(dtype, copy=False)), placement=x[0], ndim=2
+ )
+ for x in tuples
+ ]
+ return [new_block(np.atleast_2d(x[1]), placement=x[0], ndim=2) for x in tuples]
+
+
def _stack_arrays(tuples, dtype: np.dtype):
placement, arrays = zip(*tuples)
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 1e2622d6a8fcd..ef86a8e6a1cb0 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -538,7 +538,6 @@ def test_df_div_zero_series_does_not_commute(self):
def test_df_mod_zero_df(self, using_array_manager):
# GH#3590, modulo as ints
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
-
# this is technically wrong, as the integer portion is coerced to float
first = Series([0, 0, 0, 0])
if not using_array_manager:
@@ -551,6 +550,15 @@ def test_df_mod_zero_df(self, using_array_manager):
result = df % df
tm.assert_frame_equal(result, expected)
+ # GH#38939 If we dont pass copy=False, df is consolidated and
+ # result["first"] is float64 instead of int64
+ df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}, copy=False)
+ first = Series([0, 0, 0, 0], dtype="int64")
+ second = Series([np.nan, np.nan, np.nan, 0])
+ expected = pd.DataFrame({"first": first, "second": second})
+ result = df % df
+ tm.assert_frame_equal(result, expected)
+
def test_df_mod_zero_array(self):
# GH#3590, modulo as ints
df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]})
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 366b24e328642..68dbdd9e0bf35 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -150,7 +150,7 @@ def take(self, indexer, allow_fill=False, fill_value=None):
return self._from_sequence(result)
def copy(self):
- return type(self)(self._data.copy())
+ return type(self)(self._data.copy(), dtype=self.dtype)
def astype(self, dtype, copy=True):
if is_dtype_equal(dtype, self._dtype):
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 437160e78741b..55f9d85574f94 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -261,7 +261,18 @@ def test_dataframe_constructor_with_dtype():
tm.assert_frame_equal(result, expected)
-@pytest.mark.parametrize("frame", [True, False])
+@pytest.mark.parametrize(
+ "frame",
+ [
+ pytest.param(
+ True,
+ marks=pytest.mark.xfail(
+ reason="pd.concat call inside NDFrame.astype reverts the dtype"
+ ),
+ ),
+ False,
+ ],
+)
def test_astype_dispatches(frame):
# This is a dtype-specific test that ensures Series[decimal].astype
# gets all the way through to ExtensionArray.astype
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 0613c727dec98..759277a47f62b 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -285,7 +285,7 @@ def test_combine_le(self, data_repeated):
def test_fillna_copy_frame(self, data_missing):
arr = data_missing.take([1, 1])
- df = pd.DataFrame({"A": arr})
+ df = pd.DataFrame({"A": arr}, copy=False)
filled_val = df.iloc[0, 0]
result = df.fillna(filled_val)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index b76a44b3c86be..d618c4cda4f13 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1997,7 +1997,7 @@ def test_constructor_ndarray_copy(self, float_frame):
def test_constructor_series_copy(self, float_frame):
series = float_frame._series
- df = DataFrame({"A": series["A"]})
+ df = DataFrame({"A": series["A"]}, copy=True)
df["A"][:] = 5
assert not (series["A"] == 5).all()
@@ -2311,6 +2311,86 @@ def test_constructor_list_str_na(self, string_dtype):
expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object)
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize("copy", [False, True])
+ @td.skip_array_manager_not_yet_implemented
+ def test_dict_nocopy(self, copy, any_nullable_numeric_dtype, any_numpy_dtype):
+ a = np.array([1, 2], dtype=any_numpy_dtype)
+ b = np.array([3, 4], dtype=any_numpy_dtype)
+ if b.dtype.kind in ["S", "U"]:
+ # These get cast, making the checks below more cumbersome
+ return
+
+ c = pd.array([1, 2], dtype=any_nullable_numeric_dtype)
+ df = DataFrame({"a": a, "b": b, "c": c}, copy=copy)
+
+ def get_base(obj):
+ if isinstance(obj, np.ndarray):
+ return obj.base
+ elif isinstance(obj.dtype, np.dtype):
+ # i.e. DatetimeArray, TimedeltaArray
+ return obj._ndarray.base
+ else:
+ raise TypeError
+
+ def check_views():
+ # written to work for either BlockManager or ArrayManager
+ assert sum(x is c for x in df._mgr.arrays) == 1
+ assert (
+ sum(
+ get_base(x) is a
+ for x in df._mgr.arrays
+ if isinstance(x.dtype, np.dtype)
+ )
+ == 1
+ )
+ assert (
+ sum(
+ get_base(x) is b
+ for x in df._mgr.arrays
+ if isinstance(x.dtype, np.dtype)
+ )
+ == 1
+ )
+
+ if not copy:
+ # constructor preserves views
+ check_views()
+
+ df.iloc[0, 0] = 0
+ df.iloc[0, 1] = 0
+ if not copy:
+ # Check that the underlying data behind df["c"] is still `c`
+ # after setting with iloc. Since we don't know which entry in
+ # df._mgr.arrays corresponds to df["c"], we just check that exactly
+ # one of these arrays is `c`. GH#38939
+ assert sum(x is c for x in df._mgr.arrays) == 1
+ # TODO: we can call check_views if we stop consolidating
+ # in setitem_with_indexer
+
+ # FIXME: until GH#35417, iloc.setitem into EA values does not preserve
+ # view, so we have to check in the other direction
+ # df.iloc[0, 2] = 0
+ # if not copy:
+ # check_views()
+ c[0] = 0
+
+ if copy:
+ if a.dtype.kind == "M":
+ assert a[0] == a.dtype.type(1, "ns")
+ assert b[0] == b.dtype.type(3, "ns")
+ else:
+ assert a[0] == a.dtype.type(1)
+ assert b[0] == b.dtype.type(3)
+ # FIXME: enable after GH#35417
+ # assert c[0] == 1
+ assert df.iloc[0, 2] == 1
+ else:
+ # TODO: we can call check_views if we stop consolidating
+ # in setitem_with_indexer
+ # FIXME: enable after GH#35417
+ # assert b[0] == 0
+ assert df.iloc[0, 2] == 0
+
class TestDataFrameConstructorWithDatetimeTZ:
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 85accac5a8235..ae07fc6e3b2b3 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1063,6 +1063,7 @@ def test_loc_setitem_empty_append_raises(self):
[
"cannot copy sequence with size 2 to array axis with dimension 0",
r"could not broadcast input array from shape \(2,\) into shape \(0,\)",
+ "Must have equal len keys and value when setting with an iterable",
]
)
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index b0d41a89931e9..b8680cc4e611e 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -171,7 +171,8 @@ def test_partial_setting_mixed_dtype(self):
tm.assert_frame_equal(df, DataFrame(columns=["A", "B"], index=[0]))
# columns will align
- df = DataFrame(columns=["A", "B"])
+ # TODO: it isn't great that this behavior depends on consolidation
+ df = DataFrame(columns=["A", "B"])._consolidate()
df.loc[0] = Series(1, index=["B"])
exp = DataFrame([[np.nan, 1]], columns=["A", "B"], index=[0], dtype="float64")
| - [x] closes #32960
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
xref #34872 cc @TomAugspurger used the test_dict_nocopy you wrote but it ended up pretty mangled | https://api.github.com/repos/pandas-dev/pandas/pulls/38939 | 2021-01-04T06:26:44Z | 2021-03-31T01:03:22Z | 2021-03-31T01:03:21Z | 2021-03-31T01:05:17Z |
DOC: remove use of head() in the comparison docs | diff --git a/doc/source/getting_started/comparison/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst
index b97efe31b8b29..2b316cccb7fc9 100644
--- a/doc/source/getting_started/comparison/comparison_with_sas.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sas.rst
@@ -4,23 +4,13 @@
Comparison with SAS
********************
+
For potential users coming from `SAS <https://en.wikipedia.org/wiki/SAS_(software)>`__
this page is meant to demonstrate how different SAS operations would be
performed in pandas.
.. include:: includes/introduction.rst
-.. note::
-
- Throughout this tutorial, the pandas ``DataFrame`` will be displayed by calling
- ``df.head()``, which displays the first N (default 5) rows of the ``DataFrame``.
- This is often used in interactive work (e.g. `Jupyter notebook
- <https://jupyter.org/>`_ or terminal) - the equivalent in SAS would be:
-
- .. code-block:: sas
-
- proc print data=df(obs=5);
- run;
Data structures
---------------
@@ -120,7 +110,7 @@ The pandas method is :func:`read_csv`, which works similarly.
"pandas/master/pandas/tests/io/data/csv/tips.csv"
)
tips = pd.read_csv(url)
- tips.head()
+ tips
Like ``PROC IMPORT``, ``read_csv`` can take a number of parameters to specify
@@ -138,6 +128,19 @@ In addition to text/csv, pandas supports a variety of other data formats
such as Excel, HDF5, and SQL databases. These are all read via a ``pd.read_*``
function. See the :ref:`IO documentation<io>` for more details.
+Limiting output
+~~~~~~~~~~~~~~~
+
+.. include:: includes/limit.rst
+
+The equivalent in SAS would be:
+
+.. code-block:: sas
+
+ proc print data=df(obs=5);
+ run;
+
+
Exporting data
~~~~~~~~~~~~~~
@@ -173,20 +176,8 @@ be used on new or existing columns.
new_bill = total_bill / 2;
run;
-pandas provides similar vectorized operations by
-specifying the individual ``Series`` in the ``DataFrame``.
-New columns can be assigned in the same way.
+.. include:: includes/column_operations.rst
-.. ipython:: python
-
- tips["total_bill"] = tips["total_bill"] - 2
- tips["new_bill"] = tips["total_bill"] / 2.0
- tips.head()
-
-.. ipython:: python
- :suppress:
-
- tips = tips.drop("new_bill", axis=1)
Filtering
~~~~~~~~~
@@ -278,18 +269,7 @@ drop, and rename columns.
rename total_bill=total_bill_2;
run;
-The same operations are expressed in pandas below.
-
-.. ipython:: python
-
- # keep
- tips[["sex", "total_bill", "tip"]].head()
-
- # drop
- tips.drop("sex", axis=1).head()
-
- # rename
- tips.rename(columns={"total_bill": "total_bill_2"}).head()
+.. include:: includes/column_selection.rst
Sorting by values
@@ -442,6 +422,8 @@ input frames.
Missing data
------------
+Both pandas and SAS have a representation for missing data.
+
.. include:: includes/missing_intro.rst
One difference is that missing data cannot be compared to its sentinel value.
diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst
index 52799442d6118..685aea6334556 100644
--- a/doc/source/getting_started/comparison/comparison_with_sql.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sql.rst
@@ -21,7 +21,7 @@ structure.
"/pandas/master/pandas/tests/io/data/csv/tips.csv"
)
tips = pd.read_csv(url)
- tips.head()
+ tips
SELECT
------
@@ -31,14 +31,13 @@ to select all columns):
.. code-block:: sql
SELECT total_bill, tip, smoker, time
- FROM tips
- LIMIT 5;
+ FROM tips;
With pandas, column selection is done by passing a list of column names to your DataFrame:
.. ipython:: python
- tips[["total_bill", "tip", "smoker", "time"]].head(5)
+ tips[["total_bill", "tip", "smoker", "time"]]
Calling the DataFrame without the list of column names would display all columns (akin to SQL's
``*``).
@@ -48,14 +47,13 @@ In SQL, you can add a calculated column:
.. code-block:: sql
SELECT *, tip/total_bill as tip_rate
- FROM tips
- LIMIT 5;
+ FROM tips;
With pandas, you can use the :meth:`DataFrame.assign` method of a DataFrame to append a new column:
.. ipython:: python
- tips.assign(tip_rate=tips["tip"] / tips["total_bill"]).head(5)
+ tips.assign(tip_rate=tips["tip"] / tips["total_bill"])
WHERE
-----
@@ -368,6 +366,20 @@ In pandas, you can use :meth:`~pandas.concat` in conjunction with
pd.concat([df1, df2]).drop_duplicates()
+
+LIMIT
+-----
+
+.. code-block:: sql
+
+ SELECT * FROM tips
+ LIMIT 10;
+
+.. ipython:: python
+
+ tips.head(10)
+
+
pandas equivalents for some SQL analytic and aggregate functions
----------------------------------------------------------------
diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst
index ca536e7273870..43cb775b5461d 100644
--- a/doc/source/getting_started/comparison/comparison_with_stata.rst
+++ b/doc/source/getting_started/comparison/comparison_with_stata.rst
@@ -10,16 +10,6 @@ performed in pandas.
.. include:: includes/introduction.rst
-.. note::
-
- Throughout this tutorial, the pandas ``DataFrame`` will be displayed by calling
- ``df.head()``, which displays the first N (default 5) rows of the ``DataFrame``.
- This is often used in interactive work (e.g. `Jupyter notebook
- <https://jupyter.org/>`_ or terminal) -- the equivalent in Stata would be:
-
- .. code-block:: stata
-
- list in 1/5
Data structures
---------------
@@ -116,7 +106,7 @@ the data set if presented with a url.
"/pandas/master/pandas/tests/io/data/csv/tips.csv"
)
tips = pd.read_csv(url)
- tips.head()
+ tips
Like ``import delimited``, :func:`read_csv` can take a number of parameters to specify
how the data should be parsed. For example, if the data were instead tab delimited,
@@ -141,6 +131,18 @@ such as Excel, SAS, HDF5, Parquet, and SQL databases. These are all read via a
function. See the :ref:`IO documentation<io>` for more details.
+Limiting output
+~~~~~~~~~~~~~~~
+
+.. include:: includes/limit.rst
+
+The equivalent in Stata would be:
+
+.. code-block:: stata
+
+ list in 1/5
+
+
Exporting data
~~~~~~~~~~~~~~
@@ -179,18 +181,8 @@ the column from the data set.
generate new_bill = total_bill / 2
drop new_bill
-pandas provides similar vectorized operations by
-specifying the individual ``Series`` in the ``DataFrame``.
-New columns can be assigned in the same way. The :meth:`DataFrame.drop` method
-drops a column from the ``DataFrame``.
+.. include:: includes/column_operations.rst
-.. ipython:: python
-
- tips["total_bill"] = tips["total_bill"] - 2
- tips["new_bill"] = tips["total_bill"] / 2
- tips.head()
-
- tips = tips.drop("new_bill", axis=1)
Filtering
~~~~~~~~~
@@ -256,20 +248,7 @@ Stata provides keywords to select, drop, and rename columns.
rename total_bill total_bill_2
-The same operations are expressed in pandas below. Note that in contrast to Stata, these
-operations do not happen in place. To make these changes persist, assign the operation back
-to a variable.
-
-.. ipython:: python
-
- # keep
- tips[["sex", "total_bill", "tip"]].head()
-
- # drop
- tips.drop("sex", axis=1).head()
-
- # rename
- tips.rename(columns={"total_bill": "total_bill_2"}).head()
+.. include:: includes/column_selection.rst
Sorting by values
@@ -428,12 +407,14 @@ or the intersection of the two by using the values created in the
restore
merge 1:n key using df2.dta
-.. include:: includes/merge_setup.rst
+.. include:: includes/merge.rst
Missing data
------------
+Both pandas and Stata have a representation for missing data.
+
.. include:: includes/missing_intro.rst
One difference is that missing data cannot be compared to its sentinel value.
diff --git a/doc/source/getting_started/comparison/includes/column_operations.rst b/doc/source/getting_started/comparison/includes/column_operations.rst
new file mode 100644
index 0000000000000..bc5db8e6b8038
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/column_operations.rst
@@ -0,0 +1,11 @@
+pandas provides similar vectorized operations by specifying the individual ``Series`` in the
+``DataFrame``. New columns can be assigned in the same way. The :meth:`DataFrame.drop` method drops
+a column from the ``DataFrame``.
+
+.. ipython:: python
+
+ tips["total_bill"] = tips["total_bill"] - 2
+ tips["new_bill"] = tips["total_bill"] / 2
+ tips
+
+ tips = tips.drop("new_bill", axis=1)
diff --git a/doc/source/getting_started/comparison/includes/column_selection.rst b/doc/source/getting_started/comparison/includes/column_selection.rst
new file mode 100644
index 0000000000000..b925af1294f54
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/column_selection.rst
@@ -0,0 +1,23 @@
+The same operations are expressed in pandas below. Note that these operations do not happen in
+place. To make these changes persist, assign the operation back to a variable.
+
+Keep certain columns
+''''''''''''''''''''
+
+.. ipython:: python
+
+ tips[["sex", "total_bill", "tip"]]
+
+Drop a column
+'''''''''''''
+
+.. ipython:: python
+
+ tips.drop("sex", axis=1)
+
+Rename a column
+'''''''''''''''
+
+.. ipython:: python
+
+ tips.rename(columns={"total_bill": "total_bill_2"})
diff --git a/doc/source/getting_started/comparison/includes/extract_substring.rst b/doc/source/getting_started/comparison/includes/extract_substring.rst
index 78eee286ad467..1ba0dfac2317a 100644
--- a/doc/source/getting_started/comparison/includes/extract_substring.rst
+++ b/doc/source/getting_started/comparison/includes/extract_substring.rst
@@ -4,4 +4,4 @@ indexes are zero-based.
.. ipython:: python
- tips["sex"].str[0:1].head()
+ tips["sex"].str[0:1]
diff --git a/doc/source/getting_started/comparison/includes/find_substring.rst b/doc/source/getting_started/comparison/includes/find_substring.rst
index ee940b64f5cae..42543d05a0014 100644
--- a/doc/source/getting_started/comparison/includes/find_substring.rst
+++ b/doc/source/getting_started/comparison/includes/find_substring.rst
@@ -5,4 +5,4 @@ zero-based.
.. ipython:: python
- tips["sex"].str.find("ale").head()
+ tips["sex"].str.find("ale")
diff --git a/doc/source/getting_started/comparison/includes/groupby.rst b/doc/source/getting_started/comparison/includes/groupby.rst
index caa9f6ec9c9b8..93d5d51e3fb00 100644
--- a/doc/source/getting_started/comparison/includes/groupby.rst
+++ b/doc/source/getting_started/comparison/includes/groupby.rst
@@ -4,4 +4,4 @@ pandas provides a flexible ``groupby`` mechanism that allows similar aggregation
.. ipython:: python
tips_summed = tips.groupby(["sex", "smoker"])[["total_bill", "tip"]].sum()
- tips_summed.head()
+ tips_summed
diff --git a/doc/source/getting_started/comparison/includes/if_then.rst b/doc/source/getting_started/comparison/includes/if_then.rst
index d7977366cfc33..f94e7588827f5 100644
--- a/doc/source/getting_started/comparison/includes/if_then.rst
+++ b/doc/source/getting_started/comparison/includes/if_then.rst
@@ -4,7 +4,7 @@ the ``where`` method from ``numpy``.
.. ipython:: python
tips["bucket"] = np.where(tips["total_bill"] < 10, "low", "high")
- tips.head()
+ tips
.. ipython:: python
:suppress:
diff --git a/doc/source/getting_started/comparison/includes/length.rst b/doc/source/getting_started/comparison/includes/length.rst
index 5a0c803e9eff2..9141fd4ea582a 100644
--- a/doc/source/getting_started/comparison/includes/length.rst
+++ b/doc/source/getting_started/comparison/includes/length.rst
@@ -4,5 +4,5 @@ Use ``len`` and ``rstrip`` to exclude trailing blanks.
.. ipython:: python
- tips["time"].str.len().head()
- tips["time"].str.rstrip().str.len().head()
+ tips["time"].str.len()
+ tips["time"].str.rstrip().str.len()
diff --git a/doc/source/getting_started/comparison/includes/limit.rst b/doc/source/getting_started/comparison/includes/limit.rst
new file mode 100644
index 0000000000000..4efeb4e43d07c
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/limit.rst
@@ -0,0 +1,7 @@
+By default, pandas will truncate output of large ``DataFrame``\s to show the first and last rows.
+This can be overridden by :ref:`changing the pandas options <options>`, or using
+:meth:`DataFrame.head` or :meth:`DataFrame.tail`.
+
+.. ipython:: python
+
+ tips.head(5)
diff --git a/doc/source/getting_started/comparison/includes/missing.rst b/doc/source/getting_started/comparison/includes/missing.rst
index 8e6ba95e98036..341c7d5498d82 100644
--- a/doc/source/getting_started/comparison/includes/missing.rst
+++ b/doc/source/getting_started/comparison/includes/missing.rst
@@ -1,24 +1,31 @@
-This doesn't work in pandas. Instead, the :func:`pd.isna` or :func:`pd.notna` functions
-should be used for comparisons.
+In pandas, :meth:`Series.isna` and :meth:`Series.notna` can be used to filter the rows.
.. ipython:: python
- outer_join[pd.isna(outer_join["value_x"])]
- outer_join[pd.notna(outer_join["value_x"])]
+ outer_join[outer_join["value_x"].isna()]
+ outer_join[outer_join["value_x"].notna()]
-pandas also provides a variety of methods to work with missing data -- some of
-which would be challenging to express in Stata. For example, there are methods to
-drop all rows with any missing values, replacing missing values with a specified
-value, like the mean, or forward filling from previous rows. See the
-:ref:`missing data documentation<missing_data>` for more.
+pandas provides :ref:`a variety of methods to work with missing data <missing_data>`. Here are some examples:
+
+Drop rows with missing values
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. ipython:: python
- # Drop rows with any missing value
outer_join.dropna()
- # Fill forwards
+Forward fill from previous rows
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. ipython:: python
+
outer_join.fillna(method="ffill")
- # Impute missing values with the mean
+Replace missing values with a specified value
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using the mean:
+
+.. ipython:: python
+
outer_join["value_x"].fillna(outer_join["value_x"].mean())
diff --git a/doc/source/getting_started/comparison/includes/missing_intro.rst b/doc/source/getting_started/comparison/includes/missing_intro.rst
index ed97f639f3f3d..366aa43d1264c 100644
--- a/doc/source/getting_started/comparison/includes/missing_intro.rst
+++ b/doc/source/getting_started/comparison/includes/missing_intro.rst
@@ -1,6 +1,6 @@
-Both have a representation for missing data — pandas' is the special float value ``NaN`` (not a
-number). Many of the semantics are the same; for example missing data propagates through numeric
-operations, and is ignored by default for aggregations.
+pandas represents missing data with the special float value ``NaN`` (not a number). Many of the
+semantics are the same; for example missing data propagates through numeric operations, and is
+ignored by default for aggregations.
.. ipython:: python
diff --git a/doc/source/getting_started/comparison/includes/sorting.rst b/doc/source/getting_started/comparison/includes/sorting.rst
index 0840c9dd554b7..4e2e40a18adbd 100644
--- a/doc/source/getting_started/comparison/includes/sorting.rst
+++ b/doc/source/getting_started/comparison/includes/sorting.rst
@@ -3,4 +3,4 @@ pandas has a :meth:`DataFrame.sort_values` method, which takes a list of columns
.. ipython:: python
tips = tips.sort_values(["sex", "total_bill"])
- tips.head()
+ tips
diff --git a/doc/source/getting_started/comparison/includes/time_date.rst b/doc/source/getting_started/comparison/includes/time_date.rst
index 12a00b36dc97d..fb9ee2e216cd7 100644
--- a/doc/source/getting_started/comparison/includes/time_date.rst
+++ b/doc/source/getting_started/comparison/includes/time_date.rst
@@ -11,7 +11,7 @@
tips[
["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"]
- ].head()
+ ]
.. ipython:: python
:suppress:
diff --git a/doc/source/getting_started/comparison/includes/transform.rst b/doc/source/getting_started/comparison/includes/transform.rst
index 0aa5b5b298cf7..b7599471432ad 100644
--- a/doc/source/getting_started/comparison/includes/transform.rst
+++ b/doc/source/getting_started/comparison/includes/transform.rst
@@ -5,4 +5,4 @@ succinctly expressed in one operation.
gb = tips.groupby("smoker")["total_bill"]
tips["adj_total_bill"] = tips["total_bill"] - gb.transform("mean")
- tips.head()
+ tips
| This helps to clarify the examples by removing code that isn't relevant. Added a dedicated section to the SAS, SQL, and Stata pages.
This builds on https://github.com/pandas-dev/pandas/pull/38933; ~~will rebase and mark as ready for review once that's merged. In the meantime, the last commit is the one that can be reviewed.~~ Thanks!
- [ ] ~~closes #xxxx~~
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] ~~whatsnew entry~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/38935 | 2021-01-04T03:43:50Z | 2021-01-04T13:21:16Z | 2021-01-04T13:21:16Z | 2021-01-04T13:21:20Z |
ENH: Improve numerical stability for groupby.mean and groupby.cumsum | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index af11b6543a74b..b4b98ec0403a8 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -294,6 +294,7 @@ Groupby/resample/rolling
- Bug in :meth:`SeriesGroupBy.value_counts` where unobserved categories in a grouped categorical series were not tallied (:issue:`38672`)
- Bug in :meth:`.GroupBy.indices` would contain non-existent indices when null values were present in the groupby keys (:issue:`9304`)
- Fixed bug in :meth:`DataFrameGroupBy.sum` and :meth:`SeriesGroupBy.sum` causing loss of precision through using Kahan summation (:issue:`38778`)
+- Fixed bug in :meth:`DataFrameGroupBy.cumsum`, :meth:`SeriesGroupBy.cumsum`, :meth:`DataFrameGroupBy.mean` and :meth:`SeriesGroupBy.mean` causing loss of precision through using Kahan summation (:issue:`38934`)
Reshaping
^^^^^^^^^
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index ac8f22263f787..553ecbc58e745 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -246,12 +246,13 @@ def group_cumsum(numeric[:, :] out,
"""
cdef:
Py_ssize_t i, j, N, K, size
- numeric val
- numeric[:, :] accum
+ numeric val, y, t
+ numeric[:, :] accum, compensation
int64_t lab
N, K = (<object>values).shape
accum = np.zeros((ngroups, K), dtype=np.asarray(values).dtype)
+ compensation = np.zeros((ngroups, K), dtype=np.asarray(values).dtype)
with nogil:
for i in range(N):
@@ -264,7 +265,10 @@ def group_cumsum(numeric[:, :] out,
if numeric == float32_t or numeric == float64_t:
if val == val:
- accum[lab, j] += val
+ y = val - compensation[lab, j]
+ t = accum[lab, j] + y
+ compensation[lab, j] = t - accum[lab, j] - y
+ accum[lab, j] = t
out[i, j] = accum[lab, j]
else:
out[i, j] = NaN
@@ -272,7 +276,10 @@ def group_cumsum(numeric[:, :] out,
accum[lab, j] = NaN
break
else:
- accum[lab, j] += val
+ y = val - compensation[lab, j]
+ t = accum[lab, j] + y
+ compensation[lab, j] = t - accum[lab, j] - y
+ accum[lab, j] = t
out[i, j] = accum[lab, j]
@@ -637,8 +644,8 @@ def _group_mean(floating[:, :] out,
Py_ssize_t min_count=-1):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- floating val, count
- floating[:, :] sumx
+ floating val, count, y, t
+ floating[:, :] sumx, compensation
int64_t[:, :] nobs
Py_ssize_t len_values = len(values), len_labels = len(labels)
@@ -649,6 +656,7 @@ def _group_mean(floating[:, :] out,
nobs = np.zeros((<object>out).shape, dtype=np.int64)
sumx = np.zeros_like(out)
+ compensation = np.zeros_like(out)
N, K = (<object>values).shape
@@ -664,7 +672,10 @@ def _group_mean(floating[:, :] out,
# not nan
if val == val:
nobs[lab, j] += 1
- sumx[lab, j] += val
+ y = val - compensation[lab, j]
+ t = sumx[lab, j] + y
+ compensation[lab, j] = t - sumx[lab, j] - y
+ sumx[lab, j] = t
for i in range(ncounts):
for j in range(K):
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index e1c63448a2d22..5735f895e33b6 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2178,12 +2178,26 @@ def test_groupby_series_with_tuple_name():
@pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system")
-def test_groupby_numerical_stability_sum():
+@pytest.mark.parametrize(
+ "func, values", [("sum", [97.0, 98.0]), ("mean", [24.25, 24.5])]
+)
+def test_groupby_numerical_stability_sum_mean(func, values):
# GH#38778
data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]
df = DataFrame({"group": [1, 2] * 4, "a": data, "b": data})
- result = df.groupby("group").sum()
- expected = DataFrame(
- {"a": [97.0, 98.0], "b": [97.0, 98.0]}, index=Index([1, 2], name="group")
- )
+ result = getattr(df.groupby("group"), func)()
+ expected = DataFrame({"a": values, "b": values}, index=Index([1, 2], name="group"))
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system")
+def test_groupby_numerical_stability_cumsum():
+ # GH#38934
+ data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]
+ df = DataFrame({"group": [1, 2] * 4, "a": data, "b": data})
+ result = df.groupby("group").cumsum()
+ exp_data = (
+ [1e16] * 2 + [1e16 + 96, 1e16 + 98] + [5e15 + 97, 5e15 + 98] + [97.0, 98.0]
+ )
+ expected = DataFrame({"a": exp_data, "b": exp_data})
+ tm.assert_frame_equal(result, expected, check_exact=True)
| - [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38934 | 2021-01-04T03:34:55Z | 2021-01-04T13:25:06Z | 2021-01-04T13:25:06Z | 2021-01-04T13:26:44Z |
DOC: improve shared content between comparison pages | diff --git a/doc/source/getting_started/comparison/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst
index eb11b75027909..b97efe31b8b29 100644
--- a/doc/source/getting_started/comparison/comparison_with_sas.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sas.rst
@@ -308,8 +308,8 @@ Sorting in SAS is accomplished via ``PROC SORT``
String processing
-----------------
-Length
-~~~~~~
+Finding length of string
+~~~~~~~~~~~~~~~~~~~~~~~~
SAS determines the length of a character string with the
`LENGTHN <https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a002284668.htm>`__
@@ -327,8 +327,8 @@ functions. ``LENGTHN`` excludes trailing blanks and ``LENGTHC`` includes trailin
.. include:: includes/length.rst
-Find
-~~~~
+Finding position of substring
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SAS determines the position of a character in a string with the
`FINDW <https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a002978282.htm>`__ function.
@@ -342,19 +342,11 @@ you supply as the second argument.
put(FINDW(sex,'ale'));
run;
-Python determines the position of a character in a string with the
-``find`` function. ``find`` searches for the first position of the
-substring. If the substring is found, the function returns its
-position. Keep in mind that Python indexes are zero-based and
-the function will return -1 if it fails to find the substring.
-
-.. ipython:: python
-
- tips["sex"].str.find("ale").head()
+.. include:: includes/find_substring.rst
-Substring
-~~~~~~~~~
+Extracting substring by position
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SAS extracts a substring from a string based on its position with the
`SUBSTR <https://www2.sas.com/proceedings/sugi25/25/cc/25p088.pdf>`__ function.
@@ -366,17 +358,11 @@ SAS extracts a substring from a string based on its position with the
put(substr(sex,1,1));
run;
-With pandas you can use ``[]`` notation to extract a substring
-from a string by position locations. Keep in mind that Python
-indexes are zero-based.
+.. include:: includes/extract_substring.rst
-.. ipython:: python
- tips["sex"].str[0:1].head()
-
-
-Scan
-~~~~
+Extracting nth word
+~~~~~~~~~~~~~~~~~~~
The SAS `SCAN <https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000214639.htm>`__
function returns the nth word from a string. The first argument is the string you want to parse and the
@@ -394,20 +380,11 @@ second argument specifies which word you want to extract.
;;;
run;
-Python extracts a substring from a string based on its text
-by using regular expressions. There are much more powerful
-approaches, but this just shows a simple approach.
-
-.. ipython:: python
-
- firstlast = pd.DataFrame({"String": ["John Smith", "Jane Cook"]})
- firstlast["First_Name"] = firstlast["String"].str.split(" ", expand=True)[0]
- firstlast["Last_Name"] = firstlast["String"].str.rsplit(" ", expand=True)[0]
- firstlast
+.. include:: includes/nth_word.rst
-Upcase, lowcase, and propcase
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Changing case
+~~~~~~~~~~~~~
The SAS `UPCASE <https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000245965.htm>`__
`LOWCASE <https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000245912.htm>`__ and
@@ -427,27 +404,13 @@ functions change the case of the argument.
;;;
run;
-The equivalent Python functions are ``upper``, ``lower``, and ``title``.
+.. include:: includes/case.rst
-.. ipython:: python
-
- firstlast = pd.DataFrame({"String": ["John Smith", "Jane Cook"]})
- firstlast["string_up"] = firstlast["String"].str.upper()
- firstlast["string_low"] = firstlast["String"].str.lower()
- firstlast["string_prop"] = firstlast["String"].str.title()
- firstlast
Merging
-------
-The following tables will be used in the merge examples
-
-.. ipython:: python
-
- df1 = pd.DataFrame({"key": ["A", "B", "C", "D"], "value": np.random.randn(4)})
- df1
- df2 = pd.DataFrame({"key": ["B", "D", "D", "E"], "value": np.random.randn(4)})
- df2
+.. include:: includes/merge_setup.rst
In SAS, data must be explicitly sorted before merging. Different
types of joins are accomplished using the ``in=`` dummy
@@ -473,39 +436,13 @@ input frames.
if a or b then output outer_join;
run;
-pandas DataFrames have a :meth:`~DataFrame.merge` method, which provides
-similar functionality. Note that the data does not have
-to be sorted ahead of time, and different join
-types are accomplished via the ``how`` keyword.
-
-.. ipython:: python
-
- inner_join = df1.merge(df2, on=["key"], how="inner")
- inner_join
-
- left_join = df1.merge(df2, on=["key"], how="left")
- left_join
-
- right_join = df1.merge(df2, on=["key"], how="right")
- right_join
-
- outer_join = df1.merge(df2, on=["key"], how="outer")
- outer_join
+.. include:: includes/merge.rst
Missing data
------------
-Like SAS, pandas has a representation for missing data - which is the
-special float value ``NaN`` (not a number). Many of the semantics
-are the same, for example missing data propagates through numeric
-operations, and is ignored by default for aggregations.
-
-.. ipython:: python
-
- outer_join
- outer_join["value_x"] + outer_join["value_y"]
- outer_join["value_x"].sum()
+.. include:: includes/missing_intro.rst
One difference is that missing data cannot be compared to its sentinel value.
For example, in SAS you could do this to filter missing values.
@@ -522,25 +459,7 @@ For example, in SAS you could do this to filter missing values.
if value_x ^= .;
run;
-Which doesn't work in pandas. Instead, the ``pd.isna`` or ``pd.notna`` functions
-should be used for comparisons.
-
-.. ipython:: python
-
- outer_join[pd.isna(outer_join["value_x"])]
- outer_join[pd.notna(outer_join["value_x"])]
-
-pandas also provides a variety of methods to work with missing data - some of
-which would be challenging to express in SAS. For example, there are methods to
-drop all rows with any missing values, replacing missing values with a specified
-value, like the mean, or forward filling from previous rows. See the
-:ref:`missing data documentation<missing_data>` for more.
-
-.. ipython:: python
-
- outer_join.dropna()
- outer_join.fillna(method="ffill")
- outer_join["value_x"].fillna(outer_join["value_x"].mean())
+.. include:: includes/missing.rst
GroupBy
@@ -549,7 +468,7 @@ GroupBy
Aggregation
~~~~~~~~~~~
-SAS's PROC SUMMARY can be used to group by one or
+SAS's ``PROC SUMMARY`` can be used to group by one or
more key variables and compute aggregations on
numeric columns.
@@ -561,14 +480,7 @@ numeric columns.
output out=tips_summed sum=;
run;
-pandas provides a flexible ``groupby`` mechanism that
-allows similar aggregations. See the :ref:`groupby documentation<groupby>`
-for more details and examples.
-
-.. ipython:: python
-
- tips_summed = tips.groupby(["sex", "smoker"])[["total_bill", "tip"]].sum()
- tips_summed.head()
+.. include:: includes/groupby.rst
Transformation
@@ -597,16 +509,7 @@ example, to subtract the mean for each observation by smoker group.
if a and b;
run;
-
-pandas ``groupby`` provides a ``transform`` mechanism that allows
-these type of operations to be succinctly expressed in one
-operation.
-
-.. ipython:: python
-
- gb = tips.groupby("smoker")["total_bill"]
- tips["adj_total_bill"] = tips["total_bill"] - gb.transform("mean")
- tips.head()
+.. include:: includes/transform.rst
By group processing
diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst
index d1ad18bddb0a7..ca536e7273870 100644
--- a/doc/source/getting_started/comparison/comparison_with_stata.rst
+++ b/doc/source/getting_started/comparison/comparison_with_stata.rst
@@ -311,15 +311,7 @@ first position of the substring you supply as the second argument.
generate str_position = strpos(sex, "ale")
-Python determines the position of a character in a string with the
-:func:`find` function. ``find`` searches for the first position of the
-substring. If the substring is found, the function returns its
-position. Keep in mind that Python indexes are zero-based and
-the function will return -1 if it fails to find the substring.
-
-.. ipython:: python
-
- tips["sex"].str.find("ale").head()
+.. include:: includes/find_substring.rst
Extracting substring by position
@@ -331,13 +323,7 @@ Stata extracts a substring from a string based on its position with the :func:`s
generate short_sex = substr(sex, 1, 1)
-With pandas you can use ``[]`` notation to extract a substring
-from a string by position locations. Keep in mind that Python
-indexes are zero-based.
-
-.. ipython:: python
-
- tips["sex"].str[0:1].head()
+.. include:: includes/extract_substring.rst
Extracting nth word
@@ -358,16 +344,7 @@ second argument specifies which word you want to extract.
generate first_name = word(name, 1)
generate last_name = word(name, -1)
-Python extracts a substring from a string based on its text
-by using regular expressions. There are much more powerful
-approaches, but this just shows a simple approach.
-
-.. ipython:: python
-
- firstlast = pd.DataFrame({"string": ["John Smith", "Jane Cook"]})
- firstlast["First_Name"] = firstlast["string"].str.split(" ", expand=True)[0]
- firstlast["Last_Name"] = firstlast["string"].str.rsplit(" ", expand=True)[0]
- firstlast
+.. include:: includes/nth_word.rst
Changing case
@@ -390,27 +367,13 @@ change the case of ASCII and Unicode strings, respectively.
generate title = strproper(string)
list
-The equivalent Python functions are ``upper``, ``lower``, and ``title``.
-
-.. ipython:: python
+.. include:: includes/case.rst
- firstlast = pd.DataFrame({"string": ["John Smith", "Jane Cook"]})
- firstlast["upper"] = firstlast["string"].str.upper()
- firstlast["lower"] = firstlast["string"].str.lower()
- firstlast["title"] = firstlast["string"].str.title()
- firstlast
Merging
-------
-The following tables will be used in the merge examples
-
-.. ipython:: python
-
- df1 = pd.DataFrame({"key": ["A", "B", "C", "D"], "value": np.random.randn(4)})
- df1
- df2 = pd.DataFrame({"key": ["B", "D", "D", "E"], "value": np.random.randn(4)})
- df2
+.. include:: includes/merge_setup.rst
In Stata, to perform a merge, one data set must be in memory
and the other must be referenced as a file name on disk. In
@@ -465,38 +428,13 @@ or the intersection of the two by using the values created in the
restore
merge 1:n key using df2.dta
-pandas DataFrames have a :meth:`DataFrame.merge` method, which provides
-similar functionality. Note that different join
-types are accomplished via the ``how`` keyword.
-
-.. ipython:: python
-
- inner_join = df1.merge(df2, on=["key"], how="inner")
- inner_join
-
- left_join = df1.merge(df2, on=["key"], how="left")
- left_join
-
- right_join = df1.merge(df2, on=["key"], how="right")
- right_join
-
- outer_join = df1.merge(df2, on=["key"], how="outer")
- outer_join
+.. include:: includes/merge_setup.rst
Missing data
------------
-Like Stata, pandas has a representation for missing data -- the
-special float value ``NaN`` (not a number). Many of the semantics
-are the same; for example missing data propagates through numeric
-operations, and is ignored by default for aggregations.
-
-.. ipython:: python
-
- outer_join
- outer_join["value_x"] + outer_join["value_y"]
- outer_join["value_x"].sum()
+.. include:: includes/missing_intro.rst
One difference is that missing data cannot be compared to its sentinel value.
For example, in Stata you could do this to filter missing values.
@@ -508,30 +446,7 @@ For example, in Stata you could do this to filter missing values.
* Keep non-missing values
list if value_x != .
-This doesn't work in pandas. Instead, the :func:`pd.isna` or :func:`pd.notna` functions
-should be used for comparisons.
-
-.. ipython:: python
-
- outer_join[pd.isna(outer_join["value_x"])]
- outer_join[pd.notna(outer_join["value_x"])]
-
-pandas also provides a variety of methods to work with missing data -- some of
-which would be challenging to express in Stata. For example, there are methods to
-drop all rows with any missing values, replacing missing values with a specified
-value, like the mean, or forward filling from previous rows. See the
-:ref:`missing data documentation<missing_data>` for more.
-
-.. ipython:: python
-
- # Drop rows with any missing value
- outer_join.dropna()
-
- # Fill forwards
- outer_join.fillna(method="ffill")
-
- # Impute missing values with the mean
- outer_join["value_x"].fillna(outer_join["value_x"].mean())
+.. include:: includes/missing.rst
GroupBy
@@ -548,14 +463,7 @@ numeric columns.
collapse (sum) total_bill tip, by(sex smoker)
-pandas provides a flexible ``groupby`` mechanism that
-allows similar aggregations. See the :ref:`groupby documentation<groupby>`
-for more details and examples.
-
-.. ipython:: python
-
- tips_summed = tips.groupby(["sex", "smoker"])[["total_bill", "tip"]].sum()
- tips_summed.head()
+.. include:: includes/groupby.rst
Transformation
@@ -570,16 +478,7 @@ For example, to subtract the mean for each observation by smoker group.
bysort sex smoker: egen group_bill = mean(total_bill)
generate adj_total_bill = total_bill - group_bill
-
-pandas ``groupby`` provides a ``transform`` mechanism that allows
-these type of operations to be succinctly expressed in one
-operation.
-
-.. ipython:: python
-
- gb = tips.groupby("smoker")["total_bill"]
- tips["adj_total_bill"] = tips["total_bill"] - gb.transform("mean")
- tips.head()
+.. include:: includes/transform.rst
By group processing
diff --git a/doc/source/getting_started/comparison/includes/case.rst b/doc/source/getting_started/comparison/includes/case.rst
new file mode 100644
index 0000000000000..c00a830bc8511
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/case.rst
@@ -0,0 +1,10 @@
+The equivalent pandas methods are :meth:`Series.str.upper`, :meth:`Series.str.lower`, and
+:meth:`Series.str.title`.
+
+.. ipython:: python
+
+ firstlast = pd.DataFrame({"string": ["John Smith", "Jane Cook"]})
+ firstlast["upper"] = firstlast["string"].str.upper()
+ firstlast["lower"] = firstlast["string"].str.lower()
+ firstlast["title"] = firstlast["string"].str.title()
+ firstlast
diff --git a/doc/source/getting_started/comparison/includes/extract_substring.rst b/doc/source/getting_started/comparison/includes/extract_substring.rst
new file mode 100644
index 0000000000000..78eee286ad467
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/extract_substring.rst
@@ -0,0 +1,7 @@
+With pandas you can use ``[]`` notation to extract a substring
+from a string by position locations. Keep in mind that Python
+indexes are zero-based.
+
+.. ipython:: python
+
+ tips["sex"].str[0:1].head()
diff --git a/doc/source/getting_started/comparison/includes/find_substring.rst b/doc/source/getting_started/comparison/includes/find_substring.rst
new file mode 100644
index 0000000000000..ee940b64f5cae
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/find_substring.rst
@@ -0,0 +1,8 @@
+You can find the position of a character in a column of strings with the :meth:`Series.str.find`
+method. ``find`` searches for the first position of the substring. If the substring is found, the
+method returns its position. If not found, it returns ``-1``. Keep in mind that Python indexes are
+zero-based.
+
+.. ipython:: python
+
+ tips["sex"].str.find("ale").head()
diff --git a/doc/source/getting_started/comparison/includes/groupby.rst b/doc/source/getting_started/comparison/includes/groupby.rst
new file mode 100644
index 0000000000000..caa9f6ec9c9b8
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/groupby.rst
@@ -0,0 +1,7 @@
+pandas provides a flexible ``groupby`` mechanism that allows similar aggregations. See the
+:ref:`groupby documentation<groupby>` for more details and examples.
+
+.. ipython:: python
+
+ tips_summed = tips.groupby(["sex", "smoker"])[["total_bill", "tip"]].sum()
+ tips_summed.head()
diff --git a/doc/source/getting_started/comparison/includes/length.rst b/doc/source/getting_started/comparison/includes/length.rst
index 9581c661c0170..5a0c803e9eff2 100644
--- a/doc/source/getting_started/comparison/includes/length.rst
+++ b/doc/source/getting_started/comparison/includes/length.rst
@@ -1,4 +1,4 @@
-Python determines the length of a character string with the ``len`` function.
+You can find the length of a character string with :meth:`Series.str.len`.
In Python 3, all strings are Unicode strings. ``len`` includes trailing blanks.
Use ``len`` and ``rstrip`` to exclude trailing blanks.
diff --git a/doc/source/getting_started/comparison/includes/merge.rst b/doc/source/getting_started/comparison/includes/merge.rst
new file mode 100644
index 0000000000000..b8e3f54fd132b
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/merge.rst
@@ -0,0 +1,17 @@
+pandas DataFrames have a :meth:`~DataFrame.merge` method, which provides similar functionality. The
+data does not have to be sorted ahead of time, and different join types are accomplished via the
+``how`` keyword.
+
+.. ipython:: python
+
+ inner_join = df1.merge(df2, on=["key"], how="inner")
+ inner_join
+
+ left_join = df1.merge(df2, on=["key"], how="left")
+ left_join
+
+ right_join = df1.merge(df2, on=["key"], how="right")
+ right_join
+
+ outer_join = df1.merge(df2, on=["key"], how="outer")
+ outer_join
diff --git a/doc/source/getting_started/comparison/includes/merge_setup.rst b/doc/source/getting_started/comparison/includes/merge_setup.rst
new file mode 100644
index 0000000000000..f115cd58f7a94
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/merge_setup.rst
@@ -0,0 +1,8 @@
+The following tables will be used in the merge examples:
+
+.. ipython:: python
+
+ df1 = pd.DataFrame({"key": ["A", "B", "C", "D"], "value": np.random.randn(4)})
+ df1
+ df2 = pd.DataFrame({"key": ["B", "D", "D", "E"], "value": np.random.randn(4)})
+ df2
diff --git a/doc/source/getting_started/comparison/includes/missing.rst b/doc/source/getting_started/comparison/includes/missing.rst
new file mode 100644
index 0000000000000..8e6ba95e98036
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/missing.rst
@@ -0,0 +1,24 @@
+This doesn't work in pandas. Instead, the :func:`pd.isna` or :func:`pd.notna` functions
+should be used for comparisons.
+
+.. ipython:: python
+
+ outer_join[pd.isna(outer_join["value_x"])]
+ outer_join[pd.notna(outer_join["value_x"])]
+
+pandas also provides a variety of methods to work with missing data -- some of
+which would be challenging to express in Stata. For example, there are methods to
+drop all rows with any missing values, replacing missing values with a specified
+value, like the mean, or forward filling from previous rows. See the
+:ref:`missing data documentation<missing_data>` for more.
+
+.. ipython:: python
+
+ # Drop rows with any missing value
+ outer_join.dropna()
+
+ # Fill forwards
+ outer_join.fillna(method="ffill")
+
+ # Impute missing values with the mean
+ outer_join["value_x"].fillna(outer_join["value_x"].mean())
diff --git a/doc/source/getting_started/comparison/includes/missing_intro.rst b/doc/source/getting_started/comparison/includes/missing_intro.rst
new file mode 100644
index 0000000000000..ed97f639f3f3d
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/missing_intro.rst
@@ -0,0 +1,9 @@
+Both have a representation for missing data — pandas' is the special float value ``NaN`` (not a
+number). Many of the semantics are the same; for example missing data propagates through numeric
+operations, and is ignored by default for aggregations.
+
+.. ipython:: python
+
+ outer_join
+ outer_join["value_x"] + outer_join["value_y"]
+ outer_join["value_x"].sum()
diff --git a/doc/source/getting_started/comparison/includes/nth_word.rst b/doc/source/getting_started/comparison/includes/nth_word.rst
new file mode 100644
index 0000000000000..7af0285005d5b
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/nth_word.rst
@@ -0,0 +1,9 @@
+The simplest way to extract words in pandas is to split the strings by spaces, then reference the
+word by index. Note there are more powerful approaches should you need them.
+
+.. ipython:: python
+
+ firstlast = pd.DataFrame({"String": ["John Smith", "Jane Cook"]})
+ firstlast["First_Name"] = firstlast["String"].str.split(" ", expand=True)[0]
+ firstlast["Last_Name"] = firstlast["String"].str.rsplit(" ", expand=True)[0]
+ firstlast
diff --git a/doc/source/getting_started/comparison/includes/sorting.rst b/doc/source/getting_started/comparison/includes/sorting.rst
index 23f11ff485474..0840c9dd554b7 100644
--- a/doc/source/getting_started/comparison/includes/sorting.rst
+++ b/doc/source/getting_started/comparison/includes/sorting.rst
@@ -1,5 +1,4 @@
-pandas objects have a :meth:`DataFrame.sort_values` method, which
-takes a list of columns to sort by.
+pandas has a :meth:`DataFrame.sort_values` method, which takes a list of columns to sort by.
.. ipython:: python
diff --git a/doc/source/getting_started/comparison/includes/transform.rst b/doc/source/getting_started/comparison/includes/transform.rst
new file mode 100644
index 0000000000000..0aa5b5b298cf7
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/transform.rst
@@ -0,0 +1,8 @@
+pandas provides a :ref:`groupby.transform` mechanism that allows these type of operations to be
+succinctly expressed in one operation.
+
+.. ipython:: python
+
+ gb = tips.groupby("smoker")["total_bill"]
+ tips["adj_total_bill"] = tips["total_bill"] - gb.transform("mean")
+ tips.head()
| This pull request does a few things between the SAS and Stata pages, in separate commits:
- Makes the headings match, where it makes sense for them to
- Create more shared includes, as a follow-up to https://github.com/pandas-dev/pandas/pull/38887
- Improves some wording and ensures more methods are linked in the comparison includes
The motivation here is that I'm working on adding the other sections to the Comparison to Spreadsheets page, and want to ensure they're consistent.
---
- [ ] ~~closes #xxxx~~
- [x] tests added / passed
- [ ] ~~passes `black pandas`~~
- [ ] ~~passes `git diff upstream/master -u -- "*.py" | flake8 --diff`~~
- [ ] ~~whatsnew entry~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/38933 | 2021-01-03T23:31:09Z | 2021-01-04T03:55:07Z | 2021-01-04T03:55:07Z | 2021-01-04T04:53:19Z |
BUG: rank_2d raising with mixed dtypes | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index af11b6543a74b..0884065247fbc 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -217,6 +217,8 @@ Numeric
- Bug in :meth:`DataFrame.select_dtypes` with ``include=np.number`` now retains numeric ``ExtensionDtype`` columns (:issue:`35340`)
- Bug in :meth:`DataFrame.mode` and :meth:`Series.mode` not keeping consistent integer :class:`Index` for empty input (:issue:`33321`)
- Bug in :meth:`DataFrame.rank` with ``np.inf`` and mixture of ``np.nan`` and ``np.inf`` (:issue:`32593`)
+- Bug in :meth:`DataFrame.rank` with ``axis=0`` and columns holding incomparable types raising ``IndexError`` (:issue:`38932`)
+-
Conversion
^^^^^^^^^^
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 3aa4738b36dc8..76bfb001cea81 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -26,6 +26,7 @@ from numpy cimport (
int16_t,
int32_t,
int64_t,
+ intp_t,
ndarray,
uint8_t,
uint16_t,
@@ -1105,14 +1106,13 @@ def rank_2d(
Py_ssize_t infs
ndarray[float64_t, ndim=2] ranks
ndarray[rank_t, ndim=2] values
- ndarray[int64_t, ndim=2] argsorted
+ ndarray[intp_t, ndim=2] argsort_indexer
ndarray[uint8_t, ndim=2] mask
rank_t val, nan_value
float64_t count, sum_ranks = 0.0
int tiebreak = 0
int64_t idx
bint check_mask, condition, keep_na
- const int64_t[:] labels
tiebreak = tiebreakers[ties_method]
@@ -1158,40 +1158,19 @@ def rank_2d(
n, k = (<object>values).shape
ranks = np.empty((n, k), dtype='f8')
- # For compatibility when calling rank_1d
- labels = np.zeros(k, dtype=np.int64)
- if rank_t is object:
- try:
- _as = values.argsort(1)
- except TypeError:
- values = in_arr
- for i in range(len(values)):
- ranks[i] = rank_1d(
- in_arr[i],
- labels=labels,
- ties_method=ties_method,
- ascending=ascending,
- pct=pct
- )
- if axis == 0:
- return ranks.T
- else:
- return ranks
+ if tiebreak == TIEBREAK_FIRST:
+ # need to use a stable sort here
+ argsort_indexer = values.argsort(axis=1, kind='mergesort')
+ if not ascending:
+ tiebreak = TIEBREAK_FIRST_DESCENDING
else:
- if tiebreak == TIEBREAK_FIRST:
- # need to use a stable sort here
- _as = values.argsort(axis=1, kind='mergesort')
- if not ascending:
- tiebreak = TIEBREAK_FIRST_DESCENDING
- else:
- _as = values.argsort(1)
+ argsort_indexer = values.argsort(1)
if not ascending:
- _as = _as[:, ::-1]
+ argsort_indexer = argsort_indexer[:, ::-1]
- values = _take_2d(values, _as)
- argsorted = _as.astype('i8')
+ values = _take_2d(values, argsort_indexer)
for i in range(n):
dups = sum_ranks = infs = 0
@@ -1200,7 +1179,7 @@ def rank_2d(
count = 0.0
for j in range(k):
val = values[i, j]
- idx = argsorted[i, j]
+ idx = argsort_indexer[i, j]
if keep_na and check_mask and mask[i, idx]:
ranks[i, idx] = NaN
infs += 1
@@ -1215,38 +1194,38 @@ def rank_2d(
condition = (
j == k - 1 or
are_diff(values[i, j + 1], val) or
- (keep_na and check_mask and mask[i, argsorted[i, j + 1]])
+ (keep_na and check_mask and mask[i, argsort_indexer[i, j + 1]])
)
else:
condition = (
j == k - 1 or
values[i, j + 1] != val or
- (keep_na and check_mask and mask[i, argsorted[i, j + 1]])
+ (keep_na and check_mask and mask[i, argsort_indexer[i, j + 1]])
)
if condition:
if tiebreak == TIEBREAK_AVERAGE:
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = sum_ranks / dups
+ ranks[i, argsort_indexer[i, z]] = sum_ranks / dups
elif tiebreak == TIEBREAK_MIN:
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = j - dups + 2
+ ranks[i, argsort_indexer[i, z]] = j - dups + 2
elif tiebreak == TIEBREAK_MAX:
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = j + 1
+ ranks[i, argsort_indexer[i, z]] = j + 1
elif tiebreak == TIEBREAK_FIRST:
if rank_t is object:
raise ValueError('first not supported for non-numeric data')
else:
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = z + 1
+ ranks[i, argsort_indexer[i, z]] = z + 1
elif tiebreak == TIEBREAK_FIRST_DESCENDING:
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = 2 * j - z - dups + 2
+ ranks[i, argsort_indexer[i, z]] = 2 * j - z - dups + 2
elif tiebreak == TIEBREAK_DENSE:
total_tie_count += 1
for z in range(j - dups + 1, j + 1):
- ranks[i, argsorted[i, z]] = total_tie_count
+ ranks[i, argsort_indexer[i, z]] = total_tie_count
sum_ranks = dups = 0
if pct:
if tiebreak == TIEBREAK_DENSE:
diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py
index 6ad1b475e28a2..4255c1cb5e65f 100644
--- a/pandas/tests/frame/methods/test_rank.py
+++ b/pandas/tests/frame/methods/test_rank.py
@@ -445,3 +445,15 @@ def test_rank_both_inf(self):
expected = DataFrame({"a": [1.0, 2.0, 3.0]})
result = df.rank()
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "data,expected",
+ [
+ ({"a": [1, 2, "a"], "b": [4, 5, 6]}, DataFrame({"b": [1.0, 2.0, 3.0]})),
+ ({"a": [1, 2, "a"]}, DataFrame(index=range(3))),
+ ],
+ )
+ def test_rank_mixed_axis_zero(self, data, expected):
+ df = DataFrame(data)
+ result = df.rank()
+ tm.assert_frame_equal(result, expected)
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38932 | 2021-01-03T22:21:58Z | 2021-01-05T00:45:16Z | 2021-01-05T00:45:16Z | 2021-01-05T01:30:14Z |
BUG: DataFrame.__setitem__ raising ValueError with string indexer and empty df and df to set | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index ac3b5dcaf53ae..e46c729348d33 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -244,6 +244,7 @@ Indexing
- Bug in :meth:`CategoricalIndex.get_indexer` failing to raise ``InvalidIndexError`` when non-unique (:issue:`38372`)
- Bug in inserting many new columns into a :class:`DataFrame` causing incorrect subsequent indexing behavior (:issue:`38380`)
- Bug in :meth:`DataFrame.loc`, :meth:`Series.loc`, :meth:`DataFrame.__getitem__` and :meth:`Series.__getitem__` returning incorrect elements for non-monotonic :class:`DatetimeIndex` for string slices (:issue:`33146`)
+- Bug in :meth:`DataFrame.__setitem__` raising ``ValueError`` with empty :class:`DataFrame` and specified columns for string indexer and non empty :class:`DataFrame` to set (:issue:`38831`)
- Bug in :meth:`DataFrame.iloc.__setitem__` and :meth:`DataFrame.loc.__setitem__` with mixed dtypes when setting with a dictionary value (:issue:`38335`)
- Bug in :meth:`DataFrame.loc` dropping levels of :class:`MultiIndex` when :class:`DataFrame` used as input has only one row (:issue:`10521`)
-
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1abbe37e67b09..aeae39094ba7c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3334,13 +3334,14 @@ def _ensure_valid_index(self, value):
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
- try:
- value = Series(value)
- except (ValueError, NotImplementedError, TypeError) as err:
- raise ValueError(
- "Cannot set a frame with no defined index "
- "and a value that cannot be converted to a Series"
- ) from err
+ if not isinstance(value, DataFrame):
+ try:
+ value = Series(value)
+ except (ValueError, NotImplementedError, TypeError) as err:
+ raise ValueError(
+ "Cannot set a frame with no defined index "
+ "and a value that cannot be converted to a Series"
+ ) from err
# GH31368 preserve name of index
index_copy = value.index.copy()
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py
index 19d2f8301037a..28b1f02ff020c 100644
--- a/pandas/tests/frame/indexing/test_setitem.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -338,6 +338,16 @@ def test_setitem_bool_with_numeric_index(self, dtype):
tm.assert_index_equal(df.columns, expected_cols)
+ @pytest.mark.parametrize("indexer", ["B", ["B"]])
+ def test_setitem_frame_length_0_str_key(self, indexer):
+ # GH#38831
+ df = DataFrame(columns=["A", "B"])
+ other = DataFrame({"B": [1, 2]})
+ df[indexer] = other
+ expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
+ expected["A"] = expected["A"].astype("object")
+ tm.assert_frame_equal(df, expected)
+
class TestDataFrameSetItemWithExpansion:
def test_setitem_listlike_views(self):
| - [x] closes #38831
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Dont have to convert df to series, df has an index.
cc @jbrockmendel | https://api.github.com/repos/pandas-dev/pandas/pulls/38931 | 2021-01-03T22:14:19Z | 2021-01-06T00:35:39Z | 2021-01-06T00:35:38Z | 2021-01-06T01:13:51Z |
TST: Replace pytest.xfail | diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index bc1295cc0a0ce..2548fc18e4032 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -386,9 +386,13 @@ def test_asi8_deprecation(self, index):
@pytest.mark.parametrize("na_position", [None, "middle"])
-def test_sort_values_invalid_na_position(index_with_missing, na_position):
- if isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
- pytest.xfail("missing value sorting order not defined for index type")
+def test_sort_values_invalid_na_position(request, index_with_missing, na_position):
+ if isinstance(index_with_missing, MultiIndex):
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="missing value sorting order not defined for index type"
+ )
+ )
if na_position not in ["first", "last"]:
with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
@@ -396,12 +400,16 @@ def test_sort_values_invalid_na_position(index_with_missing, na_position):
@pytest.mark.parametrize("na_position", ["first", "last"])
-def test_sort_values_with_missing(index_with_missing, na_position):
+def test_sort_values_with_missing(request, index_with_missing, na_position):
# GH 35584. Test that sort_values works with missing values,
# sort non-missing and place missing according to na_position
- if isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
- pytest.xfail("missing value sorting order not defined for index type")
+ if isinstance(index_with_missing, MultiIndex):
+ request.node.add_marker(
+ pytest.mark.xfail(reason="missing value sorting order not implemented")
+ )
+ elif isinstance(index_with_missing, CategoricalIndex):
+ pytest.skip("missing value sorting order not well-defined")
missing_count = np.sum(index_with_missing.isna())
not_na_vals = index_with_missing[index_with_missing.notna()].values
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index 64b08c6058b81..f2a33df71e8e3 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -49,7 +49,7 @@ def test_union_different_types(request, index, index_fixture2):
)
if any(isinstance(idx, pd.MultiIndex) for idx in (idx1, idx2)):
- pytest.xfail("This test doesn't consider multiindixes.")
+ pytest.skip("This test doesn't consider multiindixes.")
if is_dtype_equal(idx1.dtype, idx2.dtype):
pytest.skip("This test only considers non matching dtypes.")
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py
index d6d0723bee0e8..f79a822481ea0 100644
--- a/pandas/tests/series/indexing/test_setitem.py
+++ b/pandas/tests/series/indexing/test_setitem.py
@@ -328,14 +328,15 @@ def test_series_where(self, obj, key, expected):
tm.assert_series_equal(res, expected)
def test_index_where(self, obj, key, expected, request):
- if obj.dtype == bool:
- msg = "Index/Series casting behavior inconsistent GH#38692"
- mark = pytest.xfail(reason=msg)
- request.node.add_marker(mark)
-
mask = np.zeros(obj.shape, dtype=bool)
mask[key] = True
+ if obj.dtype == bool and not mask.all():
+ # When mask is all True, casting behavior does not apply
+ msg = "Index/Series casting behavior inconsistent GH#38692"
+ mark = pytest.mark.xfail(reason=msg)
+ request.node.add_marker(mark)
+
res = Index(obj).where(~mask, np.nan)
tm.assert_index_equal(res, Index(expected))
diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py
index 59c68fba53e25..edcec386cd8ba 100644
--- a/pandas/tests/series/indexing/test_where.py
+++ b/pandas/tests/series/indexing/test_where.py
@@ -489,10 +489,6 @@ def test_where_datetimelike_categorical(tz_naive_fixture):
tm.assert_series_equal(res, Series(dr))
# DataFrame.where
- if tz is None:
- res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))
- else:
- with pytest.xfail(reason="frame._values loses tz"):
- res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))
+ res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))
tm.assert_frame_equal(res, pd.DataFrame(dr))
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index a0e0213a6dce5..219aaddb116cd 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -737,12 +737,18 @@ def test_align_date_objects_with_datetimeindex(self):
class TestNamePreservation:
@pytest.mark.parametrize("box", [list, tuple, np.array, Index, Series, pd.array])
@pytest.mark.parametrize("flex", [True, False])
- def test_series_ops_name_retention(self, flex, box, names, all_binary_operators):
+ def test_series_ops_name_retention(
+ self, request, flex, box, names, all_binary_operators
+ ):
# GH#33930 consistent name renteiton
op = all_binary_operators
- if op is ops.rfloordiv and box in [list, tuple]:
- pytest.xfail("op fails because of inconsistent ndarray-wrapping GH#28759")
+ if op is ops.rfloordiv and box in [list, tuple] and not flex:
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="op fails because of inconsistent ndarray-wrapping GH#28759"
+ )
+ )
left = Series(range(10), name=names[0])
right = Series(range(10), name=names[1])
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Part of #38902. | https://api.github.com/repos/pandas-dev/pandas/pulls/38929 | 2021-01-03T21:10:21Z | 2021-01-04T01:26:12Z | 2021-01-04T01:26:12Z | 2021-01-04T21:05:18Z |
TST: stricten xfails | diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index e6d1cd5f47d8d..22eb642ed8512 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -821,7 +821,6 @@ def test_frame_with_frame_reindex(self):
(np.datetime64(20, "ns"), "<M8[ns]"),
],
)
- @pytest.mark.xfail(reason="GH38630", strict=False)
@pytest.mark.parametrize(
"op",
[
@@ -835,9 +834,12 @@ def test_frame_with_frame_reindex(self):
ids=lambda x: x.__name__,
)
def test_binop_other(self, op, value, dtype):
+
skip = {
(operator.truediv, "bool"),
(operator.pow, "bool"),
+ (operator.add, "bool"),
+ (operator.mul, "bool"),
}
e = DummyElement(value, dtype)
@@ -879,12 +881,18 @@ def test_binop_other(self, op, value, dtype):
elif (op, dtype) in skip:
- msg = "operator '.*' not implemented for .* dtypes"
- with pytest.raises(NotImplementedError, match=msg):
+ if op in [operator.add, operator.mul]:
with tm.assert_produces_warning(UserWarning):
# "evaluating in Python space because ..."
op(s, e.value)
+ else:
+ msg = "operator '.*' not implemented for .* dtypes"
+ with pytest.raises(NotImplementedError, match=msg):
+ with tm.assert_produces_warning(UserWarning):
+ # "evaluating in Python space because ..."
+ op(s, e.value)
+
else:
# FIXME: Since dispatching to Series, this test no longer
# asserts anything meaningful
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index bc1295cc0a0ce..a1a3ab554225b 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -16,12 +16,10 @@
from pandas import (
CategoricalIndex,
DatetimeIndex,
- Int64Index,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
- UInt64Index,
)
import pandas._testing as tm
@@ -371,12 +369,9 @@ def test_ravel_deprecation(self, index):
with tm.assert_produces_warning(FutureWarning):
index.ravel()
- @pytest.mark.xfail(reason="GH38630", strict=False)
def test_asi8_deprecation(self, index):
# GH#37877
- if isinstance(
- index, (Int64Index, UInt64Index, DatetimeIndex, TimedeltaIndex, PeriodIndex)
- ):
+ if isinstance(index, (DatetimeIndex, TimedeltaIndex, PeriodIndex)):
warn = None
else:
warn = FutureWarning
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38927 | 2021-01-03T18:51:51Z | 2021-01-04T13:36:07Z | 2021-01-04T13:36:07Z | 2021-01-04T16:22:59Z |
ENH: Add support to import optional submodule and specify different min_version than default | diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py
index 3775a47d44521..def881b8fd863 100644
--- a/pandas/compat/_optional.py
+++ b/pandas/compat/_optional.py
@@ -1,6 +1,8 @@
import distutils.version
import importlib
+import sys
import types
+from typing import Optional
import warnings
# Update install.rst when updating versions!
@@ -58,7 +60,11 @@ def _get_version(module: types.ModuleType) -> str:
def import_optional_dependency(
- name: str, extra: str = "", raise_on_missing: bool = True, on_version: str = "raise"
+ name: str,
+ extra: str = "",
+ raise_on_missing: bool = True,
+ on_version: str = "raise",
+ min_version: Optional[str] = None,
):
"""
Import an optional dependency.
@@ -70,8 +76,7 @@ def import_optional_dependency(
Parameters
----------
name : str
- The module name. This should be top-level only, so that the
- version may be checked.
+ The module name.
extra : str
Additional text to include in the ImportError message.
raise_on_missing : bool, default True
@@ -85,7 +90,9 @@ def import_optional_dependency(
* ignore: Return the module, even if the version is too old.
It's expected that users validate the version locally when
using ``on_version="ignore"`` (see. ``io/html.py``)
-
+ min_version : str, default None
+ Specify a minimum version that is different from the global pandas
+ minimum version required.
Returns
-------
maybe_module : Optional[ModuleType]
@@ -110,13 +117,20 @@ def import_optional_dependency(
else:
return None
- minimum_version = VERSIONS.get(name)
+ # Handle submodules: if we have submodule, grab parent module from sys.modules
+ parent = name.split(".")[0]
+ if parent != name:
+ install_name = parent
+ module_to_get = sys.modules[install_name]
+ else:
+ module_to_get = module
+ minimum_version = min_version if min_version is not None else VERSIONS.get(parent)
if minimum_version:
- version = _get_version(module)
+ version = _get_version(module_to_get)
if distutils.version.LooseVersion(version) < minimum_version:
assert on_version in {"warn", "raise", "ignore"}
msg = (
- f"Pandas requires version '{minimum_version}' or newer of '{name}' "
+ f"Pandas requires version '{minimum_version}' or newer of '{parent}' "
f"(version '{version}' currently installed)."
)
if on_version == "warn":
diff --git a/pandas/tests/test_optional_dependency.py b/pandas/tests/test_optional_dependency.py
index e5ed69b7703b1..304ec124ac8c5 100644
--- a/pandas/tests/test_optional_dependency.py
+++ b/pandas/tests/test_optional_dependency.py
@@ -33,6 +33,10 @@ def test_bad_version(monkeypatch):
with pytest.raises(ImportError, match=match):
import_optional_dependency("fakemodule")
+ # Test min_version parameter
+ result = import_optional_dependency("fakemodule", min_version="0.8")
+ assert result is module
+
with tm.assert_produces_warning(UserWarning):
result = import_optional_dependency("fakemodule", on_version="warn")
assert result is None
@@ -42,6 +46,31 @@ def test_bad_version(monkeypatch):
assert result is module
+def test_submodule(monkeypatch):
+ # Create a fake module with a submodule
+ name = "fakemodule"
+ module = types.ModuleType(name)
+ module.__version__ = "0.9.0"
+ sys.modules[name] = module
+ sub_name = "submodule"
+ submodule = types.ModuleType(sub_name)
+ setattr(module, sub_name, submodule)
+ sys.modules[f"{name}.{sub_name}"] = submodule
+ monkeypatch.setitem(VERSIONS, name, "1.0.0")
+
+ match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'"
+ with pytest.raises(ImportError, match=match):
+ import_optional_dependency("fakemodule.submodule")
+
+ with tm.assert_produces_warning(UserWarning):
+ result = import_optional_dependency("fakemodule.submodule", on_version="warn")
+ assert result is None
+
+ module.__version__ = "1.0.0" # exact match is OK
+ result = import_optional_dependency("fakemodule.submodule")
+ assert result is submodule
+
+
def test_no_version_raises(monkeypatch):
name = "fakemodule"
module = types.ModuleType(name)
| - [ ] closes #38888
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
cc @jreback, @arw2019 | https://api.github.com/repos/pandas-dev/pandas/pulls/38925 | 2021-01-03T18:23:27Z | 2021-01-04T00:14:16Z | 2021-01-04T00:14:16Z | 2021-01-04T03:38:53Z |
TST: 26807 split pandas/tests/tseries/offsets/test_offsets.py into multiple smaller test modules | diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py
index 25837c0b6aee2..b2ac28e1865d6 100644
--- a/pandas/tests/tseries/offsets/common.py
+++ b/pandas/tests/tseries/offsets/common.py
@@ -1,6 +1,24 @@
"""
-Assertion helpers for offsets tests
+Assertion helpers and base class for offsets tests
"""
+from datetime import datetime
+from typing import Optional, Type
+
+from dateutil.tz.tz import tzlocal
+import pytest
+
+from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp
+from pandas._libs.tslibs.offsets import (
+ FY5253,
+ BusinessHour,
+ CustomBusinessHour,
+ DateOffset,
+ FY5253Quarter,
+ LastWeekOfMonth,
+ Week,
+ WeekOfMonth,
+)
+from pandas.compat import IS64
def assert_offset_equal(offset, base, expected):
@@ -24,3 +42,156 @@ def assert_is_on_offset(offset, date, expected):
f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"
f"\nAt Date: {date}"
)
+
+
+class WeekDay:
+ MON = 0
+ TUE = 1
+ WED = 2
+ THU = 3
+ FRI = 4
+ SAT = 5
+ SUN = 6
+
+
+class Base:
+ _offset: Optional[Type[DateOffset]] = None
+ d = Timestamp(datetime(2008, 1, 2))
+
+ timezones = [
+ None,
+ "UTC",
+ "Asia/Tokyo",
+ "US/Eastern",
+ "dateutil/Asia/Tokyo",
+ "dateutil/US/Pacific",
+ ]
+
+ def _get_offset(self, klass, value=1, normalize=False):
+ # create instance from offset class
+ if klass is FY5253:
+ klass = klass(
+ n=value,
+ startingMonth=1,
+ weekday=1,
+ variation="last",
+ normalize=normalize,
+ )
+ elif klass is FY5253Quarter:
+ klass = klass(
+ n=value,
+ startingMonth=1,
+ weekday=1,
+ qtr_with_extra_week=1,
+ variation="last",
+ normalize=normalize,
+ )
+ elif klass is LastWeekOfMonth:
+ klass = klass(n=value, weekday=5, normalize=normalize)
+ elif klass is WeekOfMonth:
+ klass = klass(n=value, week=1, weekday=5, normalize=normalize)
+ elif klass is Week:
+ klass = klass(n=value, weekday=5, normalize=normalize)
+ elif klass is DateOffset:
+ klass = klass(days=value, normalize=normalize)
+ else:
+ klass = klass(value, normalize=normalize)
+ return klass
+
+ def test_apply_out_of_range(self, tz_naive_fixture):
+ tz = tz_naive_fixture
+ if self._offset is None:
+ return
+ if isinstance(tz, tzlocal) and not IS64:
+ pytest.xfail(reason="OverflowError inside tzlocal past 2038")
+
+ # try to create an out-of-bounds result timestamp; if we can't create
+ # the offset skip
+ try:
+ if self._offset in (BusinessHour, CustomBusinessHour):
+ # Using 10000 in BusinessHour fails in tz check because of DST
+ # difference
+ offset = self._get_offset(self._offset, value=100000)
+ else:
+ offset = self._get_offset(self._offset, value=10000)
+
+ result = Timestamp("20080101") + offset
+ assert isinstance(result, datetime)
+ assert result.tzinfo is None
+
+ # Check tz is preserved
+ t = Timestamp("20080101", tz=tz)
+ result = t + offset
+ assert isinstance(result, datetime)
+ assert t.tzinfo == result.tzinfo
+
+ except OutOfBoundsDatetime:
+ pass
+ except (ValueError, KeyError):
+ # we are creating an invalid offset
+ # so ignore
+ pass
+
+ def test_offsets_compare_equal(self):
+ # root cause of GH#456: __ne__ was not implemented
+ if self._offset is None:
+ return
+ offset1 = self._offset()
+ offset2 = self._offset()
+ assert not offset1 != offset2
+ assert offset1 == offset2
+
+ def test_rsub(self):
+ if self._offset is None or not hasattr(self, "offset2"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset2 attr
+ return
+ assert self.d - self.offset2 == (-self.offset2).apply(self.d)
+
+ def test_radd(self):
+ if self._offset is None or not hasattr(self, "offset2"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset2 attr
+ return
+ assert self.d + self.offset2 == self.offset2 + self.d
+
+ def test_sub(self):
+ if self._offset is None or not hasattr(self, "offset2"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset2 attr
+ return
+ off = self.offset2
+ msg = "Cannot subtract datetime from offset"
+ with pytest.raises(TypeError, match=msg):
+ off - self.d
+
+ assert 2 * off - off == off
+ assert self.d - self.offset2 == self.d + self._offset(-2)
+ assert self.d - self.offset2 == self.d - (2 * off - off)
+
+ def testMult1(self):
+ if self._offset is None or not hasattr(self, "offset1"):
+ # i.e. skip for TestCommon and YQM subclasses that do not have
+ # offset1 attr
+ return
+ assert self.d + 10 * self.offset1 == self.d + self._offset(10)
+ assert self.d + 5 * self.offset1 == self.d + self._offset(5)
+
+ def testMult2(self):
+ if self._offset is None:
+ return
+ assert self.d + (-5 * self._offset(-10)) == self.d + self._offset(50)
+ assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
+
+ def test_compare_str(self):
+ # GH#23524
+ # comparing to strings that cannot be cast to DateOffsets should
+ # not raise for __eq__ or __ne__
+ if self._offset is None:
+ return
+ off = self._get_offset(self._offset)
+
+ assert not off == "infer"
+ assert off != "foo"
+ # Note: inequalities are only implemented for Tick subclasses;
+ # tests for this are in test_ticks
diff --git a/pandas/tests/tseries/offsets/test_business_day.py b/pandas/tests/tseries/offsets/test_business_day.py
new file mode 100644
index 0000000000000..d3c4fb50e2ab0
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_business_day.py
@@ -0,0 +1,441 @@
+"""
+Tests for offsets.BDay
+"""
+from datetime import date, datetime, timedelta
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs.offsets import ApplyTypeError, BDay, BMonthEnd, CDay
+from pandas.compat.numpy import np_datetime64_compat
+
+from pandas import DatetimeIndex, _testing as tm, read_pickle
+from pandas.tests.tseries.offsets.common import (
+ Base,
+ assert_is_on_offset,
+ assert_offset_equal,
+)
+from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
+
+from pandas.tseries import offsets as offsets
+from pandas.tseries.holiday import USFederalHolidayCalendar
+
+
+class TestBusinessDay(Base):
+ _offset = BDay
+
+ def setup_method(self, method):
+ self.d = datetime(2008, 1, 1)
+
+ self.offset = BDay()
+ self.offset1 = self.offset
+ self.offset2 = BDay(2)
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset) == "<BusinessDay>"
+ assert repr(self.offset2) == "<2 * BusinessDays>"
+
+ expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
+ assert repr(self.offset + timedelta(1)) == expected
+
+ def test_with_offset(self):
+ offset = self.offset + timedelta(hours=2)
+
+ assert (self.d + offset) == datetime(2008, 1, 2, 2)
+
+ def test_with_offset_index(self):
+ dti = DatetimeIndex([self.d])
+ result = dti + (self.offset + timedelta(hours=2))
+
+ expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
+ tm.assert_index_equal(result, expected)
+
+ def test_eq(self):
+ assert self.offset2 == self.offset2
+
+ def test_mul(self):
+ pass
+
+ def test_hash(self):
+ assert hash(self.offset2) == hash(self.offset2)
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset2(self.d) == datetime(2008, 1, 3)
+
+ def testRollback1(self):
+ assert BDay(10).rollback(self.d) == self.d
+
+ def testRollback2(self):
+ assert BDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
+
+ def testRollforward1(self):
+ assert BDay(10).rollforward(self.d) == self.d
+
+ def testRollforward2(self):
+ assert BDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
+
+ def test_roll_date_object(self):
+ offset = BDay()
+
+ dt = date(2012, 9, 15)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 14)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 17)
+
+ offset = offsets.Day()
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 15)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 15)
+
+ def test_is_on_offset(self):
+ tests = [
+ (BDay(), datetime(2008, 1, 1), True),
+ (BDay(), datetime(2008, 1, 5), False),
+ ]
+
+ for offset, d, expected in tests:
+ assert_is_on_offset(offset, d, expected)
+
+ apply_cases: _ApplyCases = [
+ (
+ BDay(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 2),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 8),
+ },
+ ),
+ (
+ 2 * BDay(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 3),
+ datetime(2008, 1, 4): datetime(2008, 1, 8),
+ datetime(2008, 1, 5): datetime(2008, 1, 8),
+ datetime(2008, 1, 6): datetime(2008, 1, 8),
+ datetime(2008, 1, 7): datetime(2008, 1, 9),
+ },
+ ),
+ (
+ -BDay(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 31),
+ datetime(2008, 1, 4): datetime(2008, 1, 3),
+ datetime(2008, 1, 5): datetime(2008, 1, 4),
+ datetime(2008, 1, 6): datetime(2008, 1, 4),
+ datetime(2008, 1, 7): datetime(2008, 1, 4),
+ datetime(2008, 1, 8): datetime(2008, 1, 7),
+ },
+ ),
+ (
+ -2 * BDay(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 28),
+ datetime(2008, 1, 4): datetime(2008, 1, 2),
+ datetime(2008, 1, 5): datetime(2008, 1, 3),
+ datetime(2008, 1, 6): datetime(2008, 1, 3),
+ datetime(2008, 1, 7): datetime(2008, 1, 3),
+ datetime(2008, 1, 8): datetime(2008, 1, 4),
+ datetime(2008, 1, 9): datetime(2008, 1, 7),
+ },
+ ),
+ (
+ BDay(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 4): datetime(2008, 1, 4),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 7),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_apply_large_n(self):
+ dt = datetime(2012, 10, 23)
+
+ result = dt + BDay(10)
+ assert result == datetime(2012, 11, 6)
+
+ result = dt + BDay(100) - BDay(100)
+ assert result == dt
+
+ off = BDay() * 6
+ rs = datetime(2012, 1, 1) - off
+ xp = datetime(2011, 12, 23)
+ assert rs == xp
+
+ st = datetime(2011, 12, 18)
+ rs = st + off
+ xp = datetime(2011, 12, 26)
+ assert rs == xp
+
+ off = BDay() * 10
+ rs = datetime(2014, 1, 5) + off # see #5890
+ xp = datetime(2014, 1, 17)
+ assert rs == xp
+
+ def test_apply_corner(self):
+ msg = "Only know how to combine business day with datetime or timedelta"
+ with pytest.raises(ApplyTypeError, match=msg):
+ BDay().apply(BMonthEnd())
+
+
+class TestCustomBusinessDay(Base):
+ _offset = CDay
+
+ def setup_method(self, method):
+ self.d = datetime(2008, 1, 1)
+ self.nd = np_datetime64_compat("2008-01-01 00:00:00Z")
+
+ self.offset = CDay()
+ self.offset1 = self.offset
+ self.offset2 = CDay(2)
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset) == "<CustomBusinessDay>"
+ assert repr(self.offset2) == "<2 * CustomBusinessDays>"
+
+ expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
+ assert repr(self.offset + timedelta(1)) == expected
+
+ def test_with_offset(self):
+ offset = self.offset + timedelta(hours=2)
+
+ assert (self.d + offset) == datetime(2008, 1, 2, 2)
+
+ def test_with_offset_index(self):
+ dti = DatetimeIndex([self.d])
+ result = dti + (self.offset + timedelta(hours=2))
+
+ expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
+ tm.assert_index_equal(result, expected)
+
+ def test_eq(self):
+ assert self.offset2 == self.offset2
+
+ def test_mul(self):
+ pass
+
+ def test_hash(self):
+ assert hash(self.offset2) == hash(self.offset2)
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset2(self.d) == datetime(2008, 1, 3)
+ assert self.offset2(self.nd) == datetime(2008, 1, 3)
+
+ def testRollback1(self):
+ assert CDay(10).rollback(self.d) == self.d
+
+ def testRollback2(self):
+ assert CDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
+
+ def testRollforward1(self):
+ assert CDay(10).rollforward(self.d) == self.d
+
+ def testRollforward2(self):
+ assert CDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
+
+ def test_roll_date_object(self):
+ offset = CDay()
+
+ dt = date(2012, 9, 15)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 14)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 17)
+
+ offset = offsets.Day()
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 15)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 15)
+
+ on_offset_cases = [
+ (CDay(), datetime(2008, 1, 1), True),
+ (CDay(), datetime(2008, 1, 5), False),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ offset, d, expected = case
+ assert_is_on_offset(offset, d, expected)
+
+ apply_cases: _ApplyCases = [
+ (
+ CDay(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 2),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 8),
+ },
+ ),
+ (
+ 2 * CDay(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 3),
+ datetime(2008, 1, 4): datetime(2008, 1, 8),
+ datetime(2008, 1, 5): datetime(2008, 1, 8),
+ datetime(2008, 1, 6): datetime(2008, 1, 8),
+ datetime(2008, 1, 7): datetime(2008, 1, 9),
+ },
+ ),
+ (
+ -CDay(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 31),
+ datetime(2008, 1, 4): datetime(2008, 1, 3),
+ datetime(2008, 1, 5): datetime(2008, 1, 4),
+ datetime(2008, 1, 6): datetime(2008, 1, 4),
+ datetime(2008, 1, 7): datetime(2008, 1, 4),
+ datetime(2008, 1, 8): datetime(2008, 1, 7),
+ },
+ ),
+ (
+ -2 * CDay(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 28),
+ datetime(2008, 1, 4): datetime(2008, 1, 2),
+ datetime(2008, 1, 5): datetime(2008, 1, 3),
+ datetime(2008, 1, 6): datetime(2008, 1, 3),
+ datetime(2008, 1, 7): datetime(2008, 1, 3),
+ datetime(2008, 1, 8): datetime(2008, 1, 4),
+ datetime(2008, 1, 9): datetime(2008, 1, 7),
+ },
+ ),
+ (
+ CDay(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 4): datetime(2008, 1, 4),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 7),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_apply_large_n(self):
+ dt = datetime(2012, 10, 23)
+
+ result = dt + CDay(10)
+ assert result == datetime(2012, 11, 6)
+
+ result = dt + CDay(100) - CDay(100)
+ assert result == dt
+
+ off = CDay() * 6
+ rs = datetime(2012, 1, 1) - off
+ xp = datetime(2011, 12, 23)
+ assert rs == xp
+
+ st = datetime(2011, 12, 18)
+ rs = st + off
+ xp = datetime(2011, 12, 26)
+ assert rs == xp
+
+ def test_apply_corner(self):
+ msg = (
+ "Only know how to combine trading day "
+ "with datetime, datetime64 or timedelta"
+ )
+ with pytest.raises(ApplyTypeError, match=msg):
+ CDay().apply(BMonthEnd())
+
+ def test_holidays(self):
+ # Define a TradingDay offset
+ holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
+ tday = CDay(holidays=holidays)
+ for year in range(2012, 2015):
+ dt = datetime(year, 4, 30)
+ xp = datetime(year, 5, 2)
+ rs = dt + tday
+ assert rs == xp
+
+ def test_weekmask(self):
+ weekmask_saudi = "Sat Sun Mon Tue Wed" # Thu-Fri Weekend
+ weekmask_uae = "1111001" # Fri-Sat Weekend
+ weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend
+ bday_saudi = CDay(weekmask=weekmask_saudi)
+ bday_uae = CDay(weekmask=weekmask_uae)
+ bday_egypt = CDay(weekmask=weekmask_egypt)
+ dt = datetime(2013, 5, 1)
+ xp_saudi = datetime(2013, 5, 4)
+ xp_uae = datetime(2013, 5, 2)
+ xp_egypt = datetime(2013, 5, 2)
+ assert xp_saudi == dt + bday_saudi
+ assert xp_uae == dt + bday_uae
+ assert xp_egypt == dt + bday_egypt
+ xp2 = datetime(2013, 5, 5)
+ assert xp2 == dt + 2 * bday_saudi
+ assert xp2 == dt + 2 * bday_uae
+ assert xp2 == dt + 2 * bday_egypt
+
+ def test_weekmask_and_holidays(self):
+ weekmask_egypt = "Sun Mon Tue Wed Thu" # Fri-Sat Weekend
+ holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
+ bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
+ dt = datetime(2013, 4, 30)
+ xp_egypt = datetime(2013, 5, 5)
+ assert xp_egypt == dt + 2 * bday_egypt
+
+ @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
+ def test_calendar(self):
+ calendar = USFederalHolidayCalendar()
+ dt = datetime(2014, 1, 17)
+ assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
+
+ def test_roundtrip_pickle(self):
+ def _check_roundtrip(obj):
+ unpickled = tm.round_trip_pickle(obj)
+ assert unpickled == obj
+
+ _check_roundtrip(self.offset)
+ _check_roundtrip(self.offset2)
+ _check_roundtrip(self.offset * 2)
+
+ def test_pickle_compat_0_14_1(self, datapath):
+ hdays = [datetime(2013, 1, 1) for ele in range(4)]
+ pth = datapath("tseries", "offsets", "data", "cday-0.14.1.pickle")
+ cday0_14_1 = read_pickle(pth)
+ cday = CDay(holidays=hdays)
+ assert cday == cday0_14_1
diff --git a/pandas/tests/tseries/offsets/test_business_hour.py b/pandas/tests/tseries/offsets/test_business_hour.py
new file mode 100644
index 0000000000000..5f387b2edeb0b
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_business_hour.py
@@ -0,0 +1,905 @@
+"""
+Tests for offsets.BusinessHour
+"""
+from datetime import datetime, time as dt_time
+
+import pytest
+
+from pandas._libs.tslibs import Timedelta, Timestamp
+from pandas._libs.tslibs.offsets import BDay, BusinessHour, Nano
+
+from pandas import DatetimeIndex, _testing as tm, date_range
+from pandas.tests.tseries.offsets.common import Base, assert_offset_equal
+
+
+class TestBusinessHour(Base):
+ _offset = BusinessHour
+
+ def setup_method(self, method):
+ self.d = datetime(2014, 7, 1, 10, 00)
+
+ self.offset1 = BusinessHour()
+ self.offset2 = BusinessHour(n=3)
+
+ self.offset3 = BusinessHour(n=-1)
+ self.offset4 = BusinessHour(n=-4)
+
+ from datetime import time as dt_time
+
+ self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
+ self.offset6 = BusinessHour(start="20:00", end="05:00")
+ self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30), end=dt_time(6, 30))
+ self.offset8 = BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"])
+ self.offset9 = BusinessHour(
+ n=3, start=["09:00", "22:00"], end=["13:00", "03:00"]
+ )
+ self.offset10 = BusinessHour(
+ n=-1, start=["23:00", "13:00"], end=["02:00", "17:00"]
+ )
+
+ @pytest.mark.parametrize(
+ "start,end,match",
+ [
+ (
+ dt_time(11, 0, 5),
+ "17:00",
+ "time data must be specified only with hour and minute",
+ ),
+ ("AAA", "17:00", "time data must match '%H:%M' format"),
+ ("14:00:05", "17:00", "time data must match '%H:%M' format"),
+ ([], "17:00", "Must include at least 1 start time"),
+ ("09:00", [], "Must include at least 1 end time"),
+ (
+ ["09:00", "11:00"],
+ "17:00",
+ "number of starting time and ending time must be the same",
+ ),
+ (
+ ["09:00", "11:00"],
+ ["10:00"],
+ "number of starting time and ending time must be the same",
+ ),
+ (
+ ["09:00", "11:00"],
+ ["12:00", "20:00"],
+ r"invalid starting and ending time\(s\): opening hours should not "
+ "touch or overlap with one another",
+ ),
+ (
+ ["12:00", "20:00"],
+ ["09:00", "11:00"],
+ r"invalid starting and ending time\(s\): opening hours should not "
+ "touch or overlap with one another",
+ ),
+ ],
+ )
+ def test_constructor_errors(self, start, end, match):
+ with pytest.raises(ValueError, match=match):
+ BusinessHour(start=start, end=end)
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset1) == "<BusinessHour: BH=09:00-17:00>"
+ assert repr(self.offset2) == "<3 * BusinessHours: BH=09:00-17:00>"
+ assert repr(self.offset3) == "<-1 * BusinessHour: BH=09:00-17:00>"
+ assert repr(self.offset4) == "<-4 * BusinessHours: BH=09:00-17:00>"
+
+ assert repr(self.offset5) == "<BusinessHour: BH=11:00-14:30>"
+ assert repr(self.offset6) == "<BusinessHour: BH=20:00-05:00>"
+ assert repr(self.offset7) == "<-2 * BusinessHours: BH=21:30-06:30>"
+ assert repr(self.offset8) == "<BusinessHour: BH=09:00-12:00,13:00-17:00>"
+ assert repr(self.offset9) == "<3 * BusinessHours: BH=09:00-13:00,22:00-03:00>"
+ assert repr(self.offset10) == "<-1 * BusinessHour: BH=13:00-17:00,23:00-02:00>"
+
+ def test_with_offset(self):
+ expected = Timestamp("2014-07-01 13:00")
+
+ assert self.d + BusinessHour() * 3 == expected
+ assert self.d + BusinessHour(n=3) == expected
+
+ @pytest.mark.parametrize(
+ "offset_name",
+ ["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],
+ )
+ def test_eq_attribute(self, offset_name):
+ offset = getattr(self, offset_name)
+ assert offset == offset
+
+ @pytest.mark.parametrize(
+ "offset1,offset2",
+ [
+ (BusinessHour(start="09:00"), BusinessHour()),
+ (
+ BusinessHour(start=["23:00", "13:00"], end=["12:00", "17:00"]),
+ BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),
+ ),
+ ],
+ )
+ def test_eq(self, offset1, offset2):
+ assert offset1 == offset2
+
+ @pytest.mark.parametrize(
+ "offset1,offset2",
+ [
+ (BusinessHour(), BusinessHour(-1)),
+ (BusinessHour(start="09:00"), BusinessHour(start="09:01")),
+ (
+ BusinessHour(start="09:00", end="17:00"),
+ BusinessHour(start="17:00", end="09:01"),
+ ),
+ (
+ BusinessHour(start=["13:00", "23:00"], end=["18:00", "07:00"]),
+ BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),
+ ),
+ ],
+ )
+ def test_neq(self, offset1, offset2):
+ assert offset1 != offset2
+
+ @pytest.mark.parametrize(
+ "offset_name",
+ ["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],
+ )
+ def test_hash(self, offset_name):
+ offset = getattr(self, offset_name)
+ assert offset == offset
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
+ assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
+ assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
+ assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
+ assert self.offset8(self.d) == datetime(2014, 7, 1, 11)
+ assert self.offset9(self.d) == datetime(2014, 7, 1, 22)
+ assert self.offset10(self.d) == datetime(2014, 7, 1, 1)
+
+ def test_sub(self):
+ # we have to override test_sub here because self.offset2 is not
+ # defined as self._offset(2)
+ off = self.offset2
+ msg = "Cannot subtract datetime from offset"
+ with pytest.raises(TypeError, match=msg):
+ off - self.d
+ assert 2 * off - off == off
+
+ assert self.d - self.offset2 == self.d + self._offset(-3)
+
+ def testRollback1(self):
+ assert self.offset1.rollback(self.d) == self.d
+ assert self.offset2.rollback(self.d) == self.d
+ assert self.offset3.rollback(self.d) == self.d
+ assert self.offset4.rollback(self.d) == self.d
+ assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)
+ assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)
+ assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)
+ assert self.offset8.rollback(self.d) == self.d
+ assert self.offset9.rollback(self.d) == self.d
+ assert self.offset10.rollback(self.d) == datetime(2014, 7, 1, 2)
+
+ d = datetime(2014, 7, 1, 0)
+ assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)
+ assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)
+ assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)
+ assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)
+ assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)
+ assert self.offset6.rollback(d) == d
+ assert self.offset7.rollback(d) == d
+ assert self.offset8.rollback(d) == datetime(2014, 6, 30, 17)
+ assert self.offset9.rollback(d) == d
+ assert self.offset10.rollback(d) == d
+
+ assert self._offset(5).rollback(self.d) == self.d
+
+ def testRollback2(self):
+ assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(
+ 2014, 7, 4, 17, 0
+ )
+
+ def testRollforward1(self):
+ assert self.offset1.rollforward(self.d) == self.d
+ assert self.offset2.rollforward(self.d) == self.d
+ assert self.offset3.rollforward(self.d) == self.d
+ assert self.offset4.rollforward(self.d) == self.d
+ assert self.offset5.rollforward(self.d) == datetime(2014, 7, 1, 11, 0)
+ assert self.offset6.rollforward(self.d) == datetime(2014, 7, 1, 20, 0)
+ assert self.offset7.rollforward(self.d) == datetime(2014, 7, 1, 21, 30)
+ assert self.offset8.rollforward(self.d) == self.d
+ assert self.offset9.rollforward(self.d) == self.d
+ assert self.offset10.rollforward(self.d) == datetime(2014, 7, 1, 13)
+
+ d = datetime(2014, 7, 1, 0)
+ assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)
+ assert self.offset6.rollforward(d) == d
+ assert self.offset7.rollforward(d) == d
+ assert self.offset8.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset9.rollforward(d) == d
+ assert self.offset10.rollforward(d) == d
+
+ assert self._offset(5).rollforward(self.d) == self.d
+
+ def testRollforward2(self):
+ assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(
+ 2014, 7, 7, 9
+ )
+
+ def test_roll_date_object(self):
+ offset = BusinessHour()
+
+ dt = datetime(2014, 7, 6, 15, 0)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2014, 7, 4, 17)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2014, 7, 7, 9)
+
+ normalize_cases = []
+ normalize_cases.append(
+ (
+ BusinessHour(normalize=True),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
+ datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 7),
+ },
+ )
+ )
+
+ normalize_cases.append(
+ (
+ BusinessHour(-1, normalize=True),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
+ datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
+ datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
+ datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 4),
+ },
+ )
+ )
+
+ normalize_cases.append(
+ (
+ BusinessHour(1, normalize=True, start="17:00", end="04:00"),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
+ datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
+ datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
+ datetime(2014, 7, 7, 17): datetime(2014, 7, 7),
+ },
+ )
+ )
+
+ @pytest.mark.parametrize("case", normalize_cases)
+ def test_normalize(self, case):
+ offset, cases = case
+ for dt, expected in cases.items():
+ assert offset.apply(dt) == expected
+
+ on_offset_cases = []
+ on_offset_cases.append(
+ (
+ BusinessHour(),
+ {
+ datetime(2014, 7, 1, 9): True,
+ datetime(2014, 7, 1, 8, 59): False,
+ datetime(2014, 7, 1, 8): False,
+ datetime(2014, 7, 1, 17): True,
+ datetime(2014, 7, 1, 17, 1): False,
+ datetime(2014, 7, 1, 18): False,
+ datetime(2014, 7, 5, 9): False,
+ datetime(2014, 7, 6, 12): False,
+ },
+ )
+ )
+
+ on_offset_cases.append(
+ (
+ BusinessHour(start="10:00", end="15:00"),
+ {
+ datetime(2014, 7, 1, 9): False,
+ datetime(2014, 7, 1, 10): True,
+ datetime(2014, 7, 1, 15): True,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12): False,
+ datetime(2014, 7, 6, 12): False,
+ },
+ )
+ )
+
+ on_offset_cases.append(
+ (
+ BusinessHour(start="19:00", end="05:00"),
+ {
+ datetime(2014, 7, 1, 9, 0): False,
+ datetime(2014, 7, 1, 10, 0): False,
+ datetime(2014, 7, 1, 15): False,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12, 0): False,
+ datetime(2014, 7, 6, 12, 0): False,
+ datetime(2014, 7, 1, 19, 0): True,
+ datetime(2014, 7, 2, 0, 0): True,
+ datetime(2014, 7, 4, 23): True,
+ datetime(2014, 7, 5, 1): True,
+ datetime(2014, 7, 5, 5, 0): True,
+ datetime(2014, 7, 6, 23, 0): False,
+ datetime(2014, 7, 7, 3, 0): False,
+ },
+ )
+ )
+
+ on_offset_cases.append(
+ (
+ BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"]),
+ {
+ datetime(2014, 7, 1, 9): True,
+ datetime(2014, 7, 1, 8, 59): False,
+ datetime(2014, 7, 1, 8): False,
+ datetime(2014, 7, 1, 17): True,
+ datetime(2014, 7, 1, 17, 1): False,
+ datetime(2014, 7, 1, 18): False,
+ datetime(2014, 7, 5, 9): False,
+ datetime(2014, 7, 6, 12): False,
+ datetime(2014, 7, 1, 12, 30): False,
+ },
+ )
+ )
+
+ on_offset_cases.append(
+ (
+ BusinessHour(start=["19:00", "23:00"], end=["21:00", "05:00"]),
+ {
+ datetime(2014, 7, 1, 9, 0): False,
+ datetime(2014, 7, 1, 10, 0): False,
+ datetime(2014, 7, 1, 15): False,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12, 0): False,
+ datetime(2014, 7, 6, 12, 0): False,
+ datetime(2014, 7, 1, 19, 0): True,
+ datetime(2014, 7, 2, 0, 0): True,
+ datetime(2014, 7, 4, 23): True,
+ datetime(2014, 7, 5, 1): True,
+ datetime(2014, 7, 5, 5, 0): True,
+ datetime(2014, 7, 6, 23, 0): False,
+ datetime(2014, 7, 7, 3, 0): False,
+ datetime(2014, 7, 4, 22): False,
+ },
+ )
+ )
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ offset, cases = case
+ for dt, expected in cases.items():
+ assert offset.is_on_offset(dt) == expected
+
+ apply_cases = [
+ (
+ BusinessHour(),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
+ # out of business hours
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(4),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(-1),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
+ datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
+ # out of business hours
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
+ datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(-4),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(start="13:00", end="16:00"),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),
+ },
+ ),
+ (
+ BusinessHour(n=2, start="13:00", end="16:00"),
+ {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
+ datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=-1, start="13:00", end="16:00"),
+ {
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
+ datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15),
+ },
+ ),
+ (
+ BusinessHour(n=-3, start="10:00", end="16:00"),
+ {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
+ datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(start="19:00", end="05:00"),
+ {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
+ datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
+ datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
+ datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
+ datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=-1, start="19:00", end="05:00"),
+ {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
+ datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
+ datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
+ datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
+ datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=4, start="00:00", end="23:00"),
+ {
+ datetime(2014, 7, 3, 22): datetime(2014, 7, 4, 3),
+ datetime(2014, 7, 4, 22): datetime(2014, 7, 7, 3),
+ datetime(2014, 7, 3, 22, 30): datetime(2014, 7, 4, 3, 30),
+ datetime(2014, 7, 3, 22, 20): datetime(2014, 7, 4, 3, 20),
+ datetime(2014, 7, 4, 22, 30, 30): datetime(2014, 7, 7, 3, 30, 30),
+ datetime(2014, 7, 4, 22, 30, 20): datetime(2014, 7, 7, 3, 30, 20),
+ },
+ ),
+ (
+ BusinessHour(n=-4, start="00:00", end="23:00"),
+ {
+ datetime(2014, 7, 4, 3): datetime(2014, 7, 3, 22),
+ datetime(2014, 7, 7, 3): datetime(2014, 7, 4, 22),
+ datetime(2014, 7, 4, 3, 30): datetime(2014, 7, 3, 22, 30),
+ datetime(2014, 7, 4, 3, 20): datetime(2014, 7, 3, 22, 20),
+ datetime(2014, 7, 7, 3, 30, 30): datetime(2014, 7, 4, 22, 30, 30),
+ datetime(2014, 7, 7, 3, 30, 20): datetime(2014, 7, 4, 22, 30, 20),
+ },
+ ),
+ (
+ BusinessHour(start=["09:00", "14:00"], end=["12:00", "18:00"]),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 17, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 14),
+ # out of business hours
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 17, 30): datetime(2014, 7, 7, 9, 30),
+ datetime(2014, 7, 4, 17, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=4, start=["09:00", "14:00"], end=["12:00", "18:00"]),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 11),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 11, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 11, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=-4, start=["09:00", "14:00"], end=["12:00", "18:00"]),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 15): datetime(2014, 6, 30, 18),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 10),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 11),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 12),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 12),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 12),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 12),
+ datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 12),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 14, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 14, 30, 30),
+ },
+ ),
+ (
+ BusinessHour(n=-1, start=["19:00", "03:00"], end=["01:00", "05:00"]),
+ {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 4): datetime(2014, 7, 2, 1),
+ datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
+ datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
+ datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 0),
+ datetime(2014, 7, 7, 3, 30): datetime(2014, 7, 5, 0, 30),
+ datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 7, 4, 30),
+ datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 7, 4, 30, 30),
+ },
+ ),
+ ]
+
+ # long business hours (see gh-26381)
+
+ # multiple business hours
+
+ @pytest.mark.parametrize("case", apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ apply_large_n_cases = [
+ (
+ # A week later
+ BusinessHour(40),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
+ datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30),
+ },
+ ),
+ (
+ # 3 days and 1 hour before
+ BusinessHour(-25),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
+ datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
+ datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
+ datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
+ datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
+ datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),
+ },
+ ),
+ (
+ # 5 days and 3 hours later
+ BusinessHour(28, start="21:00", end="02:00"),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
+ datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
+ datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
+ datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
+ datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
+ datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
+ datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),
+ },
+ ),
+ (
+ # large n for multiple opening hours (3 days and 1 hour before)
+ BusinessHour(n=-25, start=["09:00", "14:00"], end=["12:00", "19:00"]),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 11),
+ datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 18),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 19),
+ datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
+ datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 18),
+ datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 18),
+ datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 18),
+ datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 18),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 18),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 18),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 18, 30),
+ datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),
+ },
+ ),
+ (
+ # 5 days and 3 hours later
+ BusinessHour(28, start=["21:00", "03:00"], end=["01:00", "04:00"]),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
+ datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 3),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 9, 23),
+ datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
+ datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
+ datetime(2014, 7, 4, 2): datetime(2014, 7, 11, 23),
+ datetime(2014, 7, 4, 3): datetime(2014, 7, 11, 23),
+ datetime(2014, 7, 4, 21): datetime(2014, 7, 12, 0),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 14, 22),
+ datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 7, 1): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", apply_large_n_cases)
+ def test_apply_large_n(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_apply_nanoseconds(self):
+ tests = [
+ (
+ BusinessHour(),
+ {
+ Timestamp("2014-07-04 15:00")
+ + Nano(5): Timestamp("2014-07-04 16:00")
+ + Nano(5),
+ Timestamp("2014-07-04 16:00")
+ + Nano(5): Timestamp("2014-07-07 09:00")
+ + Nano(5),
+ Timestamp("2014-07-04 16:00")
+ - Nano(5): Timestamp("2014-07-04 17:00")
+ - Nano(5),
+ },
+ ),
+ (
+ BusinessHour(-1),
+ {
+ Timestamp("2014-07-04 15:00")
+ + Nano(5): Timestamp("2014-07-04 14:00")
+ + Nano(5),
+ Timestamp("2014-07-04 10:00")
+ + Nano(5): Timestamp("2014-07-04 09:00")
+ + Nano(5),
+ Timestamp("2014-07-04 10:00")
+ - Nano(5): Timestamp("2014-07-03 17:00")
+ - Nano(5),
+ },
+ ),
+ ]
+
+ for offset, cases in tests:
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_datetimeindex(self):
+ idx1 = date_range(start="2014-07-04 15:00", end="2014-07-08 10:00", freq="BH")
+ idx2 = date_range(start="2014-07-04 15:00", periods=12, freq="BH")
+ idx3 = date_range(end="2014-07-08 10:00", periods=12, freq="BH")
+ expected = DatetimeIndex(
+ [
+ "2014-07-04 15:00",
+ "2014-07-04 16:00",
+ "2014-07-07 09:00",
+ "2014-07-07 10:00",
+ "2014-07-07 11:00",
+ "2014-07-07 12:00",
+ "2014-07-07 13:00",
+ "2014-07-07 14:00",
+ "2014-07-07 15:00",
+ "2014-07-07 16:00",
+ "2014-07-08 09:00",
+ "2014-07-08 10:00",
+ ],
+ freq="BH",
+ )
+ for idx in [idx1, idx2, idx3]:
+ tm.assert_index_equal(idx, expected)
+
+ idx1 = date_range(start="2014-07-04 15:45", end="2014-07-08 10:45", freq="BH")
+ idx2 = date_range(start="2014-07-04 15:45", periods=12, freq="BH")
+ idx3 = date_range(end="2014-07-08 10:45", periods=12, freq="BH")
+
+ expected = idx1
+ for idx in [idx1, idx2, idx3]:
+ tm.assert_index_equal(idx, expected)
+
+ def test_bday_ignores_timedeltas(self):
+ idx = date_range("2010/02/01", "2010/02/10", freq="12H")
+ t1 = idx + BDay(offset=Timedelta(3, unit="H"))
+
+ expected = DatetimeIndex(
+ [
+ "2010-02-02 03:00:00",
+ "2010-02-02 15:00:00",
+ "2010-02-03 03:00:00",
+ "2010-02-03 15:00:00",
+ "2010-02-04 03:00:00",
+ "2010-02-04 15:00:00",
+ "2010-02-05 03:00:00",
+ "2010-02-05 15:00:00",
+ "2010-02-08 03:00:00",
+ "2010-02-08 15:00:00",
+ "2010-02-08 03:00:00",
+ "2010-02-08 15:00:00",
+ "2010-02-08 03:00:00",
+ "2010-02-08 15:00:00",
+ "2010-02-09 03:00:00",
+ "2010-02-09 15:00:00",
+ "2010-02-10 03:00:00",
+ "2010-02-10 15:00:00",
+ "2010-02-11 03:00:00",
+ ],
+ freq=None,
+ )
+ tm.assert_index_equal(t1, expected)
diff --git a/pandas/tests/tseries/offsets/test_custom_business_hour.py b/pandas/tests/tseries/offsets/test_custom_business_hour.py
new file mode 100644
index 0000000000000..f05b286616572
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_custom_business_hour.py
@@ -0,0 +1,293 @@
+"""
+Tests for offsets.CustomBusinessHour
+"""
+from datetime import datetime
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+from pandas._libs.tslibs.offsets import BusinessHour, CustomBusinessHour, Nano
+
+import pandas._testing as tm
+from pandas.tests.tseries.offsets.common import Base, assert_offset_equal
+
+
+class TestCustomBusinessHour(Base):
+ _offset = CustomBusinessHour
+ holidays = ["2014-06-27", datetime(2014, 6, 30), np.datetime64("2014-07-02")]
+
+ def setup_method(self, method):
+ # 2014 Calendar to check custom holidays
+ # Sun Mon Tue Wed Thu Fri Sat
+ # 6/22 23 24 25 26 27 28
+ # 29 30 7/1 2 3 4 5
+ # 6 7 8 9 10 11 12
+ self.d = datetime(2014, 7, 1, 10, 00)
+ self.offset1 = CustomBusinessHour(weekmask="Tue Wed Thu Fri")
+
+ self.offset2 = CustomBusinessHour(holidays=self.holidays)
+
+ def test_constructor_errors(self):
+ from datetime import time as dt_time
+
+ msg = "time data must be specified only with hour and minute"
+ with pytest.raises(ValueError, match=msg):
+ CustomBusinessHour(start=dt_time(11, 0, 5))
+ msg = "time data must match '%H:%M' format"
+ with pytest.raises(ValueError, match=msg):
+ CustomBusinessHour(start="AAA")
+ msg = "time data must match '%H:%M' format"
+ with pytest.raises(ValueError, match=msg):
+ CustomBusinessHour(start="14:00:05")
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset1) == "<CustomBusinessHour: CBH=09:00-17:00>"
+ assert repr(self.offset2) == "<CustomBusinessHour: CBH=09:00-17:00>"
+
+ def test_with_offset(self):
+ expected = Timestamp("2014-07-01 13:00")
+
+ assert self.d + CustomBusinessHour() * 3 == expected
+ assert self.d + CustomBusinessHour(n=3) == expected
+
+ def test_eq(self):
+ for offset in [self.offset1, self.offset2]:
+ assert offset == offset
+
+ assert CustomBusinessHour() != CustomBusinessHour(-1)
+ assert CustomBusinessHour(start="09:00") == CustomBusinessHour()
+ assert CustomBusinessHour(start="09:00") != CustomBusinessHour(start="09:01")
+ assert CustomBusinessHour(start="09:00", end="17:00") != CustomBusinessHour(
+ start="17:00", end="09:01"
+ )
+
+ assert CustomBusinessHour(weekmask="Tue Wed Thu Fri") != CustomBusinessHour(
+ weekmask="Mon Tue Wed Thu Fri"
+ )
+ assert CustomBusinessHour(holidays=["2014-06-27"]) != CustomBusinessHour(
+ holidays=["2014-06-28"]
+ )
+
+ def test_sub(self):
+ # override the Base.test_sub implementation because self.offset2 is
+ # defined differently in this class than the test expects
+ pass
+
+ def test_hash(self):
+ assert hash(self.offset1) == hash(self.offset1)
+ assert hash(self.offset2) == hash(self.offset2)
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
+ assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
+
+ def testRollback1(self):
+ assert self.offset1.rollback(self.d) == self.d
+ assert self.offset2.rollback(self.d) == self.d
+
+ d = datetime(2014, 7, 1, 0)
+
+ # 2014/07/01 is Tuesday, 06/30 is Monday(holiday)
+ assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)
+
+ # 2014/6/30 and 2014/6/27 are holidays
+ assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)
+
+ def testRollback2(self):
+ assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(
+ 2014, 7, 4, 17, 0
+ )
+
+ def testRollforward1(self):
+ assert self.offset1.rollforward(self.d) == self.d
+ assert self.offset2.rollforward(self.d) == self.d
+
+ d = datetime(2014, 7, 1, 0)
+ assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
+ assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
+
+ def testRollforward2(self):
+ assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(
+ 2014, 7, 7, 9
+ )
+
+ def test_roll_date_object(self):
+ offset = BusinessHour()
+
+ dt = datetime(2014, 7, 6, 15, 0)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2014, 7, 4, 17)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2014, 7, 7, 9)
+
+ normalize_cases = [
+ (
+ CustomBusinessHour(normalize=True, holidays=holidays),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
+ datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
+ datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 7),
+ },
+ ),
+ (
+ CustomBusinessHour(-1, normalize=True, holidays=holidays),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
+ datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
+ datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
+ datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 4),
+ },
+ ),
+ (
+ CustomBusinessHour(
+ 1, normalize=True, start="17:00", end="04:00", holidays=holidays
+ ),
+ {
+ datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
+ datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
+ datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
+ datetime(2014, 7, 7, 17): datetime(2014, 7, 7),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("norm_cases", normalize_cases)
+ def test_normalize(self, norm_cases):
+ offset, cases = norm_cases
+ for dt, expected in cases.items():
+ assert offset.apply(dt) == expected
+
+ def test_is_on_offset(self):
+ tests = [
+ (
+ CustomBusinessHour(start="10:00", end="15:00", holidays=self.holidays),
+ {
+ datetime(2014, 7, 1, 9): False,
+ datetime(2014, 7, 1, 10): True,
+ datetime(2014, 7, 1, 15): True,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12): False,
+ datetime(2014, 7, 6, 12): False,
+ },
+ )
+ ]
+
+ for offset, cases in tests:
+ for dt, expected in cases.items():
+ assert offset.is_on_offset(dt) == expected
+
+ apply_cases = [
+ (
+ CustomBusinessHour(holidays=holidays),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
+ # out of business hours
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
+ },
+ ),
+ (
+ CustomBusinessHour(4, holidays=holidays),
+ {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("apply_case", apply_cases)
+ def test_apply(self, apply_case):
+ offset, cases = apply_case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ nano_cases = [
+ (
+ CustomBusinessHour(holidays=holidays),
+ {
+ Timestamp("2014-07-01 15:00")
+ + Nano(5): Timestamp("2014-07-01 16:00")
+ + Nano(5),
+ Timestamp("2014-07-01 16:00")
+ + Nano(5): Timestamp("2014-07-03 09:00")
+ + Nano(5),
+ Timestamp("2014-07-01 16:00")
+ - Nano(5): Timestamp("2014-07-01 17:00")
+ - Nano(5),
+ },
+ ),
+ (
+ CustomBusinessHour(-1, holidays=holidays),
+ {
+ Timestamp("2014-07-01 15:00")
+ + Nano(5): Timestamp("2014-07-01 14:00")
+ + Nano(5),
+ Timestamp("2014-07-01 10:00")
+ + Nano(5): Timestamp("2014-07-01 09:00")
+ + Nano(5),
+ Timestamp("2014-07-01 10:00")
+ - Nano(5): Timestamp("2014-06-26 17:00")
+ - Nano(5),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("nano_case", nano_cases)
+ def test_apply_nanoseconds(self, nano_case):
+ offset, cases = nano_case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
diff --git a/pandas/tests/tseries/offsets/test_dst.py b/pandas/tests/tseries/offsets/test_dst.py
new file mode 100644
index 0000000000000..0ae94b6b57640
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_dst.py
@@ -0,0 +1,175 @@
+"""
+Tests for DateOffset additions over Daylight Savings Time
+"""
+from datetime import timedelta
+
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+from pandas._libs.tslibs.offsets import (
+ BMonthBegin,
+ BMonthEnd,
+ BQuarterBegin,
+ BQuarterEnd,
+ BYearBegin,
+ BYearEnd,
+ CBMonthBegin,
+ CBMonthEnd,
+ DateOffset,
+ Day,
+ MonthBegin,
+ MonthEnd,
+ QuarterBegin,
+ QuarterEnd,
+ SemiMonthBegin,
+ SemiMonthEnd,
+ Week,
+ YearBegin,
+ YearEnd,
+)
+
+from pandas.tests.tseries.offsets.test_offsets import get_utc_offset_hours
+
+
+class TestDST:
+
+ # one microsecond before the DST transition
+ ts_pre_fallback = "2013-11-03 01:59:59.999999"
+ ts_pre_springfwd = "2013-03-10 01:59:59.999999"
+
+ # test both basic names and dateutil timezones
+ timezone_utc_offsets = {
+ "US/Eastern": {"utc_offset_daylight": -4, "utc_offset_standard": -5},
+ "dateutil/US/Pacific": {"utc_offset_daylight": -7, "utc_offset_standard": -8},
+ }
+ valid_date_offsets_singular = [
+ "weekday",
+ "day",
+ "hour",
+ "minute",
+ "second",
+ "microsecond",
+ ]
+ valid_date_offsets_plural = [
+ "weeks",
+ "days",
+ "hours",
+ "minutes",
+ "seconds",
+ "milliseconds",
+ "microseconds",
+ ]
+
+ def _test_all_offsets(self, n, **kwds):
+ valid_offsets = (
+ self.valid_date_offsets_plural
+ if n > 1
+ else self.valid_date_offsets_singular
+ )
+
+ for name in valid_offsets:
+ self._test_offset(offset_name=name, offset_n=n, **kwds)
+
+ def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
+ offset = DateOffset(**{offset_name: offset_n})
+
+ t = tstart + offset
+ if expected_utc_offset is not None:
+ assert get_utc_offset_hours(t) == expected_utc_offset
+
+ if offset_name == "weeks":
+ # dates should match
+ assert t.date() == timedelta(days=7 * offset.kwds["weeks"]) + tstart.date()
+ # expect the same day of week, hour of day, minute, second, ...
+ assert (
+ t.dayofweek == tstart.dayofweek
+ and t.hour == tstart.hour
+ and t.minute == tstart.minute
+ and t.second == tstart.second
+ )
+ elif offset_name == "days":
+ # dates should match
+ assert timedelta(offset.kwds["days"]) + tstart.date() == t.date()
+ # expect the same hour of day, minute, second, ...
+ assert (
+ t.hour == tstart.hour
+ and t.minute == tstart.minute
+ and t.second == tstart.second
+ )
+ elif offset_name in self.valid_date_offsets_singular:
+ # expect the singular offset value to match between tstart and t
+ datepart_offset = getattr(
+ t, offset_name if offset_name != "weekday" else "dayofweek"
+ )
+ assert datepart_offset == offset.kwds[offset_name]
+ else:
+ # the offset should be the same as if it was done in UTC
+ assert t == (tstart.tz_convert("UTC") + offset).tz_convert("US/Pacific")
+
+ def _make_timestamp(self, string, hrs_offset, tz):
+ if hrs_offset >= 0:
+ offset_string = f"{hrs_offset:02d}00"
+ else:
+ offset_string = f"-{(hrs_offset * -1):02}00"
+ return Timestamp(string + offset_string).tz_convert(tz)
+
+ def test_springforward_plural(self):
+ # test moving from standard to daylight savings
+ for tz, utc_offsets in self.timezone_utc_offsets.items():
+ hrs_pre = utc_offsets["utc_offset_standard"]
+ hrs_post = utc_offsets["utc_offset_daylight"]
+ self._test_all_offsets(
+ n=3,
+ tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
+ expected_utc_offset=hrs_post,
+ )
+
+ def test_fallback_singular(self):
+ # in the case of singular offsets, we don't necessarily know which utc
+ # offset the new Timestamp will wind up in (the tz for 1 month may be
+ # different from 1 second) so we don't specify an expected_utc_offset
+ for tz, utc_offsets in self.timezone_utc_offsets.items():
+ hrs_pre = utc_offsets["utc_offset_standard"]
+ self._test_all_offsets(
+ n=1,
+ tstart=self._make_timestamp(self.ts_pre_fallback, hrs_pre, tz),
+ expected_utc_offset=None,
+ )
+
+ def test_springforward_singular(self):
+ for tz, utc_offsets in self.timezone_utc_offsets.items():
+ hrs_pre = utc_offsets["utc_offset_standard"]
+ self._test_all_offsets(
+ n=1,
+ tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
+ expected_utc_offset=None,
+ )
+
+ offset_classes = {
+ MonthBegin: ["11/2/2012", "12/1/2012"],
+ MonthEnd: ["11/2/2012", "11/30/2012"],
+ BMonthBegin: ["11/2/2012", "12/3/2012"],
+ BMonthEnd: ["11/2/2012", "11/30/2012"],
+ CBMonthBegin: ["11/2/2012", "12/3/2012"],
+ CBMonthEnd: ["11/2/2012", "11/30/2012"],
+ SemiMonthBegin: ["11/2/2012", "11/15/2012"],
+ SemiMonthEnd: ["11/2/2012", "11/15/2012"],
+ Week: ["11/2/2012", "11/9/2012"],
+ YearBegin: ["11/2/2012", "1/1/2013"],
+ YearEnd: ["11/2/2012", "12/31/2012"],
+ BYearBegin: ["11/2/2012", "1/1/2013"],
+ BYearEnd: ["11/2/2012", "12/31/2012"],
+ QuarterBegin: ["11/2/2012", "12/1/2012"],
+ QuarterEnd: ["11/2/2012", "12/31/2012"],
+ BQuarterBegin: ["11/2/2012", "12/3/2012"],
+ BQuarterEnd: ["11/2/2012", "12/31/2012"],
+ Day: ["11/4/2012", "11/4/2012 23:00"],
+ }.items()
+
+ @pytest.mark.parametrize("tup", offset_classes)
+ def test_all_offset_classes(self, tup):
+ offset, test_values = tup
+
+ first = Timestamp(test_values[0], tz="US/Eastern") + offset()
+ second = Timestamp(test_values[1], tz="US/Eastern")
+ assert first == second
diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py
index 7713be67a7e05..14728314b8e20 100644
--- a/pandas/tests/tseries/offsets/test_fiscal.py
+++ b/pandas/tests/tseries/offsets/test_fiscal.py
@@ -10,13 +10,16 @@
from pandas import Timestamp
import pandas._testing as tm
+from pandas.tests.tseries.offsets.common import (
+ Base,
+ WeekDay,
+ assert_is_on_offset,
+ assert_offset_equal,
+)
from pandas.tseries.frequencies import get_offset
from pandas.tseries.offsets import FY5253, FY5253Quarter
-from .common import assert_is_on_offset, assert_offset_equal
-from .test_offsets import Base, WeekDay
-
def makeFY5253LastOfMonthQuarter(*args, **kwds):
return FY5253Quarter(*args, variation="last", **kwds)
diff --git a/pandas/tests/tseries/offsets/test_month.py b/pandas/tests/tseries/offsets/test_month.py
new file mode 100644
index 0000000000000..578af79084e09
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_month.py
@@ -0,0 +1,838 @@
+"""
+Tests for CBMonthEnd CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
+"""
+from datetime import date, datetime
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+from pandas._libs.tslibs.offsets import (
+ CBMonthBegin,
+ CBMonthEnd,
+ CDay,
+ SemiMonthBegin,
+ SemiMonthEnd,
+)
+
+from pandas import DatetimeIndex, Series, _testing as tm, date_range
+from pandas.tests.tseries.offsets.common import (
+ Base,
+ assert_is_on_offset,
+ assert_offset_equal,
+)
+from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
+
+from pandas.tseries import offsets as offsets
+from pandas.tseries.holiday import USFederalHolidayCalendar
+
+
+class CustomBusinessMonthBase:
+ def setup_method(self, method):
+ self.d = datetime(2008, 1, 1)
+
+ self.offset = self._offset()
+ self.offset1 = self.offset
+ self.offset2 = self._offset(2)
+
+ def test_eq(self):
+ assert self.offset2 == self.offset2
+
+ def test_mul(self):
+ pass
+
+ def test_hash(self):
+ assert hash(self.offset2) == hash(self.offset2)
+
+ def test_roundtrip_pickle(self):
+ def _check_roundtrip(obj):
+ unpickled = tm.round_trip_pickle(obj)
+ assert unpickled == obj
+
+ _check_roundtrip(self._offset())
+ _check_roundtrip(self._offset(2))
+ _check_roundtrip(self._offset() * 2)
+
+ def test_copy(self):
+ # GH 17452
+ off = self._offset(weekmask="Mon Wed Fri")
+ assert off == off.copy()
+
+
+class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
+ _offset = CBMonthEnd
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset) == "<CustomBusinessMonthEnd>"
+ assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset2(self.d) == datetime(2008, 2, 29)
+
+ def testRollback1(self):
+ assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
+
+ def testRollback2(self):
+ assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
+
+ def testRollforward1(self):
+ assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
+
+ def test_roll_date_object(self):
+ offset = CBMonthEnd()
+
+ dt = date(2012, 9, 15)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 8, 31)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 28)
+
+ offset = offsets.Day()
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 15)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 15)
+
+ on_offset_cases = [
+ (CBMonthEnd(), datetime(2008, 1, 31), True),
+ (CBMonthEnd(), datetime(2008, 1, 1), False),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ offset, d, expected = case
+ assert_is_on_offset(offset, d, expected)
+
+ apply_cases: _ApplyCases = [
+ (
+ CBMonthEnd(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 2, 7): datetime(2008, 2, 29),
+ },
+ ),
+ (
+ 2 * CBMonthEnd(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 2, 29),
+ datetime(2008, 2, 7): datetime(2008, 3, 31),
+ },
+ ),
+ (
+ -CBMonthEnd(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 31),
+ datetime(2008, 2, 8): datetime(2008, 1, 31),
+ },
+ ),
+ (
+ -2 * CBMonthEnd(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 11, 30),
+ datetime(2008, 2, 9): datetime(2007, 12, 31),
+ },
+ ),
+ (
+ CBMonthEnd(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 2, 7): datetime(2008, 2, 29),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_apply_large_n(self):
+ dt = datetime(2012, 10, 23)
+
+ result = dt + CBMonthEnd(10)
+ assert result == datetime(2013, 7, 31)
+
+ result = dt + CDay(100) - CDay(100)
+ assert result == dt
+
+ off = CBMonthEnd() * 6
+ rs = datetime(2012, 1, 1) - off
+ xp = datetime(2011, 7, 29)
+ assert rs == xp
+
+ st = datetime(2011, 12, 18)
+ rs = st + off
+ xp = datetime(2012, 5, 31)
+ assert rs == xp
+
+ def test_holidays(self):
+ # Define a TradingDay offset
+ holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
+ bm_offset = CBMonthEnd(holidays=holidays)
+ dt = datetime(2012, 1, 1)
+ assert dt + bm_offset == datetime(2012, 1, 30)
+ assert dt + 2 * bm_offset == datetime(2012, 2, 27)
+
+ @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
+ def test_datetimeindex(self):
+ from pandas.tseries.holiday import USFederalHolidayCalendar
+
+ hcal = USFederalHolidayCalendar()
+ freq = CBMonthEnd(calendar=hcal)
+
+ assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
+ 0
+ ] == datetime(2012, 1, 31)
+
+
+class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
+ _offset = CBMonthBegin
+
+ def test_different_normalize_equals(self):
+ # GH#21404 changed __eq__ to return False when `normalize` does not match
+ offset = self._offset()
+ offset2 = self._offset(normalize=True)
+ assert offset != offset2
+
+ def test_repr(self):
+ assert repr(self.offset) == "<CustomBusinessMonthBegin>"
+ assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
+
+ def test_call(self):
+ with tm.assert_produces_warning(FutureWarning):
+ # GH#34171 DateOffset.__call__ is deprecated
+ assert self.offset2(self.d) == datetime(2008, 3, 3)
+
+ def testRollback1(self):
+ assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
+
+ def testRollback2(self):
+ assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
+
+ def testRollforward1(self):
+ assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
+
+ def test_roll_date_object(self):
+ offset = CBMonthBegin()
+
+ dt = date(2012, 9, 15)
+
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 3)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 10, 1)
+
+ offset = offsets.Day()
+ result = offset.rollback(dt)
+ assert result == datetime(2012, 9, 15)
+
+ result = offset.rollforward(dt)
+ assert result == datetime(2012, 9, 15)
+
+ on_offset_cases = [
+ (CBMonthBegin(), datetime(2008, 1, 1), True),
+ (CBMonthBegin(), datetime(2008, 1, 31), False),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ offset, dt, expected = case
+ assert_is_on_offset(offset, dt, expected)
+
+ apply_cases: _ApplyCases = [
+ (
+ CBMonthBegin(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 2, 1),
+ datetime(2008, 2, 7): datetime(2008, 3, 3),
+ },
+ ),
+ (
+ 2 * CBMonthBegin(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 3, 3),
+ datetime(2008, 2, 7): datetime(2008, 4, 1),
+ },
+ ),
+ (
+ -CBMonthBegin(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 12, 3),
+ datetime(2008, 2, 8): datetime(2008, 2, 1),
+ },
+ ),
+ (
+ -2 * CBMonthBegin(),
+ {
+ datetime(2008, 1, 1): datetime(2007, 11, 1),
+ datetime(2008, 2, 9): datetime(2008, 1, 1),
+ },
+ ),
+ (
+ CBMonthBegin(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 7): datetime(2008, 2, 1),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ def test_apply_large_n(self):
+ dt = datetime(2012, 10, 23)
+
+ result = dt + CBMonthBegin(10)
+ assert result == datetime(2013, 8, 1)
+
+ result = dt + CDay(100) - CDay(100)
+ assert result == dt
+
+ off = CBMonthBegin() * 6
+ rs = datetime(2012, 1, 1) - off
+ xp = datetime(2011, 7, 1)
+ assert rs == xp
+
+ st = datetime(2011, 12, 18)
+ rs = st + off
+
+ xp = datetime(2012, 6, 1)
+ assert rs == xp
+
+ def test_holidays(self):
+ # Define a TradingDay offset
+ holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
+ bm_offset = CBMonthBegin(holidays=holidays)
+ dt = datetime(2012, 1, 1)
+
+ assert dt + bm_offset == datetime(2012, 1, 2)
+ assert dt + 2 * bm_offset == datetime(2012, 2, 3)
+
+ @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
+ def test_datetimeindex(self):
+ hcal = USFederalHolidayCalendar()
+ cbmb = CBMonthBegin(calendar=hcal)
+ assert date_range(start="20120101", end="20130101", freq=cbmb).tolist()[
+ 0
+ ] == datetime(2012, 1, 3)
+
+
+class TestSemiMonthEnd(Base):
+ _offset = SemiMonthEnd
+ offset1 = _offset()
+ offset2 = _offset(2)
+
+ def test_offset_whole_year(self):
+ dates = (
+ datetime(2007, 12, 31),
+ datetime(2008, 1, 15),
+ datetime(2008, 1, 31),
+ datetime(2008, 2, 15),
+ datetime(2008, 2, 29),
+ datetime(2008, 3, 15),
+ datetime(2008, 3, 31),
+ datetime(2008, 4, 15),
+ datetime(2008, 4, 30),
+ datetime(2008, 5, 15),
+ datetime(2008, 5, 31),
+ datetime(2008, 6, 15),
+ datetime(2008, 6, 30),
+ datetime(2008, 7, 15),
+ datetime(2008, 7, 31),
+ datetime(2008, 8, 15),
+ datetime(2008, 8, 31),
+ datetime(2008, 9, 15),
+ datetime(2008, 9, 30),
+ datetime(2008, 10, 15),
+ datetime(2008, 10, 31),
+ datetime(2008, 11, 15),
+ datetime(2008, 11, 30),
+ datetime(2008, 12, 15),
+ datetime(2008, 12, 31),
+ )
+
+ for base, exp_date in zip(dates[:-1], dates[1:]):
+ assert_offset_equal(SemiMonthEnd(), base, exp_date)
+
+ # ensure .apply_index works as expected
+ s = DatetimeIndex(dates[:-1])
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = SemiMonthEnd() + s
+
+ exp = DatetimeIndex(dates[1:])
+ tm.assert_index_equal(result, exp)
+
+ # ensure generating a range with DatetimeIndex gives same result
+ result = date_range(start=dates[0], end=dates[-1], freq="SM")
+ exp = DatetimeIndex(dates, freq="SM")
+ tm.assert_index_equal(result, exp)
+
+ offset_cases = []
+ offset_cases.append(
+ (
+ SemiMonthEnd(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 15),
+ datetime(2008, 1, 15): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 2, 15),
+ datetime(2006, 12, 14): datetime(2006, 12, 15),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2007, 1, 15),
+ datetime(2007, 1, 1): datetime(2007, 1, 15),
+ datetime(2006, 12, 1): datetime(2006, 12, 15),
+ datetime(2006, 12, 15): datetime(2006, 12, 31),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(day_of_month=20),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 20),
+ datetime(2008, 1, 15): datetime(2008, 1, 20),
+ datetime(2008, 1, 21): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 2, 20),
+ datetime(2006, 12, 14): datetime(2006, 12, 20),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2007, 1, 20),
+ datetime(2007, 1, 1): datetime(2007, 1, 20),
+ datetime(2006, 12, 1): datetime(2006, 12, 20),
+ datetime(2006, 12, 15): datetime(2006, 12, 20),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 15),
+ datetime(2008, 1, 16): datetime(2008, 1, 31),
+ datetime(2008, 1, 15): datetime(2008, 1, 15),
+ datetime(2008, 1, 31): datetime(2008, 1, 31),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2006, 12, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 15),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(0, day_of_month=16),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 16),
+ datetime(2008, 1, 16): datetime(2008, 1, 16),
+ datetime(2008, 1, 15): datetime(2008, 1, 16),
+ datetime(2008, 1, 31): datetime(2008, 1, 31),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2006, 12, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 16),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(2),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 2, 29),
+ datetime(2006, 12, 29): datetime(2007, 1, 15),
+ datetime(2006, 12, 31): datetime(2007, 1, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 31),
+ datetime(2007, 1, 16): datetime(2007, 2, 15),
+ datetime(2006, 11, 1): datetime(2006, 11, 30),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(-1),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ datetime(2008, 6, 30): datetime(2008, 6, 15),
+ datetime(2008, 12, 31): datetime(2008, 12, 15),
+ datetime(2006, 12, 29): datetime(2006, 12, 15),
+ datetime(2006, 12, 30): datetime(2006, 12, 15),
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(-1, day_of_month=4),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ datetime(2007, 1, 4): datetime(2006, 12, 31),
+ datetime(2008, 6, 30): datetime(2008, 6, 4),
+ datetime(2008, 12, 31): datetime(2008, 12, 4),
+ datetime(2006, 12, 5): datetime(2006, 12, 4),
+ datetime(2006, 12, 30): datetime(2006, 12, 4),
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ },
+ )
+ )
+
+ offset_cases.append(
+ (
+ SemiMonthEnd(-2),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 15),
+ datetime(2008, 6, 30): datetime(2008, 5, 31),
+ datetime(2008, 3, 15): datetime(2008, 2, 15),
+ datetime(2008, 12, 31): datetime(2008, 11, 30),
+ datetime(2006, 12, 29): datetime(2006, 11, 30),
+ datetime(2006, 12, 14): datetime(2006, 11, 15),
+ datetime(2007, 1, 1): datetime(2006, 12, 15),
+ },
+ )
+ )
+
+ @pytest.mark.parametrize("case", offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ @pytest.mark.parametrize("case", offset_cases)
+ def test_apply_index(self, case):
+ # https://github.com/pandas-dev/pandas/issues/34580
+ offset, cases = case
+ s = DatetimeIndex(cases.keys())
+ exp = DatetimeIndex(cases.values())
+
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = offset + s
+ tm.assert_index_equal(result, exp)
+
+ with tm.assert_produces_warning(FutureWarning):
+ result = offset.apply_index(s)
+ tm.assert_index_equal(result, exp)
+
+ on_offset_cases = [
+ (datetime(2007, 12, 31), True),
+ (datetime(2007, 12, 15), True),
+ (datetime(2007, 12, 14), False),
+ (datetime(2007, 12, 1), False),
+ (datetime(2008, 2, 29), True),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ dt, expected = case
+ assert_is_on_offset(SemiMonthEnd(), dt, expected)
+
+ @pytest.mark.parametrize("klass", [Series, DatetimeIndex])
+ def test_vectorized_offset_addition(self, klass):
+ s = klass(
+ [
+ Timestamp("2000-01-15 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-15", tz="US/Central"),
+ ],
+ name="a",
+ )
+
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = s + SemiMonthEnd()
+ result2 = SemiMonthEnd() + s
+
+ exp = klass(
+ [
+ Timestamp("2000-01-31 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-29", tz="US/Central"),
+ ],
+ name="a",
+ )
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
+
+ s = klass(
+ [
+ Timestamp("2000-01-01 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-01", tz="US/Central"),
+ ],
+ name="a",
+ )
+
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = s + SemiMonthEnd()
+ result2 = SemiMonthEnd() + s
+
+ exp = klass(
+ [
+ Timestamp("2000-01-15 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-15", tz="US/Central"),
+ ],
+ name="a",
+ )
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
+
+
+class TestSemiMonthBegin(Base):
+ _offset = SemiMonthBegin
+ offset1 = _offset()
+ offset2 = _offset(2)
+
+ def test_offset_whole_year(self):
+ dates = (
+ datetime(2007, 12, 15),
+ datetime(2008, 1, 1),
+ datetime(2008, 1, 15),
+ datetime(2008, 2, 1),
+ datetime(2008, 2, 15),
+ datetime(2008, 3, 1),
+ datetime(2008, 3, 15),
+ datetime(2008, 4, 1),
+ datetime(2008, 4, 15),
+ datetime(2008, 5, 1),
+ datetime(2008, 5, 15),
+ datetime(2008, 6, 1),
+ datetime(2008, 6, 15),
+ datetime(2008, 7, 1),
+ datetime(2008, 7, 15),
+ datetime(2008, 8, 1),
+ datetime(2008, 8, 15),
+ datetime(2008, 9, 1),
+ datetime(2008, 9, 15),
+ datetime(2008, 10, 1),
+ datetime(2008, 10, 15),
+ datetime(2008, 11, 1),
+ datetime(2008, 11, 15),
+ datetime(2008, 12, 1),
+ datetime(2008, 12, 15),
+ )
+
+ for base, exp_date in zip(dates[:-1], dates[1:]):
+ assert_offset_equal(SemiMonthBegin(), base, exp_date)
+
+ # ensure .apply_index works as expected
+ s = DatetimeIndex(dates[:-1])
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = SemiMonthBegin() + s
+
+ exp = DatetimeIndex(dates[1:])
+ tm.assert_index_equal(result, exp)
+
+ # ensure generating a range with DatetimeIndex gives same result
+ result = date_range(start=dates[0], end=dates[-1], freq="SMS")
+ exp = DatetimeIndex(dates, freq="SMS")
+ tm.assert_index_equal(result, exp)
+
+ offset_cases = [
+ (
+ SemiMonthBegin(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 15),
+ datetime(2008, 1, 15): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 14): datetime(2006, 12, 15),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2007, 1, 1): datetime(2007, 1, 15),
+ datetime(2006, 12, 1): datetime(2006, 12, 15),
+ datetime(2006, 12, 15): datetime(2007, 1, 1),
+ },
+ ),
+ (
+ SemiMonthBegin(day_of_month=20),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 20),
+ datetime(2008, 1, 15): datetime(2008, 1, 20),
+ datetime(2008, 1, 21): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 14): datetime(2006, 12, 20),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2007, 1, 1): datetime(2007, 1, 20),
+ datetime(2006, 12, 1): datetime(2006, 12, 20),
+ datetime(2006, 12, 15): datetime(2006, 12, 20),
+ },
+ ),
+ (
+ SemiMonthBegin(0),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 16): datetime(2008, 2, 1),
+ datetime(2008, 1, 15): datetime(2008, 1, 15),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 2): datetime(2006, 12, 15),
+ datetime(2007, 1, 1): datetime(2007, 1, 1),
+ },
+ ),
+ (
+ SemiMonthBegin(0, day_of_month=16),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 16): datetime(2008, 1, 16),
+ datetime(2008, 1, 15): datetime(2008, 1, 16),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2007, 1, 5): datetime(2007, 1, 16),
+ datetime(2007, 1, 1): datetime(2007, 1, 1),
+ },
+ ),
+ (
+ SemiMonthBegin(2),
+ {
+ datetime(2008, 1, 1): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 15),
+ datetime(2006, 12, 1): datetime(2007, 1, 1),
+ datetime(2006, 12, 29): datetime(2007, 1, 15),
+ datetime(2006, 12, 15): datetime(2007, 1, 15),
+ datetime(2007, 1, 1): datetime(2007, 2, 1),
+ datetime(2007, 1, 16): datetime(2007, 2, 15),
+ datetime(2006, 11, 1): datetime(2006, 12, 1),
+ },
+ ),
+ (
+ SemiMonthBegin(-1),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 15),
+ datetime(2008, 6, 30): datetime(2008, 6, 15),
+ datetime(2008, 6, 14): datetime(2008, 6, 1),
+ datetime(2008, 12, 31): datetime(2008, 12, 15),
+ datetime(2006, 12, 29): datetime(2006, 12, 15),
+ datetime(2006, 12, 15): datetime(2006, 12, 1),
+ datetime(2007, 1, 1): datetime(2006, 12, 15),
+ },
+ ),
+ (
+ SemiMonthBegin(-1, day_of_month=4),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 4),
+ datetime(2007, 1, 4): datetime(2007, 1, 1),
+ datetime(2008, 6, 30): datetime(2008, 6, 4),
+ datetime(2008, 12, 31): datetime(2008, 12, 4),
+ datetime(2006, 12, 5): datetime(2006, 12, 4),
+ datetime(2006, 12, 30): datetime(2006, 12, 4),
+ datetime(2006, 12, 2): datetime(2006, 12, 1),
+ datetime(2007, 1, 1): datetime(2006, 12, 4),
+ },
+ ),
+ (
+ SemiMonthBegin(-2),
+ {
+ datetime(2007, 1, 1): datetime(2006, 12, 1),
+ datetime(2008, 6, 30): datetime(2008, 6, 1),
+ datetime(2008, 6, 14): datetime(2008, 5, 15),
+ datetime(2008, 12, 31): datetime(2008, 12, 1),
+ datetime(2006, 12, 29): datetime(2006, 12, 1),
+ datetime(2006, 12, 15): datetime(2006, 11, 15),
+ datetime(2007, 1, 1): datetime(2006, 12, 1),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ @pytest.mark.parametrize("case", offset_cases)
+ def test_apply_index(self, case):
+ offset, cases = case
+ s = DatetimeIndex(cases.keys())
+
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = offset + s
+
+ exp = DatetimeIndex(cases.values())
+ tm.assert_index_equal(result, exp)
+
+ on_offset_cases = [
+ (datetime(2007, 12, 1), True),
+ (datetime(2007, 12, 15), True),
+ (datetime(2007, 12, 14), False),
+ (datetime(2007, 12, 31), False),
+ (datetime(2008, 2, 15), True),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ dt, expected = case
+ assert_is_on_offset(SemiMonthBegin(), dt, expected)
+
+ @pytest.mark.parametrize("klass", [Series, DatetimeIndex])
+ def test_vectorized_offset_addition(self, klass):
+ s = klass(
+ [
+ Timestamp("2000-01-15 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-15", tz="US/Central"),
+ ],
+ name="a",
+ )
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = s + SemiMonthBegin()
+ result2 = SemiMonthBegin() + s
+
+ exp = klass(
+ [
+ Timestamp("2000-02-01 00:15:00", tz="US/Central"),
+ Timestamp("2000-03-01", tz="US/Central"),
+ ],
+ name="a",
+ )
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
+
+ s = klass(
+ [
+ Timestamp("2000-01-01 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-01", tz="US/Central"),
+ ],
+ name="a",
+ )
+ with tm.assert_produces_warning(None):
+ # GH#22535 check that we don't get a FutureWarning from adding
+ # an integer array to PeriodIndex
+ result = s + SemiMonthBegin()
+ result2 = SemiMonthBegin() + s
+
+ exp = klass(
+ [
+ Timestamp("2000-01-15 00:15:00", tz="US/Central"),
+ Timestamp("2000-02-15", tz="US/Central"),
+ ],
+ name="a",
+ )
+ tm.assert_equal(result, exp)
+ tm.assert_equal(result2, exp)
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 325a5311829dc..b65f8084e4bec 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -1,43 +1,30 @@
-from datetime import date, datetime, time as dt_time, timedelta
-from typing import Dict, List, Optional, Tuple, Type
+"""
+Tests of pandas.tseries.offsets
+"""
+from datetime import datetime, timedelta
+from typing import Dict, List, Tuple
-from dateutil.tz import tzlocal
import numpy as np
import pytest
-from pandas._libs.tslibs import (
- NaT,
- OutOfBoundsDatetime,
- Timestamp,
- conversion,
- timezones,
-)
+from pandas._libs.tslibs import NaT, Timestamp, conversion, timezones
import pandas._libs.tslibs.offsets as liboffsets
-from pandas._libs.tslibs.offsets import ApplyTypeError, _get_offset, _offset_map
+from pandas._libs.tslibs.offsets import _get_offset, _offset_map
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
-from pandas.compat import IS64
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import PerformanceWarning
-from pandas import DatetimeIndex, Series, Timedelta, date_range, read_pickle
+from pandas import DatetimeIndex
import pandas._testing as tm
+from pandas.tests.tseries.offsets.common import Base, WeekDay, assert_offset_equal
-from pandas.tseries.holiday import USFederalHolidayCalendar
import pandas.tseries.offsets as offsets
from pandas.tseries.offsets import (
FY5253,
BaseOffset,
BDay,
- BMonthBegin,
BMonthEnd,
- BQuarterBegin,
- BQuarterEnd,
BusinessHour,
- BYearBegin,
- BYearEnd,
- CBMonthBegin,
- CBMonthEnd,
- CDay,
CustomBusinessDay,
CustomBusinessHour,
CustomBusinessMonthBegin,
@@ -48,182 +35,15 @@
FY5253Quarter,
LastWeekOfMonth,
MonthBegin,
- MonthEnd,
Nano,
- QuarterBegin,
- QuarterEnd,
- SemiMonthBegin,
- SemiMonthEnd,
Tick,
Week,
WeekOfMonth,
- YearBegin,
- YearEnd,
)
-from .common import assert_is_on_offset, assert_offset_equal
-
-
-class WeekDay:
- # TODO: Remove: This is not used outside of tests
- MON = 0
- TUE = 1
- WED = 2
- THU = 3
- FRI = 4
- SAT = 5
- SUN = 6
-
-
-#####
-# DateOffset Tests
-#####
_ApplyCases = List[Tuple[BaseOffset, Dict[datetime, datetime]]]
-class Base:
- _offset: Optional[Type[DateOffset]] = None
- d = Timestamp(datetime(2008, 1, 2))
-
- timezones = [
- None,
- "UTC",
- "Asia/Tokyo",
- "US/Eastern",
- "dateutil/Asia/Tokyo",
- "dateutil/US/Pacific",
- ]
-
- def _get_offset(self, klass, value=1, normalize=False):
- # create instance from offset class
- if klass is FY5253:
- klass = klass(
- n=value,
- startingMonth=1,
- weekday=1,
- variation="last",
- normalize=normalize,
- )
- elif klass is FY5253Quarter:
- klass = klass(
- n=value,
- startingMonth=1,
- weekday=1,
- qtr_with_extra_week=1,
- variation="last",
- normalize=normalize,
- )
- elif klass is LastWeekOfMonth:
- klass = klass(n=value, weekday=5, normalize=normalize)
- elif klass is WeekOfMonth:
- klass = klass(n=value, week=1, weekday=5, normalize=normalize)
- elif klass is Week:
- klass = klass(n=value, weekday=5, normalize=normalize)
- elif klass is DateOffset:
- klass = klass(days=value, normalize=normalize)
- else:
- klass = klass(value, normalize=normalize)
- return klass
-
- def test_apply_out_of_range(self, tz_naive_fixture):
- tz = tz_naive_fixture
- if self._offset is None:
- return
- if isinstance(tz, tzlocal) and not IS64:
- pytest.xfail(reason="OverflowError inside tzlocal past 2038")
-
- # try to create an out-of-bounds result timestamp; if we can't create
- # the offset skip
- try:
- if self._offset in (BusinessHour, CustomBusinessHour):
- # Using 10000 in BusinessHour fails in tz check because of DST
- # difference
- offset = self._get_offset(self._offset, value=100000)
- else:
- offset = self._get_offset(self._offset, value=10000)
-
- result = Timestamp("20080101") + offset
- assert isinstance(result, datetime)
- assert result.tzinfo is None
-
- # Check tz is preserved
- t = Timestamp("20080101", tz=tz)
- result = t + offset
- assert isinstance(result, datetime)
- assert t.tzinfo == result.tzinfo
-
- except OutOfBoundsDatetime:
- pass
- except (ValueError, KeyError):
- # we are creating an invalid offset
- # so ignore
- pass
-
- def test_offsets_compare_equal(self):
- # root cause of GH#456: __ne__ was not implemented
- if self._offset is None:
- return
- offset1 = self._offset()
- offset2 = self._offset()
- assert not offset1 != offset2
- assert offset1 == offset2
-
- def test_rsub(self):
- if self._offset is None or not hasattr(self, "offset2"):
- # i.e. skip for TestCommon and YQM subclasses that do not have
- # offset2 attr
- return
- assert self.d - self.offset2 == (-self.offset2).apply(self.d)
-
- def test_radd(self):
- if self._offset is None or not hasattr(self, "offset2"):
- # i.e. skip for TestCommon and YQM subclasses that do not have
- # offset2 attr
- return
- assert self.d + self.offset2 == self.offset2 + self.d
-
- def test_sub(self):
- if self._offset is None or not hasattr(self, "offset2"):
- # i.e. skip for TestCommon and YQM subclasses that do not have
- # offset2 attr
- return
- off = self.offset2
- msg = "Cannot subtract datetime from offset"
- with pytest.raises(TypeError, match=msg):
- off - self.d
-
- assert 2 * off - off == off
- assert self.d - self.offset2 == self.d + self._offset(-2)
- assert self.d - self.offset2 == self.d - (2 * off - off)
-
- def testMult1(self):
- if self._offset is None or not hasattr(self, "offset1"):
- # i.e. skip for TestCommon and YQM subclasses that do not have
- # offset1 attr
- return
- assert self.d + 10 * self.offset1 == self.d + self._offset(10)
- assert self.d + 5 * self.offset1 == self.d + self._offset(5)
-
- def testMult2(self):
- if self._offset is None:
- return
- assert self.d + (-5 * self._offset(-10)) == self.d + self._offset(50)
- assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
-
- def test_compare_str(self):
- # GH#23524
- # comparing to strings that cannot be cast to DateOffsets should
- # not raise for __eq__ or __ne__
- if self._offset is None:
- return
- off = self._get_offset(self._offset)
-
- assert not off == "infer"
- assert off != "foo"
- # Note: inequalities are only implemented for Tick subclasses;
- # tests for this are in test_ticks
-
-
class TestCommon(Base):
# exected value created by Base._get_offset
# are applied to 2011/01/01 09:00 (Saturday)
@@ -724,3327 +544,6 @@ def test_eq(self):
assert offset1 != offset2
-class TestBusinessDay(Base):
- _offset = BDay
-
- def setup_method(self, method):
- self.d = datetime(2008, 1, 1)
-
- self.offset = BDay()
- self.offset1 = self.offset
- self.offset2 = BDay(2)
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset) == "<BusinessDay>"
- assert repr(self.offset2) == "<2 * BusinessDays>"
-
- expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
- assert repr(self.offset + timedelta(1)) == expected
-
- def test_with_offset(self):
- offset = self.offset + timedelta(hours=2)
-
- assert (self.d + offset) == datetime(2008, 1, 2, 2)
-
- def test_with_offset_index(self):
- dti = DatetimeIndex([self.d])
- result = dti + (self.offset + timedelta(hours=2))
-
- expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
- tm.assert_index_equal(result, expected)
-
- def test_eq(self):
- assert self.offset2 == self.offset2
-
- def test_mul(self):
- pass
-
- def test_hash(self):
- assert hash(self.offset2) == hash(self.offset2)
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset2(self.d) == datetime(2008, 1, 3)
-
- def testRollback1(self):
- assert BDay(10).rollback(self.d) == self.d
-
- def testRollback2(self):
- assert BDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
-
- def testRollforward1(self):
- assert BDay(10).rollforward(self.d) == self.d
-
- def testRollforward2(self):
- assert BDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
-
- def test_roll_date_object(self):
- offset = BDay()
-
- dt = date(2012, 9, 15)
-
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 14)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 17)
-
- offset = offsets.Day()
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 15)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 15)
-
- def test_is_on_offset(self):
- tests = [
- (BDay(), datetime(2008, 1, 1), True),
- (BDay(), datetime(2008, 1, 5), False),
- ]
-
- for offset, d, expected in tests:
- assert_is_on_offset(offset, d, expected)
-
- apply_cases: _ApplyCases = []
- apply_cases.append(
- (
- BDay(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 2),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 8),
- },
- )
- )
-
- apply_cases.append(
- (
- 2 * BDay(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 3),
- datetime(2008, 1, 4): datetime(2008, 1, 8),
- datetime(2008, 1, 5): datetime(2008, 1, 8),
- datetime(2008, 1, 6): datetime(2008, 1, 8),
- datetime(2008, 1, 7): datetime(2008, 1, 9),
- },
- )
- )
-
- apply_cases.append(
- (
- -BDay(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 31),
- datetime(2008, 1, 4): datetime(2008, 1, 3),
- datetime(2008, 1, 5): datetime(2008, 1, 4),
- datetime(2008, 1, 6): datetime(2008, 1, 4),
- datetime(2008, 1, 7): datetime(2008, 1, 4),
- datetime(2008, 1, 8): datetime(2008, 1, 7),
- },
- )
- )
-
- apply_cases.append(
- (
- -2 * BDay(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 28),
- datetime(2008, 1, 4): datetime(2008, 1, 2),
- datetime(2008, 1, 5): datetime(2008, 1, 3),
- datetime(2008, 1, 6): datetime(2008, 1, 3),
- datetime(2008, 1, 7): datetime(2008, 1, 3),
- datetime(2008, 1, 8): datetime(2008, 1, 4),
- datetime(2008, 1, 9): datetime(2008, 1, 7),
- },
- )
- )
-
- apply_cases.append(
- (
- BDay(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 4): datetime(2008, 1, 4),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 7),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_cases)
- def test_apply(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_apply_large_n(self):
- dt = datetime(2012, 10, 23)
-
- result = dt + BDay(10)
- assert result == datetime(2012, 11, 6)
-
- result = dt + BDay(100) - BDay(100)
- assert result == dt
-
- off = BDay() * 6
- rs = datetime(2012, 1, 1) - off
- xp = datetime(2011, 12, 23)
- assert rs == xp
-
- st = datetime(2011, 12, 18)
- rs = st + off
- xp = datetime(2011, 12, 26)
- assert rs == xp
-
- off = BDay() * 10
- rs = datetime(2014, 1, 5) + off # see #5890
- xp = datetime(2014, 1, 17)
- assert rs == xp
-
- def test_apply_corner(self):
- msg = "Only know how to combine business day with datetime or timedelta"
- with pytest.raises(ApplyTypeError, match=msg):
- BDay().apply(BMonthEnd())
-
-
-class TestBusinessHour(Base):
- _offset = BusinessHour
-
- def setup_method(self, method):
- self.d = datetime(2014, 7, 1, 10, 00)
-
- self.offset1 = BusinessHour()
- self.offset2 = BusinessHour(n=3)
-
- self.offset3 = BusinessHour(n=-1)
- self.offset4 = BusinessHour(n=-4)
-
- from datetime import time as dt_time
-
- self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
- self.offset6 = BusinessHour(start="20:00", end="05:00")
- self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30), end=dt_time(6, 30))
- self.offset8 = BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"])
- self.offset9 = BusinessHour(
- n=3, start=["09:00", "22:00"], end=["13:00", "03:00"]
- )
- self.offset10 = BusinessHour(
- n=-1, start=["23:00", "13:00"], end=["02:00", "17:00"]
- )
-
- @pytest.mark.parametrize(
- "start,end,match",
- [
- (
- dt_time(11, 0, 5),
- "17:00",
- "time data must be specified only with hour and minute",
- ),
- ("AAA", "17:00", "time data must match '%H:%M' format"),
- ("14:00:05", "17:00", "time data must match '%H:%M' format"),
- ([], "17:00", "Must include at least 1 start time"),
- ("09:00", [], "Must include at least 1 end time"),
- (
- ["09:00", "11:00"],
- "17:00",
- "number of starting time and ending time must be the same",
- ),
- (
- ["09:00", "11:00"],
- ["10:00"],
- "number of starting time and ending time must be the same",
- ),
- (
- ["09:00", "11:00"],
- ["12:00", "20:00"],
- r"invalid starting and ending time\(s\): opening hours should not "
- "touch or overlap with one another",
- ),
- (
- ["12:00", "20:00"],
- ["09:00", "11:00"],
- r"invalid starting and ending time\(s\): opening hours should not "
- "touch or overlap with one another",
- ),
- ],
- )
- def test_constructor_errors(self, start, end, match):
- with pytest.raises(ValueError, match=match):
- BusinessHour(start=start, end=end)
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset1) == "<BusinessHour: BH=09:00-17:00>"
- assert repr(self.offset2) == "<3 * BusinessHours: BH=09:00-17:00>"
- assert repr(self.offset3) == "<-1 * BusinessHour: BH=09:00-17:00>"
- assert repr(self.offset4) == "<-4 * BusinessHours: BH=09:00-17:00>"
-
- assert repr(self.offset5) == "<BusinessHour: BH=11:00-14:30>"
- assert repr(self.offset6) == "<BusinessHour: BH=20:00-05:00>"
- assert repr(self.offset7) == "<-2 * BusinessHours: BH=21:30-06:30>"
- assert repr(self.offset8) == "<BusinessHour: BH=09:00-12:00,13:00-17:00>"
- assert repr(self.offset9) == "<3 * BusinessHours: BH=09:00-13:00,22:00-03:00>"
- assert repr(self.offset10) == "<-1 * BusinessHour: BH=13:00-17:00,23:00-02:00>"
-
- def test_with_offset(self):
- expected = Timestamp("2014-07-01 13:00")
-
- assert self.d + BusinessHour() * 3 == expected
- assert self.d + BusinessHour(n=3) == expected
-
- @pytest.mark.parametrize(
- "offset_name",
- ["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],
- )
- def test_eq_attribute(self, offset_name):
- offset = getattr(self, offset_name)
- assert offset == offset
-
- @pytest.mark.parametrize(
- "offset1,offset2",
- [
- (BusinessHour(start="09:00"), BusinessHour()),
- (
- BusinessHour(start=["23:00", "13:00"], end=["12:00", "17:00"]),
- BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),
- ),
- ],
- )
- def test_eq(self, offset1, offset2):
- assert offset1 == offset2
-
- @pytest.mark.parametrize(
- "offset1,offset2",
- [
- (BusinessHour(), BusinessHour(-1)),
- (BusinessHour(start="09:00"), BusinessHour(start="09:01")),
- (
- BusinessHour(start="09:00", end="17:00"),
- BusinessHour(start="17:00", end="09:01"),
- ),
- (
- BusinessHour(start=["13:00", "23:00"], end=["18:00", "07:00"]),
- BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),
- ),
- ],
- )
- def test_neq(self, offset1, offset2):
- assert offset1 != offset2
-
- @pytest.mark.parametrize(
- "offset_name",
- ["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],
- )
- def test_hash(self, offset_name):
- offset = getattr(self, offset_name)
- assert offset == offset
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
- assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
- assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
- assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
- assert self.offset8(self.d) == datetime(2014, 7, 1, 11)
- assert self.offset9(self.d) == datetime(2014, 7, 1, 22)
- assert self.offset10(self.d) == datetime(2014, 7, 1, 1)
-
- def test_sub(self):
- # we have to override test_sub here because self.offset2 is not
- # defined as self._offset(2)
- off = self.offset2
- msg = "Cannot subtract datetime from offset"
- with pytest.raises(TypeError, match=msg):
- off - self.d
- assert 2 * off - off == off
-
- assert self.d - self.offset2 == self.d + self._offset(-3)
-
- def testRollback1(self):
- assert self.offset1.rollback(self.d) == self.d
- assert self.offset2.rollback(self.d) == self.d
- assert self.offset3.rollback(self.d) == self.d
- assert self.offset4.rollback(self.d) == self.d
- assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)
- assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)
- assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)
- assert self.offset8.rollback(self.d) == self.d
- assert self.offset9.rollback(self.d) == self.d
- assert self.offset10.rollback(self.d) == datetime(2014, 7, 1, 2)
-
- d = datetime(2014, 7, 1, 0)
- assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)
- assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)
- assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)
- assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)
- assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)
- assert self.offset6.rollback(d) == d
- assert self.offset7.rollback(d) == d
- assert self.offset8.rollback(d) == datetime(2014, 6, 30, 17)
- assert self.offset9.rollback(d) == d
- assert self.offset10.rollback(d) == d
-
- assert self._offset(5).rollback(self.d) == self.d
-
- def testRollback2(self):
- assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(
- 2014, 7, 4, 17, 0
- )
-
- def testRollforward1(self):
- assert self.offset1.rollforward(self.d) == self.d
- assert self.offset2.rollforward(self.d) == self.d
- assert self.offset3.rollforward(self.d) == self.d
- assert self.offset4.rollforward(self.d) == self.d
- assert self.offset5.rollforward(self.d) == datetime(2014, 7, 1, 11, 0)
- assert self.offset6.rollforward(self.d) == datetime(2014, 7, 1, 20, 0)
- assert self.offset7.rollforward(self.d) == datetime(2014, 7, 1, 21, 30)
- assert self.offset8.rollforward(self.d) == self.d
- assert self.offset9.rollforward(self.d) == self.d
- assert self.offset10.rollforward(self.d) == datetime(2014, 7, 1, 13)
-
- d = datetime(2014, 7, 1, 0)
- assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)
- assert self.offset6.rollforward(d) == d
- assert self.offset7.rollforward(d) == d
- assert self.offset8.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset9.rollforward(d) == d
- assert self.offset10.rollforward(d) == d
-
- assert self._offset(5).rollforward(self.d) == self.d
-
- def testRollforward2(self):
- assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(
- 2014, 7, 7, 9
- )
-
- def test_roll_date_object(self):
- offset = BusinessHour()
-
- dt = datetime(2014, 7, 6, 15, 0)
-
- result = offset.rollback(dt)
- assert result == datetime(2014, 7, 4, 17)
-
- result = offset.rollforward(dt)
- assert result == datetime(2014, 7, 7, 9)
-
- normalize_cases = []
- normalize_cases.append(
- (
- BusinessHour(normalize=True),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
- datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
- datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 7),
- },
- )
- )
-
- normalize_cases.append(
- (
- BusinessHour(-1, normalize=True),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
- datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
- datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
- datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 4),
- },
- )
- )
-
- normalize_cases.append(
- (
- BusinessHour(1, normalize=True, start="17:00", end="04:00"),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
- datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
- datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
- datetime(2014, 7, 7, 17): datetime(2014, 7, 7),
- },
- )
- )
-
- @pytest.mark.parametrize("case", normalize_cases)
- def test_normalize(self, case):
- offset, cases = case
- for dt, expected in cases.items():
- assert offset.apply(dt) == expected
-
- on_offset_cases = []
- on_offset_cases.append(
- (
- BusinessHour(),
- {
- datetime(2014, 7, 1, 9): True,
- datetime(2014, 7, 1, 8, 59): False,
- datetime(2014, 7, 1, 8): False,
- datetime(2014, 7, 1, 17): True,
- datetime(2014, 7, 1, 17, 1): False,
- datetime(2014, 7, 1, 18): False,
- datetime(2014, 7, 5, 9): False,
- datetime(2014, 7, 6, 12): False,
- },
- )
- )
-
- on_offset_cases.append(
- (
- BusinessHour(start="10:00", end="15:00"),
- {
- datetime(2014, 7, 1, 9): False,
- datetime(2014, 7, 1, 10): True,
- datetime(2014, 7, 1, 15): True,
- datetime(2014, 7, 1, 15, 1): False,
- datetime(2014, 7, 5, 12): False,
- datetime(2014, 7, 6, 12): False,
- },
- )
- )
-
- on_offset_cases.append(
- (
- BusinessHour(start="19:00", end="05:00"),
- {
- datetime(2014, 7, 1, 9, 0): False,
- datetime(2014, 7, 1, 10, 0): False,
- datetime(2014, 7, 1, 15): False,
- datetime(2014, 7, 1, 15, 1): False,
- datetime(2014, 7, 5, 12, 0): False,
- datetime(2014, 7, 6, 12, 0): False,
- datetime(2014, 7, 1, 19, 0): True,
- datetime(2014, 7, 2, 0, 0): True,
- datetime(2014, 7, 4, 23): True,
- datetime(2014, 7, 5, 1): True,
- datetime(2014, 7, 5, 5, 0): True,
- datetime(2014, 7, 6, 23, 0): False,
- datetime(2014, 7, 7, 3, 0): False,
- },
- )
- )
-
- on_offset_cases.append(
- (
- BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"]),
- {
- datetime(2014, 7, 1, 9): True,
- datetime(2014, 7, 1, 8, 59): False,
- datetime(2014, 7, 1, 8): False,
- datetime(2014, 7, 1, 17): True,
- datetime(2014, 7, 1, 17, 1): False,
- datetime(2014, 7, 1, 18): False,
- datetime(2014, 7, 5, 9): False,
- datetime(2014, 7, 6, 12): False,
- datetime(2014, 7, 1, 12, 30): False,
- },
- )
- )
-
- on_offset_cases.append(
- (
- BusinessHour(start=["19:00", "23:00"], end=["21:00", "05:00"]),
- {
- datetime(2014, 7, 1, 9, 0): False,
- datetime(2014, 7, 1, 10, 0): False,
- datetime(2014, 7, 1, 15): False,
- datetime(2014, 7, 1, 15, 1): False,
- datetime(2014, 7, 5, 12, 0): False,
- datetime(2014, 7, 6, 12, 0): False,
- datetime(2014, 7, 1, 19, 0): True,
- datetime(2014, 7, 2, 0, 0): True,
- datetime(2014, 7, 4, 23): True,
- datetime(2014, 7, 5, 1): True,
- datetime(2014, 7, 5, 5, 0): True,
- datetime(2014, 7, 6, 23, 0): False,
- datetime(2014, 7, 7, 3, 0): False,
- datetime(2014, 7, 4, 22): False,
- },
- )
- )
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- offset, cases = case
- for dt, expected in cases.items():
- assert offset.is_on_offset(dt) == expected
-
- opening_time_cases = []
- # opening time should be affected by sign of n, not by n's value and
- # end
- opening_time_cases.append(
- (
- [
- BusinessHour(),
- BusinessHour(n=2),
- BusinessHour(n=4),
- BusinessHour(end="10:00"),
- BusinessHour(n=2, end="4:00"),
- BusinessHour(n=4, end="15:00"),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 9),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 9),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 9),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 9),
- ),
- # if timestamp is on opening time, next opening time is
- # as it is
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 2, 10): (
- datetime(2014, 7, 3, 9),
- datetime(2014, 7, 2, 9),
- ),
- # 2014-07-05 is saturday
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 9),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 9),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 9),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 9),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 9),
- ),
- datetime(2014, 7, 7, 9, 1): (
- datetime(2014, 7, 8, 9),
- datetime(2014, 7, 7, 9),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(start="11:15"),
- BusinessHour(n=2, start="11:15"),
- BusinessHour(n=3, start="11:15"),
- BusinessHour(start="11:15", end="10:00"),
- BusinessHour(n=2, start="11:15", end="4:00"),
- BusinessHour(n=3, start="11:15", end="15:00"),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 1, 11, 15),
- datetime(2014, 6, 30, 11, 15),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15),
- ),
- datetime(2014, 7, 2, 10): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15),
- ),
- datetime(2014, 7, 2, 11, 15): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 2, 11, 15),
- ),
- datetime(2014, 7, 2, 11, 15, 1): (
- datetime(2014, 7, 3, 11, 15),
- datetime(2014, 7, 2, 11, 15),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 4, 11, 15),
- datetime(2014, 7, 3, 11, 15),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15),
- ),
- datetime(2014, 7, 7, 9, 1): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(-1),
- BusinessHour(n=-2),
- BusinessHour(n=-4),
- BusinessHour(n=-1, end="10:00"),
- BusinessHour(n=-2, end="4:00"),
- BusinessHour(n=-4, end="15:00"),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 2, 9),
- ),
- datetime(2014, 7, 2, 10): (
- datetime(2014, 7, 2, 9),
- datetime(2014, 7, 3, 9),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 7, 9): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 7, 9),
- ),
- datetime(2014, 7, 7, 9, 1): (
- datetime(2014, 7, 7, 9),
- datetime(2014, 7, 8, 9),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(start="17:00", end="05:00"),
- BusinessHour(n=3, start="17:00", end="03:00"),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 6, 30, 17),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 4, 17): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 3, 17),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 7, 17, 1): (
- datetime(2014, 7, 8, 17),
- datetime(2014, 7, 7, 17),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(-1, start="17:00", end="05:00"),
- BusinessHour(n=-2, start="17:00", end="03:00"),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 6, 30, 17),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 2, 16, 59): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 3, 17),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17),
- ),
- datetime(2014, 7, 7, 18): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 8, 17),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(start=["11:15", "15:00"], end=["13:00", "20:00"]),
- BusinessHour(n=3, start=["11:15", "15:00"], end=["12:00", "20:00"]),
- BusinessHour(start=["11:15", "15:00"], end=["13:00", "17:00"]),
- BusinessHour(n=2, start=["11:15", "15:00"], end=["12:00", "03:00"]),
- BusinessHour(n=3, start=["11:15", "15:00"], end=["13:00", "16:00"]),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 1, 11, 15),
- datetime(2014, 6, 30, 15),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 15),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 15),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 15),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 15),
- ),
- datetime(2014, 7, 2, 10): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 15),
- ),
- datetime(2014, 7, 2, 11, 15): (
- datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 2, 11, 15),
- ),
- datetime(2014, 7, 2, 11, 15, 1): (
- datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 11, 15),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 15),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 4, 11, 15),
- datetime(2014, 7, 3, 15),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 15),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 15),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 15),
- ),
- datetime(2014, 7, 7, 9, 1): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 15),
- ),
- datetime(2014, 7, 7, 12): (
- datetime(2014, 7, 7, 15),
- datetime(2014, 7, 7, 11, 15),
- ),
- },
- )
- )
-
- opening_time_cases.append(
- (
- [
- BusinessHour(n=-1, start=["17:00", "08:00"], end=["05:00", "10:00"]),
- BusinessHour(n=-2, start=["08:00", "17:00"], end=["10:00", "03:00"]),
- ],
- {
- datetime(2014, 7, 1, 11): (
- datetime(2014, 7, 1, 8),
- datetime(2014, 7, 1, 17),
- ),
- datetime(2014, 7, 1, 18): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 8),
- ),
- datetime(2014, 7, 1, 23): (
- datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 8),
- ),
- datetime(2014, 7, 2, 8): (
- datetime(2014, 7, 2, 8),
- datetime(2014, 7, 2, 8),
- ),
- datetime(2014, 7, 2, 9): (
- datetime(2014, 7, 2, 8),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 2, 16, 59): (
- datetime(2014, 7, 2, 8),
- datetime(2014, 7, 2, 17),
- ),
- datetime(2014, 7, 5, 10): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 8),
- ),
- datetime(2014, 7, 4, 10): (
- datetime(2014, 7, 4, 8),
- datetime(2014, 7, 4, 17),
- ),
- datetime(2014, 7, 4, 23): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 8),
- ),
- datetime(2014, 7, 6, 10): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 8),
- ),
- datetime(2014, 7, 7, 5): (
- datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 8),
- ),
- datetime(2014, 7, 7, 18): (
- datetime(2014, 7, 7, 17),
- datetime(2014, 7, 8, 8),
- ),
- },
- )
- )
-
- @pytest.mark.parametrize("case", opening_time_cases)
- def test_opening_time(self, case):
- _offsets, cases = case
- for offset in _offsets:
- for dt, (exp_next, exp_prev) in cases.items():
- assert offset._next_opening_time(dt) == exp_next
- assert offset._prev_opening_time(dt) == exp_prev
-
- apply_cases = []
- apply_cases.append(
- (
- BusinessHour(),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
- # out of business hours
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
- # saturday
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(4),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(-1),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
- datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
- datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
- # out of business hours
- datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
- # saturday
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
- datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
- datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(-4),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
- datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
- datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(start="13:00", end="16:00"),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
- datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=2, start="13:00", end="16:00"),
- {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
- datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
- datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-1, start="13:00", end="16:00"),
- {
- datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
- datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-3, start="10:00", end="16:00"),
- {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
- datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(start="19:00", end="05:00"),
- {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
- datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
- datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
- datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
- datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
- datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-1, start="19:00", end="05:00"),
- {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
- datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
- datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
- datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
- datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
- datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30),
- },
- )
- )
-
- # long business hours (see gh-26381)
- apply_cases.append(
- (
- BusinessHour(n=4, start="00:00", end="23:00"),
- {
- datetime(2014, 7, 3, 22): datetime(2014, 7, 4, 3),
- datetime(2014, 7, 4, 22): datetime(2014, 7, 7, 3),
- datetime(2014, 7, 3, 22, 30): datetime(2014, 7, 4, 3, 30),
- datetime(2014, 7, 3, 22, 20): datetime(2014, 7, 4, 3, 20),
- datetime(2014, 7, 4, 22, 30, 30): datetime(2014, 7, 7, 3, 30, 30),
- datetime(2014, 7, 4, 22, 30, 20): datetime(2014, 7, 7, 3, 30, 20),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-4, start="00:00", end="23:00"),
- {
- datetime(2014, 7, 4, 3): datetime(2014, 7, 3, 22),
- datetime(2014, 7, 7, 3): datetime(2014, 7, 4, 22),
- datetime(2014, 7, 4, 3, 30): datetime(2014, 7, 3, 22, 30),
- datetime(2014, 7, 4, 3, 20): datetime(2014, 7, 3, 22, 20),
- datetime(2014, 7, 7, 3, 30, 30): datetime(2014, 7, 4, 22, 30, 30),
- datetime(2014, 7, 7, 3, 30, 20): datetime(2014, 7, 4, 22, 30, 20),
- },
- )
- )
-
- # multiple business hours
- apply_cases.append(
- (
- BusinessHour(start=["09:00", "14:00"], end=["12:00", "18:00"]),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 17),
- datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 17, 30, 15),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 9),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 14),
- # out of business hours
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
- # saturday
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 9),
- datetime(2014, 7, 4, 17, 30): datetime(2014, 7, 7, 9, 30),
- datetime(2014, 7, 4, 17, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=4, start=["09:00", "14:00"], end=["12:00", "18:00"]),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 17),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 11),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 14),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 17),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 11, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 11, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-4, start=["09:00", "14:00"], end=["12:00", "18:00"]),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
- datetime(2014, 7, 1, 15): datetime(2014, 6, 30, 18),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 10),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 11),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 12),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 12),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 12),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 12),
- datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 12),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 14, 30),
- datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 14, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- BusinessHour(n=-1, start=["19:00", "03:00"], end=["01:00", "05:00"]),
- {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 4): datetime(2014, 7, 2, 1),
- datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
- datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
- datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
- datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 0),
- datetime(2014, 7, 7, 3, 30): datetime(2014, 7, 5, 0, 30),
- datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 7, 4, 30),
- datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 7, 4, 30, 30),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_cases)
- def test_apply(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- apply_large_n_cases = []
- # A week later
- apply_large_n_cases.append(
- (
- BusinessHour(40),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
- datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
- datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30),
- },
- )
- )
-
- # 3 days and 1 hour before
- apply_large_n_cases.append(
- (
- BusinessHour(-25),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
- datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
- datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
- datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
- datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
- datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
- datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),
- },
- )
- )
-
- # 5 days and 3 hours later
- apply_large_n_cases.append(
- (
- BusinessHour(28, start="21:00", end="02:00"),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
- datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
- datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
- datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
- datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
- datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
- datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
- datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
- datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
- datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
- datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),
- },
- )
- )
-
- # large n for multiple opening hours (3 days and 1 hour before)
- apply_large_n_cases.append(
- (
- BusinessHour(n=-25, start=["09:00", "14:00"], end=["12:00", "19:00"]),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
- datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 11),
- datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 18),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 19),
- datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
- datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 18),
- datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 18),
- datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 18),
- datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 18),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 18),
- datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 18),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 18, 30),
- datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),
- },
- )
- )
-
- # 5 days and 3 hours later
- apply_large_n_cases.append(
- (
- BusinessHour(28, start=["21:00", "03:00"], end=["01:00", "04:00"]),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
- datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 3),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
- datetime(2014, 7, 2, 2): datetime(2014, 7, 9, 23),
- datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
- datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
- datetime(2014, 7, 4, 2): datetime(2014, 7, 11, 23),
- datetime(2014, 7, 4, 3): datetime(2014, 7, 11, 23),
- datetime(2014, 7, 4, 21): datetime(2014, 7, 12, 0),
- datetime(2014, 7, 5, 0): datetime(2014, 7, 14, 22),
- datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 23),
- datetime(2014, 7, 6, 18): datetime(2014, 7, 14, 23),
- datetime(2014, 7, 7, 1): datetime(2014, 7, 14, 23),
- datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_large_n_cases)
- def test_apply_large_n(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_apply_nanoseconds(self):
- tests = []
-
- tests.append(
- (
- BusinessHour(),
- {
- Timestamp("2014-07-04 15:00")
- + Nano(5): Timestamp("2014-07-04 16:00")
- + Nano(5),
- Timestamp("2014-07-04 16:00")
- + Nano(5): Timestamp("2014-07-07 09:00")
- + Nano(5),
- Timestamp("2014-07-04 16:00")
- - Nano(5): Timestamp("2014-07-04 17:00")
- - Nano(5),
- },
- )
- )
-
- tests.append(
- (
- BusinessHour(-1),
- {
- Timestamp("2014-07-04 15:00")
- + Nano(5): Timestamp("2014-07-04 14:00")
- + Nano(5),
- Timestamp("2014-07-04 10:00")
- + Nano(5): Timestamp("2014-07-04 09:00")
- + Nano(5),
- Timestamp("2014-07-04 10:00")
- - Nano(5): Timestamp("2014-07-03 17:00")
- - Nano(5),
- },
- )
- )
-
- for offset, cases in tests:
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_datetimeindex(self):
- idx1 = date_range(start="2014-07-04 15:00", end="2014-07-08 10:00", freq="BH")
- idx2 = date_range(start="2014-07-04 15:00", periods=12, freq="BH")
- idx3 = date_range(end="2014-07-08 10:00", periods=12, freq="BH")
- expected = DatetimeIndex(
- [
- "2014-07-04 15:00",
- "2014-07-04 16:00",
- "2014-07-07 09:00",
- "2014-07-07 10:00",
- "2014-07-07 11:00",
- "2014-07-07 12:00",
- "2014-07-07 13:00",
- "2014-07-07 14:00",
- "2014-07-07 15:00",
- "2014-07-07 16:00",
- "2014-07-08 09:00",
- "2014-07-08 10:00",
- ],
- freq="BH",
- )
- for idx in [idx1, idx2, idx3]:
- tm.assert_index_equal(idx, expected)
-
- idx1 = date_range(start="2014-07-04 15:45", end="2014-07-08 10:45", freq="BH")
- idx2 = date_range(start="2014-07-04 15:45", periods=12, freq="BH")
- idx3 = date_range(end="2014-07-08 10:45", periods=12, freq="BH")
-
- expected = DatetimeIndex(
- [
- "2014-07-04 15:45",
- "2014-07-04 16:45",
- "2014-07-07 09:45",
- "2014-07-07 10:45",
- "2014-07-07 11:45",
- "2014-07-07 12:45",
- "2014-07-07 13:45",
- "2014-07-07 14:45",
- "2014-07-07 15:45",
- "2014-07-07 16:45",
- "2014-07-08 09:45",
- "2014-07-08 10:45",
- ],
- freq="BH",
- )
- expected = idx1
- for idx in [idx1, idx2, idx3]:
- tm.assert_index_equal(idx, expected)
-
- def test_bday_ignores_timedeltas(self):
- idx = date_range("2010/02/01", "2010/02/10", freq="12H")
- t1 = idx + BDay(offset=Timedelta(3, unit="H"))
-
- expected = DatetimeIndex(
- [
- "2010-02-02 03:00:00",
- "2010-02-02 15:00:00",
- "2010-02-03 03:00:00",
- "2010-02-03 15:00:00",
- "2010-02-04 03:00:00",
- "2010-02-04 15:00:00",
- "2010-02-05 03:00:00",
- "2010-02-05 15:00:00",
- "2010-02-08 03:00:00",
- "2010-02-08 15:00:00",
- "2010-02-08 03:00:00",
- "2010-02-08 15:00:00",
- "2010-02-08 03:00:00",
- "2010-02-08 15:00:00",
- "2010-02-09 03:00:00",
- "2010-02-09 15:00:00",
- "2010-02-10 03:00:00",
- "2010-02-10 15:00:00",
- "2010-02-11 03:00:00",
- ],
- freq=None,
- )
- tm.assert_index_equal(t1, expected)
-
-
-class TestCustomBusinessHour(Base):
- _offset = CustomBusinessHour
- holidays = ["2014-06-27", datetime(2014, 6, 30), np.datetime64("2014-07-02")]
-
- def setup_method(self, method):
- # 2014 Calendar to check custom holidays
- # Sun Mon Tue Wed Thu Fri Sat
- # 6/22 23 24 25 26 27 28
- # 29 30 7/1 2 3 4 5
- # 6 7 8 9 10 11 12
- self.d = datetime(2014, 7, 1, 10, 00)
- self.offset1 = CustomBusinessHour(weekmask="Tue Wed Thu Fri")
-
- self.offset2 = CustomBusinessHour(holidays=self.holidays)
-
- def test_constructor_errors(self):
- from datetime import time as dt_time
-
- msg = "time data must be specified only with hour and minute"
- with pytest.raises(ValueError, match=msg):
- CustomBusinessHour(start=dt_time(11, 0, 5))
- msg = "time data must match '%H:%M' format"
- with pytest.raises(ValueError, match=msg):
- CustomBusinessHour(start="AAA")
- msg = "time data must match '%H:%M' format"
- with pytest.raises(ValueError, match=msg):
- CustomBusinessHour(start="14:00:05")
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset1) == "<CustomBusinessHour: CBH=09:00-17:00>"
- assert repr(self.offset2) == "<CustomBusinessHour: CBH=09:00-17:00>"
-
- def test_with_offset(self):
- expected = Timestamp("2014-07-01 13:00")
-
- assert self.d + CustomBusinessHour() * 3 == expected
- assert self.d + CustomBusinessHour(n=3) == expected
-
- def test_eq(self):
- for offset in [self.offset1, self.offset2]:
- assert offset == offset
-
- assert CustomBusinessHour() != CustomBusinessHour(-1)
- assert CustomBusinessHour(start="09:00") == CustomBusinessHour()
- assert CustomBusinessHour(start="09:00") != CustomBusinessHour(start="09:01")
- assert CustomBusinessHour(start="09:00", end="17:00") != CustomBusinessHour(
- start="17:00", end="09:01"
- )
-
- assert CustomBusinessHour(weekmask="Tue Wed Thu Fri") != CustomBusinessHour(
- weekmask="Mon Tue Wed Thu Fri"
- )
- assert CustomBusinessHour(holidays=["2014-06-27"]) != CustomBusinessHour(
- holidays=["2014-06-28"]
- )
-
- def test_sub(self):
- # override the Base.test_sub implementation because self.offset2 is
- # defined differently in this class than the test expects
- pass
-
- def test_hash(self):
- assert hash(self.offset1) == hash(self.offset1)
- assert hash(self.offset2) == hash(self.offset2)
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
- assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
-
- def testRollback1(self):
- assert self.offset1.rollback(self.d) == self.d
- assert self.offset2.rollback(self.d) == self.d
-
- d = datetime(2014, 7, 1, 0)
-
- # 2014/07/01 is Tuesday, 06/30 is Monday(holiday)
- assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)
-
- # 2014/6/30 and 2014/6/27 are holidays
- assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)
-
- def testRollback2(self):
- assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(
- 2014, 7, 4, 17, 0
- )
-
- def testRollforward1(self):
- assert self.offset1.rollforward(self.d) == self.d
- assert self.offset2.rollforward(self.d) == self.d
-
- d = datetime(2014, 7, 1, 0)
- assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
- assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
-
- def testRollforward2(self):
- assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(
- 2014, 7, 7, 9
- )
-
- def test_roll_date_object(self):
- offset = BusinessHour()
-
- dt = datetime(2014, 7, 6, 15, 0)
-
- result = offset.rollback(dt)
- assert result == datetime(2014, 7, 4, 17)
-
- result = offset.rollforward(dt)
- assert result == datetime(2014, 7, 7, 9)
-
- normalize_cases = []
- normalize_cases.append(
- (
- CustomBusinessHour(normalize=True, holidays=holidays),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
- datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
- datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 7),
- },
- )
- )
-
- normalize_cases.append(
- (
- CustomBusinessHour(-1, normalize=True, holidays=holidays),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
- datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
- datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
- datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 4),
- },
- )
- )
-
- normalize_cases.append(
- (
- CustomBusinessHour(
- 1, normalize=True, start="17:00", end="04:00", holidays=holidays
- ),
- {
- datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
- datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
- datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
- datetime(2014, 7, 7, 17): datetime(2014, 7, 7),
- },
- )
- )
-
- @pytest.mark.parametrize("norm_cases", normalize_cases)
- def test_normalize(self, norm_cases):
- offset, cases = norm_cases
- for dt, expected in cases.items():
- assert offset.apply(dt) == expected
-
- def test_is_on_offset(self):
- tests = []
-
- tests.append(
- (
- CustomBusinessHour(start="10:00", end="15:00", holidays=self.holidays),
- {
- datetime(2014, 7, 1, 9): False,
- datetime(2014, 7, 1, 10): True,
- datetime(2014, 7, 1, 15): True,
- datetime(2014, 7, 1, 15, 1): False,
- datetime(2014, 7, 5, 12): False,
- datetime(2014, 7, 6, 12): False,
- },
- )
- )
-
- for offset, cases in tests:
- for dt, expected in cases.items():
- assert offset.is_on_offset(dt) == expected
-
- apply_cases = []
- apply_cases.append(
- (
- CustomBusinessHour(holidays=holidays),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
- datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
- # out of business hours
- datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
- # saturday
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
- },
- )
- )
-
- apply_cases.append(
- (
- CustomBusinessHour(4, holidays=holidays),
- {
- datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),
- },
- )
- )
-
- @pytest.mark.parametrize("apply_case", apply_cases)
- def test_apply(self, apply_case):
- offset, cases = apply_case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- nano_cases = []
- nano_cases.append(
- (
- CustomBusinessHour(holidays=holidays),
- {
- Timestamp("2014-07-01 15:00")
- + Nano(5): Timestamp("2014-07-01 16:00")
- + Nano(5),
- Timestamp("2014-07-01 16:00")
- + Nano(5): Timestamp("2014-07-03 09:00")
- + Nano(5),
- Timestamp("2014-07-01 16:00")
- - Nano(5): Timestamp("2014-07-01 17:00")
- - Nano(5),
- },
- )
- )
-
- nano_cases.append(
- (
- CustomBusinessHour(-1, holidays=holidays),
- {
- Timestamp("2014-07-01 15:00")
- + Nano(5): Timestamp("2014-07-01 14:00")
- + Nano(5),
- Timestamp("2014-07-01 10:00")
- + Nano(5): Timestamp("2014-07-01 09:00")
- + Nano(5),
- Timestamp("2014-07-01 10:00")
- - Nano(5): Timestamp("2014-06-26 17:00")
- - Nano(5),
- },
- )
- )
-
- @pytest.mark.parametrize("nano_case", nano_cases)
- def test_apply_nanoseconds(self, nano_case):
- offset, cases = nano_case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
-
-class TestCustomBusinessDay(Base):
- _offset = CDay
-
- def setup_method(self, method):
- self.d = datetime(2008, 1, 1)
- self.nd = np_datetime64_compat("2008-01-01 00:00:00Z")
-
- self.offset = CDay()
- self.offset1 = self.offset
- self.offset2 = CDay(2)
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset) == "<CustomBusinessDay>"
- assert repr(self.offset2) == "<2 * CustomBusinessDays>"
-
- expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
- assert repr(self.offset + timedelta(1)) == expected
-
- def test_with_offset(self):
- offset = self.offset + timedelta(hours=2)
-
- assert (self.d + offset) == datetime(2008, 1, 2, 2)
-
- def test_with_offset_index(self):
- dti = DatetimeIndex([self.d])
- result = dti + (self.offset + timedelta(hours=2))
-
- expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
- tm.assert_index_equal(result, expected)
-
- def test_eq(self):
- assert self.offset2 == self.offset2
-
- def test_mul(self):
- pass
-
- def test_hash(self):
- assert hash(self.offset2) == hash(self.offset2)
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset2(self.d) == datetime(2008, 1, 3)
- assert self.offset2(self.nd) == datetime(2008, 1, 3)
-
- def testRollback1(self):
- assert CDay(10).rollback(self.d) == self.d
-
- def testRollback2(self):
- assert CDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
-
- def testRollforward1(self):
- assert CDay(10).rollforward(self.d) == self.d
-
- def testRollforward2(self):
- assert CDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
-
- def test_roll_date_object(self):
- offset = CDay()
-
- dt = date(2012, 9, 15)
-
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 14)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 17)
-
- offset = offsets.Day()
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 15)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 15)
-
- on_offset_cases = [
- (CDay(), datetime(2008, 1, 1), True),
- (CDay(), datetime(2008, 1, 5), False),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- offset, d, expected = case
- assert_is_on_offset(offset, d, expected)
-
- apply_cases: _ApplyCases = []
- apply_cases.append(
- (
- CDay(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 2),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 8),
- },
- )
- )
-
- apply_cases.append(
- (
- 2 * CDay(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 3),
- datetime(2008, 1, 4): datetime(2008, 1, 8),
- datetime(2008, 1, 5): datetime(2008, 1, 8),
- datetime(2008, 1, 6): datetime(2008, 1, 8),
- datetime(2008, 1, 7): datetime(2008, 1, 9),
- },
- )
- )
-
- apply_cases.append(
- (
- -CDay(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 31),
- datetime(2008, 1, 4): datetime(2008, 1, 3),
- datetime(2008, 1, 5): datetime(2008, 1, 4),
- datetime(2008, 1, 6): datetime(2008, 1, 4),
- datetime(2008, 1, 7): datetime(2008, 1, 4),
- datetime(2008, 1, 8): datetime(2008, 1, 7),
- },
- )
- )
-
- apply_cases.append(
- (
- -2 * CDay(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 28),
- datetime(2008, 1, 4): datetime(2008, 1, 2),
- datetime(2008, 1, 5): datetime(2008, 1, 3),
- datetime(2008, 1, 6): datetime(2008, 1, 3),
- datetime(2008, 1, 7): datetime(2008, 1, 3),
- datetime(2008, 1, 8): datetime(2008, 1, 4),
- datetime(2008, 1, 9): datetime(2008, 1, 7),
- },
- )
- )
-
- apply_cases.append(
- (
- CDay(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 4): datetime(2008, 1, 4),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 7),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_cases)
- def test_apply(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_apply_large_n(self):
- dt = datetime(2012, 10, 23)
-
- result = dt + CDay(10)
- assert result == datetime(2012, 11, 6)
-
- result = dt + CDay(100) - CDay(100)
- assert result == dt
-
- off = CDay() * 6
- rs = datetime(2012, 1, 1) - off
- xp = datetime(2011, 12, 23)
- assert rs == xp
-
- st = datetime(2011, 12, 18)
- rs = st + off
- xp = datetime(2011, 12, 26)
- assert rs == xp
-
- def test_apply_corner(self):
- msg = (
- "Only know how to combine trading day "
- "with datetime, datetime64 or timedelta"
- )
- with pytest.raises(ApplyTypeError, match=msg):
- CDay().apply(BMonthEnd())
-
- def test_holidays(self):
- # Define a TradingDay offset
- holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
- tday = CDay(holidays=holidays)
- for year in range(2012, 2015):
- dt = datetime(year, 4, 30)
- xp = datetime(year, 5, 2)
- rs = dt + tday
- assert rs == xp
-
- def test_weekmask(self):
- weekmask_saudi = "Sat Sun Mon Tue Wed" # Thu-Fri Weekend
- weekmask_uae = "1111001" # Fri-Sat Weekend
- weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend
- bday_saudi = CDay(weekmask=weekmask_saudi)
- bday_uae = CDay(weekmask=weekmask_uae)
- bday_egypt = CDay(weekmask=weekmask_egypt)
- dt = datetime(2013, 5, 1)
- xp_saudi = datetime(2013, 5, 4)
- xp_uae = datetime(2013, 5, 2)
- xp_egypt = datetime(2013, 5, 2)
- assert xp_saudi == dt + bday_saudi
- assert xp_uae == dt + bday_uae
- assert xp_egypt == dt + bday_egypt
- xp2 = datetime(2013, 5, 5)
- assert xp2 == dt + 2 * bday_saudi
- assert xp2 == dt + 2 * bday_uae
- assert xp2 == dt + 2 * bday_egypt
-
- def test_weekmask_and_holidays(self):
- weekmask_egypt = "Sun Mon Tue Wed Thu" # Fri-Sat Weekend
- holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
- bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
- dt = datetime(2013, 4, 30)
- xp_egypt = datetime(2013, 5, 5)
- assert xp_egypt == dt + 2 * bday_egypt
-
- @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
- def test_calendar(self):
- calendar = USFederalHolidayCalendar()
- dt = datetime(2014, 1, 17)
- assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
-
- def test_roundtrip_pickle(self):
- def _check_roundtrip(obj):
- unpickled = tm.round_trip_pickle(obj)
- assert unpickled == obj
-
- _check_roundtrip(self.offset)
- _check_roundtrip(self.offset2)
- _check_roundtrip(self.offset * 2)
-
- def test_pickle_compat_0_14_1(self, datapath):
- hdays = [datetime(2013, 1, 1) for ele in range(4)]
- pth = datapath("tseries", "offsets", "data", "cday-0.14.1.pickle")
- cday0_14_1 = read_pickle(pth)
- cday = CDay(holidays=hdays)
- assert cday == cday0_14_1
-
-
-class CustomBusinessMonthBase:
- def setup_method(self, method):
- self.d = datetime(2008, 1, 1)
-
- self.offset = self._offset()
- self.offset1 = self.offset
- self.offset2 = self._offset(2)
-
- def test_eq(self):
- assert self.offset2 == self.offset2
-
- def test_mul(self):
- pass
-
- def test_hash(self):
- assert hash(self.offset2) == hash(self.offset2)
-
- def test_roundtrip_pickle(self):
- def _check_roundtrip(obj):
- unpickled = tm.round_trip_pickle(obj)
- assert unpickled == obj
-
- _check_roundtrip(self._offset())
- _check_roundtrip(self._offset(2))
- _check_roundtrip(self._offset() * 2)
-
- def test_copy(self):
- # GH 17452
- off = self._offset(weekmask="Mon Wed Fri")
- assert off == off.copy()
-
-
-class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
- _offset = CBMonthEnd
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset) == "<CustomBusinessMonthEnd>"
- assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset2(self.d) == datetime(2008, 2, 29)
-
- def testRollback1(self):
- assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
-
- def testRollback2(self):
- assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
-
- def testRollforward1(self):
- assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
-
- def test_roll_date_object(self):
- offset = CBMonthEnd()
-
- dt = date(2012, 9, 15)
-
- result = offset.rollback(dt)
- assert result == datetime(2012, 8, 31)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 28)
-
- offset = offsets.Day()
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 15)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 15)
-
- on_offset_cases = [
- (CBMonthEnd(), datetime(2008, 1, 31), True),
- (CBMonthEnd(), datetime(2008, 1, 1), False),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- offset, d, expected = case
- assert_is_on_offset(offset, d, expected)
-
- apply_cases: _ApplyCases = []
- apply_cases.append(
- (
- CBMonthEnd(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 2, 7): datetime(2008, 2, 29),
- },
- )
- )
-
- apply_cases.append(
- (
- 2 * CBMonthEnd(),
- {
- datetime(2008, 1, 1): datetime(2008, 2, 29),
- datetime(2008, 2, 7): datetime(2008, 3, 31),
- },
- )
- )
-
- apply_cases.append(
- (
- -CBMonthEnd(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 31),
- datetime(2008, 2, 8): datetime(2008, 1, 31),
- },
- )
- )
-
- apply_cases.append(
- (
- -2 * CBMonthEnd(),
- {
- datetime(2008, 1, 1): datetime(2007, 11, 30),
- datetime(2008, 2, 9): datetime(2007, 12, 31),
- },
- )
- )
-
- apply_cases.append(
- (
- CBMonthEnd(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 2, 7): datetime(2008, 2, 29),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_cases)
- def test_apply(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_apply_large_n(self):
- dt = datetime(2012, 10, 23)
-
- result = dt + CBMonthEnd(10)
- assert result == datetime(2013, 7, 31)
-
- result = dt + CDay(100) - CDay(100)
- assert result == dt
-
- off = CBMonthEnd() * 6
- rs = datetime(2012, 1, 1) - off
- xp = datetime(2011, 7, 29)
- assert rs == xp
-
- st = datetime(2011, 12, 18)
- rs = st + off
- xp = datetime(2012, 5, 31)
- assert rs == xp
-
- def test_holidays(self):
- # Define a TradingDay offset
- holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
- bm_offset = CBMonthEnd(holidays=holidays)
- dt = datetime(2012, 1, 1)
- assert dt + bm_offset == datetime(2012, 1, 30)
- assert dt + 2 * bm_offset == datetime(2012, 2, 27)
-
- @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
- def test_datetimeindex(self):
- from pandas.tseries.holiday import USFederalHolidayCalendar
-
- hcal = USFederalHolidayCalendar()
- freq = CBMonthEnd(calendar=hcal)
-
- assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
- 0
- ] == datetime(2012, 1, 31)
-
-
-class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
- _offset = CBMonthBegin
-
- def test_different_normalize_equals(self):
- # GH#21404 changed __eq__ to return False when `normalize` does not match
- offset = self._offset()
- offset2 = self._offset(normalize=True)
- assert offset != offset2
-
- def test_repr(self):
- assert repr(self.offset) == "<CustomBusinessMonthBegin>"
- assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
-
- def test_call(self):
- with tm.assert_produces_warning(FutureWarning):
- # GH#34171 DateOffset.__call__ is deprecated
- assert self.offset2(self.d) == datetime(2008, 3, 3)
-
- def testRollback1(self):
- assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
-
- def testRollback2(self):
- assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
-
- def testRollforward1(self):
- assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
-
- def test_roll_date_object(self):
- offset = CBMonthBegin()
-
- dt = date(2012, 9, 15)
-
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 3)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 10, 1)
-
- offset = offsets.Day()
- result = offset.rollback(dt)
- assert result == datetime(2012, 9, 15)
-
- result = offset.rollforward(dt)
- assert result == datetime(2012, 9, 15)
-
- on_offset_cases = [
- (CBMonthBegin(), datetime(2008, 1, 1), True),
- (CBMonthBegin(), datetime(2008, 1, 31), False),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- offset, dt, expected = case
- assert_is_on_offset(offset, dt, expected)
-
- apply_cases: _ApplyCases = []
- apply_cases.append(
- (
- CBMonthBegin(),
- {
- datetime(2008, 1, 1): datetime(2008, 2, 1),
- datetime(2008, 2, 7): datetime(2008, 3, 3),
- },
- )
- )
-
- apply_cases.append(
- (
- 2 * CBMonthBegin(),
- {
- datetime(2008, 1, 1): datetime(2008, 3, 3),
- datetime(2008, 2, 7): datetime(2008, 4, 1),
- },
- )
- )
-
- apply_cases.append(
- (
- -CBMonthBegin(),
- {
- datetime(2008, 1, 1): datetime(2007, 12, 3),
- datetime(2008, 2, 8): datetime(2008, 2, 1),
- },
- )
- )
-
- apply_cases.append(
- (
- -2 * CBMonthBegin(),
- {
- datetime(2008, 1, 1): datetime(2007, 11, 1),
- datetime(2008, 2, 9): datetime(2008, 1, 1),
- },
- )
- )
-
- apply_cases.append(
- (
- CBMonthBegin(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 7): datetime(2008, 2, 1),
- },
- )
- )
-
- @pytest.mark.parametrize("case", apply_cases)
- def test_apply(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- def test_apply_large_n(self):
- dt = datetime(2012, 10, 23)
-
- result = dt + CBMonthBegin(10)
- assert result == datetime(2013, 8, 1)
-
- result = dt + CDay(100) - CDay(100)
- assert result == dt
-
- off = CBMonthBegin() * 6
- rs = datetime(2012, 1, 1) - off
- xp = datetime(2011, 7, 1)
- assert rs == xp
-
- st = datetime(2011, 12, 18)
- rs = st + off
-
- xp = datetime(2012, 6, 1)
- assert rs == xp
-
- def test_holidays(self):
- # Define a TradingDay offset
- holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
- bm_offset = CBMonthBegin(holidays=holidays)
- dt = datetime(2012, 1, 1)
-
- assert dt + bm_offset == datetime(2012, 1, 2)
- assert dt + 2 * bm_offset == datetime(2012, 2, 3)
-
- @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
- def test_datetimeindex(self):
- hcal = USFederalHolidayCalendar()
- cbmb = CBMonthBegin(calendar=hcal)
- assert date_range(start="20120101", end="20130101", freq=cbmb).tolist()[
- 0
- ] == datetime(2012, 1, 3)
-
-
-class TestWeek(Base):
- _offset = Week
- d = Timestamp(datetime(2008, 1, 2))
- offset1 = _offset()
- offset2 = _offset(2)
-
- def test_repr(self):
- assert repr(Week(weekday=0)) == "<Week: weekday=0>"
- assert repr(Week(n=-1, weekday=0)) == "<-1 * Week: weekday=0>"
- assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"
-
- def test_corner(self):
- with pytest.raises(ValueError, match="Day must be"):
- Week(weekday=7)
-
- with pytest.raises(ValueError, match="Day must be"):
- Week(weekday=-1)
-
- def test_is_anchored(self):
- assert Week(weekday=0).is_anchored()
- assert not Week().is_anchored()
- assert not Week(2, weekday=2).is_anchored()
- assert not Week(2).is_anchored()
-
- offset_cases = []
- # not business week
- offset_cases.append(
- (
- Week(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 8),
- datetime(2008, 1, 4): datetime(2008, 1, 11),
- datetime(2008, 1, 5): datetime(2008, 1, 12),
- datetime(2008, 1, 6): datetime(2008, 1, 13),
- datetime(2008, 1, 7): datetime(2008, 1, 14),
- },
- )
- )
-
- # Mon
- offset_cases.append(
- (
- Week(weekday=0),
- {
- datetime(2007, 12, 31): datetime(2008, 1, 7),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 14),
- },
- )
- )
-
- # n=0 -> roll forward. Mon
- offset_cases.append(
- (
- Week(0, weekday=0),
- {
- datetime(2007, 12, 31): datetime(2007, 12, 31),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 7),
- },
- )
- )
-
- # n=0 -> roll forward. Mon
- offset_cases.append(
- (
- Week(-2, weekday=1),
- {
- datetime(2010, 4, 6): datetime(2010, 3, 23),
- datetime(2010, 4, 8): datetime(2010, 3, 30),
- datetime(2010, 4, 5): datetime(2010, 3, 23),
- },
- )
- )
-
- @pytest.mark.parametrize("case", offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- @pytest.mark.parametrize("weekday", range(7))
- def test_is_on_offset(self, weekday):
- offset = Week(weekday=weekday)
-
- for day in range(1, 8):
- date = datetime(2008, 1, day)
-
- if day % 7 == weekday:
- expected = True
- else:
- expected = False
- assert_is_on_offset(offset, date, expected)
-
-
-class TestWeekOfMonth(Base):
- _offset = WeekOfMonth
- offset1 = _offset()
- offset2 = _offset(2)
-
- def test_constructor(self):
- with pytest.raises(ValueError, match="^Week"):
- WeekOfMonth(n=1, week=4, weekday=0)
-
- with pytest.raises(ValueError, match="^Week"):
- WeekOfMonth(n=1, week=-1, weekday=0)
-
- with pytest.raises(ValueError, match="^Day"):
- WeekOfMonth(n=1, week=0, weekday=-1)
-
- with pytest.raises(ValueError, match="^Day"):
- WeekOfMonth(n=1, week=0, weekday=-7)
-
- def test_repr(self):
- assert (
- repr(WeekOfMonth(weekday=1, week=2)) == "<WeekOfMonth: week=2, weekday=1>"
- )
-
- def test_offset(self):
- date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
- date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month
- date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month
- date4 = datetime(2011, 1, 25) # 4th Tuesday of Month
-
- # see for loop for structure
- test_cases = [
- (-2, 2, 1, date1, datetime(2010, 11, 16)),
- (-2, 2, 1, date2, datetime(2010, 11, 16)),
- (-2, 2, 1, date3, datetime(2010, 11, 16)),
- (-2, 2, 1, date4, datetime(2010, 12, 21)),
- (-1, 2, 1, date1, datetime(2010, 12, 21)),
- (-1, 2, 1, date2, datetime(2010, 12, 21)),
- (-1, 2, 1, date3, datetime(2010, 12, 21)),
- (-1, 2, 1, date4, datetime(2011, 1, 18)),
- (0, 0, 1, date1, datetime(2011, 1, 4)),
- (0, 0, 1, date2, datetime(2011, 2, 1)),
- (0, 0, 1, date3, datetime(2011, 2, 1)),
- (0, 0, 1, date4, datetime(2011, 2, 1)),
- (0, 1, 1, date1, datetime(2011, 1, 11)),
- (0, 1, 1, date2, datetime(2011, 1, 11)),
- (0, 1, 1, date3, datetime(2011, 2, 8)),
- (0, 1, 1, date4, datetime(2011, 2, 8)),
- (0, 0, 1, date1, datetime(2011, 1, 4)),
- (0, 1, 1, date2, datetime(2011, 1, 11)),
- (0, 2, 1, date3, datetime(2011, 1, 18)),
- (0, 3, 1, date4, datetime(2011, 1, 25)),
- (1, 0, 0, date1, datetime(2011, 2, 7)),
- (1, 0, 0, date2, datetime(2011, 2, 7)),
- (1, 0, 0, date3, datetime(2011, 2, 7)),
- (1, 0, 0, date4, datetime(2011, 2, 7)),
- (1, 0, 1, date1, datetime(2011, 2, 1)),
- (1, 0, 1, date2, datetime(2011, 2, 1)),
- (1, 0, 1, date3, datetime(2011, 2, 1)),
- (1, 0, 1, date4, datetime(2011, 2, 1)),
- (1, 0, 2, date1, datetime(2011, 1, 5)),
- (1, 0, 2, date2, datetime(2011, 2, 2)),
- (1, 0, 2, date3, datetime(2011, 2, 2)),
- (1, 0, 2, date4, datetime(2011, 2, 2)),
- (1, 2, 1, date1, datetime(2011, 1, 18)),
- (1, 2, 1, date2, datetime(2011, 1, 18)),
- (1, 2, 1, date3, datetime(2011, 2, 15)),
- (1, 2, 1, date4, datetime(2011, 2, 15)),
- (2, 2, 1, date1, datetime(2011, 2, 15)),
- (2, 2, 1, date2, datetime(2011, 2, 15)),
- (2, 2, 1, date3, datetime(2011, 3, 15)),
- (2, 2, 1, date4, datetime(2011, 3, 15)),
- ]
-
- for n, week, weekday, dt, expected in test_cases:
- offset = WeekOfMonth(n, week=week, weekday=weekday)
- assert_offset_equal(offset, dt, expected)
-
- # try subtracting
- result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
- assert result == datetime(2011, 1, 12)
-
- result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
- assert result == datetime(2011, 2, 2)
-
- on_offset_cases = [
- (0, 0, datetime(2011, 2, 7), True),
- (0, 0, datetime(2011, 2, 6), False),
- (0, 0, datetime(2011, 2, 14), False),
- (1, 0, datetime(2011, 2, 14), True),
- (0, 1, datetime(2011, 2, 1), True),
- (0, 1, datetime(2011, 2, 8), False),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- week, weekday, dt, expected = case
- offset = WeekOfMonth(week=week, weekday=weekday)
- assert offset.is_on_offset(dt) == expected
-
-
-class TestLastWeekOfMonth(Base):
- _offset = LastWeekOfMonth
- offset1 = _offset()
- offset2 = _offset(2)
-
- def test_constructor(self):
- with pytest.raises(ValueError, match="^N cannot be 0"):
- LastWeekOfMonth(n=0, weekday=1)
-
- with pytest.raises(ValueError, match="^Day"):
- LastWeekOfMonth(n=1, weekday=-1)
-
- with pytest.raises(ValueError, match="^Day"):
- LastWeekOfMonth(n=1, weekday=7)
-
- def test_offset(self):
- # Saturday
- last_sat = datetime(2013, 8, 31)
- next_sat = datetime(2013, 9, 28)
- offset_sat = LastWeekOfMonth(n=1, weekday=5)
-
- one_day_before = last_sat + timedelta(days=-1)
- assert one_day_before + offset_sat == last_sat
-
- one_day_after = last_sat + timedelta(days=+1)
- assert one_day_after + offset_sat == next_sat
-
- # Test On that day
- assert last_sat + offset_sat == next_sat
-
- # Thursday
-
- offset_thur = LastWeekOfMonth(n=1, weekday=3)
- last_thurs = datetime(2013, 1, 31)
- next_thurs = datetime(2013, 2, 28)
-
- one_day_before = last_thurs + timedelta(days=-1)
- assert one_day_before + offset_thur == last_thurs
-
- one_day_after = last_thurs + timedelta(days=+1)
- assert one_day_after + offset_thur == next_thurs
-
- # Test on that day
- assert last_thurs + offset_thur == next_thurs
-
- three_before = last_thurs + timedelta(days=-3)
- assert three_before + offset_thur == last_thurs
-
- two_after = last_thurs + timedelta(days=+2)
- assert two_after + offset_thur == next_thurs
-
- offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
- assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)
-
- on_offset_cases = [
- (WeekDay.SUN, datetime(2013, 1, 27), True),
- (WeekDay.SAT, datetime(2013, 3, 30), True),
- (WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon
- (WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN
- (WeekDay.MON, datetime(2013, 2, 25), True),
- (WeekDay.SAT, datetime(2013, 11, 30), True),
- (WeekDay.SAT, datetime(2006, 8, 26), True),
- (WeekDay.SAT, datetime(2007, 8, 25), True),
- (WeekDay.SAT, datetime(2008, 8, 30), True),
- (WeekDay.SAT, datetime(2009, 8, 29), True),
- (WeekDay.SAT, datetime(2010, 8, 28), True),
- (WeekDay.SAT, datetime(2011, 8, 27), True),
- (WeekDay.SAT, datetime(2019, 8, 31), True),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- weekday, dt, expected = case
- offset = LastWeekOfMonth(weekday=weekday)
- assert offset.is_on_offset(dt) == expected
-
- def test_repr(self):
- assert (
- repr(LastWeekOfMonth(n=2, weekday=1)) == "<2 * LastWeekOfMonths: weekday=1>"
- )
-
-
-class TestSemiMonthEnd(Base):
- _offset = SemiMonthEnd
- offset1 = _offset()
- offset2 = _offset(2)
-
- def test_offset_whole_year(self):
- dates = (
- datetime(2007, 12, 31),
- datetime(2008, 1, 15),
- datetime(2008, 1, 31),
- datetime(2008, 2, 15),
- datetime(2008, 2, 29),
- datetime(2008, 3, 15),
- datetime(2008, 3, 31),
- datetime(2008, 4, 15),
- datetime(2008, 4, 30),
- datetime(2008, 5, 15),
- datetime(2008, 5, 31),
- datetime(2008, 6, 15),
- datetime(2008, 6, 30),
- datetime(2008, 7, 15),
- datetime(2008, 7, 31),
- datetime(2008, 8, 15),
- datetime(2008, 8, 31),
- datetime(2008, 9, 15),
- datetime(2008, 9, 30),
- datetime(2008, 10, 15),
- datetime(2008, 10, 31),
- datetime(2008, 11, 15),
- datetime(2008, 11, 30),
- datetime(2008, 12, 15),
- datetime(2008, 12, 31),
- )
-
- for base, exp_date in zip(dates[:-1], dates[1:]):
- assert_offset_equal(SemiMonthEnd(), base, exp_date)
-
- # ensure .apply_index works as expected
- s = DatetimeIndex(dates[:-1])
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = SemiMonthEnd() + s
-
- exp = DatetimeIndex(dates[1:])
- tm.assert_index_equal(result, exp)
-
- # ensure generating a range with DatetimeIndex gives same result
- result = date_range(start=dates[0], end=dates[-1], freq="SM")
- exp = DatetimeIndex(dates, freq="SM")
- tm.assert_index_equal(result, exp)
-
- offset_cases = []
- offset_cases.append(
- (
- SemiMonthEnd(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 15),
- datetime(2008, 1, 15): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 2, 15),
- datetime(2006, 12, 14): datetime(2006, 12, 15),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2007, 1, 15),
- datetime(2007, 1, 1): datetime(2007, 1, 15),
- datetime(2006, 12, 1): datetime(2006, 12, 15),
- datetime(2006, 12, 15): datetime(2006, 12, 31),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(day_of_month=20),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 20),
- datetime(2008, 1, 15): datetime(2008, 1, 20),
- datetime(2008, 1, 21): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 2, 20),
- datetime(2006, 12, 14): datetime(2006, 12, 20),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2007, 1, 20),
- datetime(2007, 1, 1): datetime(2007, 1, 20),
- datetime(2006, 12, 1): datetime(2006, 12, 20),
- datetime(2006, 12, 15): datetime(2006, 12, 20),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 15),
- datetime(2008, 1, 16): datetime(2008, 1, 31),
- datetime(2008, 1, 15): datetime(2008, 1, 15),
- datetime(2008, 1, 31): datetime(2008, 1, 31),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2006, 12, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 15),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(0, day_of_month=16),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 16),
- datetime(2008, 1, 16): datetime(2008, 1, 16),
- datetime(2008, 1, 15): datetime(2008, 1, 16),
- datetime(2008, 1, 31): datetime(2008, 1, 31),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2006, 12, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 16),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(2),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 2, 29),
- datetime(2006, 12, 29): datetime(2007, 1, 15),
- datetime(2006, 12, 31): datetime(2007, 1, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 31),
- datetime(2007, 1, 16): datetime(2007, 2, 15),
- datetime(2006, 11, 1): datetime(2006, 11, 30),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(-1),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 31),
- datetime(2008, 6, 30): datetime(2008, 6, 15),
- datetime(2008, 12, 31): datetime(2008, 12, 15),
- datetime(2006, 12, 29): datetime(2006, 12, 15),
- datetime(2006, 12, 30): datetime(2006, 12, 15),
- datetime(2007, 1, 1): datetime(2006, 12, 31),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(-1, day_of_month=4),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 31),
- datetime(2007, 1, 4): datetime(2006, 12, 31),
- datetime(2008, 6, 30): datetime(2008, 6, 4),
- datetime(2008, 12, 31): datetime(2008, 12, 4),
- datetime(2006, 12, 5): datetime(2006, 12, 4),
- datetime(2006, 12, 30): datetime(2006, 12, 4),
- datetime(2007, 1, 1): datetime(2006, 12, 31),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthEnd(-2),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 15),
- datetime(2008, 6, 30): datetime(2008, 5, 31),
- datetime(2008, 3, 15): datetime(2008, 2, 15),
- datetime(2008, 12, 31): datetime(2008, 11, 30),
- datetime(2006, 12, 29): datetime(2006, 11, 30),
- datetime(2006, 12, 14): datetime(2006, 11, 15),
- datetime(2007, 1, 1): datetime(2006, 12, 15),
- },
- )
- )
-
- @pytest.mark.parametrize("case", offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- @pytest.mark.parametrize("case", offset_cases)
- def test_apply_index(self, case):
- # https://github.com/pandas-dev/pandas/issues/34580
- offset, cases = case
- s = DatetimeIndex(cases.keys())
- exp = DatetimeIndex(cases.values())
-
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = offset + s
- tm.assert_index_equal(result, exp)
-
- with tm.assert_produces_warning(FutureWarning):
- result = offset.apply_index(s)
- tm.assert_index_equal(result, exp)
-
- on_offset_cases = [
- (datetime(2007, 12, 31), True),
- (datetime(2007, 12, 15), True),
- (datetime(2007, 12, 14), False),
- (datetime(2007, 12, 1), False),
- (datetime(2008, 2, 29), True),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- dt, expected = case
- assert_is_on_offset(SemiMonthEnd(), dt, expected)
-
- @pytest.mark.parametrize("klass", [Series, DatetimeIndex])
- def test_vectorized_offset_addition(self, klass):
- s = klass(
- [
- Timestamp("2000-01-15 00:15:00", tz="US/Central"),
- Timestamp("2000-02-15", tz="US/Central"),
- ],
- name="a",
- )
-
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = s + SemiMonthEnd()
- result2 = SemiMonthEnd() + s
-
- exp = klass(
- [
- Timestamp("2000-01-31 00:15:00", tz="US/Central"),
- Timestamp("2000-02-29", tz="US/Central"),
- ],
- name="a",
- )
- tm.assert_equal(result, exp)
- tm.assert_equal(result2, exp)
-
- s = klass(
- [
- Timestamp("2000-01-01 00:15:00", tz="US/Central"),
- Timestamp("2000-02-01", tz="US/Central"),
- ],
- name="a",
- )
-
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = s + SemiMonthEnd()
- result2 = SemiMonthEnd() + s
-
- exp = klass(
- [
- Timestamp("2000-01-15 00:15:00", tz="US/Central"),
- Timestamp("2000-02-15", tz="US/Central"),
- ],
- name="a",
- )
- tm.assert_equal(result, exp)
- tm.assert_equal(result2, exp)
-
-
-class TestSemiMonthBegin(Base):
- _offset = SemiMonthBegin
- offset1 = _offset()
- offset2 = _offset(2)
-
- def test_offset_whole_year(self):
- dates = (
- datetime(2007, 12, 15),
- datetime(2008, 1, 1),
- datetime(2008, 1, 15),
- datetime(2008, 2, 1),
- datetime(2008, 2, 15),
- datetime(2008, 3, 1),
- datetime(2008, 3, 15),
- datetime(2008, 4, 1),
- datetime(2008, 4, 15),
- datetime(2008, 5, 1),
- datetime(2008, 5, 15),
- datetime(2008, 6, 1),
- datetime(2008, 6, 15),
- datetime(2008, 7, 1),
- datetime(2008, 7, 15),
- datetime(2008, 8, 1),
- datetime(2008, 8, 15),
- datetime(2008, 9, 1),
- datetime(2008, 9, 15),
- datetime(2008, 10, 1),
- datetime(2008, 10, 15),
- datetime(2008, 11, 1),
- datetime(2008, 11, 15),
- datetime(2008, 12, 1),
- datetime(2008, 12, 15),
- )
-
- for base, exp_date in zip(dates[:-1], dates[1:]):
- assert_offset_equal(SemiMonthBegin(), base, exp_date)
-
- # ensure .apply_index works as expected
- s = DatetimeIndex(dates[:-1])
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = SemiMonthBegin() + s
-
- exp = DatetimeIndex(dates[1:])
- tm.assert_index_equal(result, exp)
-
- # ensure generating a range with DatetimeIndex gives same result
- result = date_range(start=dates[0], end=dates[-1], freq="SMS")
- exp = DatetimeIndex(dates, freq="SMS")
- tm.assert_index_equal(result, exp)
-
- offset_cases = []
- offset_cases.append(
- (
- SemiMonthBegin(),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 15),
- datetime(2008, 1, 15): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 14): datetime(2006, 12, 15),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2007, 1, 1): datetime(2007, 1, 15),
- datetime(2006, 12, 1): datetime(2006, 12, 15),
- datetime(2006, 12, 15): datetime(2007, 1, 1),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(day_of_month=20),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 20),
- datetime(2008, 1, 15): datetime(2008, 1, 20),
- datetime(2008, 1, 21): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 14): datetime(2006, 12, 20),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2007, 1, 1): datetime(2007, 1, 20),
- datetime(2006, 12, 1): datetime(2006, 12, 20),
- datetime(2006, 12, 15): datetime(2006, 12, 20),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(0),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 16): datetime(2008, 2, 1),
- datetime(2008, 1, 15): datetime(2008, 1, 15),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 2): datetime(2006, 12, 15),
- datetime(2007, 1, 1): datetime(2007, 1, 1),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(0, day_of_month=16),
- {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 16): datetime(2008, 1, 16),
- datetime(2008, 1, 15): datetime(2008, 1, 16),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2007, 1, 5): datetime(2007, 1, 16),
- datetime(2007, 1, 1): datetime(2007, 1, 1),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(2),
- {
- datetime(2008, 1, 1): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 15),
- datetime(2006, 12, 1): datetime(2007, 1, 1),
- datetime(2006, 12, 29): datetime(2007, 1, 15),
- datetime(2006, 12, 15): datetime(2007, 1, 15),
- datetime(2007, 1, 1): datetime(2007, 2, 1),
- datetime(2007, 1, 16): datetime(2007, 2, 15),
- datetime(2006, 11, 1): datetime(2006, 12, 1),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(-1),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 15),
- datetime(2008, 6, 30): datetime(2008, 6, 15),
- datetime(2008, 6, 14): datetime(2008, 6, 1),
- datetime(2008, 12, 31): datetime(2008, 12, 15),
- datetime(2006, 12, 29): datetime(2006, 12, 15),
- datetime(2006, 12, 15): datetime(2006, 12, 1),
- datetime(2007, 1, 1): datetime(2006, 12, 15),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(-1, day_of_month=4),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 4),
- datetime(2007, 1, 4): datetime(2007, 1, 1),
- datetime(2008, 6, 30): datetime(2008, 6, 4),
- datetime(2008, 12, 31): datetime(2008, 12, 4),
- datetime(2006, 12, 5): datetime(2006, 12, 4),
- datetime(2006, 12, 30): datetime(2006, 12, 4),
- datetime(2006, 12, 2): datetime(2006, 12, 1),
- datetime(2007, 1, 1): datetime(2006, 12, 4),
- },
- )
- )
-
- offset_cases.append(
- (
- SemiMonthBegin(-2),
- {
- datetime(2007, 1, 1): datetime(2006, 12, 1),
- datetime(2008, 6, 30): datetime(2008, 6, 1),
- datetime(2008, 6, 14): datetime(2008, 5, 15),
- datetime(2008, 12, 31): datetime(2008, 12, 1),
- datetime(2006, 12, 29): datetime(2006, 12, 1),
- datetime(2006, 12, 15): datetime(2006, 11, 15),
- datetime(2007, 1, 1): datetime(2006, 12, 1),
- },
- )
- )
-
- @pytest.mark.parametrize("case", offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in cases.items():
- assert_offset_equal(offset, base, expected)
-
- @pytest.mark.parametrize("case", offset_cases)
- def test_apply_index(self, case):
- offset, cases = case
- s = DatetimeIndex(cases.keys())
-
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = offset + s
-
- exp = DatetimeIndex(cases.values())
- tm.assert_index_equal(result, exp)
-
- on_offset_cases = [
- (datetime(2007, 12, 1), True),
- (datetime(2007, 12, 15), True),
- (datetime(2007, 12, 14), False),
- (datetime(2007, 12, 31), False),
- (datetime(2008, 2, 15), True),
- ]
-
- @pytest.mark.parametrize("case", on_offset_cases)
- def test_is_on_offset(self, case):
- dt, expected = case
- assert_is_on_offset(SemiMonthBegin(), dt, expected)
-
- @pytest.mark.parametrize("klass", [Series, DatetimeIndex])
- def test_vectorized_offset_addition(self, klass):
- s = klass(
- [
- Timestamp("2000-01-15 00:15:00", tz="US/Central"),
- Timestamp("2000-02-15", tz="US/Central"),
- ],
- name="a",
- )
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = s + SemiMonthBegin()
- result2 = SemiMonthBegin() + s
-
- exp = klass(
- [
- Timestamp("2000-02-01 00:15:00", tz="US/Central"),
- Timestamp("2000-03-01", tz="US/Central"),
- ],
- name="a",
- )
- tm.assert_equal(result, exp)
- tm.assert_equal(result2, exp)
-
- s = klass(
- [
- Timestamp("2000-01-01 00:15:00", tz="US/Central"),
- Timestamp("2000-02-01", tz="US/Central"),
- ],
- name="a",
- )
- with tm.assert_produces_warning(None):
- # GH#22535 check that we don't get a FutureWarning from adding
- # an integer array to PeriodIndex
- result = s + SemiMonthBegin()
- result2 = SemiMonthBegin() + s
-
- exp = klass(
- [
- Timestamp("2000-01-15 00:15:00", tz="US/Central"),
- Timestamp("2000-02-15", tz="US/Central"),
- ],
- name="a",
- )
- tm.assert_equal(result, exp)
- tm.assert_equal(result2, exp)
-
-
def test_Easter():
assert_offset_equal(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))
assert_offset_equal(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))
@@ -4208,153 +707,6 @@ def get_utc_offset_hours(ts):
return (o.days * 24 * 3600 + o.seconds) / 3600.0
-class TestDST:
- """
- test DateOffset additions over Daylight Savings Time
- """
-
- # one microsecond before the DST transition
- ts_pre_fallback = "2013-11-03 01:59:59.999999"
- ts_pre_springfwd = "2013-03-10 01:59:59.999999"
-
- # test both basic names and dateutil timezones
- timezone_utc_offsets = {
- "US/Eastern": {"utc_offset_daylight": -4, "utc_offset_standard": -5},
- "dateutil/US/Pacific": {"utc_offset_daylight": -7, "utc_offset_standard": -8},
- }
- valid_date_offsets_singular = [
- "weekday",
- "day",
- "hour",
- "minute",
- "second",
- "microsecond",
- ]
- valid_date_offsets_plural = [
- "weeks",
- "days",
- "hours",
- "minutes",
- "seconds",
- "milliseconds",
- "microseconds",
- ]
-
- def _test_all_offsets(self, n, **kwds):
- valid_offsets = (
- self.valid_date_offsets_plural
- if n > 1
- else self.valid_date_offsets_singular
- )
-
- for name in valid_offsets:
- self._test_offset(offset_name=name, offset_n=n, **kwds)
-
- def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
- offset = DateOffset(**{offset_name: offset_n})
-
- t = tstart + offset
- if expected_utc_offset is not None:
- assert get_utc_offset_hours(t) == expected_utc_offset
-
- if offset_name == "weeks":
- # dates should match
- assert t.date() == timedelta(days=7 * offset.kwds["weeks"]) + tstart.date()
- # expect the same day of week, hour of day, minute, second, ...
- assert (
- t.dayofweek == tstart.dayofweek
- and t.hour == tstart.hour
- and t.minute == tstart.minute
- and t.second == tstart.second
- )
- elif offset_name == "days":
- # dates should match
- assert timedelta(offset.kwds["days"]) + tstart.date() == t.date()
- # expect the same hour of day, minute, second, ...
- assert (
- t.hour == tstart.hour
- and t.minute == tstart.minute
- and t.second == tstart.second
- )
- elif offset_name in self.valid_date_offsets_singular:
- # expect the singular offset value to match between tstart and t
- datepart_offset = getattr(
- t, offset_name if offset_name != "weekday" else "dayofweek"
- )
- assert datepart_offset == offset.kwds[offset_name]
- else:
- # the offset should be the same as if it was done in UTC
- assert t == (tstart.tz_convert("UTC") + offset).tz_convert("US/Pacific")
-
- def _make_timestamp(self, string, hrs_offset, tz):
- if hrs_offset >= 0:
- offset_string = f"{hrs_offset:02d}00"
- else:
- offset_string = f"-{(hrs_offset * -1):02}00"
- return Timestamp(string + offset_string).tz_convert(tz)
-
- def test_springforward_plural(self):
- # test moving from standard to daylight savings
- for tz, utc_offsets in self.timezone_utc_offsets.items():
- hrs_pre = utc_offsets["utc_offset_standard"]
- hrs_post = utc_offsets["utc_offset_daylight"]
- self._test_all_offsets(
- n=3,
- tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
- expected_utc_offset=hrs_post,
- )
-
- def test_fallback_singular(self):
- # in the case of singular offsets, we don't necessarily know which utc
- # offset the new Timestamp will wind up in (the tz for 1 month may be
- # different from 1 second) so we don't specify an expected_utc_offset
- for tz, utc_offsets in self.timezone_utc_offsets.items():
- hrs_pre = utc_offsets["utc_offset_standard"]
- self._test_all_offsets(
- n=1,
- tstart=self._make_timestamp(self.ts_pre_fallback, hrs_pre, tz),
- expected_utc_offset=None,
- )
-
- def test_springforward_singular(self):
- for tz, utc_offsets in self.timezone_utc_offsets.items():
- hrs_pre = utc_offsets["utc_offset_standard"]
- self._test_all_offsets(
- n=1,
- tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),
- expected_utc_offset=None,
- )
-
- offset_classes = {
- MonthBegin: ["11/2/2012", "12/1/2012"],
- MonthEnd: ["11/2/2012", "11/30/2012"],
- BMonthBegin: ["11/2/2012", "12/3/2012"],
- BMonthEnd: ["11/2/2012", "11/30/2012"],
- CBMonthBegin: ["11/2/2012", "12/3/2012"],
- CBMonthEnd: ["11/2/2012", "11/30/2012"],
- SemiMonthBegin: ["11/2/2012", "11/15/2012"],
- SemiMonthEnd: ["11/2/2012", "11/15/2012"],
- Week: ["11/2/2012", "11/9/2012"],
- YearBegin: ["11/2/2012", "1/1/2013"],
- YearEnd: ["11/2/2012", "12/31/2012"],
- BYearBegin: ["11/2/2012", "1/1/2013"],
- BYearEnd: ["11/2/2012", "12/31/2012"],
- QuarterBegin: ["11/2/2012", "12/1/2012"],
- QuarterEnd: ["11/2/2012", "12/31/2012"],
- BQuarterBegin: ["11/2/2012", "12/3/2012"],
- BQuarterEnd: ["11/2/2012", "12/31/2012"],
- Day: ["11/4/2012", "11/4/2012 23:00"],
- }.items()
-
- @pytest.mark.parametrize("tup", offset_classes)
- def test_all_offset_classes(self, tup):
- offset, test_values = tup
-
- first = Timestamp(test_values[0], tz="US/Eastern") + offset()
- second = Timestamp(test_values[1], tz="US/Eastern")
- assert first == second
-
-
# ---------------------------------------------------------------------
diff --git a/pandas/tests/tseries/offsets/test_opening_times.py b/pandas/tests/tseries/offsets/test_opening_times.py
new file mode 100644
index 0000000000000..107436e4b3343
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_opening_times.py
@@ -0,0 +1,456 @@
+"""
+Test offset.BusinessHour._next_opening_time and offset.BusinessHour._prev_opening_time
+"""
+from datetime import datetime
+
+import pytest
+
+from pandas._libs.tslibs.offsets import BusinessHour
+
+
+class TestOpeningTimes:
+ # opening time should be affected by sign of n, not by n's value and end
+ opening_time_cases = [
+ (
+ [
+ BusinessHour(),
+ BusinessHour(n=2),
+ BusinessHour(n=4),
+ BusinessHour(end="10:00"),
+ BusinessHour(n=2, end="4:00"),
+ BusinessHour(n=4, end="15:00"),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9),
+ ),
+ # if timestamp is on opening time, next opening time is
+ # as it is
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 2, 10): (
+ datetime(2014, 7, 3, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ # 2014-07-05 is saturday
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9),
+ ),
+ datetime(2014, 7, 7, 9, 1): (
+ datetime(2014, 7, 8, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(start="11:15"),
+ BusinessHour(n=2, start="11:15"),
+ BusinessHour(n=3, start="11:15"),
+ BusinessHour(start="11:15", end="10:00"),
+ BusinessHour(n=2, start="11:15", end="4:00"),
+ BusinessHour(n=3, start="11:15", end="15:00"),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 1, 11, 15),
+ datetime(2014, 6, 30, 11, 15),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15),
+ ),
+ datetime(2014, 7, 2, 10): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15),
+ ),
+ datetime(2014, 7, 2, 11, 15): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 2, 11, 15),
+ ),
+ datetime(2014, 7, 2, 11, 15, 1): (
+ datetime(2014, 7, 3, 11, 15),
+ datetime(2014, 7, 2, 11, 15),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 4, 11, 15),
+ datetime(2014, 7, 3, 11, 15),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15),
+ ),
+ datetime(2014, 7, 7, 9, 1): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(-1),
+ BusinessHour(n=-2),
+ BusinessHour(n=-4),
+ BusinessHour(n=-1, end="10:00"),
+ BusinessHour(n=-2, end="4:00"),
+ BusinessHour(n=-4, end="15:00"),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 2, 9),
+ ),
+ datetime(2014, 7, 2, 10): (
+ datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 3, 9),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 7, 9): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 7, 9),
+ ),
+ datetime(2014, 7, 7, 9, 1): (
+ datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 8, 9),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(start="17:00", end="05:00"),
+ BusinessHour(n=3, start="17:00", end="03:00"),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 6, 30, 17),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 4, 17): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 3, 17),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 7, 17, 1): (
+ datetime(2014, 7, 8, 17),
+ datetime(2014, 7, 7, 17),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(-1, start="17:00", end="05:00"),
+ BusinessHour(n=-2, start="17:00", end="03:00"),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 2, 16, 59): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 3, 17),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17),
+ ),
+ datetime(2014, 7, 7, 18): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 8, 17),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(start=["11:15", "15:00"], end=["13:00", "20:00"]),
+ BusinessHour(n=3, start=["11:15", "15:00"], end=["12:00", "20:00"]),
+ BusinessHour(start=["11:15", "15:00"], end=["13:00", "17:00"]),
+ BusinessHour(n=2, start=["11:15", "15:00"], end=["12:00", "03:00"]),
+ BusinessHour(n=3, start=["11:15", "15:00"], end=["13:00", "16:00"]),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 1, 11, 15),
+ datetime(2014, 6, 30, 15),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 15),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 15),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 15),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 15),
+ ),
+ datetime(2014, 7, 2, 10): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 15),
+ ),
+ datetime(2014, 7, 2, 11, 15): (
+ datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 2, 11, 15),
+ ),
+ datetime(2014, 7, 2, 11, 15, 1): (
+ datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 11, 15),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 15),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 4, 11, 15),
+ datetime(2014, 7, 3, 15),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 15),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 15),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 15),
+ ),
+ datetime(2014, 7, 7, 9, 1): (
+ datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 15),
+ ),
+ datetime(2014, 7, 7, 12): (
+ datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 7, 11, 15),
+ ),
+ },
+ ),
+ (
+ [
+ BusinessHour(n=-1, start=["17:00", "08:00"], end=["05:00", "10:00"]),
+ BusinessHour(n=-2, start=["08:00", "17:00"], end=["10:00", "03:00"]),
+ ],
+ {
+ datetime(2014, 7, 1, 11): (
+ datetime(2014, 7, 1, 8),
+ datetime(2014, 7, 1, 17),
+ ),
+ datetime(2014, 7, 1, 18): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 8),
+ ),
+ datetime(2014, 7, 1, 23): (
+ datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 8),
+ ),
+ datetime(2014, 7, 2, 8): (
+ datetime(2014, 7, 2, 8),
+ datetime(2014, 7, 2, 8),
+ ),
+ datetime(2014, 7, 2, 9): (
+ datetime(2014, 7, 2, 8),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 2, 16, 59): (
+ datetime(2014, 7, 2, 8),
+ datetime(2014, 7, 2, 17),
+ ),
+ datetime(2014, 7, 5, 10): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 8),
+ ),
+ datetime(2014, 7, 4, 10): (
+ datetime(2014, 7, 4, 8),
+ datetime(2014, 7, 4, 17),
+ ),
+ datetime(2014, 7, 4, 23): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 8),
+ ),
+ datetime(2014, 7, 6, 10): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 8),
+ ),
+ datetime(2014, 7, 7, 5): (
+ datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 8),
+ ),
+ datetime(2014, 7, 7, 18): (
+ datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 8, 8),
+ ),
+ },
+ ),
+ ]
+
+ @pytest.mark.parametrize("case", opening_time_cases)
+ def test_opening_time(self, case):
+ _offsets, cases = case
+ for offset in _offsets:
+ for dt, (exp_next, exp_prev) in cases.items():
+ assert offset._next_opening_time(dt) == exp_next
+ assert offset._prev_opening_time(dt) == exp_prev
diff --git a/pandas/tests/tseries/offsets/test_week.py b/pandas/tests/tseries/offsets/test_week.py
new file mode 100644
index 0000000000000..54751a70b151d
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_week.py
@@ -0,0 +1,297 @@
+"""
+Tests for offset.Week, offset.WeekofMonth and offset.LastWeekofMonth
+"""
+from datetime import datetime, timedelta
+
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+from pandas._libs.tslibs.offsets import LastWeekOfMonth, Week, WeekOfMonth
+
+from pandas.tests.tseries.offsets.common import (
+ Base,
+ WeekDay,
+ assert_is_on_offset,
+ assert_offset_equal,
+)
+
+
+class TestWeek(Base):
+ _offset = Week
+ d = Timestamp(datetime(2008, 1, 2))
+ offset1 = _offset()
+ offset2 = _offset(2)
+
+ def test_repr(self):
+ assert repr(Week(weekday=0)) == "<Week: weekday=0>"
+ assert repr(Week(n=-1, weekday=0)) == "<-1 * Week: weekday=0>"
+ assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"
+
+ def test_corner(self):
+ with pytest.raises(ValueError, match="Day must be"):
+ Week(weekday=7)
+
+ with pytest.raises(ValueError, match="Day must be"):
+ Week(weekday=-1)
+
+ def test_is_anchored(self):
+ assert Week(weekday=0).is_anchored()
+ assert not Week().is_anchored()
+ assert not Week(2, weekday=2).is_anchored()
+ assert not Week(2).is_anchored()
+
+ offset_cases = []
+ # not business week
+ offset_cases.append(
+ (
+ Week(),
+ {
+ datetime(2008, 1, 1): datetime(2008, 1, 8),
+ datetime(2008, 1, 4): datetime(2008, 1, 11),
+ datetime(2008, 1, 5): datetime(2008, 1, 12),
+ datetime(2008, 1, 6): datetime(2008, 1, 13),
+ datetime(2008, 1, 7): datetime(2008, 1, 14),
+ },
+ )
+ )
+
+ # Mon
+ offset_cases.append(
+ (
+ Week(weekday=0),
+ {
+ datetime(2007, 12, 31): datetime(2008, 1, 7),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 14),
+ },
+ )
+ )
+
+ # n=0 -> roll forward. Mon
+ offset_cases.append(
+ (
+ Week(0, weekday=0),
+ {
+ datetime(2007, 12, 31): datetime(2007, 12, 31),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 7),
+ },
+ )
+ )
+
+ # n=0 -> roll forward. Mon
+ offset_cases.append(
+ (
+ Week(-2, weekday=1),
+ {
+ datetime(2010, 4, 6): datetime(2010, 3, 23),
+ datetime(2010, 4, 8): datetime(2010, 3, 30),
+ datetime(2010, 4, 5): datetime(2010, 3, 23),
+ },
+ )
+ )
+
+ @pytest.mark.parametrize("case", offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in cases.items():
+ assert_offset_equal(offset, base, expected)
+
+ @pytest.mark.parametrize("weekday", range(7))
+ def test_is_on_offset(self, weekday):
+ offset = Week(weekday=weekday)
+
+ for day in range(1, 8):
+ date = datetime(2008, 1, day)
+
+ if day % 7 == weekday:
+ expected = True
+ else:
+ expected = False
+ assert_is_on_offset(offset, date, expected)
+
+
+class TestWeekOfMonth(Base):
+ _offset = WeekOfMonth
+ offset1 = _offset()
+ offset2 = _offset(2)
+
+ def test_constructor(self):
+ with pytest.raises(ValueError, match="^Week"):
+ WeekOfMonth(n=1, week=4, weekday=0)
+
+ with pytest.raises(ValueError, match="^Week"):
+ WeekOfMonth(n=1, week=-1, weekday=0)
+
+ with pytest.raises(ValueError, match="^Day"):
+ WeekOfMonth(n=1, week=0, weekday=-1)
+
+ with pytest.raises(ValueError, match="^Day"):
+ WeekOfMonth(n=1, week=0, weekday=-7)
+
+ def test_repr(self):
+ assert (
+ repr(WeekOfMonth(weekday=1, week=2)) == "<WeekOfMonth: week=2, weekday=1>"
+ )
+
+ def test_offset(self):
+ date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
+ date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month
+ date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month
+ date4 = datetime(2011, 1, 25) # 4th Tuesday of Month
+
+ # see for loop for structure
+ test_cases = [
+ (-2, 2, 1, date1, datetime(2010, 11, 16)),
+ (-2, 2, 1, date2, datetime(2010, 11, 16)),
+ (-2, 2, 1, date3, datetime(2010, 11, 16)),
+ (-2, 2, 1, date4, datetime(2010, 12, 21)),
+ (-1, 2, 1, date1, datetime(2010, 12, 21)),
+ (-1, 2, 1, date2, datetime(2010, 12, 21)),
+ (-1, 2, 1, date3, datetime(2010, 12, 21)),
+ (-1, 2, 1, date4, datetime(2011, 1, 18)),
+ (0, 0, 1, date1, datetime(2011, 1, 4)),
+ (0, 0, 1, date2, datetime(2011, 2, 1)),
+ (0, 0, 1, date3, datetime(2011, 2, 1)),
+ (0, 0, 1, date4, datetime(2011, 2, 1)),
+ (0, 1, 1, date1, datetime(2011, 1, 11)),
+ (0, 1, 1, date2, datetime(2011, 1, 11)),
+ (0, 1, 1, date3, datetime(2011, 2, 8)),
+ (0, 1, 1, date4, datetime(2011, 2, 8)),
+ (0, 0, 1, date1, datetime(2011, 1, 4)),
+ (0, 1, 1, date2, datetime(2011, 1, 11)),
+ (0, 2, 1, date3, datetime(2011, 1, 18)),
+ (0, 3, 1, date4, datetime(2011, 1, 25)),
+ (1, 0, 0, date1, datetime(2011, 2, 7)),
+ (1, 0, 0, date2, datetime(2011, 2, 7)),
+ (1, 0, 0, date3, datetime(2011, 2, 7)),
+ (1, 0, 0, date4, datetime(2011, 2, 7)),
+ (1, 0, 1, date1, datetime(2011, 2, 1)),
+ (1, 0, 1, date2, datetime(2011, 2, 1)),
+ (1, 0, 1, date3, datetime(2011, 2, 1)),
+ (1, 0, 1, date4, datetime(2011, 2, 1)),
+ (1, 0, 2, date1, datetime(2011, 1, 5)),
+ (1, 0, 2, date2, datetime(2011, 2, 2)),
+ (1, 0, 2, date3, datetime(2011, 2, 2)),
+ (1, 0, 2, date4, datetime(2011, 2, 2)),
+ (1, 2, 1, date1, datetime(2011, 1, 18)),
+ (1, 2, 1, date2, datetime(2011, 1, 18)),
+ (1, 2, 1, date3, datetime(2011, 2, 15)),
+ (1, 2, 1, date4, datetime(2011, 2, 15)),
+ (2, 2, 1, date1, datetime(2011, 2, 15)),
+ (2, 2, 1, date2, datetime(2011, 2, 15)),
+ (2, 2, 1, date3, datetime(2011, 3, 15)),
+ (2, 2, 1, date4, datetime(2011, 3, 15)),
+ ]
+
+ for n, week, weekday, dt, expected in test_cases:
+ offset = WeekOfMonth(n, week=week, weekday=weekday)
+ assert_offset_equal(offset, dt, expected)
+
+ # try subtracting
+ result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
+ assert result == datetime(2011, 1, 12)
+
+ result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
+ assert result == datetime(2011, 2, 2)
+
+ on_offset_cases = [
+ (0, 0, datetime(2011, 2, 7), True),
+ (0, 0, datetime(2011, 2, 6), False),
+ (0, 0, datetime(2011, 2, 14), False),
+ (1, 0, datetime(2011, 2, 14), True),
+ (0, 1, datetime(2011, 2, 1), True),
+ (0, 1, datetime(2011, 2, 8), False),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ week, weekday, dt, expected = case
+ offset = WeekOfMonth(week=week, weekday=weekday)
+ assert offset.is_on_offset(dt) == expected
+
+
+class TestLastWeekOfMonth(Base):
+ _offset = LastWeekOfMonth
+ offset1 = _offset()
+ offset2 = _offset(2)
+
+ def test_constructor(self):
+ with pytest.raises(ValueError, match="^N cannot be 0"):
+ LastWeekOfMonth(n=0, weekday=1)
+
+ with pytest.raises(ValueError, match="^Day"):
+ LastWeekOfMonth(n=1, weekday=-1)
+
+ with pytest.raises(ValueError, match="^Day"):
+ LastWeekOfMonth(n=1, weekday=7)
+
+ def test_offset(self):
+ # Saturday
+ last_sat = datetime(2013, 8, 31)
+ next_sat = datetime(2013, 9, 28)
+ offset_sat = LastWeekOfMonth(n=1, weekday=5)
+
+ one_day_before = last_sat + timedelta(days=-1)
+ assert one_day_before + offset_sat == last_sat
+
+ one_day_after = last_sat + timedelta(days=+1)
+ assert one_day_after + offset_sat == next_sat
+
+ # Test On that day
+ assert last_sat + offset_sat == next_sat
+
+ # Thursday
+
+ offset_thur = LastWeekOfMonth(n=1, weekday=3)
+ last_thurs = datetime(2013, 1, 31)
+ next_thurs = datetime(2013, 2, 28)
+
+ one_day_before = last_thurs + timedelta(days=-1)
+ assert one_day_before + offset_thur == last_thurs
+
+ one_day_after = last_thurs + timedelta(days=+1)
+ assert one_day_after + offset_thur == next_thurs
+
+ # Test on that day
+ assert last_thurs + offset_thur == next_thurs
+
+ three_before = last_thurs + timedelta(days=-3)
+ assert three_before + offset_thur == last_thurs
+
+ two_after = last_thurs + timedelta(days=+2)
+ assert two_after + offset_thur == next_thurs
+
+ offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
+ assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)
+
+ on_offset_cases = [
+ (WeekDay.SUN, datetime(2013, 1, 27), True),
+ (WeekDay.SAT, datetime(2013, 3, 30), True),
+ (WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon
+ (WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN
+ (WeekDay.MON, datetime(2013, 2, 25), True),
+ (WeekDay.SAT, datetime(2013, 11, 30), True),
+ (WeekDay.SAT, datetime(2006, 8, 26), True),
+ (WeekDay.SAT, datetime(2007, 8, 25), True),
+ (WeekDay.SAT, datetime(2008, 8, 30), True),
+ (WeekDay.SAT, datetime(2009, 8, 29), True),
+ (WeekDay.SAT, datetime(2010, 8, 28), True),
+ (WeekDay.SAT, datetime(2011, 8, 27), True),
+ (WeekDay.SAT, datetime(2019, 8, 31), True),
+ ]
+
+ @pytest.mark.parametrize("case", on_offset_cases)
+ def test_is_on_offset(self, case):
+ weekday, dt, expected = case
+ offset = LastWeekOfMonth(weekday=weekday)
+ assert offset.is_on_offset(dt) == expected
+
+ def test_repr(self):
+ assert (
+ repr(LastWeekOfMonth(n=2, weekday=1)) == "<2 * LastWeekOfMonths: weekday=1>"
+ )
diff --git a/pandas/tests/tseries/offsets/test_yqm_offsets.py b/pandas/tests/tseries/offsets/test_yqm_offsets.py
index 9921355bdf2ee..260f7368123a4 100644
--- a/pandas/tests/tseries/offsets/test_yqm_offsets.py
+++ b/pandas/tests/tseries/offsets/test_yqm_offsets.py
@@ -7,6 +7,11 @@
import pandas as pd
from pandas import Timestamp
+from pandas.tests.tseries.offsets.common import (
+ Base,
+ assert_is_on_offset,
+ assert_offset_equal,
+)
from pandas.tseries.offsets import (
BMonthBegin,
@@ -23,9 +28,6 @@
YearEnd,
)
-from .common import assert_is_on_offset, assert_offset_equal
-from .test_offsets import Base
-
# --------------------------------------------------------------------
# Misc
| This is to address xref #26807 specifically for pandas/tests/tseries/offsets/test_offsets.py . I tried to get all the new modules below about 1000 lines and break it up in logical ways.
Other than moving code, I have changed some list construction for pytest parameterizing to be a tiny bit more compact by changing a bunch of sequential `append`s to a single list literal. Other than that no changes.
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38924 | 2021-01-03T17:59:12Z | 2021-01-03T23:22:06Z | 2021-01-03T23:22:06Z | 2021-01-05T17:03:38Z |
TST: GH30999 Add placeholder messages to pandas/tests/io/test_sql.py and remove test for numexpr < 2.6.8 | diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py
index 9fc3ed4800d09..8fa11ab75dd67 100644
--- a/pandas/tests/computation/test_compat.py
+++ b/pandas/tests/computation/test_compat.py
@@ -36,14 +36,10 @@ def testit():
if engine == "numexpr":
try:
- import numexpr as ne
+ import numexpr as ne # noqa F401
except ImportError:
pytest.skip("no numexpr")
else:
- if LooseVersion(ne.__version__) < LooseVersion(VERSIONS["numexpr"]):
- with pytest.raises(ImportError):
- testit()
- else:
- testit()
+ testit()
else:
testit()
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 52869f3f2fd42..6fb120faa6db2 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -2896,7 +2896,7 @@ def test_execute_fail(self):
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
- with pytest.raises(Exception):
+ with pytest.raises(Exception, match="<insert message here>"):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self, request, datapath):
@@ -2917,7 +2917,7 @@ def test_execute_closed_connection(self, request, datapath):
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
- with pytest.raises(Exception):
+ with pytest.raises(Exception, match="<insert message here>"):
tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
| This is my attempt to finally finish off #30999
In pandas/tests/io/test_sql.py, there is a whole test class skipped. It looks like xref #20536 is supposed to address that, but no one has commented there since March 2018, so I don't think that's going to be fixed any time soon. I noticed that there were other tests in the same module with `match="<insert message here>"` so I decided to put it in the two tests that I can't figure out the correct error message for.
In pandas/tests/computation/test_compat.py there was an if statement that the numexpr library is at least 2.6.8. I tried to set up an environment with a lower version but conda couldn't resolve the dependencies. That test isn't running in the CI (xref #38876) and that test was last touched in a substantive way in 2016. I think that portion of the test is no longer required.
Not sure that I actually addressed these correctly, but I made an attempt so we could have a conversation about it in a more concrete way.
- [x] xref #30999
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38920 | 2021-01-03T12:22:31Z | 2021-01-03T17:24:50Z | 2021-01-03T17:24:50Z | 2021-01-05T17:01:47Z |
BUG: fix the bad error raised by HDFStore.put() | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 45f8344a1ebe0..5b58ba14a1eff 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -273,6 +273,7 @@ I/O
- Allow custom error values for parse_dates argument of :func:`read_sql`, :func:`read_sql_query` and :func:`read_sql_table` (:issue:`35185`)
- Bug in :func:`to_hdf` raising ``KeyError`` when trying to apply
for subclasses of ``DataFrame`` or ``Series`` (:issue:`33748`).
+- Bug in :meth:`~HDFStore.put` raising a wrong ``TypeError`` when saving a DataFrame with non-string dtype (:issue:`34274`)
- Bug in :func:`json_normalize` resulting in the first element of a generator object not being included in the returned ``DataFrame`` (:issue:`35923`)
- Bug in :func:`read_excel` forward filling :class:`MultiIndex` names with multiple header and index columns specified (:issue:`34673`)
- :func:`pandas.read_excel` now respects :func:``pandas.set_option`` (:issue:`34252`)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index ceaf6e1ac21e5..d2b02038f8b78 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3915,6 +3915,7 @@ def _create_axes(
nan_rep=nan_rep,
encoding=self.encoding,
errors=self.errors,
+ block_columns=b_items,
)
adj_name = _maybe_adjust_name(new_name, self.version)
@@ -4878,7 +4879,14 @@ def _unconvert_index(
def _maybe_convert_for_string_atom(
- name: str, block: "Block", existing_col, min_itemsize, nan_rep, encoding, errors
+ name: str,
+ block: "Block",
+ existing_col,
+ min_itemsize,
+ nan_rep,
+ encoding,
+ errors,
+ block_columns: List[str],
):
if not block.is_object:
return block.values
@@ -4912,14 +4920,20 @@ def _maybe_convert_for_string_atom(
# we cannot serialize this data, so report an exception on a column
# by column basis
- for i in range(len(block.shape[0])):
+
+ # expected behaviour:
+ # search block for a non-string object column by column
+ for i in range(block.shape[0]):
col = block.iget(i)
inferred_type = lib.infer_dtype(col, skipna=False)
if inferred_type != "string":
- iloc = block.mgr_locs.indexer[i]
+ error_column_label = (
+ block_columns[i] if len(block_columns) > i else f"No.{i}"
+ )
raise TypeError(
- f"Cannot serialize the column [{iloc}] because\n"
- f"its data contents are [{inferred_type}] object dtype"
+ f"Cannot serialize the column [{error_column_label}]\n"
+ f"because its data contents are not [string] but "
+ f"[{inferred_type}] object dtype"
)
# itemsize is the maximum length of a string (along any dimension)
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 7e288ec6f5063..3f0fd6e7483f8 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -2055,7 +2055,10 @@ def test_append_raise(self, setup_path):
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
- msg = re.escape("object of type 'int' has no len()")
+ msg = re.escape(
+ """Cannot serialize the column [invalid]
+because its data contents are not [string] but [mixed] object dtype"""
+ )
with pytest.raises(TypeError, match=msg):
store.append("df", df)
@@ -2221,7 +2224,10 @@ def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
- msg = "object of type 'int' has no len()"
+ msg = re.escape(
+ """Cannot serialize the column [datetime1]
+because its data contents are not [string] but [date] object dtype"""
+ )
with pytest.raises(TypeError, match=msg):
store.append("df_unimplemented", df)
| - [x] closes #34274
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
*P.S. something was wrong with `git diff upstream/master` so I directly ran `flake8 ./pandas/io/pytables.py` as it's the only file changed*
------
I was running into the same problem as #34274 and found where the error is. But (as I've just started using pandas for several days:sweet_smile:) I have few knowledge about pandas, so maybe I didn't make the full use of pandas' components.
As seen in #34274, if a `DataFrame` contains non-string elements and is about to be written into an HDF5 file by `HDFStore.put()`, a `TypeError: object of type 'int' has no len()` error is raised.
But it's not the right "error" expected. `HDFStore.put()` can't serialize some types of element, `so report an exception on a column by column basis` is actually needed.
This commit fixes this, now it raises `TypeError: Cannot serialize the column [{column_No}] because its data contents are not string but [{non_string_type}] object dtype` as expected. | https://api.github.com/repos/pandas-dev/pandas/pulls/38919 | 2021-01-03T12:00:19Z | 2021-01-05T02:22:33Z | 2021-01-05T02:22:33Z | 2021-01-05T02:22:38Z |
Remove Python2 numeric relics | diff --git a/asv_bench/benchmarks/arithmetic.py b/asv_bench/benchmarks/arithmetic.py
index 5a3febdcf75e7..7478efbf22609 100644
--- a/asv_bench/benchmarks/arithmetic.py
+++ b/asv_bench/benchmarks/arithmetic.py
@@ -122,8 +122,8 @@ def setup(self, op):
n_rows = 500
# construct dataframe with 2 blocks
- arr1 = np.random.randn(n_rows, int(n_cols / 2)).astype("f8")
- arr2 = np.random.randn(n_rows, int(n_cols / 2)).astype("f4")
+ arr1 = np.random.randn(n_rows, n_cols // 2).astype("f8")
+ arr2 = np.random.randn(n_rows, n_cols // 2).astype("f4")
df = pd.concat(
[pd.DataFrame(arr1), pd.DataFrame(arr2)], axis=1, ignore_index=True
)
@@ -131,9 +131,9 @@ def setup(self, op):
df._consolidate_inplace()
# TODO: GH#33198 the setting here shoudlnt need two steps
- arr1 = np.random.randn(n_rows, int(n_cols / 4)).astype("f8")
- arr2 = np.random.randn(n_rows, int(n_cols / 2)).astype("i8")
- arr3 = np.random.randn(n_rows, int(n_cols / 4)).astype("f8")
+ arr1 = np.random.randn(n_rows, n_cols // 4).astype("f8")
+ arr2 = np.random.randn(n_rows, n_cols // 2).astype("i8")
+ arr3 = np.random.randn(n_rows, n_cols // 4).astype("f8")
df2 = pd.concat(
[pd.DataFrame(arr1), pd.DataFrame(arr2), pd.DataFrame(arr3)],
axis=1,
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 7386b0b903afd..dc6fd2ff61423 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -263,7 +263,7 @@ class Repr:
def setup(self):
nrows = 10000
data = np.random.randn(nrows, 10)
- arrays = np.tile(np.random.randn(3, int(nrows / 100)), 100)
+ arrays = np.tile(np.random.randn(3, nrows // 100), 100)
idx = MultiIndex.from_arrays(arrays)
self.df3 = DataFrame(data, index=idx)
self.df4 = DataFrame(data, index=np.random.randn(nrows))
@@ -648,9 +648,9 @@ class Describe:
def setup(self):
self.df = DataFrame(
{
- "a": np.random.randint(0, 100, int(1e6)),
- "b": np.random.randint(0, 100, int(1e6)),
- "c": np.random.randint(0, 100, int(1e6)),
+ "a": np.random.randint(0, 100, 10 ** 6),
+ "b": np.random.randint(0, 100, 10 ** 6),
+ "c": np.random.randint(0, 100, 10 ** 6),
}
)
diff --git a/asv_bench/benchmarks/hash_functions.py b/asv_bench/benchmarks/hash_functions.py
index 17bf434acf38a..5227ad0f53a04 100644
--- a/asv_bench/benchmarks/hash_functions.py
+++ b/asv_bench/benchmarks/hash_functions.py
@@ -103,9 +103,9 @@ class Float64GroupIndex:
# GH28303
def setup(self):
self.df = pd.date_range(
- start="1/1/2018", end="1/2/2018", periods=1e6
+ start="1/1/2018", end="1/2/2018", periods=10 ** 6
).to_frame()
- self.group_index = np.round(self.df.index.astype(int) / 1e9)
+ self.group_index = np.round(self.df.index.astype(int) / 10 ** 9)
def time_groupby(self):
self.df.groupby(self.group_index).last()
diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py
index 40b064229ae49..e17c985321c47 100644
--- a/asv_bench/benchmarks/inference.py
+++ b/asv_bench/benchmarks/inference.py
@@ -42,7 +42,7 @@ class ToNumericDowncast:
]
N = 500000
- N2 = int(N / 2)
+ N2 = N // 2
data_dict = {
"string-int": ["1"] * N2 + [2] * N2,
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index a572b8a70a680..b0ad43ace88b5 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -158,7 +158,7 @@ def setup(self):
daily_dates = date_index.to_period("D").to_timestamp("S", "S")
self.fracofday = date_index.values - daily_dates.values
self.fracofday = self.fracofday.astype("timedelta64[ns]")
- self.fracofday = self.fracofday.astype(np.float64) / 86400000000000.0
+ self.fracofday = self.fracofday.astype(np.float64) / 86_400_000_000_000
self.fracofday = Series(self.fracofday, daily_dates)
index = date_range(date_index.min(), date_index.max(), freq="D")
self.temp = Series(1.0, index)[self.fracofday.index]
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index ab9c46fd2bf0b..306083e9c22b2 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -171,7 +171,7 @@ class PeakMemFixedWindowMinMax:
params = ["min", "max"]
def setup(self, operation):
- N = int(1e6)
+ N = 10 ** 6
arr = np.random.random(N)
self.roll = pd.Series(arr).rolling(2)
@@ -233,7 +233,7 @@ class GroupbyLargeGroups:
def setup(self):
N = 100000
- self.df = pd.DataFrame({"A": [1, 2] * int(N / 2), "B": np.random.randn(N)})
+ self.df = pd.DataFrame({"A": [1, 2] * (N // 2), "B": np.random.randn(N)})
def time_rolling_multiindex_creation(self):
self.df.groupby("A").rolling(3).mean()
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index b52c8142334be..3f4da8acf4db0 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -284,7 +284,7 @@ def time_dir_strings(self):
class SeriesGetattr:
# https://github.com/pandas-dev/pandas/issues/19764
def setup(self):
- self.s = Series(1, index=date_range("2012-01-01", freq="s", periods=int(1e6)))
+ self.s = Series(1, index=date_range("2012-01-01", freq="s", periods=10 ** 6))
def time_series_datetimeindex_repr(self):
getattr(self.s, "a", None)
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 4ed542b3a28e3..94498e54f0f06 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -346,7 +346,7 @@ def time_iso8601_tz_spaceformat(self):
class ToDatetimeNONISO8601:
def setup(self):
N = 10000
- half = int(N / 2)
+ half = N // 2
ts_string_1 = "March 1, 2018 12:00:00+0400"
ts_string_2 = "March 1, 2018 12:00:00+0500"
self.same_offset = [ts_string_1] * N
@@ -376,7 +376,7 @@ def setup(self):
self.same_offset = ["10/11/2018 00:00:00.045-07:00"] * N
self.diff_offset = [
f"10/11/2018 00:00:00.045-0{offset}:00" for offset in range(10)
- ] * int(N / 10)
+ ] * (N // 10)
def time_exact(self):
to_datetime(self.s2, format="%d%b%y")
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py
index 897f4ab59c370..0591fc6afd633 100644
--- a/pandas/_testing/__init__.py
+++ b/pandas/_testing/__init__.py
@@ -901,7 +901,7 @@ def _create_missing_idx(nrows, ncols, density, random_state=None):
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
- size = int(np.round((1 - density) * nrows * ncols))
+ size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 2e43937ddd0c2..1291fc25fc21d 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -835,7 +835,7 @@ def value_counts(
result = result.sort_values(ascending=ascending)
if normalize:
- result = result / float(counts.sum())
+ result = result / counts.sum()
return result
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 8b350fef27fb1..fe5db3ec5fd8c 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2214,7 +2214,7 @@ def describe(self):
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
- freqs = counts / float(counts.sum())
+ freqs = counts / counts.sum()
from pandas.core.reshape.concat import concat
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 58345aa22eac1..86c8d15a21227 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -572,7 +572,7 @@ def __iter__(self):
data = self.asi8
length = len(self)
chunksize = 10000
- chunks = int(length / chunksize) + 1
+ chunks = (length // chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
@@ -1847,12 +1847,12 @@ def to_julian_date(self):
+ 1_721_118.5
+ (
self.hour
- + self.minute / 60.0
- + self.second / 3600.0
- + self.microsecond / 3600.0 / 1e6
- + self.nanosecond / 3600.0 / 1e9
+ + self.minute / 60
+ + self.second / 3600
+ + self.microsecond / 3600 / 10 ** 6
+ + self.nanosecond / 3600 / 10 ** 9
)
- / 24.0
+ / 24
)
# -----------------------------------------------------------------
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index fa648157d7678..26dbe5e0dba44 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -578,7 +578,7 @@ def density(self):
>>> s.density
0.6
"""
- return float(self.sp_index.npoints) / float(self.sp_index.length)
+ return self.sp_index.npoints / self.sp_index.length
@property
def npoints(self) -> int:
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 8410f3d491891..55136e0dedcf5 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -338,7 +338,7 @@ def __iter__(self):
data = self.asi8
length = len(self)
chunksize = 10000
- chunks = int(length / chunksize) + 1
+ chunks = (length // chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index eee5f72a05738..fe86bf3f582ca 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5336,7 +5336,7 @@ def sample(
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
- n = int(round(frac * axis_length))
+ n = round(frac * axis_length)
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 69f11484237a3..56e171e1a5db1 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -122,7 +122,7 @@ def should_cache(
return False
if len(arg) <= 5000:
- check_count = int(len(arg) * 0.1)
+ check_count = len(arg) // 10
else:
check_count = 500
else:
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 85118549300ca..983f7220c2fb9 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -61,7 +61,7 @@ def get_center_of_mass(
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
- comass = (span - 1) / 2.0
+ comass = (span - 1) / 2
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
@@ -70,7 +70,7 @@ def get_center_of_mass(
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
- comass = (1.0 - alpha) / alpha
+ comass = (1 - alpha) / alpha
else:
raise ValueError("Must pass one of comass, span, halflife, or alpha")
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index b23b5fe5b34a8..b43dde7d2a053 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -856,7 +856,7 @@ def _value_with_fmt(self, val):
elif isinstance(val, datetime.date):
fmt = self.date_format
elif isinstance(val, datetime.timedelta):
- val = val.total_seconds() / float(86400)
+ val = val.total_seconds() / 86400
fmt = "0"
else:
val = str(val)
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 5ad06bdcd8383..65c51c78383a9 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -282,7 +282,7 @@ def _generate_multiindex_header_rows(self) -> Iterator[List[Label]]:
def _save_body(self) -> None:
nrows = len(self.data_index)
- chunks = int(nrows / self.chunksize) + 1
+ chunks = (nrows // self.chunksize) + 1
for i in range(chunks):
start_i = i * self.chunksize
end_i = min(start_i + self.chunksize, nrows)
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index b3c2411304f6b..8265d5ef8f94b 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1637,7 +1637,7 @@ def is_dates_only(
values_int = values.asi8
consider_values = values_int != iNaT
- one_day_nanos = 86400 * 1e9
+ one_day_nanos = 86400 * 10 ** 9
even_days = (
np.logical_and(consider_values, values_int % int(one_day_nanos) != 0).sum() == 0
)
@@ -1741,7 +1741,7 @@ def get_format_timedelta64(
consider_values = values_int != iNaT
- one_day_nanos = 86400 * 1e9
+ one_day_nanos = 86400 * 10 ** 9
even_days = (
np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
)
diff --git a/pandas/io/formats/string.py b/pandas/io/formats/string.py
index 4ebb78f29c739..1fe2ed9806535 100644
--- a/pandas/io/formats/string.py
+++ b/pandas/io/formats/string.py
@@ -160,7 +160,7 @@ def _fit_strcols_to_terminal_width(self, strcols: List[List[str]]) -> str:
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
- mid = int(round(n_cols / 2.0))
+ mid = round(n_cols / 2)
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
# adjoin adds one
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index aa4bcd8b1565a..bbc5e6ad82493 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -905,7 +905,7 @@ def insert(self, chunksize: Optional[int] = None, method: Optional[str] = None):
elif chunksize == 0:
raise ValueError("chunksize argument should be non-zero")
- chunks = int(nrows / chunksize) + 1
+ chunks = (nrows // chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 38789fffed8a0..978010efd7ee5 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -38,7 +38,7 @@
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
-MUSEC_PER_DAY = 1e6 * SEC_PER_DAY
+MUSEC_PER_DAY = 10 ** 6 * SEC_PER_DAY
_mpl_units = {} # Cache for units overwritten by us
@@ -116,7 +116,7 @@ def deregister():
def _to_ordinalf(tm: pydt.time) -> float:
- tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + float(tm.microsecond / 1e6)
+ tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + tm.microsecond / 10 ** 6
return tot_sec
@@ -182,7 +182,7 @@ def __call__(self, x, pos=0) -> str:
"""
fmt = "%H:%M:%S.%f"
s = int(x)
- msus = int(round((x - s) * 1e6))
+ msus = round((x - s) * 10 ** 6)
ms = msus // 1000
us = msus % 1000
m, s = divmod(s, 60)
@@ -429,7 +429,7 @@ def _from_ordinal(x, tz: Optional[tzinfo] = None) -> datetime:
hour, remainder = divmod(24 * remainder, 1)
minute, remainder = divmod(60 * remainder, 1)
second, remainder = divmod(60 * remainder, 1)
- microsecond = int(1e6 * remainder)
+ microsecond = int(1_000_000 * remainder)
if microsecond < 10:
microsecond = 0 # compensate for rounding errors
dt = datetime(
@@ -439,7 +439,7 @@ def _from_ordinal(x, tz: Optional[tzinfo] = None) -> datetime:
dt = dt.astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
- dt += timedelta(microseconds=1e6 - microsecond)
+ dt += timedelta(microseconds=1_000_000 - microsecond)
return dt
@@ -611,27 +611,27 @@ def _second_finder(label_interval):
info_fmt[day_start] = "%H:%M:%S\n%d-%b"
info_fmt[year_start] = "%H:%M:%S\n%d-%b\n%Y"
- if span < periodsperday / 12000.0:
+ if span < periodsperday / 12000:
_second_finder(1)
- elif span < periodsperday / 6000.0:
+ elif span < periodsperday / 6000:
_second_finder(2)
- elif span < periodsperday / 2400.0:
+ elif span < periodsperday / 2400:
_second_finder(5)
- elif span < periodsperday / 1200.0:
+ elif span < periodsperday / 1200:
_second_finder(10)
- elif span < periodsperday / 800.0:
+ elif span < periodsperday / 800:
_second_finder(15)
- elif span < periodsperday / 400.0:
+ elif span < periodsperday / 400:
_second_finder(30)
- elif span < periodsperday / 150.0:
+ elif span < periodsperday / 150:
_minute_finder(1)
- elif span < periodsperday / 70.0:
+ elif span < periodsperday / 70:
_minute_finder(2)
- elif span < periodsperday / 24.0:
+ elif span < periodsperday / 24:
_minute_finder(5)
- elif span < periodsperday / 12.0:
+ elif span < periodsperday / 12:
_minute_finder(15)
- elif span < periodsperday / 6.0:
+ elif span < periodsperday / 6:
_minute_finder(30)
elif span < periodsperday / 2.5:
_hour_finder(1, False)
@@ -1058,7 +1058,7 @@ def format_timedelta_ticks(x, pos, n_decimals: int) -> str:
"""
Convert seconds to 'D days HH:MM:SS.F'
"""
- s, ns = divmod(x, 1e9)
+ s, ns = divmod(x, 10 ** 9)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
@@ -1072,7 +1072,7 @@ def format_timedelta_ticks(x, pos, n_decimals: int) -> str:
def __call__(self, x, pos=0) -> str:
(vmin, vmax) = tuple(self.axis.get_view_interval())
- n_decimals = int(np.ceil(np.log10(100 * 1e9 / abs(vmax - vmin))))
+ n_decimals = int(np.ceil(np.log10(100 * 10 ** 9 / abs(vmax - vmin))))
if n_decimals > 9:
n_decimals = 9
return self.format_timedelta_ticks(x, pos, n_decimals)
diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py
index f519d1e96f5b0..c564e6ed39f7d 100644
--- a/pandas/plotting/_matplotlib/misc.py
+++ b/pandas/plotting/_matplotlib/misc.py
@@ -55,7 +55,7 @@ def scatter_matrix(
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
- rdelta_ext = (rmax_ - rmin_) * range_padding / 2.0
+ rdelta_ext = (rmax_ - rmin_) * range_padding / 2
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in enumerate(df.columns):
@@ -158,10 +158,7 @@ def normalize(series):
m = len(frame.columns) - 1
s = np.array(
- [
- (np.cos(t), np.sin(t))
- for t in [2.0 * np.pi * (i / float(m)) for i in range(m)]
- ]
+ [(np.cos(t), np.sin(t)) for t in [2 * np.pi * (i / m) for i in range(m)]]
)
for i in range(n):
@@ -447,10 +444,10 @@ def autocorrelation_plot(
ax.set_xlim(1, n)
ax.set_ylim(-1.0, 1.0)
mean = np.mean(data)
- c0 = np.sum((data - mean) ** 2) / float(n)
+ c0 = np.sum((data - mean) ** 2) / n
def r(h):
- return ((data[: n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
+ return ((data[: n - h] - mean) * (data[h:] - mean)).sum() / n / c0
x = np.arange(n) + 1
y = [r(loc) for loc in x]
diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py
index 0af2f70f896be..f288e6ebb783c 100644
--- a/pandas/plotting/_matplotlib/tools.py
+++ b/pandas/plotting/_matplotlib/tools.py
@@ -61,12 +61,10 @@ def _get_layout(nplots: int, layout=None, layout_type: str = "box") -> Tuple[int
nrows, ncols = layout
- # Python 2 compat
- ceil_ = lambda x: int(ceil(x))
if nrows == -1 and ncols > 0:
- layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols)
+ layout = nrows, ncols = (ceil(nplots / ncols), ncols)
elif ncols == -1 and nrows > 0:
- layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows))
+ layout = nrows, ncols = (nrows, ceil(nplots / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 6a18810700205..199c521cfc81b 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -305,14 +305,14 @@ def test_truncate_out_of_bounds(self):
# GH11382
# small
- shape = [int(2e3)] + ([1] * (self._ndim - 1))
+ shape = [2000] + ([1] * (self._ndim - 1))
small = self._construct(shape, dtype="int8", value=1)
self._compare(small.truncate(), small)
self._compare(small.truncate(before=0, after=3e3), small)
self._compare(small.truncate(before=-1, after=2e3), small)
# big
- shape = [int(2e6)] + ([1] * (self._ndim - 1))
+ shape = [2_000_000] + ([1] * (self._ndim - 1))
big = self._construct(shape, dtype="int8", value=1)
self._compare(big.truncate(), big)
self._compare(big.truncate(before=0, after=3e6), big)
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 197738330efe1..c930acd179330 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -727,7 +727,7 @@ def test_to_excel_timedelta(self, path):
df["new"] = df["A"].apply(lambda x: timedelta(seconds=x))
expected["new"] = expected["A"].apply(
- lambda x: timedelta(seconds=x).total_seconds() / float(86400)
+ lambda x: timedelta(seconds=x).total_seconds() / 86400
)
df.to_excel(path, "test1")
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 3f4c21389daed..7e288ec6f5063 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -3164,7 +3164,7 @@ def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
- chunksize = int(1e4)
+ chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 66a4f9598c49b..e3fd404ec1906 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1075,7 +1075,7 @@ def test_time_musec(self):
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
- us = int(round((t - int(t)) * 1e6))
+ us = round((t - int(t)) * 1e6)
h, m = divmod(m, 60)
rs = l.get_text()
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index b707757574ecd..9cd13b2312ea7 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -341,7 +341,7 @@ def test_pie_series(self):
ax = _check_plot_works(
series.plot.pie, colors=color_args, autopct="%.2f", fontsize=7
)
- pcts = [f"{s*100:.2f}" for s in series.values / float(series.sum())]
+ pcts = [f"{s*100:.2f}" for s in series.values / series.sum()]
expected_texts = list(chain.from_iterable(zip(series.index, pcts)))
self._check_text_labels(ax.texts, expected_texts)
for t in ax.texts:
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index 8ec8f1e0457fb..7aefd42ada322 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -393,9 +393,9 @@ def test_td_div_td64_non_nano(self):
# truediv
td = Timedelta("1 days 2 hours 3 ns")
result = td / np.timedelta64(1, "D")
- assert result == td.value / float(86400 * 1e9)
+ assert result == td.value / (86400 * 10 ** 9)
result = td / np.timedelta64(1, "s")
- assert result == td.value / float(1e9)
+ assert result == td.value / 10 ** 9
result = td / np.timedelta64(1, "ns")
assert result == td.value
@@ -416,7 +416,7 @@ def test_td_div_numeric_scalar(self):
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
- result = td / 5.0
+ result = td / 5
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
index f94e174a26824..3fa411b421015 100644
--- a/pandas/tests/series/methods/test_fillna.py
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -231,7 +231,7 @@ def test_timedelta_fillna(self, frame_or_series):
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
- result = obj.fillna(np.timedelta64(int(1e9)))
+ result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py
index c0196549cee33..de3ff6e80ad66 100644
--- a/pandas/tests/tools/test_to_timedelta.py
+++ b/pandas/tests/tools/test_to_timedelta.py
@@ -184,7 +184,7 @@ def test_to_timedelta_float(self):
# https://github.com/pandas-dev/pandas/issues/25077
arr = np.arange(0, 1, 1e-6)[-10:]
result = pd.to_timedelta(arr, unit="s")
- expected_asi8 = np.arange(999990000, int(1e9), 1000, dtype="int64")
+ expected_asi8 = np.arange(999990000, 10 ** 9, 1000, dtype="int64")
tm.assert_numpy_array_equal(result.asi8, expected_asi8)
def test_to_timedelta_coerce_strings_unit(self):
diff --git a/pandas/tests/window/moments/test_moments_rolling_quantile.py b/pandas/tests/window/moments/test_moments_rolling_quantile.py
index 1b6d4a5c82164..e06a5faabe310 100644
--- a/pandas/tests/window/moments/test_moments_rolling_quantile.py
+++ b/pandas/tests/window/moments/test_moments_rolling_quantile.py
@@ -18,8 +18,8 @@ def scoreatpercentile(a, per):
retval = values[-1]
else:
- qlow = float(idx) / float(values.shape[0] - 1)
- qhig = float(idx + 1) / float(values.shape[0] - 1)
+ qlow = idx / (values.shape[0] - 1)
+ qhig = (idx + 1) / (values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Modernize the code to Python 3 by removing Python 2 numeric hacks, such as:
- `a / b` of two integers now always returns a float, so no need to convert `a` and/or `b` to float (or to use float literals)
- `a // b` of two integers returns an integer, use it instead of `int(a / b)`
- `math.ceil`, `math.floor`, and `round` return integer, so no need to convert the result to integer
- `1e6` is a float, converting it to `int(1e6)` is slower than `10 ** 6` (or `1_000_000`)
| https://api.github.com/repos/pandas-dev/pandas/pulls/38916 | 2021-01-03T05:52:11Z | 2021-01-03T17:09:49Z | 2021-01-03T17:09:49Z | 2021-01-05T07:10:41Z |
REF: de-duplicate code in libparsing/libperiod | diff --git a/pandas/_libs/tslibs/parsing.pxd b/pandas/_libs/tslibs/parsing.pxd
index 9c9262beaafad..25667f00e42b5 100644
--- a/pandas/_libs/tslibs/parsing.pxd
+++ b/pandas/_libs/tslibs/parsing.pxd
@@ -1,2 +1,3 @@
cpdef str get_rule_month(str source)
+cpdef quarter_to_myear(int year, int quarter, str freq)
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index aeb1be121bc9e..5c3417ee2d93c 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -378,7 +378,7 @@ cpdef bint _does_string_look_like_datetime(str py_string):
cdef inline object _parse_dateabbr_string(object date_string, datetime default,
- object freq):
+ str freq=None):
cdef:
object ret
# year initialized to prevent compiler warnings
@@ -438,21 +438,13 @@ cdef inline object _parse_dateabbr_string(object date_string, datetime default,
f'quarter must be '
f'between 1 and 4: {date_string}')
- if freq is not None:
- # TODO: hack attack, #1228
- freq = getattr(freq, "freqstr", freq)
- try:
- mnum = c_MONTH_NUMBERS[get_rule_month(freq)] + 1
- except (KeyError, ValueError):
- raise DateParseError(f'Unable to retrieve month '
- f'information from given '
- f'freq: {freq}')
-
- month = (mnum + (quarter - 1) * 3) % 12 + 1
- if month > mnum:
- year -= 1
- else:
- month = (quarter - 1) * 3 + 1
+ try:
+ # GH#1228
+ year, month = quarter_to_myear(year, quarter, freq)
+ except KeyError:
+ raise DateParseError("Unable to retrieve month "
+ "information from given "
+ f"freq: {freq}")
ret = default.replace(year=year, month=month)
return ret, 'quarter'
@@ -482,6 +474,41 @@ cdef inline object _parse_dateabbr_string(object date_string, datetime default,
raise ValueError(f'Unable to parse {date_string}')
+cpdef quarter_to_myear(int year, int quarter, str freq):
+ """
+ A quarterly frequency defines a "year" which may not coincide with
+ the calendar-year. Find the calendar-year and calendar-month associated
+ with the given year and quarter under the `freq`-derived calendar.
+
+ Parameters
+ ----------
+ year : int
+ quarter : int
+ freq : str or None
+
+ Returns
+ -------
+ year : int
+ month : int
+
+ See Also
+ --------
+ Period.qyear
+ """
+ if quarter <= 0 or quarter > 4:
+ raise ValueError("Quarter must be 1 <= q <= 4")
+
+ if freq is not None:
+ mnum = c_MONTH_NUMBERS[get_rule_month(freq)] + 1
+ month = (mnum + (quarter - 1) * 3) % 12 + 1
+ if month > mnum:
+ year -= 1
+ else:
+ month = (quarter - 1) * 3 + 1
+
+ return year, month
+
+
cdef dateutil_parse(
str timestr,
object default,
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index cbd4e2e6704a9..f0d21a3a7a957 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -74,8 +74,7 @@ from pandas._libs.tslibs.dtypes cimport (
PeriodDtypeBase,
attrname_to_abbrevs,
)
-from pandas._libs.tslibs.parsing cimport get_rule_month
-
+from pandas._libs.tslibs.parsing cimport quarter_to_myear
from pandas._libs.tslibs.parsing import parse_time_string
from pandas._libs.tslibs.nattype cimport (
@@ -2461,40 +2460,6 @@ cdef int64_t _ordinal_from_fields(int year, int month, quarter, int day,
minute, second, 0, 0, base)
-def quarter_to_myear(year: int, quarter: int, freqstr: str):
- """
- A quarterly frequency defines a "year" which may not coincide with
- the calendar-year. Find the calendar-year and calendar-month associated
- with the given year and quarter under the `freq`-derived calendar.
-
- Parameters
- ----------
- year : int
- quarter : int
- freqstr : str
- Equivalent to freq.freqstr
-
- Returns
- -------
- year : int
- month : int
-
- See Also
- --------
- Period.qyear
- """
- if quarter <= 0 or quarter > 4:
- raise ValueError('Quarter must be 1 <= q <= 4')
-
- mnum = c_MONTH_NUMBERS[get_rule_month(freqstr)] + 1
- month = (mnum + (quarter - 1) * 3) % 12 + 1
- if month > mnum:
- year -= 1
-
- return year, month
- # TODO: This whole func is really similar to parsing.pyx L434-L450
-
-
def validate_end_alias(how):
how_dict = {'S': 'S', 'E': 'E',
'START': 'S', 'FINISH': 'E',
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index e0e40a666896d..94d36aef8da52 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -12,6 +12,7 @@
delta_to_nanoseconds,
dt64arr_to_periodarr as c_dt64arr_to_periodarr,
iNaT,
+ parsing,
period as libperiod,
to_offset,
)
@@ -1074,7 +1075,7 @@ def _range_from_fields(
freqstr = freq.freqstr
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
- y, m = libperiod.quarter_to_myear(y, q, freqstr)
+ y, m = parsing.quarter_to_myear(y, q, freqstr)
val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38915 | 2021-01-03T05:19:22Z | 2021-01-04T00:10:20Z | 2021-01-04T00:10:20Z | 2021-01-04T01:18:49Z |
CLN: re-use sanitize_index | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 7f2039c998f53..f1217e97aef5d 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -97,6 +97,7 @@
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import check_bool_indexer
from pandas.core.internals import SingleBlockManager
+from pandas.core.internals.construction import sanitize_index
from pandas.core.shared_docs import _shared_docs
from pandas.core.sorting import ensure_key_mapped, nargsort
from pandas.core.strings import StringMethods
@@ -319,17 +320,7 @@ def __init__(
data = [data]
index = ibase.default_index(len(data))
elif is_list_like(data):
-
- # a scalar numpy array is list-like but doesn't
- # have a proper length
- try:
- if len(index) != len(data):
- raise ValueError(
- f"Length of passed values is {len(data)}, "
- f"index implies {len(index)}."
- )
- except TypeError:
- pass
+ sanitize_index(data, index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py
index 5c9e5dcf3ae24..9dbfd2a5589c0 100644
--- a/pandas/tests/extension/base/constructors.py
+++ b/pandas/tests/extension/base/constructors.py
@@ -74,7 +74,7 @@ def test_dataframe_from_series(self, data):
assert isinstance(result._mgr.blocks[0], ExtensionBlock)
def test_series_given_mismatched_index_raises(self, data):
- msg = "Length of passed values is 3, index implies 5"
+ msg = r"Length of values \(3\) does not match length of index \(5\)"
with pytest.raises(ValueError, match=msg):
pd.Series(data[:3], index=[0, 1, 2, 3, 4])
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index c7bd38bbd00b9..e35f37944e7da 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -576,7 +576,7 @@ def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
- msg = "Length of passed values is 3, index implies 4"
+ msg = r"Length of values \(3\) does not match length of index \(4\)"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
@@ -592,7 +592,7 @@ def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
- msg = "Length of passed values is 1, index implies 3"
+ msg = r"Length of values \(1\) does not match length of index \(3\)"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38912 | 2021-01-03T01:02:09Z | 2021-01-03T17:11:39Z | 2021-01-03T17:11:39Z | 2021-01-03T18:05:17Z |
DOC: how to revert MultiIndex.to_flat_index | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index c80dadcc42022..ad10b41093f27 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1787,6 +1787,10 @@ def to_flat_index(self):
pd.Index
Index with the MultiIndex data represented in Tuples.
+ See Also
+ --------
+ MultiIndex.from_tuples : Convert flat index back to MultiIndex.
+
Notes
-----
This method will simply return the caller if called by anything other
| Took me way too long to figure this out. Hopefully this benefits someone else! | https://api.github.com/repos/pandas-dev/pandas/pulls/38911 | 2021-01-02T23:28:30Z | 2021-01-04T01:12:07Z | 2021-01-04T01:12:07Z | 2021-01-04T01:12:59Z |
REF: simplify Index.__new__ | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index b0c89000a53a9..2db803e5c1b19 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -6,6 +6,7 @@
TYPE_CHECKING,
Any,
Callable,
+ Dict,
FrozenSet,
Hashable,
List,
@@ -131,6 +132,11 @@
_Identity = NewType("_Identity", object)
+def disallow_kwargs(kwargs: Dict[str, Any]):
+ if kwargs:
+ raise TypeError(f"Unexpected keyword arguments {repr(set(kwargs))}")
+
+
def _new_Index(cls, d):
"""
This is called upon unpickling, rather than the default which doesn't
@@ -296,13 +302,19 @@ def __new__(
return result.astype(dtype, copy=False)
return result
- if is_ea_or_datetimelike_dtype(dtype):
+ elif is_ea_or_datetimelike_dtype(dtype):
# non-EA dtype indexes have special casting logic, so we punt here
klass = cls._dtype_to_subclass(dtype)
if klass is not Index:
return klass(data, dtype=dtype, copy=copy, name=name, **kwargs)
- if is_ea_or_datetimelike_dtype(data_dtype):
+ ea_cls = dtype.construct_array_type()
+ data = ea_cls._from_sequence(data, dtype=dtype, copy=copy)
+ data = np.asarray(data, dtype=object)
+ disallow_kwargs(kwargs)
+ return Index._simple_new(data, name=name)
+
+ elif is_ea_or_datetimelike_dtype(data_dtype):
klass = cls._dtype_to_subclass(data_dtype)
if klass is not Index:
result = klass(data, copy=copy, name=name, **kwargs)
@@ -310,18 +322,9 @@ def __new__(
return result.astype(dtype, copy=False)
return result
- # extension dtype
- if is_extension_array_dtype(data_dtype) or is_extension_array_dtype(dtype):
- if not (dtype is None or is_object_dtype(dtype)):
- # coerce to the provided dtype
- ea_cls = dtype.construct_array_type()
- data = ea_cls._from_sequence(data, dtype=dtype, copy=False)
- else:
- data = np.asarray(data, dtype=object)
-
- # coerce to the object dtype
- data = data.astype(object)
- return Index(data, dtype=object, copy=copy, name=name, **kwargs)
+ data = np.array(data, dtype=object, copy=copy)
+ disallow_kwargs(kwargs)
+ return Index._simple_new(data, name=name)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
@@ -333,7 +336,7 @@ def __new__(
# should not be coerced
# GH 11836
data = _maybe_cast_with_dtype(data, dtype, copy)
- dtype = data.dtype # TODO: maybe not for object?
+ dtype = data.dtype
if data.dtype.kind in ["i", "u", "f"]:
# maybe coerce to a sub-class
@@ -342,16 +345,15 @@ def __new__(
arr = com.asarray_tuplesafe(data, dtype=object)
if dtype is None:
- new_data = _maybe_cast_data_without_dtype(arr)
- new_dtype = new_data.dtype
- return cls(
- new_data, dtype=new_dtype, copy=copy, name=name, **kwargs
- )
+ arr = _maybe_cast_data_without_dtype(arr)
+ dtype = arr.dtype
+
+ if kwargs:
+ return cls(arr, dtype, copy=copy, name=name, **kwargs)
klass = cls._dtype_to_subclass(arr.dtype)
arr = klass._ensure_array(arr, dtype, copy)
- if kwargs:
- raise TypeError(f"Unexpected keyword arguments {repr(set(kwargs))}")
+ disallow_kwargs(kwargs)
return klass._simple_new(arr, name)
elif is_scalar(data):
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 249e9707be328..7d214829b1871 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -623,7 +623,7 @@ def _convert_arr_indexer(self, keyarr):
return com.asarray_tuplesafe(keyarr)
-class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, Int64Index):
+class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin):
"""
Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
but not PeriodIndex
@@ -816,11 +816,7 @@ def _union(self, other, sort):
i8self = Int64Index._simple_new(self.asi8)
i8other = Int64Index._simple_new(other.asi8)
i8result = i8self._union(i8other, sort=sort)
- # pandas\core\indexes\datetimelike.py:887: error: Unexpected
- # keyword argument "freq" for "DatetimeTimedeltaMixin" [call-arg]
- result = type(self)(
- i8result, dtype=self.dtype, freq="infer" # type: ignore[call-arg]
- )
+ result = type(self)(i8result, dtype=self.dtype, freq="infer")
return result
# --------------------------------------------------------------------
| Avoid recursing where possible. | https://api.github.com/repos/pandas-dev/pandas/pulls/38910 | 2021-01-02T23:10:19Z | 2021-01-03T17:03:03Z | 2021-01-03T17:03:03Z | 2021-01-03T18:04:46Z |
BUG: Fixed regression in rolling.skew and rolling.kurt modifying object | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index b1f8389420cd9..c07d82432f45e 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -37,7 +37,7 @@ Fixed regressions
- Fixed regression in :meth:`.GroupBy.sem` where the presence of non-numeric columns would cause an error instead of being dropped (:issue:`38774`)
- Fixed regression in :func:`read_excel` with non-rawbyte file handles (:issue:`38788`)
- Bug in :meth:`read_csv` with ``float_precision="high"`` caused segfault or wrong parsing of long exponent strings. This resulted in a regression in some cases as the default for ``float_precision`` was changed in pandas 1.2.0 (:issue:`38753`)
--
+- Fixed regression in :meth:`Rolling.skew` and :meth:`Rolling.kurt` modifying the object inplace (:issue:`38908`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index c21e71c407630..3c00ff092422b 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -495,7 +495,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
float64_t x = 0, xx = 0, xxx = 0
int64_t nobs = 0, i, j, N = len(values), nobs_mean = 0
int64_t s, e
- ndarray[float64_t] output, mean_array
+ ndarray[float64_t] output, mean_array, values_copy
bint is_monotonic_increasing_bounds
minp = max(minp, 3)
@@ -504,10 +504,11 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
)
output = np.empty(N, dtype=float)
min_val = np.nanmin(values)
+ values_copy = np.copy(values)
with nogil:
for i in range(0, N):
- val = values[i]
+ val = values_copy[i]
if notnan(val):
nobs_mean += 1
sum_val += val
@@ -516,7 +517,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
if min_val - mean_val > -1e5:
mean_val = round(mean_val)
for i in range(0, N):
- values[i] = values[i] - mean_val
+ values_copy[i] = values_copy[i] - mean_val
for i in range(0, N):
@@ -528,7 +529,7 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
if i == 0 or not is_monotonic_increasing_bounds:
for j in range(s, e):
- val = values[j]
+ val = values_copy[j]
add_skew(val, &nobs, &x, &xx, &xxx, &compensation_x_add,
&compensation_xx_add, &compensation_xxx_add)
@@ -538,13 +539,13 @@ def roll_skew(ndarray[float64_t] values, ndarray[int64_t] start,
# and removed
# calculate deletes
for j in range(start[i - 1], s):
- val = values[j]
+ val = values_copy[j]
remove_skew(val, &nobs, &x, &xx, &xxx, &compensation_x_remove,
&compensation_xx_remove, &compensation_xxx_remove)
# calculate adds
for j in range(end[i - 1], e):
- val = values[j]
+ val = values_copy[j]
add_skew(val, &nobs, &x, &xx, &xxx, &compensation_x_add,
&compensation_xx_add, &compensation_xxx_add)
@@ -675,7 +676,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
float64_t compensation_x_remove = 0, compensation_x_add = 0
float64_t x = 0, xx = 0, xxx = 0, xxxx = 0
int64_t nobs = 0, i, j, s, e, N = len(values), nobs_mean = 0
- ndarray[float64_t] output
+ ndarray[float64_t] output, values_copy
bint is_monotonic_increasing_bounds
minp = max(minp, 4)
@@ -683,11 +684,12 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
start, end
)
output = np.empty(N, dtype=float)
+ values_copy = np.copy(values)
min_val = np.nanmin(values)
with nogil:
for i in range(0, N):
- val = values[i]
+ val = values_copy[i]
if notnan(val):
nobs_mean += 1
sum_val += val
@@ -696,7 +698,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
if min_val - mean_val > -1e4:
mean_val = round(mean_val)
for i in range(0, N):
- values[i] = values[i] - mean_val
+ values_copy[i] = values_copy[i] - mean_val
for i in range(0, N):
@@ -708,7 +710,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
if i == 0 or not is_monotonic_increasing_bounds:
for j in range(s, e):
- add_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx,
+ add_kurt(values_copy[j], &nobs, &x, &xx, &xxx, &xxxx,
&compensation_x_add, &compensation_xx_add,
&compensation_xxx_add, &compensation_xxxx_add)
@@ -718,13 +720,13 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start,
# and removed
# calculate deletes
for j in range(start[i - 1], s):
- remove_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx,
+ remove_kurt(values_copy[j], &nobs, &x, &xx, &xxx, &xxxx,
&compensation_x_remove, &compensation_xx_remove,
&compensation_xxx_remove, &compensation_xxxx_remove)
# calculate adds
for j in range(end[i - 1], e):
- add_kurt(values[j], &nobs, &x, &xx, &xxx, &xxxx,
+ add_kurt(values_copy[j], &nobs, &x, &xx, &xxx, &xxxx,
&compensation_x_add, &compensation_xx_add,
&compensation_xxx_add, &compensation_xxxx_add)
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 84056299093cf..b275b64ff706b 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -1102,11 +1102,13 @@ def test_groupby_rolling_nan_included():
@pytest.mark.parametrize("method", ["skew", "kurt"])
def test_rolling_skew_kurt_numerical_stability(method):
- # GH: 6929
- s = Series(np.random.rand(10))
- expected = getattr(s.rolling(3), method)()
- s = s + 50000
- result = getattr(s.rolling(3), method)()
+ # GH#6929
+ ser = Series(np.random.rand(10))
+ ser_copy = ser.copy()
+ expected = getattr(ser.rolling(3), method)()
+ tm.assert_series_equal(ser, ser_copy)
+ ser = ser + 50000
+ result = getattr(ser.rolling(3), method)()
tm.assert_series_equal(result, expected)
| - [x] closes #38908
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Alternative would be np.copy before releasing gil. In this case we would have to touch values_copy twice in case of ``min_val - mean_val > -1e5`` | https://api.github.com/repos/pandas-dev/pandas/pulls/38909 | 2021-01-02T23:07:19Z | 2021-01-04T13:37:14Z | 2021-01-04T13:37:14Z | 2021-01-04T14:04:50Z |
BUG: casting on concat with empties | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 5e84947cd42f1..b4a31635687e1 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -294,7 +294,7 @@ Groupby/resample/rolling
Reshaping
^^^^^^^^^
- Bug in :meth:`DataFrame.unstack` with missing levels led to incorrect index names (:issue:`37510`)
-- Bug in :func:`concat` incorrectly casting to ``object`` dtype in some cases when one or more of the operands is empty (:issue:`38843`)
+- Bug in :func:`concat` incorrectly casting to ``object`` dtype in some cases when one or more of the operands is empty (:issue:`38843`, :issue:`38907`)
-
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 013e52248f5c4..a45933714ee96 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -318,6 +318,12 @@ def _concatenate_join_units(
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
+ nonempties = [
+ x for x in join_units if x.block is None or x.block.shape[concat_axis] > 0
+ ]
+ if nonempties:
+ join_units = nonempties
+
empty_dtype, upcasted_na = _get_empty_dtype_and_na(join_units)
to_concat = [
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index d8dd08ea13341..f2d628c70ae62 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -154,7 +154,8 @@ def test_partial_setting_mixed_dtype(self):
# columns will align
df = DataFrame(columns=["A", "B"])
df.loc[0] = Series(1, index=range(4))
- tm.assert_frame_equal(df, DataFrame(columns=["A", "B"], index=[0]))
+ expected = DataFrame(columns=["A", "B"], index=[0], dtype=int)
+ tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=["A", "B"])
diff --git a/pandas/tests/reshape/concat/test_append.py b/pandas/tests/reshape/concat/test_append.py
index ffeda703cd890..1a895aee98f0a 100644
--- a/pandas/tests/reshape/concat/test_append.py
+++ b/pandas/tests/reshape/concat/test_append.py
@@ -82,6 +82,7 @@ def test_append_length0_frame(self, sort):
df5 = df.append(df3, sort=sort)
expected = DataFrame(index=[0, 1], columns=["A", "B", "C"])
+ expected["C"] = expected["C"].astype(np.float64)
tm.assert_frame_equal(df5, expected)
def test_append_records(self):
@@ -340,16 +341,11 @@ def test_append_empty_frame_to_series_with_dateutil_tz(self):
expected = DataFrame(
[[np.nan, np.nan, 1.0, 2.0, date]], columns=["c", "d", "a", "b", "date"]
)
- # These columns get cast to object after append
- expected["c"] = expected["c"].astype(object)
- expected["d"] = expected["d"].astype(object)
tm.assert_frame_equal(result_a, expected)
expected = DataFrame(
[[np.nan, np.nan, 1.0, 2.0, date]] * 2, columns=["c", "d", "a", "b", "date"]
)
- expected["c"] = expected["c"].astype(object)
- expected["d"] = expected["d"].astype(object)
result_b = result_a.append(s, ignore_index=True)
tm.assert_frame_equal(result_b, expected)
diff --git a/pandas/tests/reshape/concat/test_empty.py b/pandas/tests/reshape/concat/test_empty.py
index dea04e98088e8..075785120677a 100644
--- a/pandas/tests/reshape/concat/test_empty.py
+++ b/pandas/tests/reshape/concat/test_empty.py
@@ -210,7 +210,6 @@ def test_concat_empty_df_object_dtype(self, dtype):
df_2 = DataFrame(columns=df_1.columns)
result = pd.concat([df_1, df_2], axis=0)
expected = df_1.copy()
- expected["EmptyCol"] = expected["EmptyCol"].astype(object) # TODO: why?
tm.assert_frame_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Covers the case missed by #38843 | https://api.github.com/repos/pandas-dev/pandas/pulls/38907 | 2021-01-02T19:38:03Z | 2021-01-03T16:58:04Z | 2021-01-03T16:58:04Z | 2021-01-03T18:08:41Z |
CI: enable parallel testing on arm64 build #36719 | diff --git a/.travis.yml b/.travis.yml
index 8ede978074a9c..e97994262e825 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -31,20 +31,13 @@ env:
git:
depth: false
-matrix:
+jobs:
fast_finish: true
include:
- arch: arm64
env:
- - JOB="3.7, arm64" PYTEST_WORKERS=1 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)"
-
- allow_failures:
- # Moved to allowed_failures 2020-09-29 due to timeouts https://github.com/pandas-dev/pandas/issues/36719
- - arch: arm64
- env:
- - JOB="3.7, arm64" PYTEST_WORKERS=1 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)"
-
+ - JOB="3.7, arm64" PYTEST_WORKERS=4 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)"
before_install:
- echo "before_install"
diff --git a/ci/run_tests.sh b/ci/run_tests.sh
index 593939431d5eb..b50b1c104b4af 100755
--- a/ci/run_tests.sh
+++ b/ci/run_tests.sh
@@ -20,7 +20,11 @@ if [[ $(uname) == "Linux" && -z $DISPLAY ]]; then
XVFB="xvfb-run "
fi
-PYTEST_CMD="${XVFB}pytest -m \"$PATTERN\" -n $PYTEST_WORKERS --dist=loadfile -s --strict-markers --durations=30 --junitxml=test-data.xml $TEST_ARGS $COVERAGE pandas"
+# With --dist=no, pytest distributs one test at a time.
+# If using other options, running the test suite would be extremely slow on arm64 machine.
+# See https://pypi.org/project/pytest-xdist/ for details.
+# GH 36719
+PYTEST_CMD="${XVFB}pytest -m \"$PATTERN\" -n $PYTEST_WORKERS --dist=no -s --strict-markers --durations=30 --junitxml=test-data.xml $TEST_ARGS $COVERAGE pandas"
if [[ $(uname) != "Linux" && $(uname) != "Darwin" ]]; then
# GH#37455 windows py38 build appears to be running out of memory
diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index c36422884f2ec..6ad4b85aba2a6 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -37,7 +37,7 @@ else
fi
if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
- CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.8.5-1/Miniforge3-4.8.5-1-Linux-aarch64.sh"
+ CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.9.2-5/Miniforge3-4.9.2-5-Linux-aarch64.sh"
else
CONDA_URL="https://repo.continuum.io/miniconda/Miniconda3-latest-$CONDA_OS.sh"
fi
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index eb6cf4f9d7d85..c972cf3d39b51 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -26,6 +26,7 @@
PY39 = sys.version_info >= (3, 9)
PYPY = platform.python_implementation() == "PyPy"
IS64 = sys.maxsize > 2 ** 32
+ARM64 = platform.machine() == "arm64" or platform.machine() == "aarch64"
def set_function_name(f: F, name: str, cls) -> F:
diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py
index c269d6ff11896..180469be3fe2f 100644
--- a/pandas/tests/indexes/interval/test_astype.py
+++ b/pandas/tests/indexes/interval/test_astype.py
@@ -3,6 +3,8 @@
import numpy as np
import pytest
+from pandas.compat import ARM64
+
from pandas.core.dtypes.dtypes import CategoricalDtype, IntervalDtype
from pandas import (
@@ -165,6 +167,7 @@ def test_subtype_integer_with_non_integer_borders(self, subtype):
)
tm.assert_index_equal(result, expected)
+ @pytest.mark.xfail(ARM64, reason="GH 38923")
def test_subtype_integer_errors(self):
# float64 -> uint64 fails with negative values
index = interval_range(-10.0, 10.0)
diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py
index d5b4bda35ca2b..db3debc4c5524 100644
--- a/pandas/tests/tools/test_to_numeric.py
+++ b/pandas/tests/tools/test_to_numeric.py
@@ -4,6 +4,8 @@
from numpy import iinfo
import pytest
+from pandas.compat import ARM64
+
import pandas as pd
from pandas import DataFrame, Index, Series, to_numeric
import pandas._testing as tm
@@ -747,7 +749,7 @@ def test_to_numeric_from_nullable_string(values, expected):
"UInt64",
"signed",
"UInt64",
- marks=pytest.mark.xfail(reason="GH38798"),
+ marks=pytest.mark.xfail(not ARM64, reason="GH38798"),
),
([1, 1], "Int64", "unsigned", "UInt8"),
([1.0, 1.0], "Float32", "unsigned", "UInt8"),
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index b275b64ff706b..71c0945c4b37f 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -3,6 +3,7 @@
import numpy as np
import pytest
+from pandas.compat import ARM64
from pandas.errors import UnsupportedFunctionCall
from pandas import (
@@ -891,6 +892,7 @@ def test_rolling_sem(frame_or_series):
tm.assert_series_equal(result, expected)
+@pytest.mark.xfail(ARM64, reason="GH 38921")
@pytest.mark.parametrize(
("func", "third_value", "values"),
[
| - [x] closes #36719
| https://api.github.com/repos/pandas-dev/pandas/pulls/38905 | 2021-01-02T18:02:06Z | 2021-01-24T21:47:46Z | null | 2021-03-18T16:42:06Z |
ENH: Use Kahan summation to calculate groupby.sum() | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 9d1b3eaebdf8b..94a12fec6adcb 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -289,7 +289,7 @@ Groupby/resample/rolling
- Bug in :meth:`SeriesGroupBy.value_counts` where unobserved categories in a grouped categorical series were not tallied (:issue:`38672`)
- Bug in :meth:`.GroupBy.indices` would contain non-existent indices when null values were present in the groupby keys (:issue:`9304`)
--
+- Fixed bug in :meth:`DataFrameGroupBy.sum` and :meth:`SeriesGroupBy.sum` causing loss of precision through using Kahan summation (:issue:`38778`)
Reshaping
^^^^^^^^^
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index ffb75401013dc..ac8f22263f787 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -467,12 +467,12 @@ def _group_add(complexfloating_t[:, :] out,
const int64_t[:] labels,
Py_ssize_t min_count=0):
"""
- Only aggregates on axis=0
+ Only aggregates on axis=0 using Kahan summation
"""
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
- complexfloating_t val, count
- complexfloating_t[:, :] sumx
+ complexfloating_t val, count, t, y
+ complexfloating_t[:, :] sumx, compensation
int64_t[:, :] nobs
Py_ssize_t len_values = len(values), len_labels = len(labels)
@@ -481,6 +481,7 @@ def _group_add(complexfloating_t[:, :] out,
nobs = np.zeros((<object>out).shape, dtype=np.int64)
sumx = np.zeros_like(out)
+ compensation = np.zeros_like(out)
N, K = (<object>values).shape
@@ -497,12 +498,10 @@ def _group_add(complexfloating_t[:, :] out,
# not nan
if val == val:
nobs[lab, j] += 1
- if (complexfloating_t is complex64_t or
- complexfloating_t is complex128_t):
- # clang errors if we use += with these dtypes
- sumx[lab, j] = sumx[lab, j] + val
- else:
- sumx[lab, j] += val
+ y = val - compensation[lab, j]
+ t = sumx[lab, j] + y
+ compensation[lab, j] = t - sumx[lab, j] - y
+ sumx[lab, j] = t
for i in range(ncounts):
for j in range(K):
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index e5021b7b4dd5f..4e085a7608e31 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -5,6 +5,7 @@
import numpy as np
import pytest
+from pandas.compat import IS64
from pandas.errors import PerformanceWarning
import pandas as pd
@@ -2174,3 +2175,15 @@ def test_groupby_series_with_tuple_name():
expected = Series([2, 4], index=[1, 2], name=("a", "a"))
expected.index.name = ("b", "b")
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.xfail(not IS64, reason="GH#38778: fail on 32-bit system")
+def test_groupby_numerical_stability_sum():
+ # GH#38778
+ data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15]
+ df = DataFrame({"group": [1, 2] * 4, "a": data, "b": data})
+ result = df.groupby("group").sum()
+ expected = DataFrame(
+ {"a": [97.0, 98.0], "b": [97.0, 98.0]}, index=Index([1, 2], name="group")
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] closes #38778
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This simplifies the op example.
I think
```
series = Series([1e16, 99, -5e15, -5e15])
series.sum()
```
is dispatched to numpy? The result is wrong too. ``100.0``
Mean doesn't work either. I will look through the functions to determine which need Kahan summation too and open an issue to track these. | https://api.github.com/repos/pandas-dev/pandas/pulls/38903 | 2021-01-02T15:58:45Z | 2021-01-03T23:25:09Z | 2021-01-03T23:25:09Z | 2021-01-03T23:29:09Z |
TST: strictly xfail | diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index 493cb979494c8..e38de67071f15 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -117,8 +117,9 @@ class TestConstructors(base.BaseConstructorsTests):
class TestReshaping(base.BaseReshapingTests):
+ @pytest.mark.xfail(reason="Deliberately upcast to object?")
def test_concat_with_reindex(self, data):
- pytest.xfail(reason="Deliberately upcast to object?")
+ super().test_concat_with_reindex(data)
class TestGetitem(base.BaseGetitemTests):
diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py
index 196d4a0b3bb76..1c397d6a6a1b5 100644
--- a/pandas/tests/frame/test_reductions.py
+++ b/pandas/tests/frame/test_reductions.py
@@ -1229,13 +1229,15 @@ def test_min_max_dt64_with_NaT(self):
exp = Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
- def test_min_max_dt64_with_NaT_skipna_false(self, tz_naive_fixture):
+ def test_min_max_dt64_with_NaT_skipna_false(self, request, tz_naive_fixture):
# GH#36907
tz = tz_naive_fixture
if isinstance(tz, tzlocal) and is_platform_windows():
- pytest.xfail(
- reason="GH#37659 OSError raised within tzlocal bc Windows "
- "chokes in times before 1970-01-01"
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="GH#37659 OSError raised within tzlocal bc Windows "
+ "chokes in times before 1970-01-01"
+ )
)
df = DataFrame(
diff --git a/pandas/tests/frame/test_ufunc.py b/pandas/tests/frame/test_ufunc.py
index 81c0dc65b4e97..8a29c2f2f89a1 100644
--- a/pandas/tests/frame/test_ufunc.py
+++ b/pandas/tests/frame/test_ufunc.py
@@ -24,10 +24,14 @@ def test_unary_unary(dtype):
@pytest.mark.parametrize("dtype", dtypes)
-def test_unary_binary(dtype):
+def test_unary_binary(request, dtype):
# unary input, binary output
if pd.api.types.is_extension_array_dtype(dtype) or isinstance(dtype, dict):
- pytest.xfail(reason="Extension / mixed with multiple outuputs not implemented.")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Extension / mixed with multiple outputs not implemented."
+ )
+ )
values = np.array([[-1, -1], [1, 1]], dtype="int64")
df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype)
@@ -55,14 +59,18 @@ def test_binary_input_dispatch_binop(dtype):
@pytest.mark.parametrize("dtype_a", dtypes)
@pytest.mark.parametrize("dtype_b", dtypes)
-def test_binary_input_aligns_columns(dtype_a, dtype_b):
+def test_binary_input_aligns_columns(request, dtype_a, dtype_b):
if (
pd.api.types.is_extension_array_dtype(dtype_a)
or isinstance(dtype_a, dict)
or pd.api.types.is_extension_array_dtype(dtype_b)
or isinstance(dtype_b, dict)
):
- pytest.xfail(reason="Extension / mixed with multiple inputs not implemented.")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Extension / mixed with multiple inputs not implemented."
+ )
+ )
df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}).astype(dtype_a)
@@ -80,9 +88,13 @@ def test_binary_input_aligns_columns(dtype_a, dtype_b):
@pytest.mark.parametrize("dtype", dtypes)
-def test_binary_input_aligns_index(dtype):
+def test_binary_input_aligns_index(request, dtype):
if pd.api.types.is_extension_array_dtype(dtype) or isinstance(dtype, dict):
- pytest.xfail(reason="Extension / mixed with multiple inputs not implemented.")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Extension / mixed with multiple inputs not implemented."
+ )
+ )
df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).astype(dtype)
df2 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "c"]).astype(dtype)
result = np.heaviside(df1, df2)
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index 4974d3fff1df4..73a68e8508644 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -547,14 +547,14 @@ def test_finalize_called_eval_numexpr():
(pd.DataFrame({"A": [1]}), pd.Series([1])),
],
)
-def test_binops(args, annotate, all_arithmetic_functions):
+def test_binops(request, args, annotate, all_arithmetic_functions):
# This generates 326 tests... Is that needed?
left, right = args
if annotate == "both" and isinstance(left, int) or isinstance(right, int):
return
if isinstance(left, pd.DataFrame) or isinstance(right, pd.DataFrame):
- pytest.xfail(reason="not implemented")
+ request.node.add_marker(pytest.mark.xfail(reason="not implemented"))
if annotate in {"left", "both"} and not isinstance(left, int):
left.attrs = {"a": 1}
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index 72637400ff023..216d37a381c32 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -158,10 +158,10 @@ def test_transform_broadcast(tsframe, ts):
assert_fp_equal(res.xs(idx), agged[idx])
-def test_transform_axis_1(transformation_func):
+def test_transform_axis_1(request, transformation_func):
# GH 36308
if transformation_func == "tshift":
- pytest.xfail("tshift is deprecated")
+ request.node.add_marker(pytest.mark.xfail(reason="tshift is deprecated"))
args = ("ffill",) if transformation_func == "fillna" else ()
df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"])
@@ -333,7 +333,7 @@ def test_dispatch_transform(tsframe):
tm.assert_frame_equal(filled, expected)
-def test_transform_transformation_func(transformation_func):
+def test_transform_transformation_func(request, transformation_func):
# GH 30918
df = DataFrame(
{
@@ -354,7 +354,7 @@ def test_transform_transformation_func(transformation_func):
"Current behavior of groupby.tshift is inconsistent with other "
"transformations. See GH34452 for more details"
)
- pytest.xfail(msg)
+ request.node.add_marker(pytest.mark.xfail(reason=msg))
else:
test_op = lambda x: x.transform(transformation_func)
mock_op = lambda x: getattr(x, transformation_func)()
@@ -1038,16 +1038,22 @@ def test_transform_invalid_name_raises():
Series([0, 0, 0, 1, 1, 1], index=["A", "B", "C", "D", "E", "F"]),
],
)
-def test_transform_agg_by_name(reduction_func, obj):
+def test_transform_agg_by_name(request, reduction_func, obj):
func = reduction_func
g = obj.groupby(np.repeat([0, 1], 3))
if func == "ngroup": # GH#27468
- pytest.xfail("TODO: g.transform('ngroup') doesn't work")
- if func == "size": # GH#27469
- pytest.xfail("TODO: g.transform('size') doesn't work")
+ request.node.add_marker(
+ pytest.mark.xfail(reason="TODO: g.transform('ngroup') doesn't work")
+ )
+ if func == "size" and obj.ndim == 2: # GH#27469
+ request.node.add_marker(
+ pytest.mark.xfail(reason="TODO: g.transform('size') doesn't work")
+ )
if func == "corrwith" and isinstance(obj, Series): # GH#32293
- pytest.xfail("TODO: implement SeriesGroupBy.corrwith")
+ request.node.add_marker(
+ pytest.mark.xfail(reason="TODO: implement SeriesGroupBy.corrwith")
+ )
args = {"nth": [0], "quantile": [0.5], "corrwith": [obj]}.get(func, [])
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 0352759e7381b..a24c8e252d234 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -123,10 +123,12 @@ def test_repeat(self, tz_naive_fixture):
("U", "microsecond"),
],
)
- def test_resolution(self, tz_naive_fixture, freq, expected):
+ def test_resolution(self, request, tz_naive_fixture, freq, expected):
tz = tz_naive_fixture
if freq == "A" and not IS64 and isinstance(tz, tzlocal):
- pytest.xfail(reason="OverflowError inside tzlocal past 2038")
+ request.node.add_marker(
+ pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038")
+ )
idx = date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index e5bb78604207f..1eca7f7a5d261 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1596,13 +1596,15 @@ def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2):
np.array([False, False]),
)
- def test_isin_nan_common_float64(self, nulls_fixture):
+ def test_isin_nan_common_float64(self, request, nulls_fixture):
if nulls_fixture is pd.NaT:
pytest.skip("pd.NaT not compatible with Float64Index")
# Float64Index overrides isin, so must be checked separately
if nulls_fixture is pd.NA:
- pytest.xfail("Float64Index cannot contain pd.NA")
+ request.node.add_marker(
+ pytest.mark.xfail(reason="Float64Index cannot contain pd.NA")
+ )
tm.assert_numpy_array_equal(
Float64Index([1.0, nulls_fixture]).isin([np.nan]), np.array([False, True])
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py
index 912743e45975a..64b08c6058b81 100644
--- a/pandas/tests/indexes/test_setops.py
+++ b/pandas/tests/indexes/test_setops.py
@@ -38,19 +38,21 @@ def test_union_same_types(index):
assert idx1.union(idx2).dtype == idx1.dtype
-def test_union_different_types(index, index_fixture2):
+def test_union_different_types(request, index, index_fixture2):
# This test only considers combinations of indices
# GH 23525
idx1, idx2 = index, index_fixture2
type_pair = tuple(sorted([type(idx1), type(idx2)], key=lambda x: str(x)))
if type_pair in COMPATIBLE_INCONSISTENT_PAIRS:
- pytest.xfail("This test only considers non compatible indexes.")
+ request.node.add_marker(
+ pytest.mark.xfail(reason="This test only considers non compatible indexes.")
+ )
if any(isinstance(idx, pd.MultiIndex) for idx in (idx1, idx2)):
pytest.xfail("This test doesn't consider multiindixes.")
if is_dtype_equal(idx1.dtype, idx2.dtype):
- pytest.xfail("This test only considers non matching dtypes.")
+ pytest.skip("This test only considers non matching dtypes.")
# A union with a CategoricalIndex (even as dtype('O')) and a
# non-CategoricalIndex can only be made if both indices are monotonic.
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 41f967ce32796..8735e2a09920d 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -339,26 +339,33 @@ def test_setitem_index_float64(self, val, exp_dtype, request):
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
+ @pytest.mark.xfail(reason="Test not implemented")
def test_setitem_series_period(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_complex128(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_bool(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_datetime64(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_datetime64tz(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_timedelta64(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_period(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
class TestInsertIndexCoercion(CoercionBase):
@@ -511,11 +518,13 @@ def test_insert_index_period(self, insert, coerced_val, coerced_dtype):
# passing keywords to pd.Index
pd.Index(data, freq="M")
+ @pytest.mark.xfail(reason="Test not implemented")
def test_insert_index_complex128(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_insert_index_bool(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
class TestWhereCoercion(CoercionBase):
@@ -760,17 +769,21 @@ def test_where_index_datetime64tz(self):
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
+ @pytest.mark.xfail(reason="Test not implemented")
def test_where_index_complex128(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_where_index_bool(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_where_series_timedelta64(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_where_series_period(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
@pytest.mark.parametrize(
"value", [pd.Timedelta(days=9), timedelta(days=9), np.timedelta64(9, "D")]
@@ -822,8 +835,9 @@ class TestFillnaSeriesCoercion(CoercionBase):
method = "fillna"
+ @pytest.mark.xfail(reason="Test not implemented")
def test_has_comprehensive_tests(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
def _assert_fillna_conversion(self, original, value, expected, expected_dtype):
""" test coercion triggered by fillna """
@@ -942,29 +956,37 @@ def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype):
)
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
+ @pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_int64(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_int64(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_bool(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_bool(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_timedelta64(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_period(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_timedelta64(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
+ @pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_period(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
class TestReplaceSeriesCoercion(CoercionBase):
@@ -1120,5 +1142,6 @@ def test_replace_series_datetime_datetime(self, how, to_key, from_key):
tm.assert_series_equal(result, exp)
+ @pytest.mark.xfail(reason="Test not implemented")
def test_replace_series_period(self):
- pytest.xfail("Test not implemented")
+ raise NotImplementedError
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index f472e24ac9498..1b80b6429c8b5 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -136,9 +136,13 @@ def test_usecols_int(self, read_ext, df_ref):
usecols=3,
)
- def test_usecols_list(self, read_ext, df_ref):
+ def test_usecols_list(self, request, read_ext, df_ref):
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
df_ref = df_ref.reindex(columns=["B", "C"])
df1 = pd.read_excel(
@@ -156,9 +160,13 @@ def test_usecols_list(self, read_ext, df_ref):
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
- def test_usecols_str(self, read_ext, df_ref):
+ def test_usecols_str(self, request, read_ext, df_ref):
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
df1 = df_ref.reindex(columns=["A", "B", "C"])
df2 = pd.read_excel(
@@ -208,9 +216,15 @@ def test_usecols_str(self, read_ext, df_ref):
@pytest.mark.parametrize(
"usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
)
- def test_usecols_diff_positional_int_columns_order(self, read_ext, usecols, df_ref):
+ def test_usecols_diff_positional_int_columns_order(
+ self, request, read_ext, usecols, df_ref
+ ):
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
expected = df_ref[["A", "C"]]
result = pd.read_excel(
@@ -226,17 +240,25 @@ def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_r
result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
- def test_read_excel_without_slicing(self, read_ext, df_ref):
+ def test_read_excel_without_slicing(self, request, read_ext, df_ref):
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
expected = df_ref
result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
- def test_usecols_excel_range_str(self, read_ext, df_ref):
+ def test_usecols_excel_range_str(self, request, read_ext, df_ref):
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
expected = df_ref[["C", "D"]]
result = pd.read_excel(
@@ -310,17 +332,25 @@ def test_excel_stop_iterator(self, read_ext):
expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"])
tm.assert_frame_equal(parsed, expected)
- def test_excel_cell_error_na(self, read_ext):
+ def test_excel_cell_error_na(self, request, read_ext):
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
parsed = pd.read_excel("test3" + read_ext, sheet_name="Sheet1")
expected = DataFrame([[np.nan]], columns=["Test"])
tm.assert_frame_equal(parsed, expected)
- def test_excel_table(self, read_ext, df_ref):
+ def test_excel_table(self, request, read_ext, df_ref):
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
df1 = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0)
df2 = pd.read_excel(
@@ -335,9 +365,13 @@ def test_excel_table(self, read_ext, df_ref):
)
tm.assert_frame_equal(df3, df1.iloc[:-1])
- def test_reader_special_dtypes(self, read_ext):
+ def test_reader_special_dtypes(self, request, read_ext):
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
expected = DataFrame.from_dict(
{
@@ -568,10 +602,14 @@ def test_read_excel_blank_with_header(self, read_ext):
actual = pd.read_excel("blank_with_header" + read_ext, sheet_name="Sheet1")
tm.assert_frame_equal(actual, expected)
- def test_date_conversion_overflow(self, read_ext):
+ def test_date_conversion_overflow(self, request, read_ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
expected = DataFrame(
[
@@ -583,24 +621,29 @@ def test_date_conversion_overflow(self, read_ext):
)
if pd.read_excel.keywords["engine"] == "openpyxl":
- pytest.xfail("Maybe not supported by openpyxl")
+ request.node.add_marker(
+ pytest.mark.xfail(reason="Maybe not supported by openpyxl")
+ )
- if pd.read_excel.keywords["engine"] is None:
+ if pd.read_excel.keywords["engine"] is None and read_ext in (".xlsx", ".xlsm"):
# GH 35029
- pytest.xfail("Defaults to openpyxl, maybe not supported")
+ request.node.add_marker(
+ pytest.mark.xfail(reason="Defaults to openpyxl, maybe not supported")
+ )
result = pd.read_excel("testdateoverflow" + read_ext)
tm.assert_frame_equal(result, expected)
- def test_sheet_name(self, read_ext, df_ref):
+ def test_sheet_name(self, request, read_ext, df_ref):
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
filename = "test1"
sheet_name = "Sheet1"
- if pd.read_excel.keywords["engine"] == "openpyxl":
- pytest.xfail("Maybe not supported by openpyxl")
-
df1 = pd.read_excel(
filename + read_ext, sheet_name=sheet_name, index_col=0
) # doc
@@ -730,9 +773,13 @@ def test_close_from_py_localpath(self, read_ext):
# should not throw an exception because the passed file was closed
f.read()
- def test_reader_seconds(self, read_ext):
+ def test_reader_seconds(self, request, read_ext):
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
# Test reading times with and without milliseconds. GH5945.
expected = DataFrame.from_dict(
@@ -759,10 +806,14 @@ def test_reader_seconds(self, read_ext):
actual = pd.read_excel("times_1904" + read_ext, sheet_name="Sheet1")
tm.assert_frame_equal(actual, expected)
- def test_read_excel_multiindex(self, read_ext):
+ def test_read_excel_multiindex(self, request, read_ext):
# see gh-4679
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
mi_file = "testmultiindex" + read_ext
@@ -849,11 +900,15 @@ def test_read_excel_multiindex(self, read_ext):
],
)
def test_read_excel_multiindex_blank_after_name(
- self, read_ext, sheet_name, idx_lvl2
+ self, request, read_ext, sheet_name, idx_lvl2
):
# GH34673
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb (GH4679")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb (GH4679"
+ )
+ )
mi_file = "testmultiindex" + read_ext
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]], names=["c1", "c2"])
@@ -968,10 +1023,14 @@ def test_read_excel_bool_header_arg(self, read_ext):
with pytest.raises(TypeError, match=msg):
pd.read_excel("test1" + read_ext, header=arg)
- def test_read_excel_skiprows(self, read_ext):
+ def test_read_excel_skiprows(self, request, read_ext):
# GH 4903
if pd.read_excel.keywords["engine"] == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
actual = pd.read_excel(
"testskiprows" + read_ext, sheet_name="skiprows_list", skiprows=[0, 2]
@@ -1151,11 +1210,15 @@ def test_excel_passes_na_filter(self, read_ext, na_filter):
expected = DataFrame(expected, columns=["Test"])
tm.assert_frame_equal(parsed, expected)
- def test_excel_table_sheet_by_index(self, read_ext, df_ref):
+ def test_excel_table_sheet_by_index(self, request, read_ext, df_ref):
# For some reason pd.read_excel has no attribute 'keywords' here.
# Skipping based on read_ext instead.
if read_ext == ".xlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
with pd.ExcelFile("test1" + read_ext) as excel:
df1 = pd.read_excel(excel, sheet_name=0, index_col=0)
@@ -1178,11 +1241,15 @@ def test_excel_table_sheet_by_index(self, read_ext, df_ref):
tm.assert_frame_equal(df3, df1.iloc[:-1])
- def test_sheet_name(self, read_ext, df_ref):
+ def test_sheet_name(self, request, read_ext, df_ref):
# For some reason pd.read_excel has no attribute 'keywords' here.
# Skipping based on read_ext instead.
if read_ext == ".xlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
filename = "test1"
sheet_name = "Sheet1"
@@ -1262,10 +1329,14 @@ def test_header_with_index_col(self, engine, filename):
)
tm.assert_frame_equal(expected, result)
- def test_read_datetime_multiindex(self, engine, read_ext):
+ def test_read_datetime_multiindex(self, request, engine, read_ext):
# GH 34748
if engine == "pyxlsb":
- pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason="Sheets containing datetimes not supported by pyxlsb"
+ )
+ )
f = "test_datetime_mi" + read_ext
with pd.ExcelFile(f) as excel:
diff --git a/pandas/tests/io/excel/test_style.py b/pandas/tests/io/excel/test_style.py
index 6b1abebe0506a..ed996d32cf2fb 100644
--- a/pandas/tests/io/excel/test_style.py
+++ b/pandas/tests/io/excel/test_style.py
@@ -21,7 +21,7 @@
"openpyxl",
],
)
-def test_styler_to_excel(engine):
+def test_styler_to_excel(request, engine):
def style(df):
# TODO: RGB colors not supported in xlwt
return DataFrame(
@@ -44,8 +44,12 @@ def style(df):
def assert_equal_style(cell1, cell2, engine):
if engine in ["xlsxwriter", "openpyxl"]:
- pytest.xfail(
- reason=(f"GH25351: failing on some attribute comparisons in {engine}")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=(
+ f"GH25351: failing on some attribute comparisons in {engine}"
+ )
+ )
)
# TODO: should find a better way to check equality
assert cell1.alignment.__dict__ == cell2.alignment.__dict__
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 851e64a3f2478..eaf35c845ab9a 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -165,7 +165,7 @@ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype, int_frame)
@pytest.mark.parametrize("dtype", [None, np.float64, int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
- def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
+ def test_roundtrip_str_axes(self, request, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
@@ -175,7 +175,9 @@ def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
- pytest.xfail("Can't decode directly to array")
+ request.node.add_marker(
+ pytest.mark.xfail(reason="Can't decode directly to array")
+ )
data = df.to_json(orient=orient)
result = pd.read_json(
@@ -202,14 +204,20 @@ def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
- def test_roundtrip_categorical(self, orient, convert_axes, numpy):
+ def test_roundtrip_categorical(self, request, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
- pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
+ request.node.add_marker(
+ pytest.mark.xfail(
+ reason=f"Can't have duplicate index values for orient '{orient}')"
+ )
+ )
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
- pytest.xfail(f"Orient {orient} is broken with numpy=True")
+ request.node.add_marker(
+ pytest.mark.xfail(reason=f"Orient {orient} is broken with numpy=True")
+ )
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
@@ -264,9 +272,11 @@ def test_roundtrip_timestamp(self, orient, convert_axes, numpy, datetime_frame):
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
- def test_roundtrip_mixed(self, orient, convert_axes, numpy):
+ def test_roundtrip_mixed(self, request, orient, convert_axes, numpy):
if numpy and orient != "split":
- pytest.xfail("Can't decode directly to array")
+ request.node.add_marker(
+ pytest.mark.xfail(reason="Can't decode directly to array")
+ )
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 822b412916726..c6969fd5bbb74 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -309,16 +309,18 @@ def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
tm.assert_frame_equal(result, df[["a", "d"]])
-def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
+def test_cross_engine_fp_pa(request, df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
if (
LooseVersion(pyarrow.__version__) < "0.15"
and LooseVersion(pyarrow.__version__) >= "0.13"
):
- pytest.xfail(
- "Reading fastparquet with pyarrow in 0.14 fails: "
- "https://issues.apache.org/jira/browse/ARROW-6492"
+ request.node.add_marker(
+ pytest.mark.xfail(
+ "Reading fastparquet with pyarrow in 0.14 fails: "
+ "https://issues.apache.org/jira/browse/ARROW-6492"
+ )
)
df = df_cross_compat
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index c12111e20a4b1..c669cf39c9a61 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -117,10 +117,12 @@ def test_aaa_group_order():
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 5)), df[4::5])
-def test_aggregate_normal(resample_method):
+def test_aggregate_normal(request, resample_method):
"""Check TimeGrouper's aggregation is identical as normal groupby."""
if resample_method == "ohlc":
- pytest.xfail(reason="DataError: No numeric types to aggregate")
+ request.node.add_marker(
+ pytest.mark.xfail(reason="DataError: No numeric types to aggregate")
+ )
data = np.random.randn(20, 4)
normal_df = DataFrame(data, columns=["A", "B", "C", "D"])
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Continuation of #38881. This handles cases where replacing pytest.xfail is clear, opened #38902 for the handful of remaining cases.
In the case where `pytest.xfail` is used for a test which has been yet to be implemented, I've added ~~`assert False` to maintain the current behavior of counting it as an xfail. Maybe there is a better alternative here.~~ `raise NotImplementedError`. | https://api.github.com/repos/pandas-dev/pandas/pulls/38901 | 2021-01-02T15:40:36Z | 2021-01-03T19:43:59Z | 2021-01-03T19:43:59Z | 2021-01-03T20:35:30Z |
Validate axis args | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 7f2039c998f53..274801431661f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2076,7 +2076,7 @@ def idxmin(self, axis=0, skipna=True, *args, **kwargs):
>>> s.idxmin(skipna=False)
nan
"""
- i = self.argmin(None, skipna=skipna)
+ i = self.argmin(axis, skipna, *args, **kwargs)
if i == -1:
return np.nan
return self.index[i]
@@ -2146,7 +2146,7 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs):
>>> s.idxmax(skipna=False)
nan
"""
- i = self.argmax(None, skipna=skipna)
+ i = self.argmax(axis, skipna, *args, **kwargs)
if i == -1:
return np.nan
return self.index[i]
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This is a follow up PR on #37924 @jreback cheers | https://api.github.com/repos/pandas-dev/pandas/pulls/38899 | 2021-01-02T13:21:07Z | 2021-01-03T17:14:40Z | 2021-01-03T17:14:40Z | 2021-01-03T17:14:44Z |
TST/REF: split io/parsers/test_common into multiple files | diff --git a/pandas/tests/io/parser/common/test_chunksize.py b/pandas/tests/io/parser/common/test_chunksize.py
new file mode 100644
index 0000000000000..8c1475025b442
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_chunksize.py
@@ -0,0 +1,221 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas.errors import DtypeWarning
+
+from pandas import DataFrame, concat
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize("index_col", [0, "index"])
+def test_read_chunksize_with_index(all_parsers, index_col):
+ parser = all_parsers
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+
+ expected = DataFrame(
+ [
+ ["foo", 2, 3, 4, 5],
+ ["bar", 7, 8, 9, 10],
+ ["baz", 12, 13, 14, 15],
+ ["qux", 12, 13, 14, 15],
+ ["foo2", 12, 13, 14, 15],
+ ["bar2", 12, 13, 14, 15],
+ ],
+ columns=["index", "A", "B", "C", "D"],
+ )
+ expected = expected.set_index("index")
+
+ with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:
+ chunks = list(reader)
+ tm.assert_frame_equal(chunks[0], expected[:2])
+ tm.assert_frame_equal(chunks[1], expected[2:4])
+ tm.assert_frame_equal(chunks[2], expected[4:])
+
+
+@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
+def test_read_chunksize_bad(all_parsers, chunksize):
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+ parser = all_parsers
+ msg = r"'chunksize' must be an integer >=1"
+
+ with pytest.raises(ValueError, match=msg):
+ with parser.read_csv(StringIO(data), chunksize=chunksize) as _:
+ pass
+
+
+@pytest.mark.parametrize("chunksize", [2, 8])
+def test_read_chunksize_and_nrows(all_parsers, chunksize):
+ # see gh-15755
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+ parser = all_parsers
+ kwargs = {"index_col": 0, "nrows": 5}
+
+ expected = parser.read_csv(StringIO(data), **kwargs)
+ with parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) as reader:
+ tm.assert_frame_equal(concat(reader), expected)
+
+
+def test_read_chunksize_and_nrows_changing_size(all_parsers):
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+ parser = all_parsers
+ kwargs = {"index_col": 0, "nrows": 5}
+
+ expected = parser.read_csv(StringIO(data), **kwargs)
+ with parser.read_csv(StringIO(data), chunksize=8, **kwargs) as reader:
+ tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
+ tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
+
+ with pytest.raises(StopIteration, match=""):
+ reader.get_chunk(size=3)
+
+
+def test_get_chunk_passed_chunksize(all_parsers):
+ parser = all_parsers
+ data = """A,B,C
+1,2,3
+4,5,6
+7,8,9
+1,2,3"""
+
+ with parser.read_csv(StringIO(data), chunksize=2) as reader:
+ result = reader.get_chunk()
+
+ expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("kwargs", [{}, {"index_col": 0}])
+def test_read_chunksize_compat(all_parsers, kwargs):
+ # see gh-12185
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), **kwargs)
+ with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:
+ tm.assert_frame_equal(concat(reader), result)
+
+
+def test_read_chunksize_jagged_names(all_parsers):
+ # see gh-23509
+ parser = all_parsers
+ data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
+
+ expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
+ with parser.read_csv(StringIO(data), names=range(10), chunksize=4) as reader:
+ result = concat(reader)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_chunk_begins_with_newline_whitespace(all_parsers):
+ # see gh-10022
+ parser = all_parsers
+ data = "\n hello\nworld\n"
+
+ result = parser.read_csv(StringIO(data), header=None)
+ expected = DataFrame([" hello", "world"])
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.xfail(reason="GH38630, sometimes gives ResourceWarning", strict=False)
+def test_chunks_have_consistent_numerical_type(all_parsers):
+ parser = all_parsers
+ integers = [str(i) for i in range(499999)]
+ data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
+
+ # Coercions should work without warnings.
+ with tm.assert_produces_warning(None):
+ result = parser.read_csv(StringIO(data))
+
+ assert type(result.a[0]) is np.float64
+ assert result.a.dtype == float
+
+
+def test_warn_if_chunks_have_mismatched_type(all_parsers):
+ warning_type = None
+ parser = all_parsers
+ integers = [str(i) for i in range(499999)]
+ data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
+
+ # see gh-3866: if chunks are different types and can't
+ # be coerced using numerical types, then issue warning.
+ if parser.engine == "c" and parser.low_memory:
+ warning_type = DtypeWarning
+
+ with tm.assert_produces_warning(warning_type):
+ df = parser.read_csv(StringIO(data))
+ assert df.a.dtype == object
+
+
+@pytest.mark.parametrize("iterator", [True, False])
+def test_empty_with_nrows_chunksize(all_parsers, iterator):
+ # see gh-9535
+ parser = all_parsers
+ expected = DataFrame(columns=["foo", "bar"])
+
+ nrows = 10
+ data = StringIO("foo,bar\n")
+
+ if iterator:
+ with parser.read_csv(data, chunksize=nrows) as reader:
+ result = next(iter(reader))
+ else:
+ result = parser.read_csv(data, nrows=nrows)
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_memory_growth_chunksize(all_parsers):
+ # see gh-24805
+ #
+ # Let's just make sure that we don't crash
+ # as we iteratively process all chunks.
+ parser = all_parsers
+
+ with tm.ensure_clean() as path:
+ with open(path, "w") as f:
+ for i in range(1000):
+ f.write(str(i) + "\n")
+
+ with parser.read_csv(path, chunksize=20) as result:
+ for _ in result:
+ pass
diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py
new file mode 100644
index 0000000000000..4fd754bf79ba2
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_common_basic.py
@@ -0,0 +1,727 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+from datetime import datetime
+from inspect import signature
+from io import StringIO
+import os
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslib import Timestamp
+from pandas.errors import EmptyDataError, ParserError
+
+from pandas import DataFrame, Index, Series, compat
+import pandas._testing as tm
+
+from pandas.io.parsers import CParserWrapper, TextFileReader
+
+
+def test_override_set_noconvert_columns():
+ # see gh-17351
+ #
+ # Usecols needs to be sorted in _set_noconvert_columns based
+ # on the test_usecols_with_parse_dates test from test_usecols.py
+ class MyTextFileReader(TextFileReader):
+ def __init__(self):
+ self._currow = 0
+ self.squeeze = False
+
+ class MyCParserWrapper(CParserWrapper):
+ def _set_noconvert_columns(self):
+ if self.usecols_dtype == "integer":
+ # self.usecols is a set, which is documented as unordered
+ # but in practice, a CPython set of integers is sorted.
+ # In other implementations this assumption does not hold.
+ # The following code simulates a different order, which
+ # before GH 17351 would cause the wrong columns to be
+ # converted via the parse_dates parameter
+ self.usecols = list(self.usecols)
+ self.usecols.reverse()
+ return CParserWrapper._set_noconvert_columns(self)
+
+ data = """a,b,c,d,e
+0,1,20140101,0900,4
+0,1,20140102,1000,4"""
+
+ parse_dates = [[1, 2]]
+ cols = {
+ "a": [0, 0],
+ "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
+ }
+ expected = DataFrame(cols, columns=["c_d", "a"])
+
+ parser = MyTextFileReader()
+ parser.options = {
+ "usecols": [0, 2, 3],
+ "parse_dates": parse_dates,
+ "delimiter": ",",
+ }
+ parser.engine = "c"
+ parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
+
+ result = parser.read()
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_local(all_parsers, csv1):
+ prefix = "file:///" if compat.is_platform_windows() else "file://"
+ parser = all_parsers
+
+ fname = prefix + str(os.path.abspath(csv1))
+ result = parser.read_csv(fname, index_col=0, parse_dates=True)
+
+ expected = DataFrame(
+ [
+ [0.980269, 3.685731, -0.364216805298, -1.159738],
+ [1.047916, -0.041232, -0.16181208307, 0.212549],
+ [0.498581, 0.731168, -0.537677223318, 1.346270],
+ [1.120202, 1.567621, 0.00364077397681, 0.675253],
+ [-0.487094, 0.571455, -1.6116394093, 0.103469],
+ [0.836649, 0.246462, 0.588542635376, 1.062782],
+ [-0.157161, 1.340307, 1.1957779562, -1.097007],
+ ],
+ columns=["A", "B", "C", "D"],
+ index=Index(
+ [
+ datetime(2000, 1, 3),
+ datetime(2000, 1, 4),
+ datetime(2000, 1, 5),
+ datetime(2000, 1, 6),
+ datetime(2000, 1, 7),
+ datetime(2000, 1, 10),
+ datetime(2000, 1, 11),
+ ],
+ name="index",
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_1000_sep(all_parsers):
+ parser = all_parsers
+ data = """A|B|C
+1|2,334|5
+10|13|10.
+"""
+ expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
+
+ result = parser.read_csv(StringIO(data), sep="|", thousands=",")
+ tm.assert_frame_equal(result, expected)
+
+
+def test_squeeze(all_parsers):
+ data = """\
+a,1
+b,2
+c,3
+"""
+ parser = all_parsers
+ index = Index(["a", "b", "c"], name=0)
+ expected = Series([1, 2, 3], name=1, index=index)
+
+ result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
+ tm.assert_series_equal(result, expected)
+
+ # see gh-8217
+ #
+ # Series should not be a view.
+ assert not result._is_view
+
+
+def test_unnamed_columns(all_parsers):
+ data = """A,B,C,,
+1,2,3,4,5
+6,7,8,9,10
+11,12,13,14,15
+"""
+ parser = all_parsers
+ expected = DataFrame(
+ [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
+ dtype=np.int64,
+ columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
+ )
+ result = parser.read_csv(StringIO(data))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_csv_mixed_type(all_parsers):
+ data = """A,B,C
+a,1,2
+b,3,4
+c,4,5
+"""
+ parser = all_parsers
+ expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
+ result = parser.read_csv(StringIO(data))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_low_memory_no_rows_with_index(all_parsers):
+ # see gh-21141
+ parser = all_parsers
+
+ if not parser.low_memory:
+ pytest.skip("This is a low-memory specific test")
+
+ data = """A,B,C
+1,1,1,2
+2,2,3,4
+3,3,4,5
+"""
+ result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
+ expected = DataFrame(columns=["A", "B", "C"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_dataframe(all_parsers, csv1):
+ parser = all_parsers
+ result = parser.read_csv(csv1, index_col=0, parse_dates=True)
+
+ expected = DataFrame(
+ [
+ [0.980269, 3.685731, -0.364216805298, -1.159738],
+ [1.047916, -0.041232, -0.16181208307, 0.212549],
+ [0.498581, 0.731168, -0.537677223318, 1.346270],
+ [1.120202, 1.567621, 0.00364077397681, 0.675253],
+ [-0.487094, 0.571455, -1.6116394093, 0.103469],
+ [0.836649, 0.246462, 0.588542635376, 1.062782],
+ [-0.157161, 1.340307, 1.1957779562, -1.097007],
+ ],
+ columns=["A", "B", "C", "D"],
+ index=Index(
+ [
+ datetime(2000, 1, 3),
+ datetime(2000, 1, 4),
+ datetime(2000, 1, 5),
+ datetime(2000, 1, 6),
+ datetime(2000, 1, 7),
+ datetime(2000, 1, 10),
+ datetime(2000, 1, 11),
+ ],
+ name="index",
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows", [3, 3.0])
+def test_read_nrows(all_parsers, nrows):
+ # see gh-10476
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+ expected = DataFrame(
+ [["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
+ columns=["index", "A", "B", "C", "D"],
+ )
+ parser = all_parsers
+
+ result = parser.read_csv(StringIO(data), nrows=nrows)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
+def test_read_nrows_bad(all_parsers, nrows):
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+ msg = r"'nrows' must be an integer >=0"
+ parser = all_parsers
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), nrows=nrows)
+
+
+def test_nrows_skipfooter_errors(all_parsers):
+ msg = "'skipfooter' not supported with 'nrows'"
+ data = "a\n1\n2\n3\n4\n5\n6"
+ parser = all_parsers
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
+
+
+def test_missing_trailing_delimiters(all_parsers):
+ parser = all_parsers
+ data = """A,B,C,D
+1,2,3,4
+1,3,3,
+1,4,5"""
+
+ result = parser.read_csv(StringIO(data))
+ expected = DataFrame(
+ [[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
+ columns=["A", "B", "C", "D"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_skip_initial_space(all_parsers):
+ data = (
+ '"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
+ "1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
+ "314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
+ "70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
+ "0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
+ "-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
+ )
+ parser = all_parsers
+
+ result = parser.read_csv(
+ StringIO(data),
+ names=list(range(33)),
+ header=None,
+ na_values=["-9999.0"],
+ skipinitialspace=True,
+ )
+ expected = DataFrame(
+ [
+ [
+ "09-Apr-2012",
+ "01:10:18.300",
+ 2456026.548822908,
+ 12849,
+ 1.00361,
+ 1.12551,
+ 330.65659,
+ 355626618.16711,
+ 73.48821,
+ 314.11625,
+ 1917.09447,
+ 179.71425,
+ 80.0,
+ 240.0,
+ -350,
+ 70.06056,
+ 344.9837,
+ 1,
+ 1,
+ -0.689265,
+ -0.692787,
+ 0.212036,
+ 14.7674,
+ 41.605,
+ np.nan,
+ np.nan,
+ np.nan,
+ np.nan,
+ np.nan,
+ np.nan,
+ 0,
+ 12,
+ 128,
+ ]
+ ]
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_trailing_delimiters(all_parsers):
+ # see gh-2442
+ data = """A,B,C
+1,2,3,
+4,5,6,
+7,8,9,"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), index_col=False)
+
+ expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_escapechar(all_parsers):
+ # https://stackoverflow.com/questions/13824840/feature-request-for-
+ # pandas-read-csv
+ data = '''SEARCH_TERM,ACTUAL_URL
+"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
+"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
+"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
+
+ parser = all_parsers
+ result = parser.read_csv(
+ StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
+ )
+
+ assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
+
+ tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
+
+
+def test_ignore_leading_whitespace(all_parsers):
+ # see gh-3374, gh-6607
+ parser = all_parsers
+ data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
+ result = parser.read_csv(StringIO(data), sep=r"\s+")
+
+ expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
+def test_uneven_lines_with_usecols(all_parsers, usecols):
+ # see gh-12203
+ parser = all_parsers
+ data = r"""a,b,c
+0,1,2
+3,4,5,6,7
+8,9,10"""
+
+ if usecols is None:
+ # Make sure that an error is still raised
+ # when the "usecols" parameter is not provided.
+ msg = r"Expected \d+ fields in line \d+, saw \d+"
+ with pytest.raises(ParserError, match=msg):
+ parser.read_csv(StringIO(data))
+ else:
+ expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
+
+ result = parser.read_csv(StringIO(data), usecols=usecols)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "data,kwargs,expected",
+ [
+ # First, check to see that the response of parser when faced with no
+ # provided columns raises the correct error, with or without usecols.
+ ("", {}, None),
+ ("", {"usecols": ["X"]}, None),
+ (
+ ",,",
+ {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
+ DataFrame(columns=["X"], index=[0], dtype=np.float64),
+ ),
+ (
+ "",
+ {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
+ DataFrame(columns=["X"]),
+ ),
+ ],
+)
+def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
+ # see gh-12493
+ parser = all_parsers
+
+ if expected is None:
+ msg = "No columns to parse from file"
+ with pytest.raises(EmptyDataError, match=msg):
+ parser.read_csv(StringIO(data), **kwargs)
+ else:
+ result = parser.read_csv(StringIO(data), **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "kwargs,expected",
+ [
+ # gh-8661, gh-8679: this should ignore six lines, including
+ # lines with trailing whitespace and blank lines.
+ (
+ {
+ "header": None,
+ "delim_whitespace": True,
+ "skiprows": [0, 1, 2, 3, 5, 6],
+ "skip_blank_lines": True,
+ },
+ DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
+ ),
+ # gh-8983: test skipping set of rows after a row with trailing spaces.
+ (
+ {
+ "delim_whitespace": True,
+ "skiprows": [1, 2, 3, 5, 6],
+ "skip_blank_lines": True,
+ },
+ DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
+ ),
+ ],
+)
+def test_trailing_spaces(all_parsers, kwargs, expected):
+ data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
+ parser = all_parsers
+
+ result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_raise_on_sep_with_delim_whitespace(all_parsers):
+ # see gh-6607
+ data = "a b c\n1 2 3"
+ parser = all_parsers
+
+ with pytest.raises(ValueError, match="you can only specify one"):
+ parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
+
+
+@pytest.mark.parametrize("delim_whitespace", [True, False])
+def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
+ # see gh-9710
+ parser = all_parsers
+ data = """\
+MyColumn
+a
+b
+a
+b\n"""
+
+ expected = DataFrame({"MyColumn": list("abab")})
+ result = parser.read_csv(
+ StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "sep,skip_blank_lines,exp_data",
+ [
+ (",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
+ (r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
+ (
+ ",",
+ False,
+ [
+ [1.0, 2.0, 4.0],
+ [np.nan, np.nan, np.nan],
+ [np.nan, np.nan, np.nan],
+ [5.0, np.nan, 10.0],
+ [np.nan, np.nan, np.nan],
+ [-70.0, 0.4, 1.0],
+ ],
+ ),
+ ],
+)
+def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
+ parser = all_parsers
+ data = """\
+A,B,C
+1,2.,4.
+
+
+5.,NaN,10.0
+
+-70,.4,1
+"""
+
+ if sep == r"\s+":
+ data = data.replace(",", " ")
+
+ result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
+ expected = DataFrame(exp_data, columns=["A", "B", "C"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_whitespace_lines(all_parsers):
+ parser = all_parsers
+ data = """
+
+\t \t\t
+\t
+A,B,C
+\t 1,2.,4.
+5.,NaN,10.0
+"""
+ expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
+ result = parser.read_csv(StringIO(data))
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "data,expected",
+ [
+ (
+ """ A B C D
+a 1 2 3 4
+b 1 2 3 4
+c 1 2 3 4
+""",
+ DataFrame(
+ [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
+ columns=["A", "B", "C", "D"],
+ index=["a", "b", "c"],
+ ),
+ ),
+ (
+ " a b c\n1 2 3 \n4 5 6\n 7 8 9",
+ DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
+ ),
+ ],
+)
+def test_whitespace_regex_separator(all_parsers, data, expected):
+ # see gh-6607
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), sep=r"\s+")
+ tm.assert_frame_equal(result, expected)
+
+
+def test_sub_character(all_parsers, csv_dir_path):
+ # see gh-16893
+ filename = os.path.join(csv_dir_path, "sub_char.csv")
+ expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
+
+ parser = all_parsers
+ result = parser.read_csv(filename)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
+def test_filename_with_special_chars(all_parsers, filename):
+ # see gh-15086.
+ parser = all_parsers
+ df = DataFrame({"a": [1, 2, 3]})
+
+ with tm.ensure_clean(filename) as path:
+ df.to_csv(path, index=False)
+
+ result = parser.read_csv(path)
+ tm.assert_frame_equal(result, df)
+
+
+def test_read_table_same_signature_as_read_csv(all_parsers):
+ # GH-34976
+ parser = all_parsers
+
+ table_sign = signature(parser.read_table)
+ csv_sign = signature(parser.read_csv)
+
+ assert table_sign.parameters.keys() == csv_sign.parameters.keys()
+ assert table_sign.return_annotation == csv_sign.return_annotation
+
+ for key, csv_param in csv_sign.parameters.items():
+ table_param = table_sign.parameters[key]
+ if key == "sep":
+ assert csv_param.default == ","
+ assert table_param.default == "\t"
+ assert table_param.annotation == csv_param.annotation
+ assert table_param.kind == csv_param.kind
+ continue
+ else:
+ assert table_param == csv_param
+
+
+def test_read_table_equivalency_to_read_csv(all_parsers):
+ # see gh-21948
+ # As of 0.25.0, read_table is undeprecated
+ parser = all_parsers
+ data = "a\tb\n1\t2\n3\t4"
+ expected = parser.read_csv(StringIO(data), sep="\t")
+ result = parser.read_table(StringIO(data))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_first_row_bom(all_parsers):
+ # see gh-26545
+ parser = all_parsers
+ data = '''\ufeff"Head1" "Head2" "Head3"'''
+
+ result = parser.read_csv(StringIO(data), delimiter="\t")
+ expected = DataFrame(columns=["Head1", "Head2", "Head3"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_first_row_bom_unquoted(all_parsers):
+ # see gh-36343
+ parser = all_parsers
+ data = """\ufeffHead1 Head2 Head3"""
+
+ result = parser.read_csv(StringIO(data), delimiter="\t")
+ expected = DataFrame(columns=["Head1", "Head2", "Head3"])
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows", range(1, 6))
+def test_blank_lines_between_header_and_data_rows(all_parsers, nrows):
+ # GH 28071
+ ref = DataFrame(
+ [[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],
+ columns=list("ab"),
+ )
+ csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"
+ parser = all_parsers
+ df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)
+ tm.assert_frame_equal(df, ref[:nrows])
+
+
+def test_no_header_two_extra_columns(all_parsers):
+ # GH 26218
+ column_names = ["one", "two", "three"]
+ ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)
+ stream = StringIO("foo,bar,baz,bam,blah")
+ parser = all_parsers
+ df = parser.read_csv(stream, header=None, names=column_names, index_col=False)
+ tm.assert_frame_equal(df, ref)
+
+
+def test_read_csv_names_not_accepting_sets(all_parsers):
+ # GH 34946
+ data = """\
+ 1,2,3
+ 4,5,6\n"""
+ parser = all_parsers
+ with pytest.raises(ValueError, match="Names should be an ordered collection."):
+ parser.read_csv(StringIO(data), names=set("QAZ"))
+
+
+def test_read_table_delim_whitespace_default_sep(all_parsers):
+ # GH: 35958
+ f = StringIO("a b c\n1 -2 -3\n4 5 6")
+ parser = all_parsers
+ result = parser.read_table(f, delim_whitespace=True)
+ expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("delimiter", [",", "\t"])
+def test_read_csv_delim_whitespace_non_default_sep(all_parsers, delimiter):
+ # GH: 35958
+ f = StringIO("a b c\n1 -2 -3\n4 5 6")
+ parser = all_parsers
+ msg = (
+ "Specified a delimiter with both sep and "
+ "delim_whitespace=True; you can only specify one."
+ )
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(f, delim_whitespace=True, sep=delimiter)
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(f, delim_whitespace=True, delimiter=delimiter)
+
+
+@pytest.mark.parametrize("delimiter", [",", "\t"])
+def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):
+ # GH: 35958
+ f = StringIO("a b c\n1 -2 -3\n4 5 6")
+ parser = all_parsers
+ msg = (
+ "Specified a delimiter with both sep and "
+ "delim_whitespace=True; you can only specify one."
+ )
+ with pytest.raises(ValueError, match=msg):
+ parser.read_table(f, delim_whitespace=True, sep=delimiter)
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_table(f, delim_whitespace=True, delimiter=delimiter)
+
+
+def test_dict_keys_as_names(all_parsers):
+ # GH: 36928
+ data = "1,2"
+
+ keys = {"a": int, "b": int}.keys()
+ parser = all_parsers
+
+ result = parser.read_csv(StringIO(data), names=keys)
+ expected = DataFrame({"a": [1], "b": [2]})
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/common/test_data_list.py b/pandas/tests/io/parser/common/test_data_list.py
new file mode 100644
index 0000000000000..92b8c864f1619
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_data_list.py
@@ -0,0 +1,82 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+import csv
+from io import StringIO
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+from pandas.io.parsers import TextParser
+
+
+def test_read_data_list(all_parsers):
+ parser = all_parsers
+ kwargs = {"index_col": 0}
+ data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
+
+ data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
+ expected = parser.read_csv(StringIO(data), **kwargs)
+
+ with TextParser(data_list, chunksize=2, **kwargs) as parser:
+ result = parser.read()
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_reader_list(all_parsers):
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+ parser = all_parsers
+ kwargs = {"index_col": 0}
+
+ lines = list(csv.reader(StringIO(data)))
+ with TextParser(lines, chunksize=2, **kwargs) as reader:
+ chunks = list(reader)
+
+ expected = parser.read_csv(StringIO(data), **kwargs)
+
+ tm.assert_frame_equal(chunks[0], expected[:2])
+ tm.assert_frame_equal(chunks[1], expected[2:4])
+ tm.assert_frame_equal(chunks[2], expected[4:])
+
+
+def test_reader_list_skiprows(all_parsers):
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+ parser = all_parsers
+ kwargs = {"index_col": 0}
+
+ lines = list(csv.reader(StringIO(data)))
+ with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader:
+ chunks = list(reader)
+
+ expected = parser.read_csv(StringIO(data), **kwargs)
+
+ tm.assert_frame_equal(chunks[0], expected[1:3])
+
+
+def test_read_csv_parse_simple_list(all_parsers):
+ parser = all_parsers
+ data = """foo
+bar baz
+qux foo
+foo
+bar"""
+
+ result = parser.read_csv(StringIO(data), header=None)
+ expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/common/test_decimal.py b/pandas/tests/io/parser/common/test_decimal.py
new file mode 100644
index 0000000000000..7ca9f253bd501
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_decimal.py
@@ -0,0 +1,60 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+from io import StringIO
+
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize(
+ "data,thousands,decimal",
+ [
+ (
+ """A|B|C
+1|2,334.01|5
+10|13|10.
+""",
+ ",",
+ ".",
+ ),
+ (
+ """A|B|C
+1|2.334,01|5
+10|13|10,
+""",
+ ".",
+ ",",
+ ),
+ ],
+)
+def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):
+ parser = all_parsers
+ expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
+
+ result = parser.read_csv(
+ StringIO(data), sep="|", thousands=thousands, decimal=decimal
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_euro_decimal_format(all_parsers):
+ parser = all_parsers
+ data = """Id;Number1;Number2;Text1;Text2;Number3
+1;1521,1541;187101,9543;ABC;poi;4,738797819
+2;121,12;14897,76;DEF;uyt;0,377320872
+3;878,158;108013,434;GHI;rez;2,735694704"""
+
+ result = parser.read_csv(StringIO(data), sep=";", decimal=",")
+ expected = DataFrame(
+ [
+ [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
+ [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
+ [3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
+ ],
+ columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/common/test_file_buffer_url.py b/pandas/tests/io/parser/common/test_file_buffer_url.py
new file mode 100644
index 0000000000000..d0f1d63f88b3e
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_file_buffer_url.py
@@ -0,0 +1,434 @@
+"""
+Tests that work on both the Python and C engines for reading from
+files, buffers and URLs, including file-handle lifecycle behavior.
+"""
+from io import BytesIO, StringIO
+import os
+import platform
+from urllib.error import URLError
+
+import pytest
+
+from pandas.errors import EmptyDataError, ParserError
+import pandas.util._test_decorators as td
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+@tm.network
+def test_url(all_parsers, csv_dir_path):
+ # TODO: FTP testing
+ parser = all_parsers
+ kwargs = {"sep": "\t"}
+
+ url = (
+ "https://raw.github.com/pandas-dev/pandas/master/"
+ "pandas/tests/io/parser/data/salaries.csv"
+ )
+ url_result = parser.read_csv(url, **kwargs)
+
+ local_path = os.path.join(csv_dir_path, "salaries.csv")
+ local_result = parser.read_csv(local_path, **kwargs)
+ tm.assert_frame_equal(url_result, local_result)
+
+
+@pytest.mark.slow
+def test_local_file(all_parsers, csv_dir_path):
+ parser = all_parsers
+ kwargs = {"sep": "\t"}
+
+ local_path = os.path.join(csv_dir_path, "salaries.csv")
+ local_result = parser.read_csv(local_path, **kwargs)
+ url = "file://localhost/" + local_path
+
+ try:
+ url_result = parser.read_csv(url, **kwargs)
+ tm.assert_frame_equal(url_result, local_result)
+ except URLError:
+ # Fails on some systems.
+ pytest.skip("Failing on: " + " ".join(platform.uname()))
+
+
+def test_path_path_lib(all_parsers):
+ parser = all_parsers
+ df = tm.makeDataFrame()
+ result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
+ tm.assert_frame_equal(df, result)
+
+
+def test_path_local_path(all_parsers):
+ parser = all_parsers
+ df = tm.makeDataFrame()
+ result = tm.round_trip_localpath(
+ df.to_csv, lambda p: parser.read_csv(p, index_col=0)
+ )
+ tm.assert_frame_equal(df, result)
+
+
+def test_nonexistent_path(all_parsers):
+ # gh-2428: pls no segfault
+ # gh-14086: raise more helpful FileNotFoundError
+ # GH#29233 "File foo" instead of "File b'foo'"
+ parser = all_parsers
+ path = f"{tm.rands(10)}.csv"
+
+ msg = r"\[Errno 2\]"
+ with pytest.raises(FileNotFoundError, match=msg) as e:
+ parser.read_csv(path)
+ assert path == e.value.filename
+
+
+@td.skip_if_windows # os.chmod does not work in windows
+def test_no_permission(all_parsers):
+ # GH 23784
+ parser = all_parsers
+
+ msg = r"\[Errno 13\]"
+ with tm.ensure_clean() as path:
+ os.chmod(path, 0) # make file unreadable
+
+ # verify that this process cannot open the file (not running as sudo)
+ try:
+ with open(path):
+ pass
+ pytest.skip("Running as sudo.")
+ except PermissionError:
+ pass
+
+ with pytest.raises(PermissionError, match=msg) as e:
+ parser.read_csv(path)
+ assert path == e.value.filename
+
+
+@pytest.mark.parametrize(
+ "data,kwargs,expected,msg",
+ [
+ # gh-10728: WHITESPACE_LINE
+ (
+ "a,b,c\n4,5,6\n ",
+ {},
+ DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+ None,
+ ),
+ # gh-10548: EAT_LINE_COMMENT
+ (
+ "a,b,c\n4,5,6\n#comment",
+ {"comment": "#"},
+ DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+ None,
+ ),
+ # EAT_CRNL_NOP
+ (
+ "a,b,c\n4,5,6\n\r",
+ {},
+ DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+ None,
+ ),
+ # EAT_COMMENT
+ (
+ "a,b,c\n4,5,6#comment",
+ {"comment": "#"},
+ DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+ None,
+ ),
+ # SKIP_LINE
+ (
+ "a,b,c\n4,5,6\nskipme",
+ {"skiprows": [2]},
+ DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+ None,
+ ),
+ # EAT_LINE_COMMENT
+ (
+ "a,b,c\n4,5,6\n#comment",
+ {"comment": "#", "skip_blank_lines": False},
+ DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+ None,
+ ),
+ # IN_FIELD
+ (
+ "a,b,c\n4,5,6\n ",
+ {"skip_blank_lines": False},
+ DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
+ None,
+ ),
+ # EAT_CRNL
+ (
+ "a,b,c\n4,5,6\n\r",
+ {"skip_blank_lines": False},
+ DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
+ None,
+ ),
+ # ESCAPED_CHAR
+ (
+ "a,b,c\n4,5,6\n\\",
+ {"escapechar": "\\"},
+ None,
+ "(EOF following escape character)|(unexpected end of data)",
+ ),
+ # ESCAPE_IN_QUOTED_FIELD
+ (
+ 'a,b,c\n4,5,6\n"\\',
+ {"escapechar": "\\"},
+ None,
+ "(EOF inside string starting at row 2)|(unexpected end of data)",
+ ),
+ # IN_QUOTED_FIELD
+ (
+ 'a,b,c\n4,5,6\n"',
+ {"escapechar": "\\"},
+ None,
+ "(EOF inside string starting at row 2)|(unexpected end of data)",
+ ),
+ ],
+ ids=[
+ "whitespace-line",
+ "eat-line-comment",
+ "eat-crnl-nop",
+ "eat-comment",
+ "skip-line",
+ "eat-line-comment",
+ "in-field",
+ "eat-crnl",
+ "escaped-char",
+ "escape-in-quoted-field",
+ "in-quoted-field",
+ ],
+)
+def test_eof_states(all_parsers, data, kwargs, expected, msg):
+ # see gh-10728, gh-10548
+ parser = all_parsers
+
+ if expected is None:
+ with pytest.raises(ParserError, match=msg):
+ parser.read_csv(StringIO(data), **kwargs)
+ else:
+ result = parser.read_csv(StringIO(data), **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_temporary_file(all_parsers):
+ # see gh-13398
+ parser = all_parsers
+ data = "0 0"
+
+ with tm.ensure_clean(mode="w+", return_filelike=True) as new_file:
+ new_file.write(data)
+ new_file.flush()
+ new_file.seek(0)
+
+ result = parser.read_csv(new_file, sep=r"\s+", header=None)
+
+ expected = DataFrame([[0, 0]])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_internal_eof_byte(all_parsers):
+ # see gh-5500
+ parser = all_parsers
+ data = "a,b\n1\x1a,2"
+
+ expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])
+ result = parser.read_csv(StringIO(data))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_internal_eof_byte_to_file(all_parsers):
+ # see gh-16559
+ parser = all_parsers
+ data = b'c1,c2\r\n"test \x1a test", test\r\n'
+ expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
+ path = f"__{tm.rands(10)}__.csv"
+
+ with tm.ensure_clean(path) as path:
+ with open(path, "wb") as f:
+ f.write(data)
+
+ result = parser.read_csv(path)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_file_handle_string_io(all_parsers):
+ # gh-14418
+ #
+    # Don't close user-provided file handles.
+ parser = all_parsers
+ data = "a,b\n1,2"
+
+ fh = StringIO(data)
+ parser.read_csv(fh)
+ assert not fh.closed
+
+
+def test_file_handles_with_open(all_parsers, csv1):
+ # gh-14418
+ #
+    # Don't close user-provided file handles.
+ parser = all_parsers
+
+ for mode in ["r", "rb"]:
+ with open(csv1, mode) as f:
+ parser.read_csv(f)
+ assert not f.closed
+
+
+def test_invalid_file_buffer_class(all_parsers):
+ # see gh-15337
+ class InvalidBuffer:
+ pass
+
+ parser = all_parsers
+ msg = "Invalid file path or buffer object type"
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(InvalidBuffer())
+
+
+def test_invalid_file_buffer_mock(all_parsers):
+ # see gh-15337
+ parser = all_parsers
+ msg = "Invalid file path or buffer object type"
+
+ class Foo:
+ pass
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(Foo())
+
+
+def test_valid_file_buffer_seems_invalid(all_parsers):
+ # gh-16135: we want to ensure that "tell" and "seek"
+ # aren't actually being used when we call `read_csv`
+ #
+ # Thus, while the object may look "invalid" (these
+ # methods are attributes of the `StringIO` class),
+ # it is still a valid file-object for our purposes.
+ class NoSeekTellBuffer(StringIO):
+ def tell(self):
+ raise AttributeError("No tell method")
+
+ def seek(self, pos, whence=0):
+ raise AttributeError("No seek method")
+
+ data = "a\n1"
+ parser = all_parsers
+ expected = DataFrame({"a": [1]})
+
+ result = parser.read_csv(NoSeekTellBuffer(data))
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("io_class", [StringIO, BytesIO])
+@pytest.mark.parametrize("encoding", [None, "utf-8"])
+def test_read_csv_file_handle(all_parsers, io_class, encoding):
+ """
+ Test whether read_csv does not close user-provided file handles.
+
+ GH 36980
+ """
+ parser = all_parsers
+ expected = DataFrame({"a": [1], "b": [2]})
+
+ content = "a,b\n1,2"
+ if io_class == BytesIO:
+ content = content.encode("utf-8")
+ handle = io_class(content)
+
+ tm.assert_frame_equal(parser.read_csv(handle, encoding=encoding), expected)
+ assert not handle.closed
+
+
+def test_memory_map_file_handle_silent_fallback(all_parsers, compression):
+ """
+ Do not fail for buffers with memory_map=True (cannot memory map BytesIO).
+
+ GH 37621
+ """
+ parser = all_parsers
+ expected = DataFrame({"a": [1], "b": [2]})
+
+ handle = BytesIO()
+ expected.to_csv(handle, index=False, compression=compression, mode="wb")
+ handle.seek(0)
+
+ tm.assert_frame_equal(
+ parser.read_csv(handle, memory_map=True, compression=compression),
+ expected,
+ )
+
+
+def test_memory_map_compression(all_parsers, compression):
+ """
+ Support memory map for compressed files.
+
+ GH 37621
+ """
+ parser = all_parsers
+ expected = DataFrame({"a": [1], "b": [2]})
+
+ with tm.ensure_clean() as path:
+ expected.to_csv(path, index=False, compression=compression)
+
+ tm.assert_frame_equal(
+ parser.read_csv(path, memory_map=True, compression=compression),
+ expected,
+ )
+
+
+def test_context_manager(all_parsers, datapath):
+ # make sure that opened files are closed
+ parser = all_parsers
+
+ path = datapath("io", "data", "csv", "iris.csv")
+
+ reader = parser.read_csv(path, chunksize=1)
+ assert not reader._engine.handles.handle.closed
+ try:
+ with reader:
+ next(reader)
+ assert False
+ except AssertionError:
+ assert reader._engine.handles.handle.closed
+
+
+def test_context_manageri_user_provided(all_parsers, datapath):
+ # make sure that user-provided handles are not closed
+ parser = all_parsers
+
+ with open(datapath("io", "data", "csv", "iris.csv"), mode="r") as path:
+
+ reader = parser.read_csv(path, chunksize=1)
+ assert not reader._engine.handles.handle.closed
+ try:
+ with reader:
+ next(reader)
+ assert False
+ except AssertionError:
+ assert not reader._engine.handles.handle.closed
+
+
+def test_file_descriptor_leak(all_parsers):
+ # GH 31488
+
+ parser = all_parsers
+ with tm.ensure_clean() as path:
+
+ def test():
+ with pytest.raises(EmptyDataError, match="No columns to parse from file"):
+ parser.read_csv(path)
+
+ td.check_file_leaks(test)()
+
+
+@td.check_file_leaks
+def test_memory_map(all_parsers, csv_dir_path):
+ mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")
+ parser = all_parsers
+
+ expected = DataFrame(
+ {"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]}
+ )
+
+ result = parser.read_csv(mmap_file, memory_map=True)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/common/test_float.py b/pandas/tests/io/parser/common/test_float.py
new file mode 100644
index 0000000000000..29aa387e2b045
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_float.py
@@ -0,0 +1,66 @@
+"""
+Tests that work on both the Python and C engines for parsing of
+floating-point values, including exponents and ``float_precision``.
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas.compat import is_platform_linux
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+def test_float_parser(all_parsers):
+ # see gh-9565
+ parser = all_parsers
+ data = "45e-1,4.5,45.,inf,-inf"
+ result = parser.read_csv(StringIO(data), header=None)
+
+ expected = DataFrame([[float(s) for s in data.split(",")]])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_scientific_no_exponent(all_parsers_all_precisions):
+ # see gh-12215
+ df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]})
+ data = df.to_csv(index=False)
+ parser, precision = all_parsers_all_precisions
+ if parser == "pyarrow":
+ pytest.skip()
+
+ df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision)
+ tm.assert_frame_equal(df_roundtrip, df)
+
+
+@pytest.mark.parametrize("neg_exp", [-617, -100000, -99999999999999999])
+def test_very_negative_exponent(all_parsers_all_precisions, neg_exp):
+ # GH#38753
+ parser, precision = all_parsers_all_precisions
+ if parser == "pyarrow":
+ pytest.skip()
+ data = f"data\n10E{neg_exp}"
+ result = parser.read_csv(StringIO(data), float_precision=precision)
+ expected = DataFrame({"data": [0.0]})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("exp", [999999999999999999, -999999999999999999])
+def test_too_many_exponent_digits(all_parsers_all_precisions, exp, request):
+ # GH#38753
+ parser, precision = all_parsers_all_precisions
+ data = f"data\n10E{exp}"
+ result = parser.read_csv(StringIO(data), float_precision=precision)
+ if precision == "round_trip":
+ if exp == 999999999999999999 and is_platform_linux():
+ mark = pytest.mark.xfail(reason="GH38794, on Linux gives object result")
+ request.node.add_marker(mark)
+
+ value = np.inf if exp > 0 else 0.0
+ expected = DataFrame({"data": [value]})
+ else:
+ expected = DataFrame({"data": [f"10E{exp}"]})
+
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/common/test_index.py b/pandas/tests/io/parser/common/test_index.py
new file mode 100644
index 0000000000000..a133e1be49946
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_index.py
@@ -0,0 +1,281 @@
+"""
+Tests that work on both the Python and C engines for index-column
+handling (``index_col``, named indexes and MultiIndexes).
+"""
+from datetime import datetime
+from io import StringIO
+import os
+
+import pytest
+
+from pandas import DataFrame, Index, MultiIndex
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize(
+ "data,kwargs,expected",
+ [
+ (
+ """foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+""",
+ {"index_col": 0, "names": ["index", "A", "B", "C", "D"]},
+ DataFrame(
+ [
+ [2, 3, 4, 5],
+ [7, 8, 9, 10],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ ],
+ index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
+ columns=["A", "B", "C", "D"],
+ ),
+ ),
+ (
+ """foo,one,2,3,4,5
+foo,two,7,8,9,10
+foo,three,12,13,14,15
+bar,one,12,13,14,15
+bar,two,12,13,14,15
+""",
+ {"index_col": [0, 1], "names": ["index1", "index2", "A", "B", "C", "D"]},
+ DataFrame(
+ [
+ [2, 3, 4, 5],
+ [7, 8, 9, 10],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ ],
+ index=MultiIndex.from_tuples(
+ [
+ ("foo", "one"),
+ ("foo", "two"),
+ ("foo", "three"),
+ ("bar", "one"),
+ ("bar", "two"),
+ ],
+ names=["index1", "index2"],
+ ),
+ columns=["A", "B", "C", "D"],
+ ),
+ ),
+ ],
+)
+def test_pass_names_with_index(all_parsers, data, kwargs, expected):
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
+def test_multi_index_no_level_names(all_parsers, index_col):
+ data = """index1,index2,A,B,C,D
+foo,one,2,3,4,5
+foo,two,7,8,9,10
+foo,three,12,13,14,15
+bar,one,12,13,14,15
+bar,two,12,13,14,15
+"""
+ headless_data = "\n".join(data.split("\n")[1:])
+
+ names = ["A", "B", "C", "D"]
+ parser = all_parsers
+
+ result = parser.read_csv(
+ StringIO(headless_data), index_col=index_col, header=None, names=names
+ )
+ expected = parser.read_csv(StringIO(data), index_col=index_col)
+
+ # No index names in headless data.
+ expected.index.names = [None] * 2
+ tm.assert_frame_equal(result, expected)
+
+
+def test_multi_index_no_level_names_implicit(all_parsers):
+ parser = all_parsers
+ data = """A,B,C,D
+foo,one,2,3,4,5
+foo,two,7,8,9,10
+foo,three,12,13,14,15
+bar,one,12,13,14,15
+bar,two,12,13,14,15
+"""
+
+ result = parser.read_csv(StringIO(data))
+ expected = DataFrame(
+ [
+ [2, 3, 4, 5],
+ [7, 8, 9, 10],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ ],
+ columns=["A", "B", "C", "D"],
+ index=MultiIndex.from_tuples(
+ [
+ ("foo", "one"),
+ ("foo", "two"),
+ ("foo", "three"),
+ ("bar", "one"),
+ ("bar", "two"),
+ ]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "data,expected,header",
+ [
+ ("a,b", DataFrame(columns=["a", "b"]), [0]),
+ (
+ "a,b\nc,d",
+ DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
+ [0, 1],
+ ),
+ ],
+)
+@pytest.mark.parametrize("round_trip", [True, False])
+def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
+ # see gh-14545
+ parser = all_parsers
+ data = expected.to_csv(index=False) if round_trip else data
+
+ result = parser.read_csv(StringIO(data), header=header)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_no_unnamed_index(all_parsers):
+ parser = all_parsers
+ data = """ id c0 c1 c2
+0 1 0 a b
+1 2 0 c d
+2 2 2 e f
+"""
+ result = parser.read_csv(StringIO(data), sep=" ")
+ expected = DataFrame(
+ [[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
+ columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_duplicate_index_explicit(all_parsers):
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo,12,13,14,15
+bar,12,13,14,15
+"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), index_col=0)
+
+ expected = DataFrame(
+ [
+ [2, 3, 4, 5],
+ [7, 8, 9, 10],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ ],
+ columns=["A", "B", "C", "D"],
+ index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_duplicate_index_implicit(all_parsers):
+ data = """A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo,12,13,14,15
+bar,12,13,14,15
+"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data))
+
+ expected = DataFrame(
+ [
+ [2, 3, 4, 5],
+ [7, 8, 9, 10],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ [12, 13, 14, 15],
+ ],
+ columns=["A", "B", "C", "D"],
+ index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_no_index_name(all_parsers, csv_dir_path):
+ parser = all_parsers
+ csv2 = os.path.join(csv_dir_path, "test2.csv")
+ result = parser.read_csv(csv2, index_col=0, parse_dates=True)
+
+ expected = DataFrame(
+ [
+ [0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
+ [1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
+ [0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
+ [1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
+ [-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
+ ],
+ columns=["A", "B", "C", "D", "E"],
+ index=Index(
+ [
+ datetime(2000, 1, 3),
+ datetime(2000, 1, 4),
+ datetime(2000, 1, 5),
+ datetime(2000, 1, 6),
+ datetime(2000, 1, 7),
+ ]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_empty_with_index(all_parsers):
+ # see gh-10184
+ data = "x,y"
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), index_col=0)
+
+ expected = DataFrame(columns=["y"], index=Index([], name="x"))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_empty_with_multi_index(all_parsers):
+ # see gh-10467
+ data = "x,y,z"
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), index_col=["x", "y"])
+
+ expected = DataFrame(
+ columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_empty_with_reversed_multi_index(all_parsers):
+ data = "x,y,z"
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), index_col=[1, 0])
+
+ expected = DataFrame(
+ columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/common/test_inf.py b/pandas/tests/io/parser/common/test_inf.py
new file mode 100644
index 0000000000000..fca4aaaba6675
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_inf.py
@@ -0,0 +1,61 @@
+"""
+Tests that work on both the Python and C engines for parsing of
+``inf``/``Infinity`` values.
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas import DataFrame, option_context
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize("na_filter", [True, False])
+def test_inf_parsing(all_parsers, na_filter):
+ parser = all_parsers
+ data = """\
+,A
+a,inf
+b,-inf
+c,+Inf
+d,-Inf
+e,INF
+f,-INF
+g,+INf
+h,-INf
+i,inF
+j,-inF"""
+ expected = DataFrame(
+ {"A": [float("inf"), float("-inf")] * 5},
+ index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],
+ )
+ result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("na_filter", [True, False])
+def test_infinity_parsing(all_parsers, na_filter):
+ parser = all_parsers
+ data = """\
+,A
+a,Infinity
+b,-Infinity
+c,+Infinity
+"""
+ expected = DataFrame(
+ {"A": [float("infinity"), float("-infinity"), float("+infinity")]},
+ index=["a", "b", "c"],
+ )
+ result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_with_use_inf_as_na(all_parsers):
+ # https://github.com/pandas-dev/pandas/issues/35493
+ parser = all_parsers
+ data = "1.0\nNaN\n3.0"
+ with option_context("use_inf_as_na", True):
+ result = parser.read_csv(StringIO(data), header=None)
+ expected = DataFrame([1.0, np.nan, 3.0])
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/io/parser/common/test_ints.py b/pandas/tests/io/parser/common/test_ints.py
new file mode 100644
index 0000000000000..a8f5c43ea15c7
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_ints.py
@@ -0,0 +1,203 @@
+"""
+Tests that work on both the Python and C engines for parsing of
+integer and boolean values, including int64/uint64 overflow handling.
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas import DataFrame, Series
+import pandas._testing as tm
+
+
+def test_int_conversion(all_parsers):
+ data = """A,B
+1.0,1
+2.0,2
+3.0,3
+"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data))
+
+ expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "data,kwargs,expected",
+ [
+ (
+ "A,B\nTrue,1\nFalse,2\nTrue,3",
+ {},
+ DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
+ ),
+ (
+ "A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
+ {"true_values": ["yes", "Yes", "YES"], "false_values": ["no", "NO", "No"]},
+ DataFrame(
+ [[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
+ columns=["A", "B"],
+ ),
+ ),
+ (
+ "A,B\nTRUE,1\nFALSE,2\nTRUE,3",
+ {},
+ DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
+ ),
+ (
+ "A,B\nfoo,bar\nbar,foo",
+ {"true_values": ["foo"], "false_values": ["bar"]},
+ DataFrame([[True, False], [False, True]], columns=["A", "B"]),
+ ),
+ ],
+)
+def test_parse_bool(all_parsers, data, kwargs, expected):
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_parse_integers_above_fp_precision(all_parsers):
+ data = """Numbers
+17007000002000191
+17007000002000191
+17007000002000191
+17007000002000191
+17007000002000192
+17007000002000192
+17007000002000192
+17007000002000192
+17007000002000192
+17007000002000194"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data))
+ expected = DataFrame(
+ {
+ "Numbers": [
+ 17007000002000191,
+ 17007000002000191,
+ 17007000002000191,
+ 17007000002000191,
+ 17007000002000192,
+ 17007000002000192,
+ 17007000002000192,
+ 17007000002000192,
+ 17007000002000192,
+ 17007000002000194,
+ ]
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("sep", [" ", r"\s+"])
+def test_integer_overflow_bug(all_parsers, sep):
+ # see gh-2601
+ data = "65248E10 11\n55555E55 22\n"
+ parser = all_parsers
+
+ result = parser.read_csv(StringIO(data), header=None, sep=sep)
+ expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_int64_min_issues(all_parsers):
+ # see gh-2599
+ parser = all_parsers
+ data = "A,B\n0,0\n0,"
+ result = parser.read_csv(StringIO(data))
+
+ expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
+def test_int64_overflow(all_parsers, conv):
+ data = """ID
+00013007854817840016671868
+00013007854817840016749251
+00013007854817840016754630
+00013007854817840016781876
+00013007854817840017028824
+00013007854817840017963235
+00013007854817840018860166"""
+ parser = all_parsers
+
+ if conv is None:
+ # 13007854817840016671868 > UINT64_MAX, so this
+ # will overflow and return object as the dtype.
+ result = parser.read_csv(StringIO(data))
+ expected = DataFrame(
+ [
+ "00013007854817840016671868",
+ "00013007854817840016749251",
+ "00013007854817840016754630",
+ "00013007854817840016781876",
+ "00013007854817840017028824",
+ "00013007854817840017963235",
+ "00013007854817840018860166",
+ ],
+ columns=["ID"],
+ )
+ tm.assert_frame_equal(result, expected)
+ else:
+ # 13007854817840016671868 > UINT64_MAX, so attempts
+ # to cast to either int64 or uint64 will result in
+ # an OverflowError being raised.
+ msg = (
+ "(Python int too large to convert to C long)|"
+ "(long too big to convert)|"
+ "(int too big to convert)"
+ )
+
+ with pytest.raises(OverflowError, match=msg):
+ parser.read_csv(StringIO(data), converters={"ID": conv})
+
+
+@pytest.mark.parametrize(
+ "val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
+)
+def test_int64_uint64_range(all_parsers, val):
+    # These numbers fall right inside the int64-uint64
+    # range, so they should be parsed as integers, not strings.
+ parser = all_parsers
+ result = parser.read_csv(StringIO(str(val)), header=None)
+
+ expected = DataFrame([val])
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1]
+)
+def test_outside_int64_uint64_range(all_parsers, val):
+ # These numbers fall just outside the int64-uint64
+ # range, so they should be parsed as string.
+ parser = all_parsers
+ result = parser.read_csv(StringIO(str(val)), header=None)
+
+ expected = DataFrame([str(val)])
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]])
+def test_numeric_range_too_wide(all_parsers, exp_data):
+ # No numerical dtype can hold both negative and uint64
+ # values, so they should be cast as string.
+ parser = all_parsers
+ data = "\n".join(exp_data)
+ expected = DataFrame(exp_data)
+
+ result = parser.read_csv(StringIO(data), header=None)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_integer_precision(all_parsers):
+    # GH 7072
+ s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765
+5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(s), header=None)[4]
+ expected = Series([4321583677327450765, 4321113141090630389], name=4)
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/io/parser/common/test_iterator.py b/pandas/tests/io/parser/common/test_iterator.py
new file mode 100644
index 0000000000000..3cc30b0ab4029
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_iterator.py
@@ -0,0 +1,104 @@
+"""
+Tests that work on both the Python and C engines for the ``iterator``
+and ``chunksize`` options of ``read_csv``.
+"""
+from io import StringIO
+
+import pytest
+
+from pandas import DataFrame, Series, concat
+import pandas._testing as tm
+
+
+def test_iterator(all_parsers):
+ # see gh-6607
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+ parser = all_parsers
+ kwargs = {"index_col": 0}
+
+ expected = parser.read_csv(StringIO(data), **kwargs)
+ with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader:
+
+ first_chunk = reader.read(3)
+ tm.assert_frame_equal(first_chunk, expected[:3])
+
+ last_chunk = reader.read(5)
+ tm.assert_frame_equal(last_chunk, expected[3:])
+
+
+def test_iterator2(all_parsers):
+ parser = all_parsers
+ data = """A,B,C
+foo,1,2,3
+bar,4,5,6
+baz,7,8,9
+"""
+
+ with parser.read_csv(StringIO(data), iterator=True) as reader:
+ result = list(reader)
+
+ expected = DataFrame(
+ [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ index=["foo", "bar", "baz"],
+ columns=["A", "B", "C"],
+ )
+ tm.assert_frame_equal(result[0], expected)
+
+
+def test_iterator_stop_on_chunksize(all_parsers):
+ # gh-3967: stopping iteration when chunksize is specified
+ parser = all_parsers
+ data = """A,B,C
+foo,1,2,3
+bar,4,5,6
+baz,7,8,9
+"""
+
+ with parser.read_csv(StringIO(data), chunksize=1) as reader:
+ result = list(reader)
+
+ assert len(result) == 3
+ expected = DataFrame(
+ [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ index=["foo", "bar", "baz"],
+ columns=["A", "B", "C"],
+ )
+ tm.assert_frame_equal(concat(result), expected)
+
+
+@pytest.mark.parametrize(
+ "kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}]
+)
+def test_iterator_skipfooter_errors(all_parsers, kwargs):
+ msg = "'skipfooter' not supported for iteration"
+ parser = all_parsers
+ data = "a\n1\n2"
+
+ with pytest.raises(ValueError, match=msg):
+ with parser.read_csv(StringIO(data), skipfooter=1, **kwargs) as _:
+ pass
+
+
+def test_iteration_open_handle(all_parsers):
+ parser = all_parsers
+ kwargs = {"squeeze": True, "header": None}
+
+ with tm.ensure_clean() as path:
+ with open(path, "w") as f:
+ f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
+
+ with open(path) as f:
+ for line in f:
+ if "CCC" in line:
+ break
+
+ result = parser.read_csv(f, **kwargs)
+ expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0)
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py
new file mode 100644
index 0000000000000..a2787ddad3683
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_read_errors.py
@@ -0,0 +1,210 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+import codecs
+from io import StringIO
+import os
+
+import numpy as np
+import pytest
+
+from pandas.errors import EmptyDataError, ParserError
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+def test_empty_decimal_marker(all_parsers):
+ data = """A|B|C
+1|2,334|5
+10|13|10.
+"""
+ # Parsers support only length-1 decimals
+ msg = "Only length-1 decimal markers supported"
+ parser = all_parsers
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), decimal="")
+
+
+def test_bad_stream_exception(all_parsers, csv_dir_path):
+ # see gh-13652
+ #
+ # This test validates that both the Python engine and C engine will
+ # raise UnicodeDecodeError instead of C engine raising ParserError
+ # and swallowing the exception that caused read to fail.
+ path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
+ codec = codecs.lookup("utf-8")
+ utf8 = codecs.lookup("utf-8")
+ parser = all_parsers
+ msg = "'utf-8' codec can't decode byte"
+
+ # Stream must be binary UTF8.
+ with open(path, "rb") as handle, codecs.StreamRecoder(
+ handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
+ ) as stream:
+
+ with pytest.raises(UnicodeDecodeError, match=msg):
+ parser.read_csv(stream)
+
+
+def test_malformed(all_parsers):
+ # see gh-6607
+ parser = all_parsers
+ data = """ignore
+A,B,C
+1,2,3 # comment
+1,2,3,4,5
+2,3,4
+"""
+ msg = "Expected 3 fields in line 4, saw 5"
+ with pytest.raises(ParserError, match=msg):
+ parser.read_csv(StringIO(data), header=1, comment="#")
+
+
+@pytest.mark.parametrize("nrows", [5, 3, None])
+def test_malformed_chunks(all_parsers, nrows):
+ data = """ignore
+A,B,C
+skip
+1,2,3
+3,5,10 # comment
+1,2,3,4,5
+2,3,4
+"""
+ parser = all_parsers
+ msg = "Expected 3 fields in line 6, saw 5"
+ with parser.read_csv(
+ StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
+ ) as reader:
+ with pytest.raises(ParserError, match=msg):
+ reader.read(nrows)
+
+
+def test_catch_too_many_names(all_parsers):
+ # see gh-5156
+ data = """\
+1,2,3
+4,,6
+7,8,9
+10,11,12\n"""
+ parser = all_parsers
+ msg = (
+ "Too many columns specified: expected 4 and found 3"
+ if parser.engine == "c"
+ else "Number of passed names did not match "
+ "number of header fields in the file"
+ )
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
+
+
+@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
+def test_raise_on_no_columns(all_parsers, nrows):
+ parser = all_parsers
+ data = "\n" * nrows
+
+ msg = "No columns to parse from file"
+ with pytest.raises(EmptyDataError, match=msg):
+ parser.read_csv(StringIO(data))
+
+
+def test_read_csv_raises_on_header_prefix(all_parsers):
+ # gh-27394
+ parser = all_parsers
+ msg = "Argument prefix must be None if argument header is not None"
+
+ s = StringIO("0,1\n2,3")
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(s, header=0, prefix="_X")
+
+
+def test_unexpected_keyword_parameter_exception(all_parsers):
+ # GH-34976
+ parser = all_parsers
+
+ msg = "{}\\(\\) got an unexpected keyword argument 'foo'"
+ with pytest.raises(TypeError, match=msg.format("read_csv")):
+ parser.read_csv("foo.csv", foo=1)
+ with pytest.raises(TypeError, match=msg.format("read_table")):
+ parser.read_table("foo.tsv", foo=1)
+
+
+def test_suppress_error_output(all_parsers, capsys):
+ # see gh-15925
+ parser = all_parsers
+ data = "a\n1\n1,2,3\n4\n5,6,7"
+ expected = DataFrame({"a": [1, 4]})
+
+ result = parser.read_csv(
+ StringIO(data), error_bad_lines=False, warn_bad_lines=False
+ )
+ tm.assert_frame_equal(result, expected)
+
+ captured = capsys.readouterr()
+ assert captured.err == ""
+
+
+@pytest.mark.parametrize(
+ "kwargs",
+ [{}, {"error_bad_lines": True}], # Default is True. # Explicitly pass in.
+)
+@pytest.mark.parametrize(
+ "warn_kwargs", [{}, {"warn_bad_lines": True}, {"warn_bad_lines": False}]
+)
+def test_error_bad_lines(all_parsers, kwargs, warn_kwargs):
+ # see gh-15925
+ parser = all_parsers
+ kwargs.update(**warn_kwargs)
+ data = "a\n1\n1,2,3\n4\n5,6,7"
+
+ msg = "Expected 1 fields in line 3, saw 3"
+ with pytest.raises(ParserError, match=msg):
+ parser.read_csv(StringIO(data), **kwargs)
+
+
+def test_warn_bad_lines(all_parsers, capsys):
+ # see gh-15925
+ parser = all_parsers
+ data = "a\n1\n1,2,3\n4\n5,6,7"
+ expected = DataFrame({"a": [1, 4]})
+
+ result = parser.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True)
+ tm.assert_frame_equal(result, expected)
+
+ captured = capsys.readouterr()
+ assert "Skipping line 3" in captured.err
+ assert "Skipping line 5" in captured.err
+
+
+def test_read_csv_wrong_num_columns(all_parsers):
+ # Too few columns.
+ data = """A,B,C,D,E,F
+1,2,3,4,5,6
+6,7,8,9,10,11,12
+11,12,13,14,15,16
+"""
+ parser = all_parsers
+ msg = "Expected 6 fields in line 3, saw 7"
+
+ with pytest.raises(ParserError, match=msg):
+ parser.read_csv(StringIO(data))
+
+
+def test_null_byte_char(all_parsers):
+ # see gh-2741
+ data = "\x00,foo"
+ names = ["a", "b"]
+ parser = all_parsers
+
+ if parser.engine == "c":
+ expected = DataFrame([[np.nan, "foo"]], columns=names)
+ out = parser.read_csv(StringIO(data), names=names)
+ tm.assert_frame_equal(out, expected)
+ else:
+ msg = "NULL byte detected"
+ with pytest.raises(ParserError, match=msg):
+ parser.read_csv(StringIO(data), names=names)
diff --git a/pandas/tests/io/parser/common/test_verbose.py b/pandas/tests/io/parser/common/test_verbose.py
new file mode 100644
index 0000000000000..fdd905b48ea1e
--- /dev/null
+++ b/pandas/tests/io/parser/common/test_verbose.py
@@ -0,0 +1,51 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+from io import StringIO
+
+
+def test_verbose_read(all_parsers, capsys):
+ parser = all_parsers
+ data = """a,b,c,d
+one,1,2,3
+one,1,2,3
+,1,2,3
+one,1,2,3
+,1,2,3
+,1,2,3
+one,1,2,3
+two,1,2,3"""
+
+ # Engines are verbose in different ways.
+ parser.read_csv(StringIO(data), verbose=True)
+ captured = capsys.readouterr()
+
+ if parser.engine == "c":
+ assert "Tokenization took:" in captured.out
+ assert "Parser memory cleanup took:" in captured.out
+ else: # Python engine
+ assert captured.out == "Filled 3 NA values in column a\n"
+
+
+def test_verbose_read2(all_parsers, capsys):
+ parser = all_parsers
+ data = """a,b,c,d
+one,1,2,3
+two,1,2,3
+three,1,2,3
+four,1,2,3
+five,1,2,3
+,1,2,3
+seven,1,2,3
+eight,1,2,3"""
+
+ parser.read_csv(StringIO(data), verbose=True, index_col=0)
+ captured = capsys.readouterr()
+
+ # Engines are verbose in different ways.
+ if parser.engine == "c":
+ assert "Tokenization took:" in captured.out
+ assert "Parser memory cleanup took:" in captured.out
+ else: # Python engine
+ assert captured.out == "Filled 1 NA values in column a\n"
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
deleted file mode 100644
index 31f1581a6184b..0000000000000
--- a/pandas/tests/io/parser/test_common.py
+++ /dev/null
@@ -1,2374 +0,0 @@
-"""
-Tests that work on both the Python and C engines but do not have a
-specific classification into the other test modules.
-"""
-import codecs
-import csv
-from datetime import datetime
-from inspect import signature
-from io import BytesIO, StringIO
-import os
-import platform
-from urllib.error import URLError
-
-import numpy as np
-import pytest
-
-from pandas._libs.tslib import Timestamp
-from pandas.compat import is_platform_linux
-from pandas.errors import DtypeWarning, EmptyDataError, ParserError
-import pandas.util._test_decorators as td
-
-from pandas import DataFrame, Index, MultiIndex, Series, compat, concat, option_context
-import pandas._testing as tm
-
-from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
-
-
-def test_override_set_noconvert_columns():
- # see gh-17351
- #
- # Usecols needs to be sorted in _set_noconvert_columns based
- # on the test_usecols_with_parse_dates test from test_usecols.py
- class MyTextFileReader(TextFileReader):
- def __init__(self):
- self._currow = 0
- self.squeeze = False
-
- class MyCParserWrapper(CParserWrapper):
- def _set_noconvert_columns(self):
- if self.usecols_dtype == "integer":
- # self.usecols is a set, which is documented as unordered
- # but in practice, a CPython set of integers is sorted.
- # In other implementations this assumption does not hold.
- # The following code simulates a different order, which
- # before GH 17351 would cause the wrong columns to be
- # converted via the parse_dates parameter
- self.usecols = list(self.usecols)
- self.usecols.reverse()
- return CParserWrapper._set_noconvert_columns(self)
-
- data = """a,b,c,d,e
-0,1,20140101,0900,4
-0,1,20140102,1000,4"""
-
- parse_dates = [[1, 2]]
- cols = {
- "a": [0, 0],
- "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
- }
- expected = DataFrame(cols, columns=["c_d", "a"])
-
- parser = MyTextFileReader()
- parser.options = {
- "usecols": [0, 2, 3],
- "parse_dates": parse_dates,
- "delimiter": ",",
- }
- parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
-
- result = parser.read()
- tm.assert_frame_equal(result, expected)
-
-
-def test_empty_decimal_marker(all_parsers):
- data = """A|B|C
-1|2,334|5
-10|13|10.
-"""
- # Parsers support only length-1 decimals
- msg = "Only length-1 decimal markers supported"
- parser = all_parsers
-
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(StringIO(data), decimal="")
-
-
-def test_bad_stream_exception(all_parsers, csv_dir_path):
- # see gh-13652
- #
- # This test validates that both the Python engine and C engine will
- # raise UnicodeDecodeError instead of C engine raising ParserError
- # and swallowing the exception that caused read to fail.
- path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
- codec = codecs.lookup("utf-8")
- utf8 = codecs.lookup("utf-8")
- parser = all_parsers
- msg = "'utf-8' codec can't decode byte"
-
- # Stream must be binary UTF8.
- with open(path, "rb") as handle, codecs.StreamRecoder(
- handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
- ) as stream:
-
- with pytest.raises(UnicodeDecodeError, match=msg):
- parser.read_csv(stream)
-
-
-def test_read_csv_local(all_parsers, csv1):
- prefix = "file:///" if compat.is_platform_windows() else "file://"
- parser = all_parsers
-
- fname = prefix + str(os.path.abspath(csv1))
- result = parser.read_csv(fname, index_col=0, parse_dates=True)
-
- expected = DataFrame(
- [
- [0.980269, 3.685731, -0.364216805298, -1.159738],
- [1.047916, -0.041232, -0.16181208307, 0.212549],
- [0.498581, 0.731168, -0.537677223318, 1.346270],
- [1.120202, 1.567621, 0.00364077397681, 0.675253],
- [-0.487094, 0.571455, -1.6116394093, 0.103469],
- [0.836649, 0.246462, 0.588542635376, 1.062782],
- [-0.157161, 1.340307, 1.1957779562, -1.097007],
- ],
- columns=["A", "B", "C", "D"],
- index=Index(
- [
- datetime(2000, 1, 3),
- datetime(2000, 1, 4),
- datetime(2000, 1, 5),
- datetime(2000, 1, 6),
- datetime(2000, 1, 7),
- datetime(2000, 1, 10),
- datetime(2000, 1, 11),
- ],
- name="index",
- ),
- )
- tm.assert_frame_equal(result, expected)
-
-
-def test_1000_sep(all_parsers):
- parser = all_parsers
- data = """A|B|C
-1|2,334|5
-10|13|10.
-"""
- expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
-
- result = parser.read_csv(StringIO(data), sep="|", thousands=",")
- tm.assert_frame_equal(result, expected)
-
-
-def test_squeeze(all_parsers):
- data = """\
-a,1
-b,2
-c,3
-"""
- parser = all_parsers
- index = Index(["a", "b", "c"], name=0)
- expected = Series([1, 2, 3], name=1, index=index)
-
- result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
- tm.assert_series_equal(result, expected)
-
- # see gh-8217
- #
- # Series should not be a view.
- assert not result._is_view
-
-
-def test_malformed(all_parsers):
- # see gh-6607
- parser = all_parsers
- data = """ignore
-A,B,C
-1,2,3 # comment
-1,2,3,4,5
-2,3,4
-"""
- msg = "Expected 3 fields in line 4, saw 5"
- with pytest.raises(ParserError, match=msg):
- parser.read_csv(StringIO(data), header=1, comment="#")
-
-
-@pytest.mark.parametrize("nrows", [5, 3, None])
-def test_malformed_chunks(all_parsers, nrows):
- data = """ignore
-A,B,C
-skip
-1,2,3
-3,5,10 # comment
-1,2,3,4,5
-2,3,4
-"""
- parser = all_parsers
- msg = "Expected 3 fields in line 6, saw 5"
- with parser.read_csv(
- StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
- ) as reader:
- with pytest.raises(ParserError, match=msg):
- reader.read(nrows)
-
-
-def test_unnamed_columns(all_parsers):
- data = """A,B,C,,
-1,2,3,4,5
-6,7,8,9,10
-11,12,13,14,15
-"""
- parser = all_parsers
- expected = DataFrame(
- [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
- dtype=np.int64,
- columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
- )
- result = parser.read_csv(StringIO(data))
- tm.assert_frame_equal(result, expected)
-
-
-def test_csv_mixed_type(all_parsers):
- data = """A,B,C
-a,1,2
-b,3,4
-c,4,5
-"""
- parser = all_parsers
- expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
- result = parser.read_csv(StringIO(data))
- tm.assert_frame_equal(result, expected)
-
-
-def test_read_csv_low_memory_no_rows_with_index(all_parsers):
- # see gh-21141
- parser = all_parsers
-
- if not parser.low_memory:
- pytest.skip("This is a low-memory specific test")
-
- data = """A,B,C
-1,1,1,2
-2,2,3,4
-3,3,4,5
-"""
- result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
- expected = DataFrame(columns=["A", "B", "C"])
- tm.assert_frame_equal(result, expected)
-
-
-def test_read_csv_dataframe(all_parsers, csv1):
- parser = all_parsers
- result = parser.read_csv(csv1, index_col=0, parse_dates=True)
-
- expected = DataFrame(
- [
- [0.980269, 3.685731, -0.364216805298, -1.159738],
- [1.047916, -0.041232, -0.16181208307, 0.212549],
- [0.498581, 0.731168, -0.537677223318, 1.346270],
- [1.120202, 1.567621, 0.00364077397681, 0.675253],
- [-0.487094, 0.571455, -1.6116394093, 0.103469],
- [0.836649, 0.246462, 0.588542635376, 1.062782],
- [-0.157161, 1.340307, 1.1957779562, -1.097007],
- ],
- columns=["A", "B", "C", "D"],
- index=Index(
- [
- datetime(2000, 1, 3),
- datetime(2000, 1, 4),
- datetime(2000, 1, 5),
- datetime(2000, 1, 6),
- datetime(2000, 1, 7),
- datetime(2000, 1, 10),
- datetime(2000, 1, 11),
- ],
- name="index",
- ),
- )
- tm.assert_frame_equal(result, expected)
-
-
-def test_read_csv_no_index_name(all_parsers, csv_dir_path):
- parser = all_parsers
- csv2 = os.path.join(csv_dir_path, "test2.csv")
- result = parser.read_csv(csv2, index_col=0, parse_dates=True)
-
- expected = DataFrame(
- [
- [0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
- [1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
- [0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
- [1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
- [-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
- ],
- columns=["A", "B", "C", "D", "E"],
- index=Index(
- [
- datetime(2000, 1, 3),
- datetime(2000, 1, 4),
- datetime(2000, 1, 5),
- datetime(2000, 1, 6),
- datetime(2000, 1, 7),
- ]
- ),
- )
- tm.assert_frame_equal(result, expected)
-
-
-def test_read_csv_wrong_num_columns(all_parsers):
- # Too few columns.
- data = """A,B,C,D,E,F
-1,2,3,4,5,6
-6,7,8,9,10,11,12
-11,12,13,14,15,16
-"""
- parser = all_parsers
- msg = "Expected 6 fields in line 3, saw 7"
-
- with pytest.raises(ParserError, match=msg):
- parser.read_csv(StringIO(data))
-
-
-def test_read_duplicate_index_explicit(all_parsers):
- data = """index,A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo,12,13,14,15
-bar,12,13,14,15
-"""
- parser = all_parsers
- result = parser.read_csv(StringIO(data), index_col=0)
-
- expected = DataFrame(
- [
- [2, 3, 4, 5],
- [7, 8, 9, 10],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- ],
- columns=["A", "B", "C", "D"],
- index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
- )
- tm.assert_frame_equal(result, expected)
-
-
-def test_read_duplicate_index_implicit(all_parsers):
- data = """A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo,12,13,14,15
-bar,12,13,14,15
-"""
- parser = all_parsers
- result = parser.read_csv(StringIO(data))
-
- expected = DataFrame(
- [
- [2, 3, 4, 5],
- [7, 8, 9, 10],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- ],
- columns=["A", "B", "C", "D"],
- index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
- )
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "data,kwargs,expected",
- [
- (
- "A,B\nTrue,1\nFalse,2\nTrue,3",
- {},
- DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
- ),
- (
- "A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
- {"true_values": ["yes", "Yes", "YES"], "false_values": ["no", "NO", "No"]},
- DataFrame(
- [[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
- columns=["A", "B"],
- ),
- ),
- (
- "A,B\nTRUE,1\nFALSE,2\nTRUE,3",
- {},
- DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
- ),
- (
- "A,B\nfoo,bar\nbar,foo",
- {"true_values": ["foo"], "false_values": ["bar"]},
- DataFrame([[True, False], [False, True]], columns=["A", "B"]),
- ),
- ],
-)
-def test_parse_bool(all_parsers, data, kwargs, expected):
- parser = all_parsers
- result = parser.read_csv(StringIO(data), **kwargs)
- tm.assert_frame_equal(result, expected)
-
-
-def test_int_conversion(all_parsers):
- data = """A,B
-1.0,1
-2.0,2
-3.0,3
-"""
- parser = all_parsers
- result = parser.read_csv(StringIO(data))
-
- expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("nrows", [3, 3.0])
-def test_read_nrows(all_parsers, nrows):
- # see gh-10476
- data = """index,A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo2,12,13,14,15
-bar2,12,13,14,15
-"""
- expected = DataFrame(
- [["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
- columns=["index", "A", "B", "C", "D"],
- )
- parser = all_parsers
-
- result = parser.read_csv(StringIO(data), nrows=nrows)
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
-def test_read_nrows_bad(all_parsers, nrows):
- data = """index,A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo2,12,13,14,15
-bar2,12,13,14,15
-"""
- msg = r"'nrows' must be an integer >=0"
- parser = all_parsers
-
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(StringIO(data), nrows=nrows)
-
-
-@pytest.mark.parametrize("index_col", [0, "index"])
-def test_read_chunksize_with_index(all_parsers, index_col):
- parser = all_parsers
- data = """index,A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo2,12,13,14,15
-bar2,12,13,14,15
-"""
-
- expected = DataFrame(
- [
- ["foo", 2, 3, 4, 5],
- ["bar", 7, 8, 9, 10],
- ["baz", 12, 13, 14, 15],
- ["qux", 12, 13, 14, 15],
- ["foo2", 12, 13, 14, 15],
- ["bar2", 12, 13, 14, 15],
- ],
- columns=["index", "A", "B", "C", "D"],
- )
- expected = expected.set_index("index")
-
- with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:
- chunks = list(reader)
- tm.assert_frame_equal(chunks[0], expected[:2])
- tm.assert_frame_equal(chunks[1], expected[2:4])
- tm.assert_frame_equal(chunks[2], expected[4:])
-
-
-@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
-def test_read_chunksize_bad(all_parsers, chunksize):
- data = """index,A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo2,12,13,14,15
-bar2,12,13,14,15
-"""
- parser = all_parsers
- msg = r"'chunksize' must be an integer >=1"
-
- with pytest.raises(ValueError, match=msg):
- with parser.read_csv(StringIO(data), chunksize=chunksize) as _:
- pass
-
-
-@pytest.mark.parametrize("chunksize", [2, 8])
-def test_read_chunksize_and_nrows(all_parsers, chunksize):
- # see gh-15755
- data = """index,A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo2,12,13,14,15
-bar2,12,13,14,15
-"""
- parser = all_parsers
- kwargs = {"index_col": 0, "nrows": 5}
-
- expected = parser.read_csv(StringIO(data), **kwargs)
- with parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) as reader:
- tm.assert_frame_equal(concat(reader), expected)
-
-
-def test_read_chunksize_and_nrows_changing_size(all_parsers):
- data = """index,A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo2,12,13,14,15
-bar2,12,13,14,15
-"""
- parser = all_parsers
- kwargs = {"index_col": 0, "nrows": 5}
-
- expected = parser.read_csv(StringIO(data), **kwargs)
- with parser.read_csv(StringIO(data), chunksize=8, **kwargs) as reader:
- tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
- tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
-
- with pytest.raises(StopIteration, match=""):
- reader.get_chunk(size=3)
-
-
-def test_get_chunk_passed_chunksize(all_parsers):
- parser = all_parsers
- data = """A,B,C
-1,2,3
-4,5,6
-7,8,9
-1,2,3"""
-
- with parser.read_csv(StringIO(data), chunksize=2) as reader:
- result = reader.get_chunk()
-
- expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("kwargs", [{}, {"index_col": 0}])
-def test_read_chunksize_compat(all_parsers, kwargs):
- # see gh-12185
- data = """index,A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo2,12,13,14,15
-bar2,12,13,14,15
-"""
- parser = all_parsers
- result = parser.read_csv(StringIO(data), **kwargs)
- with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:
- tm.assert_frame_equal(concat(reader), result)
-
-
-def test_read_chunksize_jagged_names(all_parsers):
- # see gh-23509
- parser = all_parsers
- data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
-
- expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
- with parser.read_csv(StringIO(data), names=range(10), chunksize=4) as reader:
- result = concat(reader)
- tm.assert_frame_equal(result, expected)
-
-
-def test_read_data_list(all_parsers):
- parser = all_parsers
- kwargs = {"index_col": 0}
- data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
-
- data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
- expected = parser.read_csv(StringIO(data), **kwargs)
-
- with TextParser(data_list, chunksize=2, **kwargs) as parser:
- result = parser.read()
-
- tm.assert_frame_equal(result, expected)
-
-
-def test_iterator(all_parsers):
- # see gh-6607
- data = """index,A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo2,12,13,14,15
-bar2,12,13,14,15
-"""
- parser = all_parsers
- kwargs = {"index_col": 0}
-
- expected = parser.read_csv(StringIO(data), **kwargs)
- with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader:
-
- first_chunk = reader.read(3)
- tm.assert_frame_equal(first_chunk, expected[:3])
-
- last_chunk = reader.read(5)
- tm.assert_frame_equal(last_chunk, expected[3:])
-
-
-def test_iterator2(all_parsers):
- parser = all_parsers
- data = """A,B,C
-foo,1,2,3
-bar,4,5,6
-baz,7,8,9
-"""
-
- with parser.read_csv(StringIO(data), iterator=True) as reader:
- result = list(reader)
-
- expected = DataFrame(
- [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
- index=["foo", "bar", "baz"],
- columns=["A", "B", "C"],
- )
- tm.assert_frame_equal(result[0], expected)
-
-
-def test_reader_list(all_parsers):
- data = """index,A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo2,12,13,14,15
-bar2,12,13,14,15
-"""
- parser = all_parsers
- kwargs = {"index_col": 0}
-
- lines = list(csv.reader(StringIO(data)))
- with TextParser(lines, chunksize=2, **kwargs) as reader:
- chunks = list(reader)
-
- expected = parser.read_csv(StringIO(data), **kwargs)
-
- tm.assert_frame_equal(chunks[0], expected[:2])
- tm.assert_frame_equal(chunks[1], expected[2:4])
- tm.assert_frame_equal(chunks[2], expected[4:])
-
-
-def test_reader_list_skiprows(all_parsers):
- data = """index,A,B,C,D
-foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo2,12,13,14,15
-bar2,12,13,14,15
-"""
- parser = all_parsers
- kwargs = {"index_col": 0}
-
- lines = list(csv.reader(StringIO(data)))
- with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader:
- chunks = list(reader)
-
- expected = parser.read_csv(StringIO(data), **kwargs)
-
- tm.assert_frame_equal(chunks[0], expected[1:3])
-
-
-def test_iterator_stop_on_chunksize(all_parsers):
- # gh-3967: stopping iteration when chunksize is specified
- parser = all_parsers
- data = """A,B,C
-foo,1,2,3
-bar,4,5,6
-baz,7,8,9
-"""
-
- with parser.read_csv(StringIO(data), chunksize=1) as reader:
- result = list(reader)
-
- assert len(result) == 3
- expected = DataFrame(
- [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
- index=["foo", "bar", "baz"],
- columns=["A", "B", "C"],
- )
- tm.assert_frame_equal(concat(result), expected)
-
-
-@pytest.mark.parametrize(
- "kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}]
-)
-def test_iterator_skipfooter_errors(all_parsers, kwargs):
- msg = "'skipfooter' not supported for iteration"
- parser = all_parsers
- data = "a\n1\n2"
-
- with pytest.raises(ValueError, match=msg):
- with parser.read_csv(StringIO(data), skipfooter=1, **kwargs) as _:
- pass
-
-
-def test_nrows_skipfooter_errors(all_parsers):
- msg = "'skipfooter' not supported with 'nrows'"
- data = "a\n1\n2\n3\n4\n5\n6"
- parser = all_parsers
-
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
-
-
-@pytest.mark.parametrize(
- "data,kwargs,expected",
- [
- (
- """foo,2,3,4,5
-bar,7,8,9,10
-baz,12,13,14,15
-qux,12,13,14,15
-foo2,12,13,14,15
-bar2,12,13,14,15
-""",
- {"index_col": 0, "names": ["index", "A", "B", "C", "D"]},
- DataFrame(
- [
- [2, 3, 4, 5],
- [7, 8, 9, 10],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- ],
- index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
- columns=["A", "B", "C", "D"],
- ),
- ),
- (
- """foo,one,2,3,4,5
-foo,two,7,8,9,10
-foo,three,12,13,14,15
-bar,one,12,13,14,15
-bar,two,12,13,14,15
-""",
- {"index_col": [0, 1], "names": ["index1", "index2", "A", "B", "C", "D"]},
- DataFrame(
- [
- [2, 3, 4, 5],
- [7, 8, 9, 10],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- ],
- index=MultiIndex.from_tuples(
- [
- ("foo", "one"),
- ("foo", "two"),
- ("foo", "three"),
- ("bar", "one"),
- ("bar", "two"),
- ],
- names=["index1", "index2"],
- ),
- columns=["A", "B", "C", "D"],
- ),
- ),
- ],
-)
-def test_pass_names_with_index(all_parsers, data, kwargs, expected):
- parser = all_parsers
- result = parser.read_csv(StringIO(data), **kwargs)
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
-def test_multi_index_no_level_names(all_parsers, index_col):
- data = """index1,index2,A,B,C,D
-foo,one,2,3,4,5
-foo,two,7,8,9,10
-foo,three,12,13,14,15
-bar,one,12,13,14,15
-bar,two,12,13,14,15
-"""
- headless_data = "\n".join(data.split("\n")[1:])
-
- names = ["A", "B", "C", "D"]
- parser = all_parsers
-
- result = parser.read_csv(
- StringIO(headless_data), index_col=index_col, header=None, names=names
- )
- expected = parser.read_csv(StringIO(data), index_col=index_col)
-
- # No index names in headless data.
- expected.index.names = [None] * 2
- tm.assert_frame_equal(result, expected)
-
-
-def test_multi_index_no_level_names_implicit(all_parsers):
- parser = all_parsers
- data = """A,B,C,D
-foo,one,2,3,4,5
-foo,two,7,8,9,10
-foo,three,12,13,14,15
-bar,one,12,13,14,15
-bar,two,12,13,14,15
-"""
-
- result = parser.read_csv(StringIO(data))
- expected = DataFrame(
- [
- [2, 3, 4, 5],
- [7, 8, 9, 10],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- [12, 13, 14, 15],
- ],
- columns=["A", "B", "C", "D"],
- index=MultiIndex.from_tuples(
- [
- ("foo", "one"),
- ("foo", "two"),
- ("foo", "three"),
- ("bar", "one"),
- ("bar", "two"),
- ]
- ),
- )
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "data,expected,header",
- [
- ("a,b", DataFrame(columns=["a", "b"]), [0]),
- (
- "a,b\nc,d",
- DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
- [0, 1],
- ),
- ],
-)
-@pytest.mark.parametrize("round_trip", [True, False])
-def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
- # see gh-14545
- parser = all_parsers
- data = expected.to_csv(index=False) if round_trip else data
-
- result = parser.read_csv(StringIO(data), header=header)
- tm.assert_frame_equal(result, expected)
-
-
-def test_no_unnamed_index(all_parsers):
- parser = all_parsers
- data = """ id c0 c1 c2
-0 1 0 a b
-1 2 0 c d
-2 2 2 e f
-"""
- result = parser.read_csv(StringIO(data), sep=" ")
- expected = DataFrame(
- [[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
- columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
- )
- tm.assert_frame_equal(result, expected)
-
-
-def test_read_csv_parse_simple_list(all_parsers):
- parser = all_parsers
- data = """foo
-bar baz
-qux foo
-foo
-bar"""
-
- result = parser.read_csv(StringIO(data), header=None)
- expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
- tm.assert_frame_equal(result, expected)
-
-
-@tm.network
-def test_url(all_parsers, csv_dir_path):
- # TODO: FTP testing
- parser = all_parsers
- kwargs = {"sep": "\t"}
-
- url = (
- "https://raw.github.com/pandas-dev/pandas/master/"
- "pandas/tests/io/parser/data/salaries.csv"
- )
- url_result = parser.read_csv(url, **kwargs)
-
- local_path = os.path.join(csv_dir_path, "salaries.csv")
- local_result = parser.read_csv(local_path, **kwargs)
- tm.assert_frame_equal(url_result, local_result)
-
-
-@pytest.mark.slow
-def test_local_file(all_parsers, csv_dir_path):
- parser = all_parsers
- kwargs = {"sep": "\t"}
-
- local_path = os.path.join(csv_dir_path, "salaries.csv")
- local_result = parser.read_csv(local_path, **kwargs)
- url = "file://localhost/" + local_path
-
- try:
- url_result = parser.read_csv(url, **kwargs)
- tm.assert_frame_equal(url_result, local_result)
- except URLError:
- # Fails on some systems.
- pytest.skip("Failing on: " + " ".join(platform.uname()))
-
-
-def test_path_path_lib(all_parsers):
- parser = all_parsers
- df = tm.makeDataFrame()
- result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
- tm.assert_frame_equal(df, result)
-
-
-def test_path_local_path(all_parsers):
- parser = all_parsers
- df = tm.makeDataFrame()
- result = tm.round_trip_localpath(
- df.to_csv, lambda p: parser.read_csv(p, index_col=0)
- )
- tm.assert_frame_equal(df, result)
-
-
-def test_nonexistent_path(all_parsers):
- # gh-2428: pls no segfault
- # gh-14086: raise more helpful FileNotFoundError
- # GH#29233 "File foo" instead of "File b'foo'"
- parser = all_parsers
- path = f"{tm.rands(10)}.csv"
-
- msg = r"\[Errno 2\]"
- with pytest.raises(FileNotFoundError, match=msg) as e:
- parser.read_csv(path)
- assert path == e.value.filename
-
-
-@td.skip_if_windows # os.chmod does not work in windows
-def test_no_permission(all_parsers):
- # GH 23784
- parser = all_parsers
-
- msg = r"\[Errno 13\]"
- with tm.ensure_clean() as path:
- os.chmod(path, 0) # make file unreadable
-
- # verify that this process cannot open the file (not running as sudo)
- try:
- with open(path):
- pass
- pytest.skip("Running as sudo.")
- except PermissionError:
- pass
-
- with pytest.raises(PermissionError, match=msg) as e:
- parser.read_csv(path)
- assert path == e.value.filename
-
-
-def test_missing_trailing_delimiters(all_parsers):
- parser = all_parsers
- data = """A,B,C,D
-1,2,3,4
-1,3,3,
-1,4,5"""
-
- result = parser.read_csv(StringIO(data))
- expected = DataFrame(
- [[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
- columns=["A", "B", "C", "D"],
- )
- tm.assert_frame_equal(result, expected)
-
-
-def test_skip_initial_space(all_parsers):
- data = (
- '"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
- "1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
- "314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
- "70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
- "0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
- "-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
- )
- parser = all_parsers
-
- result = parser.read_csv(
- StringIO(data),
- names=list(range(33)),
- header=None,
- na_values=["-9999.0"],
- skipinitialspace=True,
- )
- expected = DataFrame(
- [
- [
- "09-Apr-2012",
- "01:10:18.300",
- 2456026.548822908,
- 12849,
- 1.00361,
- 1.12551,
- 330.65659,
- 355626618.16711,
- 73.48821,
- 314.11625,
- 1917.09447,
- 179.71425,
- 80.0,
- 240.0,
- -350,
- 70.06056,
- 344.9837,
- 1,
- 1,
- -0.689265,
- -0.692787,
- 0.212036,
- 14.7674,
- 41.605,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- np.nan,
- 0,
- 12,
- 128,
- ]
- ]
- )
- tm.assert_frame_equal(result, expected)
-
-
-def test_trailing_delimiters(all_parsers):
- # see gh-2442
- data = """A,B,C
-1,2,3,
-4,5,6,
-7,8,9,"""
- parser = all_parsers
- result = parser.read_csv(StringIO(data), index_col=False)
-
- expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
- tm.assert_frame_equal(result, expected)
-
-
-def test_escapechar(all_parsers):
- # https://stackoverflow.com/questions/13824840/feature-request-for-
- # pandas-read-csv
- data = '''SEARCH_TERM,ACTUAL_URL
-"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
-"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
-"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
-
- parser = all_parsers
- result = parser.read_csv(
- StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
- )
-
- assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
-
- tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
-
-
-def test_int64_min_issues(all_parsers):
- # see gh-2599
- parser = all_parsers
- data = "A,B\n0,0\n0,"
- result = parser.read_csv(StringIO(data))
-
- expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
- tm.assert_frame_equal(result, expected)
-
-
-def test_parse_integers_above_fp_precision(all_parsers):
- data = """Numbers
-17007000002000191
-17007000002000191
-17007000002000191
-17007000002000191
-17007000002000192
-17007000002000192
-17007000002000192
-17007000002000192
-17007000002000192
-17007000002000194"""
- parser = all_parsers
- result = parser.read_csv(StringIO(data))
- expected = DataFrame(
- {
- "Numbers": [
- 17007000002000191,
- 17007000002000191,
- 17007000002000191,
- 17007000002000191,
- 17007000002000192,
- 17007000002000192,
- 17007000002000192,
- 17007000002000192,
- 17007000002000192,
- 17007000002000194,
- ]
- }
- )
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.xfail(reason="GH38630, sometimes gives ResourceWarning", strict=False)
-def test_chunks_have_consistent_numerical_type(all_parsers):
- parser = all_parsers
- integers = [str(i) for i in range(499999)]
- data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
-
- # Coercions should work without warnings.
- with tm.assert_produces_warning(None):
- result = parser.read_csv(StringIO(data))
-
- assert type(result.a[0]) is np.float64
- assert result.a.dtype == float
-
-
-def test_warn_if_chunks_have_mismatched_type(all_parsers):
- warning_type = None
- parser = all_parsers
- integers = [str(i) for i in range(499999)]
- data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
-
- # see gh-3866: if chunks are different types and can't
- # be coerced using numerical types, then issue warning.
- if parser.engine == "c" and parser.low_memory:
- warning_type = DtypeWarning
-
- with tm.assert_produces_warning(warning_type):
- df = parser.read_csv(StringIO(data))
- assert df.a.dtype == object
-
-
-@pytest.mark.parametrize("sep", [" ", r"\s+"])
-def test_integer_overflow_bug(all_parsers, sep):
- # see gh-2601
- data = "65248E10 11\n55555E55 22\n"
- parser = all_parsers
-
- result = parser.read_csv(StringIO(data), header=None, sep=sep)
- expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
- tm.assert_frame_equal(result, expected)
-
-
-def test_catch_too_many_names(all_parsers):
- # see gh-5156
- data = """\
-1,2,3
-4,,6
-7,8,9
-10,11,12\n"""
- parser = all_parsers
- msg = (
- "Too many columns specified: expected 4 and found 3"
- if parser.engine == "c"
- else "Number of passed names did not match "
- "number of header fields in the file"
- )
-
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
-
-
-def test_ignore_leading_whitespace(all_parsers):
- # see gh-3374, gh-6607
- parser = all_parsers
- data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
- result = parser.read_csv(StringIO(data), sep=r"\s+")
-
- expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
- tm.assert_frame_equal(result, expected)
-
-
-def test_chunk_begins_with_newline_whitespace(all_parsers):
- # see gh-10022
- parser = all_parsers
- data = "\n hello\nworld\n"
-
- result = parser.read_csv(StringIO(data), header=None)
- expected = DataFrame([" hello", "world"])
- tm.assert_frame_equal(result, expected)
-
-
-def test_empty_with_index(all_parsers):
- # see gh-10184
- data = "x,y"
- parser = all_parsers
- result = parser.read_csv(StringIO(data), index_col=0)
-
- expected = DataFrame(columns=["y"], index=Index([], name="x"))
- tm.assert_frame_equal(result, expected)
-
-
-def test_empty_with_multi_index(all_parsers):
- # see gh-10467
- data = "x,y,z"
- parser = all_parsers
- result = parser.read_csv(StringIO(data), index_col=["x", "y"])
-
- expected = DataFrame(
- columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
- )
- tm.assert_frame_equal(result, expected)
-
-
-def test_empty_with_reversed_multi_index(all_parsers):
- data = "x,y,z"
- parser = all_parsers
- result = parser.read_csv(StringIO(data), index_col=[1, 0])
-
- expected = DataFrame(
- columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
- )
- tm.assert_frame_equal(result, expected)
-
-
-def test_float_parser(all_parsers):
- # see gh-9565
- parser = all_parsers
- data = "45e-1,4.5,45.,inf,-inf"
- result = parser.read_csv(StringIO(data), header=None)
-
- expected = DataFrame([[float(s) for s in data.split(",")]])
- tm.assert_frame_equal(result, expected)
-
-
-def test_scientific_no_exponent(all_parsers_all_precisions):
- # see gh-12215
- df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]})
- data = df.to_csv(index=False)
- parser, precision = all_parsers_all_precisions
-
- df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision)
- tm.assert_frame_equal(df_roundtrip, df)
-
-
-@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
-def test_int64_overflow(all_parsers, conv):
- data = """ID
-00013007854817840016671868
-00013007854817840016749251
-00013007854817840016754630
-00013007854817840016781876
-00013007854817840017028824
-00013007854817840017963235
-00013007854817840018860166"""
- parser = all_parsers
-
- if conv is None:
- # 13007854817840016671868 > UINT64_MAX, so this
- # will overflow and return object as the dtype.
- result = parser.read_csv(StringIO(data))
- expected = DataFrame(
- [
- "00013007854817840016671868",
- "00013007854817840016749251",
- "00013007854817840016754630",
- "00013007854817840016781876",
- "00013007854817840017028824",
- "00013007854817840017963235",
- "00013007854817840018860166",
- ],
- columns=["ID"],
- )
- tm.assert_frame_equal(result, expected)
- else:
- # 13007854817840016671868 > UINT64_MAX, so attempts
- # to cast to either int64 or uint64 will result in
- # an OverflowError being raised.
- msg = (
- "(Python int too large to convert to C long)|"
- "(long too big to convert)|"
- "(int too big to convert)"
- )
-
- with pytest.raises(OverflowError, match=msg):
- parser.read_csv(StringIO(data), converters={"ID": conv})
-
-
-@pytest.mark.parametrize(
- "val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
-)
-def test_int64_uint64_range(all_parsers, val):
- # These numbers fall right inside the int64-uint64
- # range, so they should be parsed as string.
- parser = all_parsers
- result = parser.read_csv(StringIO(str(val)), header=None)
-
- expected = DataFrame([val])
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1]
-)
-def test_outside_int64_uint64_range(all_parsers, val):
- # These numbers fall just outside the int64-uint64
- # range, so they should be parsed as string.
- parser = all_parsers
- result = parser.read_csv(StringIO(str(val)), header=None)
-
- expected = DataFrame([str(val)])
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]])
-def test_numeric_range_too_wide(all_parsers, exp_data):
- # No numerical dtype can hold both negative and uint64
- # values, so they should be cast as string.
- parser = all_parsers
- data = "\n".join(exp_data)
- expected = DataFrame(exp_data)
-
- result = parser.read_csv(StringIO(data), header=None)
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("neg_exp", [-617, -100000, -99999999999999999])
-def test_very_negative_exponent(all_parsers_all_precisions, neg_exp):
- # GH#38753
- parser, precision = all_parsers_all_precisions
- data = f"data\n10E{neg_exp}"
- result = parser.read_csv(StringIO(data), float_precision=precision)
- expected = DataFrame({"data": [0.0]})
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("exp", [999999999999999999, -999999999999999999])
-def test_too_many_exponent_digits(all_parsers_all_precisions, exp, request):
- # GH#38753
- parser, precision = all_parsers_all_precisions
- data = f"data\n10E{exp}"
- result = parser.read_csv(StringIO(data), float_precision=precision)
- if precision == "round_trip":
- if exp == 999999999999999999 and is_platform_linux():
- mark = pytest.mark.xfail(reason="GH38794, on Linux gives object result")
- request.node.add_marker(mark)
-
- value = np.inf if exp > 0 else 0.0
- expected = DataFrame({"data": [value]})
- else:
- expected = DataFrame({"data": [f"10E{exp}"]})
-
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("iterator", [True, False])
-def test_empty_with_nrows_chunksize(all_parsers, iterator):
- # see gh-9535
- parser = all_parsers
- expected = DataFrame(columns=["foo", "bar"])
-
- nrows = 10
- data = StringIO("foo,bar\n")
-
- if iterator:
- with parser.read_csv(data, chunksize=nrows) as reader:
- result = next(iter(reader))
- else:
- result = parser.read_csv(data, nrows=nrows)
-
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "data,kwargs,expected,msg",
- [
- # gh-10728: WHITESPACE_LINE
- (
- "a,b,c\n4,5,6\n ",
- {},
- DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
- None,
- ),
- # gh-10548: EAT_LINE_COMMENT
- (
- "a,b,c\n4,5,6\n#comment",
- {"comment": "#"},
- DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
- None,
- ),
- # EAT_CRNL_NOP
- (
- "a,b,c\n4,5,6\n\r",
- {},
- DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
- None,
- ),
- # EAT_COMMENT
- (
- "a,b,c\n4,5,6#comment",
- {"comment": "#"},
- DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
- None,
- ),
- # SKIP_LINE
- (
- "a,b,c\n4,5,6\nskipme",
- {"skiprows": [2]},
- DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
- None,
- ),
- # EAT_LINE_COMMENT
- (
- "a,b,c\n4,5,6\n#comment",
- {"comment": "#", "skip_blank_lines": False},
- DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
- None,
- ),
- # IN_FIELD
- (
- "a,b,c\n4,5,6\n ",
- {"skip_blank_lines": False},
- DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
- None,
- ),
- # EAT_CRNL
- (
- "a,b,c\n4,5,6\n\r",
- {"skip_blank_lines": False},
- DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
- None,
- ),
- # ESCAPED_CHAR
- (
- "a,b,c\n4,5,6\n\\",
- {"escapechar": "\\"},
- None,
- "(EOF following escape character)|(unexpected end of data)",
- ),
- # ESCAPE_IN_QUOTED_FIELD
- (
- 'a,b,c\n4,5,6\n"\\',
- {"escapechar": "\\"},
- None,
- "(EOF inside string starting at row 2)|(unexpected end of data)",
- ),
- # IN_QUOTED_FIELD
- (
- 'a,b,c\n4,5,6\n"',
- {"escapechar": "\\"},
- None,
- "(EOF inside string starting at row 2)|(unexpected end of data)",
- ),
- ],
- ids=[
- "whitespace-line",
- "eat-line-comment",
- "eat-crnl-nop",
- "eat-comment",
- "skip-line",
- "eat-line-comment",
- "in-field",
- "eat-crnl",
- "escaped-char",
- "escape-in-quoted-field",
- "in-quoted-field",
- ],
-)
-def test_eof_states(all_parsers, data, kwargs, expected, msg):
- # see gh-10728, gh-10548
- parser = all_parsers
-
- if expected is None:
- with pytest.raises(ParserError, match=msg):
- parser.read_csv(StringIO(data), **kwargs)
- else:
- result = parser.read_csv(StringIO(data), **kwargs)
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
-def test_uneven_lines_with_usecols(all_parsers, usecols):
- # see gh-12203
- parser = all_parsers
- data = r"""a,b,c
-0,1,2
-3,4,5,6,7
-8,9,10"""
-
- if usecols is None:
- # Make sure that an error is still raised
- # when the "usecols" parameter is not provided.
- msg = r"Expected \d+ fields in line \d+, saw \d+"
- with pytest.raises(ParserError, match=msg):
- parser.read_csv(StringIO(data))
- else:
- expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
-
- result = parser.read_csv(StringIO(data), usecols=usecols)
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "data,kwargs,expected",
- [
- # First, check to see that the response of parser when faced with no
- # provided columns raises the correct error, with or without usecols.
- ("", {}, None),
- ("", {"usecols": ["X"]}, None),
- (
- ",,",
- {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
- DataFrame(columns=["X"], index=[0], dtype=np.float64),
- ),
- (
- "",
- {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
- DataFrame(columns=["X"]),
- ),
- ],
-)
-def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
- # see gh-12493
- parser = all_parsers
-
- if expected is None:
- msg = "No columns to parse from file"
- with pytest.raises(EmptyDataError, match=msg):
- parser.read_csv(StringIO(data), **kwargs)
- else:
- result = parser.read_csv(StringIO(data), **kwargs)
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "kwargs,expected",
- [
- # gh-8661, gh-8679: this should ignore six lines, including
- # lines with trailing whitespace and blank lines.
- (
- {
- "header": None,
- "delim_whitespace": True,
- "skiprows": [0, 1, 2, 3, 5, 6],
- "skip_blank_lines": True,
- },
- DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
- ),
- # gh-8983: test skipping set of rows after a row with trailing spaces.
- (
- {
- "delim_whitespace": True,
- "skiprows": [1, 2, 3, 5, 6],
- "skip_blank_lines": True,
- },
- DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
- ),
- ],
-)
-def test_trailing_spaces(all_parsers, kwargs, expected):
- data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
- parser = all_parsers
-
- result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
- tm.assert_frame_equal(result, expected)
-
-
-def test_raise_on_sep_with_delim_whitespace(all_parsers):
- # see gh-6607
- data = "a b c\n1 2 3"
- parser = all_parsers
-
- with pytest.raises(ValueError, match="you can only specify one"):
- parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
-
-
-@pytest.mark.parametrize("delim_whitespace", [True, False])
-def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
- # see gh-9710
- parser = all_parsers
- data = """\
-MyColumn
-a
-b
-a
-b\n"""
-
- expected = DataFrame({"MyColumn": list("abab")})
- result = parser.read_csv(
- StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
- )
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "sep,skip_blank_lines,exp_data",
- [
- (",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
- (r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
- (
- ",",
- False,
- [
- [1.0, 2.0, 4.0],
- [np.nan, np.nan, np.nan],
- [np.nan, np.nan, np.nan],
- [5.0, np.nan, 10.0],
- [np.nan, np.nan, np.nan],
- [-70.0, 0.4, 1.0],
- ],
- ),
- ],
-)
-def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
- parser = all_parsers
- data = """\
-A,B,C
-1,2.,4.
-
-
-5.,NaN,10.0
-
--70,.4,1
-"""
-
- if sep == r"\s+":
- data = data.replace(",", " ")
-
- result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
- expected = DataFrame(exp_data, columns=["A", "B", "C"])
- tm.assert_frame_equal(result, expected)
-
-
-def test_whitespace_lines(all_parsers):
- parser = all_parsers
- data = """
-
-\t \t\t
-\t
-A,B,C
-\t 1,2.,4.
-5.,NaN,10.0
-"""
- expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
- result = parser.read_csv(StringIO(data))
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "data,expected",
- [
- (
- """ A B C D
-a 1 2 3 4
-b 1 2 3 4
-c 1 2 3 4
-""",
- DataFrame(
- [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
- columns=["A", "B", "C", "D"],
- index=["a", "b", "c"],
- ),
- ),
- (
- " a b c\n1 2 3 \n4 5 6\n 7 8 9",
- DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
- ),
- ],
-)
-def test_whitespace_regex_separator(all_parsers, data, expected):
- # see gh-6607
- parser = all_parsers
- result = parser.read_csv(StringIO(data), sep=r"\s+")
- tm.assert_frame_equal(result, expected)
-
-
-def test_verbose_read(all_parsers, capsys):
- parser = all_parsers
- data = """a,b,c,d
-one,1,2,3
-one,1,2,3
-,1,2,3
-one,1,2,3
-,1,2,3
-,1,2,3
-one,1,2,3
-two,1,2,3"""
-
- # Engines are verbose in different ways.
- parser.read_csv(StringIO(data), verbose=True)
- captured = capsys.readouterr()
-
- if parser.engine == "c":
- assert "Tokenization took:" in captured.out
- assert "Parser memory cleanup took:" in captured.out
- else: # Python engine
- assert captured.out == "Filled 3 NA values in column a\n"
-
-
-def test_verbose_read2(all_parsers, capsys):
- parser = all_parsers
- data = """a,b,c,d
-one,1,2,3
-two,1,2,3
-three,1,2,3
-four,1,2,3
-five,1,2,3
-,1,2,3
-seven,1,2,3
-eight,1,2,3"""
-
- parser.read_csv(StringIO(data), verbose=True, index_col=0)
- captured = capsys.readouterr()
-
- # Engines are verbose in different ways.
- if parser.engine == "c":
- assert "Tokenization took:" in captured.out
- assert "Parser memory cleanup took:" in captured.out
- else: # Python engine
- assert captured.out == "Filled 1 NA values in column a\n"
-
-
-def test_iteration_open_handle(all_parsers):
- parser = all_parsers
- kwargs = {"squeeze": True, "header": None}
-
- with tm.ensure_clean() as path:
- with open(path, "w") as f:
- f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
-
- with open(path) as f:
- for line in f:
- if "CCC" in line:
- break
-
- result = parser.read_csv(f, **kwargs)
- expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0)
- tm.assert_series_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "data,thousands,decimal",
- [
- (
- """A|B|C
-1|2,334.01|5
-10|13|10.
-""",
- ",",
- ".",
- ),
- (
- """A|B|C
-1|2.334,01|5
-10|13|10,
-""",
- ".",
- ",",
- ),
- ],
-)
-def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):
- parser = all_parsers
- expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
-
- result = parser.read_csv(
- StringIO(data), sep="|", thousands=thousands, decimal=decimal
- )
- tm.assert_frame_equal(result, expected)
-
-
-def test_euro_decimal_format(all_parsers):
- parser = all_parsers
- data = """Id;Number1;Number2;Text1;Text2;Number3
-1;1521,1541;187101,9543;ABC;poi;4,738797819
-2;121,12;14897,76;DEF;uyt;0,377320872
-3;878,158;108013,434;GHI;rez;2,735694704"""
-
- result = parser.read_csv(StringIO(data), sep=";", decimal=",")
- expected = DataFrame(
- [
- [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
- [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
- [3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
- ],
- columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
- )
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("na_filter", [True, False])
-def test_inf_parsing(all_parsers, na_filter):
- parser = all_parsers
- data = """\
-,A
-a,inf
-b,-inf
-c,+Inf
-d,-Inf
-e,INF
-f,-INF
-g,+INf
-h,-INf
-i,inF
-j,-inF"""
- expected = DataFrame(
- {"A": [float("inf"), float("-inf")] * 5},
- index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],
- )
- result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("na_filter", [True, False])
-def test_infinity_parsing(all_parsers, na_filter):
- parser = all_parsers
- data = """\
-,A
-a,Infinity
-b,-Infinity
-c,+Infinity
-"""
- expected = DataFrame(
- {"A": [float("infinity"), float("-infinity"), float("+infinity")]},
- index=["a", "b", "c"],
- )
- result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
-def test_raise_on_no_columns(all_parsers, nrows):
- parser = all_parsers
- data = "\n" * nrows
-
- msg = "No columns to parse from file"
- with pytest.raises(EmptyDataError, match=msg):
- parser.read_csv(StringIO(data))
-
-
-@td.check_file_leaks
-def test_memory_map(all_parsers, csv_dir_path):
- mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")
- parser = all_parsers
-
- expected = DataFrame(
- {"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]}
- )
-
- result = parser.read_csv(mmap_file, memory_map=True)
- tm.assert_frame_equal(result, expected)
-
-
-def test_null_byte_char(all_parsers):
- # see gh-2741
- data = "\x00,foo"
- names = ["a", "b"]
- parser = all_parsers
-
- if parser.engine == "c":
- expected = DataFrame([[np.nan, "foo"]], columns=names)
- out = parser.read_csv(StringIO(data), names=names)
- tm.assert_frame_equal(out, expected)
- else:
- msg = "NULL byte detected"
- with pytest.raises(ParserError, match=msg):
- parser.read_csv(StringIO(data), names=names)
-
-
-def test_temporary_file(all_parsers):
- # see gh-13398
- parser = all_parsers
- data = "0 0"
-
- with tm.ensure_clean(mode="w+", return_filelike=True) as new_file:
- new_file.write(data)
- new_file.flush()
- new_file.seek(0)
-
- result = parser.read_csv(new_file, sep=r"\s+", header=None)
-
- expected = DataFrame([[0, 0]])
- tm.assert_frame_equal(result, expected)
-
-
-def test_internal_eof_byte(all_parsers):
- # see gh-5500
- parser = all_parsers
- data = "a,b\n1\x1a,2"
-
- expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])
- result = parser.read_csv(StringIO(data))
- tm.assert_frame_equal(result, expected)
-
-
-def test_internal_eof_byte_to_file(all_parsers):
- # see gh-16559
- parser = all_parsers
- data = b'c1,c2\r\n"test \x1a test", test\r\n'
- expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
- path = f"__{tm.rands(10)}__.csv"
-
- with tm.ensure_clean(path) as path:
- with open(path, "wb") as f:
- f.write(data)
-
- result = parser.read_csv(path)
- tm.assert_frame_equal(result, expected)
-
-
-def test_sub_character(all_parsers, csv_dir_path):
- # see gh-16893
- filename = os.path.join(csv_dir_path, "sub_char.csv")
- expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
-
- parser = all_parsers
- result = parser.read_csv(filename)
- tm.assert_frame_equal(result, expected)
-
-
-def test_file_handle_string_io(all_parsers):
- # gh-14418
- #
- # Don't close user provided file handles.
- parser = all_parsers
- data = "a,b\n1,2"
-
- fh = StringIO(data)
- parser.read_csv(fh)
- assert not fh.closed
-
-
-def test_file_handles_with_open(all_parsers, csv1):
- # gh-14418
- #
- # Don't close user provided file handles.
- parser = all_parsers
-
- for mode in ["r", "rb"]:
- with open(csv1, mode) as f:
- parser.read_csv(f)
- assert not f.closed
-
-
-def test_invalid_file_buffer_class(all_parsers):
- # see gh-15337
- class InvalidBuffer:
- pass
-
- parser = all_parsers
- msg = "Invalid file path or buffer object type"
-
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(InvalidBuffer())
-
-
-def test_invalid_file_buffer_mock(all_parsers):
- # see gh-15337
- parser = all_parsers
- msg = "Invalid file path or buffer object type"
-
- class Foo:
- pass
-
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(Foo())
-
-
-def test_valid_file_buffer_seems_invalid(all_parsers):
- # gh-16135: we want to ensure that "tell" and "seek"
- # aren't actually being used when we call `read_csv`
- #
- # Thus, while the object may look "invalid" (these
- # methods are attributes of the `StringIO` class),
- # it is still a valid file-object for our purposes.
- class NoSeekTellBuffer(StringIO):
- def tell(self):
- raise AttributeError("No tell method")
-
- def seek(self, pos, whence=0):
- raise AttributeError("No seek method")
-
- data = "a\n1"
- parser = all_parsers
- expected = DataFrame({"a": [1]})
-
- result = parser.read_csv(NoSeekTellBuffer(data))
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize(
- "kwargs",
- [{}, {"error_bad_lines": True}], # Default is True. # Explicitly pass in.
-)
-@pytest.mark.parametrize(
- "warn_kwargs", [{}, {"warn_bad_lines": True}, {"warn_bad_lines": False}]
-)
-def test_error_bad_lines(all_parsers, kwargs, warn_kwargs):
- # see gh-15925
- parser = all_parsers
- kwargs.update(**warn_kwargs)
- data = "a\n1\n1,2,3\n4\n5,6,7"
-
- msg = "Expected 1 fields in line 3, saw 3"
- with pytest.raises(ParserError, match=msg):
- parser.read_csv(StringIO(data), **kwargs)
-
-
-def test_warn_bad_lines(all_parsers, capsys):
- # see gh-15925
- parser = all_parsers
- data = "a\n1\n1,2,3\n4\n5,6,7"
- expected = DataFrame({"a": [1, 4]})
-
- result = parser.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True)
- tm.assert_frame_equal(result, expected)
-
- captured = capsys.readouterr()
- assert "Skipping line 3" in captured.err
- assert "Skipping line 5" in captured.err
-
-
-def test_suppress_error_output(all_parsers, capsys):
- # see gh-15925
- parser = all_parsers
- data = "a\n1\n1,2,3\n4\n5,6,7"
- expected = DataFrame({"a": [1, 4]})
-
- result = parser.read_csv(
- StringIO(data), error_bad_lines=False, warn_bad_lines=False
- )
- tm.assert_frame_equal(result, expected)
-
- captured = capsys.readouterr()
- assert captured.err == ""
-
-
-@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
-def test_filename_with_special_chars(all_parsers, filename):
- # see gh-15086.
- parser = all_parsers
- df = DataFrame({"a": [1, 2, 3]})
-
- with tm.ensure_clean(filename) as path:
- df.to_csv(path, index=False)
-
- result = parser.read_csv(path)
- tm.assert_frame_equal(result, df)
-
-
-def test_read_csv_memory_growth_chunksize(all_parsers):
- # see gh-24805
- #
- # Let's just make sure that we don't crash
- # as we iteratively process all chunks.
- parser = all_parsers
-
- with tm.ensure_clean() as path:
- with open(path, "w") as f:
- for i in range(1000):
- f.write(str(i) + "\n")
-
- with parser.read_csv(path, chunksize=20) as result:
- for _ in result:
- pass
-
-
-def test_read_csv_raises_on_header_prefix(all_parsers):
- # gh-27394
- parser = all_parsers
- msg = "Argument prefix must be None if argument header is not None"
-
- s = StringIO("0,1\n2,3")
-
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(s, header=0, prefix="_X")
-
-
-def test_unexpected_keyword_parameter_exception(all_parsers):
- # GH-34976
- parser = all_parsers
-
- msg = "{}\\(\\) got an unexpected keyword argument 'foo'"
- with pytest.raises(TypeError, match=msg.format("read_csv")):
- parser.read_csv("foo.csv", foo=1)
- with pytest.raises(TypeError, match=msg.format("read_table")):
- parser.read_table("foo.tsv", foo=1)
-
-
-def test_read_table_same_signature_as_read_csv(all_parsers):
- # GH-34976
- parser = all_parsers
-
- table_sign = signature(parser.read_table)
- csv_sign = signature(parser.read_csv)
-
- assert table_sign.parameters.keys() == csv_sign.parameters.keys()
- assert table_sign.return_annotation == csv_sign.return_annotation
-
- for key, csv_param in csv_sign.parameters.items():
- table_param = table_sign.parameters[key]
- if key == "sep":
- assert csv_param.default == ","
- assert table_param.default == "\t"
- assert table_param.annotation == csv_param.annotation
- assert table_param.kind == csv_param.kind
- continue
- else:
- assert table_param == csv_param
-
-
-def test_read_table_equivalency_to_read_csv(all_parsers):
- # see gh-21948
- # As of 0.25.0, read_table is undeprecated
- parser = all_parsers
- data = "a\tb\n1\t2\n3\t4"
- expected = parser.read_csv(StringIO(data), sep="\t")
- result = parser.read_table(StringIO(data))
- tm.assert_frame_equal(result, expected)
-
-
-def test_first_row_bom(all_parsers):
- # see gh-26545
- parser = all_parsers
- data = '''\ufeff"Head1" "Head2" "Head3"'''
-
- result = parser.read_csv(StringIO(data), delimiter="\t")
- expected = DataFrame(columns=["Head1", "Head2", "Head3"])
- tm.assert_frame_equal(result, expected)
-
-
-def test_first_row_bom_unquoted(all_parsers):
- # see gh-36343
- parser = all_parsers
- data = """\ufeffHead1 Head2 Head3"""
-
- result = parser.read_csv(StringIO(data), delimiter="\t")
- expected = DataFrame(columns=["Head1", "Head2", "Head3"])
- tm.assert_frame_equal(result, expected)
-
-
-def test_integer_precision(all_parsers):
- # Gh 7072
- s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765
-5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389"""
- parser = all_parsers
- result = parser.read_csv(StringIO(s), header=None)[4]
- expected = Series([4321583677327450765, 4321113141090630389], name=4)
- tm.assert_series_equal(result, expected)
-
-
-def test_file_descriptor_leak(all_parsers):
- # GH 31488
-
- parser = all_parsers
- with tm.ensure_clean() as path:
-
- def test():
- with pytest.raises(EmptyDataError, match="No columns to parse from file"):
- parser.read_csv(path)
-
- td.check_file_leaks(test)()
-
-
-@pytest.mark.parametrize("nrows", range(1, 6))
-def test_blank_lines_between_header_and_data_rows(all_parsers, nrows):
- # GH 28071
- ref = DataFrame(
- [[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],
- columns=list("ab"),
- )
- csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"
- parser = all_parsers
- df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)
- tm.assert_frame_equal(df, ref[:nrows])
-
-
-def test_no_header_two_extra_columns(all_parsers):
- # GH 26218
- column_names = ["one", "two", "three"]
- ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)
- stream = StringIO("foo,bar,baz,bam,blah")
- parser = all_parsers
- df = parser.read_csv(stream, header=None, names=column_names, index_col=False)
- tm.assert_frame_equal(df, ref)
-
-
-def test_read_csv_names_not_accepting_sets(all_parsers):
- # GH 34946
- data = """\
- 1,2,3
- 4,5,6\n"""
- parser = all_parsers
- with pytest.raises(ValueError, match="Names should be an ordered collection."):
- parser.read_csv(StringIO(data), names=set("QAZ"))
-
-
-def test_read_csv_with_use_inf_as_na(all_parsers):
- # https://github.com/pandas-dev/pandas/issues/35493
- parser = all_parsers
- data = "1.0\nNaN\n3.0"
- with option_context("use_inf_as_na", True):
- result = parser.read_csv(StringIO(data), header=None)
- expected = DataFrame([1.0, np.nan, 3.0])
- tm.assert_frame_equal(result, expected)
-
-
-def test_read_table_delim_whitespace_default_sep(all_parsers):
- # GH: 35958
- f = StringIO("a b c\n1 -2 -3\n4 5 6")
- parser = all_parsers
- result = parser.read_table(f, delim_whitespace=True)
- expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("delimiter", [",", "\t"])
-def test_read_csv_delim_whitespace_non_default_sep(all_parsers, delimiter):
- # GH: 35958
- f = StringIO("a b c\n1 -2 -3\n4 5 6")
- parser = all_parsers
- msg = (
- "Specified a delimiter with both sep and "
- "delim_whitespace=True; you can only specify one."
- )
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(f, delim_whitespace=True, sep=delimiter)
-
- with pytest.raises(ValueError, match=msg):
- parser.read_csv(f, delim_whitespace=True, delimiter=delimiter)
-
-
-@pytest.mark.parametrize("delimiter", [",", "\t"])
-def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):
- # GH: 35958
- f = StringIO("a b c\n1 -2 -3\n4 5 6")
- parser = all_parsers
- msg = (
- "Specified a delimiter with both sep and "
- "delim_whitespace=True; you can only specify one."
- )
- with pytest.raises(ValueError, match=msg):
- parser.read_table(f, delim_whitespace=True, sep=delimiter)
-
- with pytest.raises(ValueError, match=msg):
- parser.read_table(f, delim_whitespace=True, delimiter=delimiter)
-
-
-def test_dict_keys_as_names(all_parsers):
- # GH: 36928
- data = "1,2"
-
- keys = {"a": int, "b": int}.keys()
- parser = all_parsers
-
- result = parser.read_csv(StringIO(data), names=keys)
- expected = DataFrame({"a": [1], "b": [2]})
- tm.assert_frame_equal(result, expected)
-
-
-@pytest.mark.parametrize("io_class", [StringIO, BytesIO])
-@pytest.mark.parametrize("encoding", [None, "utf-8"])
-def test_read_csv_file_handle(all_parsers, io_class, encoding):
- """
- Test whether read_csv does not close user-provided file handles.
-
- GH 36980
- """
- parser = all_parsers
- expected = DataFrame({"a": [1], "b": [2]})
-
- content = "a,b\n1,2"
- if io_class == BytesIO:
- content = content.encode("utf-8")
- handle = io_class(content)
-
- tm.assert_frame_equal(parser.read_csv(handle, encoding=encoding), expected)
- assert not handle.closed
-
-
-def test_memory_map_file_handle_silent_fallback(all_parsers, compression):
- """
- Do not fail for buffers with memory_map=True (cannot memory map BytesIO).
-
- GH 37621
- """
- parser = all_parsers
- expected = DataFrame({"a": [1], "b": [2]})
-
- handle = BytesIO()
- expected.to_csv(handle, index=False, compression=compression, mode="wb")
- handle.seek(0)
-
- tm.assert_frame_equal(
- parser.read_csv(handle, memory_map=True, compression=compression),
- expected,
- )
-
-
-def test_memory_map_compression(all_parsers, compression):
- """
- Support memory map for compressed files.
-
- GH 37621
- """
- parser = all_parsers
- expected = DataFrame({"a": [1], "b": [2]})
-
- with tm.ensure_clean() as path:
- expected.to_csv(path, index=False, compression=compression)
-
- tm.assert_frame_equal(
- parser.read_csv(path, memory_map=True, compression=compression),
- expected,
- )
-
-
-def test_context_manager(all_parsers, datapath):
- # make sure that opened files are closed
- parser = all_parsers
-
- path = datapath("io", "data", "csv", "iris.csv")
-
- reader = parser.read_csv(path, chunksize=1)
- assert not reader._engine.handles.handle.closed
- try:
- with reader:
- next(reader)
- assert False
- except AssertionError:
- assert reader._engine.handles.handle.closed
-
-
-def test_context_manageri_user_provided(all_parsers, datapath):
- # make sure that user-provided handles are not closed
- parser = all_parsers
-
- with open(datapath("io", "data", "csv", "iris.csv"), mode="r") as path:
-
- reader = parser.read_csv(path, chunksize=1)
- assert not reader._engine.handles.handle.closed
- try:
- with reader:
- next(reader)
- assert False
- except AssertionError:
- assert not reader._engine.handles.handle.closed
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Another precursor to #38370 | https://api.github.com/repos/pandas-dev/pandas/pulls/38897 | 2021-01-02T08:35:39Z | 2021-01-04T00:50:12Z | 2021-01-04T00:50:12Z | 2021-01-04T00:50:18Z |
ENH: Add numba engine to several rolling aggregations | diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index 306083e9c22b2..5f8cdb2a0bdac 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -50,20 +50,24 @@ class Engine:
["int", "float"],
[np.sum, lambda x: np.sum(x) + 5],
["cython", "numba"],
+ ["sum", "max", "min", "median", "mean"],
)
- param_names = ["constructor", "dtype", "function", "engine"]
+ param_names = ["constructor", "dtype", "function", "engine", "method"]
- def setup(self, constructor, dtype, function, engine):
+ def setup(self, constructor, dtype, function, engine, method):
N = 10 ** 3
arr = (100 * np.random.random(N)).astype(dtype)
self.data = getattr(pd, constructor)(arr)
- def time_rolling_apply(self, constructor, dtype, function, engine):
+ def time_rolling_apply(self, constructor, dtype, function, engine, method):
self.data.rolling(10).apply(function, raw=True, engine=engine)
- def time_expanding_apply(self, constructor, dtype, function, engine):
+ def time_expanding_apply(self, constructor, dtype, function, engine, method):
self.data.expanding().apply(function, raw=True, engine=engine)
+ def time_rolling_methods(self, constructor, dtype, function, engine, method):
+ getattr(self.data.rolling(10), method)(engine=engine)
+
class ExpandingMethods:
diff --git a/doc/source/user_guide/window.rst b/doc/source/user_guide/window.rst
index 08641bc5b17ae..9db4a4bb873bd 100644
--- a/doc/source/user_guide/window.rst
+++ b/doc/source/user_guide/window.rst
@@ -321,6 +321,10 @@ Numba will be applied in potentially two routines:
#. If ``func`` is a standard Python function, the engine will `JIT <https://numba.pydata.org/numba-doc/latest/user/overview.html>`__ the passed function. ``func`` can also be a JITed function in which case the engine will not JIT the function again.
#. The engine will JIT the for loop where the apply function is applied to each window.
+.. versionadded:: 1.3
+
+``mean``, ``median``, ``max``, ``min``, and ``sum`` also support the ``engine`` and ``engine_kwargs`` arguments.
+
The ``engine_kwargs`` argument is a dictionary of keyword arguments that will be passed into the
`numba.jit decorator <https://numba.pydata.org/numba-doc/latest/reference/jit-compilation.html#numba.jit>`__.
These keyword arguments will be applied to *both* the passed function (if a standard Python function)
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index b4b98ec0403a8..1760e773c7b93 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -52,6 +52,7 @@ Other enhancements
- Improved integer type mapping from pandas to SQLAlchemy when using :meth:`DataFrame.to_sql` (:issue:`35076`)
- :func:`to_numeric` now supports downcasting of nullable ``ExtensionDtype`` objects (:issue:`33013`)
- :func:`pandas.read_excel` can now auto detect .xlsb files (:issue:`35416`)
+- :meth:`.Rolling.sum`, :meth:`.Expanding.sum`, :meth:`.Rolling.mean`, :meth:`.Expanding.mean`, :meth:`.Rolling.median`, :meth:`.Expanding.median`, :meth:`.Rolling.max`, :meth:`.Expanding.max`, :meth:`.Rolling.min`, and :meth:`.Expanding.min` now support ``Numba`` execution with the ``engine`` keyword (:issue:`38895`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py
index 81aa6699c3c61..1f0c16fb5aa8f 100644
--- a/pandas/core/window/expanding.py
+++ b/pandas/core/window/expanding.py
@@ -172,33 +172,33 @@ def apply(
@Substitution(name="expanding")
@Appender(_shared_docs["sum"])
- def sum(self, *args, **kwargs):
+ def sum(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_expanding_func("sum", args, kwargs)
- return super().sum(*args, **kwargs)
+ return super().sum(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@Substitution(name="expanding", func_name="max")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
- def max(self, *args, **kwargs):
+ def max(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_expanding_func("max", args, kwargs)
- return super().max(*args, **kwargs)
+ return super().max(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["min"])
- def min(self, *args, **kwargs):
+ def min(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_expanding_func("min", args, kwargs)
- return super().min(*args, **kwargs)
+ return super().min(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["mean"])
- def mean(self, *args, **kwargs):
+ def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_expanding_func("mean", args, kwargs)
- return super().mean(*args, **kwargs)
+ return super().mean(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["median"])
- def median(self, **kwargs):
- return super().median(**kwargs)
+ def median(self, engine=None, engine_kwargs=None, **kwargs):
+ return super().median(engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@Substitution(name="expanding", versionadded="")
@Appender(_shared_docs["std"])
@@ -256,9 +256,16 @@ def kurt(self, **kwargs):
@Substitution(name="expanding")
@Appender(_shared_docs["quantile"])
- def quantile(self, quantile, interpolation="linear", **kwargs):
+ def quantile(
+ self,
+ quantile,
+ interpolation="linear",
+ **kwargs,
+ ):
return super().quantile(
- quantile=quantile, interpolation=interpolation, **kwargs
+ quantile=quantile,
+ interpolation=interpolation,
+ **kwargs,
)
@Substitution(name="expanding", func_name="cov")
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index db8a48300206b..7ae1e61d426b9 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -1241,6 +1241,7 @@ def count(self):
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
+
engine : str, default None
* ``'cython'`` : Runs rolling apply through C-extensions from cython.
* ``'numba'`` : Runs rolling apply through JIT compiled code from numba.
@@ -1351,8 +1352,21 @@ def apply_func(values, begin, end, min_periods, raw=raw):
return apply_func
- def sum(self, *args, **kwargs):
+ def sum(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("sum", args, kwargs)
+ if maybe_use_numba(engine):
+ if self.method == "table":
+ raise NotImplementedError("method='table' is not supported.")
+ # Once numba supports np.nansum with axis, args will be relevant.
+ # https://github.com/numba/numba/issues/6610
+ args = () if self.method == "single" else (0,)
+ return self.apply(
+ np.nansum,
+ raw=True,
+ engine=engine,
+ engine_kwargs=engine_kwargs,
+ args=args,
+ )
window_func = window_aggregations.roll_sum
return self._apply(window_func, name="sum", **kwargs)
@@ -1362,13 +1376,43 @@ def sum(self, *args, **kwargs):
Parameters
----------
- *args, **kwargs
- Arguments and keyword arguments to be passed into func.
+ engine : str, default None
+ * ``'cython'`` : Runs rolling max through C-extensions from cython.
+ * ``'numba'`` : Runs rolling max through JIT compiled code from numba.
+ * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
+
+ .. versionadded:: 1.3.0
+
+ engine_kwargs : dict, default None
+ * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
+ * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
+ and ``parallel`` dictionary keys. The values must either be ``True`` or
+ ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
+ ``{'nopython': True, 'nogil': False, 'parallel': False}``
+
+ .. versionadded:: 1.3.0
+
+ **kwargs
+ For compatibility with other %(name)s methods. Has no effect on
+ the result.
"""
)
- def max(self, *args, **kwargs):
+ def max(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("max", args, kwargs)
+ if maybe_use_numba(engine):
+ if self.method == "table":
+ raise NotImplementedError("method='table' is not supported.")
+ # Once numba supports np.nanmax with axis, args will be relevant.
+ # https://github.com/numba/numba/issues/6610
+ args = () if self.method == "single" else (0,)
+ return self.apply(
+ np.nanmax,
+ raw=True,
+ engine=engine,
+ engine_kwargs=engine_kwargs,
+ args=args,
+ )
window_func = window_aggregations.roll_max
return self._apply(window_func, name="max", **kwargs)
@@ -1378,8 +1422,25 @@ def max(self, *args, **kwargs):
Parameters
----------
+ engine : str, default None
+ * ``'cython'`` : Runs rolling min through C-extensions from cython.
+ * ``'numba'`` : Runs rolling min through JIT compiled code from numba.
+ * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
+
+ .. versionadded:: 1.3.0
+
+ engine_kwargs : dict, default None
+ * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
+ * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
+ and ``parallel`` dictionary keys. The values must either be ``True`` or
+ ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
+ ``{'nopython': True, 'nogil': False, 'parallel': False}``
+
+ .. versionadded:: 1.3.0
+
**kwargs
- Under Review.
+ For compatibility with other %(name)s methods. Has no effect on
+ the result.
Returns
-------
@@ -1409,13 +1470,39 @@ def max(self, *args, **kwargs):
"""
)
- def min(self, *args, **kwargs):
+ def min(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("min", args, kwargs)
+ if maybe_use_numba(engine):
+ if self.method == "table":
+ raise NotImplementedError("method='table' is not supported.")
+ # Once numba supports np.nanmin with axis, args will be relevant.
+ # https://github.com/numba/numba/issues/6610
+ args = () if self.method == "single" else (0,)
+ return self.apply(
+ np.nanmin,
+ raw=True,
+ engine=engine,
+ engine_kwargs=engine_kwargs,
+ args=args,
+ )
window_func = window_aggregations.roll_min
return self._apply(window_func, name="min", **kwargs)
- def mean(self, *args, **kwargs):
+ def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_window_func("mean", args, kwargs)
+ if maybe_use_numba(engine):
+ if self.method == "table":
+ raise NotImplementedError("method='table' is not supported.")
+ # Once numba supports np.nanmean with axis, args will be relevant.
+ # https://github.com/numba/numba/issues/6610
+ args = () if self.method == "single" else (0,)
+ return self.apply(
+ np.nanmean,
+ raw=True,
+ engine=engine,
+ engine_kwargs=engine_kwargs,
+ args=args,
+ )
window_func = window_aggregations.roll_mean
return self._apply(window_func, name="mean", **kwargs)
@@ -1425,9 +1512,25 @@ def mean(self, *args, **kwargs):
Parameters
----------
+ engine : str, default None
+ * ``'cython'`` : Runs rolling median through C-extensions from cython.
+ * ``'numba'`` : Runs rolling median through JIT compiled code from numba.
+ * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
+
+ .. versionadded:: 1.3.0
+
+ engine_kwargs : dict, default None
+ * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
+ * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
+ and ``parallel`` dictionary keys. The values must either be ``True`` or
+ ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
+ ``{'nopython': True, 'nogil': False, 'parallel': False}``
+
+ .. versionadded:: 1.3.0
+
**kwargs
For compatibility with other %(name)s methods. Has no effect
- on the computed median.
+ on the computed result.
Returns
-------
@@ -1456,10 +1559,21 @@ def mean(self, *args, **kwargs):
"""
)
- def median(self, **kwargs):
+ def median(self, engine=None, engine_kwargs=None, **kwargs):
+ if maybe_use_numba(engine):
+ if self.method == "table":
+ raise NotImplementedError("method='table' is not supported.")
+ # Once numba supports np.nanmedian with axis, args will be relevant.
+ # https://github.com/numba/numba/issues/6610
+ args = () if self.method == "single" else (0,)
+ return self.apply(
+ np.nanmedian,
+ raw=True,
+ engine=engine,
+ engine_kwargs=engine_kwargs,
+ args=args,
+ )
window_func = window_aggregations.roll_median_c
- # GH 32865. Move max window size calculation to
- # the median function implementation
return self._apply(window_func, name="median", **kwargs)
def std(self, ddof: int = 1, *args, **kwargs):
@@ -1492,7 +1606,8 @@ def var(self, ddof: int = 1, *args, **kwargs):
Parameters
----------
**kwargs
- Keyword arguments to be passed into func.
+ For compatibility with other %(name)s methods. Has no effect on
+ the result.
"""
def skew(self, **kwargs):
@@ -1512,7 +1627,8 @@ def skew(self, **kwargs):
Parameters
----------
**kwargs
- Under Review.
+ For compatibility with other %(name)s methods. Has no effect on
+ the result.
Returns
-------
@@ -1604,6 +1720,7 @@ def kurt(self, **kwargs):
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
+
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
@@ -1614,6 +1731,23 @@ def kurt(self, **kwargs):
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
+
+ engine : str, default None
+ * ``'cython'`` : Runs rolling quantile through C-extensions from cython.
+ * ``'numba'`` : Runs rolling quantile through JIT compiled code from numba.
+ * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
+
+ .. versionadded:: 1.3.0
+
+ engine_kwargs : dict, default None
+ * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
+ * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
+ and ``parallel`` dictionary keys. The values must either be ``True`` or
+ ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
+ ``{'nopython': True, 'nogil': False, 'parallel': False}``
+
+ .. versionadded:: 1.3.0
+
**kwargs
For compatibility with other %(name)s methods. Has no effect on
the result.
@@ -1995,33 +2129,33 @@ def apply(
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
- def sum(self, *args, **kwargs):
+ def sum(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
- return super().sum(*args, **kwargs)
+ return super().sum(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@Substitution(name="rolling", func_name="max")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
- def max(self, *args, **kwargs):
+ def max(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
- return super().max(*args, **kwargs)
+ return super().max(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
- def min(self, *args, **kwargs):
+ def min(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
- return super().min(*args, **kwargs)
+ return super().min(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
- def mean(self, *args, **kwargs):
+ def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
- return super().mean(*args, **kwargs)
+ return super().mean(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
- def median(self, **kwargs):
- return super().median(**kwargs)
+ def median(self, engine=None, engine_kwargs=None, **kwargs):
+ return super().median(engine=engine, engine_kwargs=engine_kwargs, **kwargs)
@Substitution(name="rolling", versionadded="")
@Appender(_shared_docs["std"])
@@ -2081,7 +2215,9 @@ def kurt(self, **kwargs):
@Appender(_shared_docs["quantile"])
def quantile(self, quantile, interpolation="linear", **kwargs):
return super().quantile(
- quantile=quantile, interpolation=interpolation, **kwargs
+ quantile=quantile,
+ interpolation=interpolation,
+ **kwargs,
)
@Substitution(name="rolling", func_name="cov")
diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py
index a765f268cfb07..70bead489d2c6 100644
--- a/pandas/tests/window/conftest.py
+++ b/pandas/tests/window/conftest.py
@@ -47,12 +47,26 @@ def win_types_special(request):
"kurt",
"skew",
"count",
+ "sem",
]
)
def arithmetic_win_operators(request):
return request.param
+@pytest.fixture(
+ params=[
+ "sum",
+ "mean",
+ "median",
+ "max",
+ "min",
+ ]
+)
+def arithmetic_numba_supported_operators(request):
+ return request.param
+
+
@pytest.fixture(params=["right", "left", "both", "neither"])
def closed(request):
return request.param
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py
index 4d22495e6c69a..9d9c216801d73 100644
--- a/pandas/tests/window/test_numba.py
+++ b/pandas/tests/window/test_numba.py
@@ -12,9 +12,9 @@
@td.skip_if_no("numba", "0.46.0")
@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
-class TestRollingApply:
+class TestEngine:
@pytest.mark.parametrize("jit", [True, False])
- def test_numba_vs_cython(self, jit, nogil, parallel, nopython, center):
+ def test_numba_vs_cython_apply(self, jit, nogil, parallel, nopython, center):
def f(x, *args):
arg_sum = 0
for arg in args:
@@ -38,8 +38,47 @@ def f(x, *args):
)
tm.assert_series_equal(result, expected)
+ def test_numba_vs_cython_rolling_methods(
+ self, nogil, parallel, nopython, arithmetic_numba_supported_operators
+ ):
+
+ method = arithmetic_numba_supported_operators
+
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
+
+ df = DataFrame(np.eye(5))
+ roll = df.rolling(2)
+ result = getattr(roll, method)(engine="numba", engine_kwargs=engine_kwargs)
+ expected = getattr(roll, method)(engine="cython")
+
+ # Check the cache
+ assert (getattr(np, f"nan{method}"), "Rolling_apply_single") in NUMBA_FUNC_CACHE
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_numba_vs_cython_expanding_methods(
+ self, nogil, parallel, nopython, arithmetic_numba_supported_operators
+ ):
+
+ method = arithmetic_numba_supported_operators
+
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
+
+ df = DataFrame(np.eye(5))
+ expand = df.expanding()
+ result = getattr(expand, method)(engine="numba", engine_kwargs=engine_kwargs)
+ expected = getattr(expand, method)(engine="cython")
+
+ # Check the cache
+ assert (
+ getattr(np, f"nan{method}"),
+ "Expanding_apply_single",
+ ) in NUMBA_FUNC_CACHE
+
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.parametrize("jit", [True, False])
- def test_cache(self, jit, nogil, parallel, nopython):
+ def test_cache_apply(self, jit, nogil, parallel, nopython):
# Test that the functions are cached correctly if we switch functions
def func_1(x):
return np.mean(x) + 4
@@ -138,7 +177,27 @@ def f(x):
f, engine="numba", raw=True
)
- def test_table_method_rolling(self, axis, nogil, parallel, nopython):
+ @pytest.mark.xfail(
+ raises=NotImplementedError, reason="method='table' is not supported."
+ )
+ def test_table_method_rolling_methods(
+ self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
+ ):
+ method = arithmetic_numba_supported_operators
+
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
+
+ df = DataFrame(np.eye(3))
+
+ result = getattr(
+ df.rolling(2, method="table", axis=axis, min_periods=0), method
+ )(engine_kwargs=engine_kwargs, engine="numba")
+ expected = getattr(
+ df.rolling(2, method="single", axis=axis, min_periods=0), method
+ )(engine_kwargs=engine_kwargs, engine="numba")
+ tm.assert_frame_equal(result, expected)
+
+ def test_table_method_rolling_apply(self, axis, nogil, parallel, nopython):
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
def f(x):
@@ -173,7 +232,7 @@ def weighted_mean(x):
)
tm.assert_frame_equal(result, expected)
- def test_table_method_expanding(self, axis, nogil, parallel, nopython):
+ def test_table_method_expanding_apply(self, axis, nogil, parallel, nopython):
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
def f(x):
@@ -187,3 +246,23 @@ def f(x):
f, raw=True, engine_kwargs=engine_kwargs, engine="numba"
)
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.xfail(
+ raises=NotImplementedError, reason="method='table' is not supported."
+ )
+ def test_table_method_expanding_methods(
+ self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
+ ):
+ method = arithmetic_numba_supported_operators
+
+ engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
+
+ df = DataFrame(np.eye(3))
+
+ result = getattr(df.expanding(method="table", axis=axis), method)(
+ engine_kwargs=engine_kwargs, engine="numba"
+ )
+ expected = getattr(df.expanding(method="single", axis=axis), method)(
+ engine_kwargs=engine_kwargs, engine="numba"
+ )
+ tm.assert_frame_equal(result, expected)
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Adds `engine ` and `engine_kwargs` argument to `mean`, `median`, `sum`, `min`, `max`
| https://api.github.com/repos/pandas-dev/pandas/pulls/38895 | 2021-01-02T02:06:51Z | 2021-01-04T21:10:56Z | 2021-01-04T21:10:56Z | 2021-06-14T20:11:38Z |
TST/CLN: deduplicate troublesome rank values | diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py
index 991a91275ae1d..6ad1b475e28a2 100644
--- a/pandas/tests/frame/methods/test_rank.py
+++ b/pandas/tests/frame/methods/test_rank.py
@@ -392,7 +392,7 @@ def test_pct_max_many_rows(self):
([NegInfinity(), "1", "A", "BA", "Ba", "C", Infinity()], "object"),
],
)
- def test_rank_inf_and_nan(self, contents, dtype):
+ def test_rank_inf_and_nan(self, contents, dtype, frame_or_series):
dtype_na_map = {
"float64": np.nan,
"float32": np.nan,
@@ -410,12 +410,13 @@ def test_rank_inf_and_nan(self, contents, dtype):
nan_indices = np.random.choice(range(len(values)), 5)
values = np.insert(values, nan_indices, na_value)
exp_order = np.insert(exp_order, nan_indices, np.nan)
- # shuffle the testing array and expected results in the same way
+
+ # Shuffle the testing array and expected results in the same way
random_order = np.random.permutation(len(values))
- df = DataFrame({"a": values[random_order]})
- expected = DataFrame({"a": exp_order[random_order]}, dtype="float64")
- result = df.rank()
- tm.assert_frame_equal(result, expected)
+ obj = frame_or_series(values[random_order])
+ expected = frame_or_series(exp_order[random_order], dtype="float64")
+ result = obj.rank()
+ tm.assert_equal(result, expected)
def test_df_series_inf_nan_consistency(self):
# GH#32593
diff --git a/pandas/tests/series/methods/test_rank.py b/pandas/tests/series/methods/test_rank.py
index 6d3c37659f5c4..9d052e2236aae 100644
--- a/pandas/tests/series/methods/test_rank.py
+++ b/pandas/tests/series/methods/test_rank.py
@@ -3,7 +3,6 @@
import numpy as np
import pytest
-from pandas._libs import iNaT
from pandas._libs.algos import Infinity, NegInfinity
import pandas.util._test_decorators as td
@@ -206,91 +205,6 @@ def test_rank_signature(self):
with pytest.raises(ValueError, match=msg):
s.rank("average")
- @pytest.mark.parametrize(
- "contents,dtype",
- [
- (
- [
- -np.inf,
- -50,
- -1,
- -1e-20,
- -1e-25,
- -1e-50,
- 0,
- 1e-40,
- 1e-20,
- 1e-10,
- 2,
- 40,
- np.inf,
- ],
- "float64",
- ),
- (
- [
- -np.inf,
- -50,
- -1,
- -1e-20,
- -1e-25,
- -1e-45,
- 0,
- 1e-40,
- 1e-20,
- 1e-10,
- 2,
- 40,
- np.inf,
- ],
- "float32",
- ),
- ([np.iinfo(np.uint8).min, 1, 2, 100, np.iinfo(np.uint8).max], "uint8"),
- pytest.param(
- [
- np.iinfo(np.int64).min,
- -100,
- 0,
- 1,
- 9999,
- 100000,
- 1e10,
- np.iinfo(np.int64).max,
- ],
- "int64",
- marks=pytest.mark.xfail(
- reason="iNaT is equivalent to minimum value of dtype"
- "int64 pending issue GH#16674"
- ),
- ),
- ([NegInfinity(), "1", "A", "BA", "Ba", "C", Infinity()], "object"),
- ],
- )
- def test_rank_inf(self, contents, dtype):
- dtype_na_map = {
- "float64": np.nan,
- "float32": np.nan,
- "int64": iNaT,
- "object": None,
- }
- # Insert nans at random positions if underlying dtype has missing
- # value. Then adjust the expected order by adding nans accordingly
- # This is for testing whether rank calculation is affected
- # when values are interwined with nan values.
- values = np.array(contents, dtype=dtype)
- exp_order = np.array(range(len(values)), dtype="float64") + 1.0
- if dtype in dtype_na_map:
- na_value = dtype_na_map[dtype]
- nan_indices = np.random.choice(range(len(values)), 5)
- values = np.insert(values, nan_indices, na_value)
- exp_order = np.insert(exp_order, nan_indices, np.nan)
- # shuffle the testing array and expected results in the same way
- random_order = np.random.permutation(len(values))
- iseries = Series(values[random_order])
- exp = Series(exp_order[random_order], dtype="float64")
- iranks = iseries.rank()
- tm.assert_series_equal(iranks, exp)
-
def test_rank_tie_methods(self):
s = self.s
| xref https://github.com/pandas-dev/pandas/pull/38681#discussion_r548554225
| https://api.github.com/repos/pandas-dev/pandas/pulls/38894 | 2021-01-02T01:11:31Z | 2021-01-04T03:35:48Z | 2021-01-04T03:35:48Z | 2021-01-04T03:55:08Z |
doc fix for testing.assert_series_equal check_freq arg | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index b1f8389420cd9..4102bdd07aa8f 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -15,12 +15,12 @@ including other versions of pandas.
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. _whatsnew_121.api_breaking.testing.assert_frame_equal:
+.. _whatsnew_121.api_breaking.testing.check_freq:
-Added ``check_freq`` argument to ``testing.assert_frame_equal``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Added ``check_freq`` argument to ``testing.assert_frame_equal`` and ``testing.assert_series_equal``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` now raises ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked by :func:`testing.assert_frame_equal`.
+The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` and :func:`testing.assert_series_equal` now raise ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked.
.. ---------------------------------------------------------------------------
diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py
index 13115d6b959d9..69e17199c9a90 100644
--- a/pandas/_testing/asserters.py
+++ b/pandas/_testing/asserters.py
@@ -878,6 +878,8 @@ def assert_series_equal(
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
+
+ .. versionadded:: 1.1.0
check_flags : bool, default True
Whether to check the `flags` attribute.
| This fixes `check_freq` arg docs for `assert_series_equal` like #38471 did for `assert_frame_equal`.
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38893 | 2021-01-02T00:46:40Z | 2021-01-03T17:01:53Z | 2021-01-03T17:01:53Z | 2021-01-04T16:50:08Z |
ASV: Add asv for groupby.indices | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index bf210352bcb5d..b4d9db95af163 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -126,6 +126,9 @@ def setup(self, data, key):
def time_series_groups(self, data, key):
self.ser.groupby(self.ser).groups
+ def time_series_indices(self, data, key):
+ self.ser.groupby(self.ser).indices
+
class GroupManyLabels:
| - [x] closes #38495
Asv for the indices performance regression
| https://api.github.com/repos/pandas-dev/pandas/pulls/38892 | 2021-01-01T23:56:57Z | 2021-01-03T18:44:22Z | 2021-01-03T18:44:22Z | 2021-01-03T18:44:59Z |
Backport PR #38471 on branch 1.2.x (DOC: fixes for assert_frame_equal check_freq argument) | diff --git a/doc/source/whatsnew/v1.2.1.rst b/doc/source/whatsnew/v1.2.1.rst
index a1612117072a5..b1f8389420cd9 100644
--- a/doc/source/whatsnew/v1.2.1.rst
+++ b/doc/source/whatsnew/v1.2.1.rst
@@ -10,6 +10,20 @@ including other versions of pandas.
.. ---------------------------------------------------------------------------
+.. _whatsnew_121.api_breaking:
+
+Backwards incompatible API changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _whatsnew_121.api_breaking.testing.assert_frame_equal:
+
+Added ``check_freq`` argument to ``testing.assert_frame_equal``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``check_freq`` argument was added to :func:`testing.assert_frame_equal` in pandas 1.1.0 and defaults to ``True``. :func:`testing.assert_frame_equal` now raises ``AssertionError`` if the indexes do not have the same frequency. Before pandas 1.1.0, the index frequency was not checked by :func:`testing.assert_frame_equal`.
+
+.. ---------------------------------------------------------------------------
+
.. _whatsnew_121.regressions:
Fixed regressions
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 73b1dcf31979f..0b0778f3d3e5c 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -1578,6 +1578,8 @@ def assert_frame_equal(
(same as in columns) - same labels must be with the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
+
+ .. versionadded:: 1.1.0
check_flags : bool, default True
Whether to check the `flags` attribute.
rtol : float, default 1e-5
| Backport PR #38471
| https://api.github.com/repos/pandas-dev/pandas/pulls/38891 | 2021-01-01T23:29:45Z | 2021-01-02T11:11:58Z | 2021-01-02T11:11:58Z | 2021-01-04T16:49:22Z |
CLN: add typing for dtype arg in directories core/indexes and core/strings (GH38808) | diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 9062667298d7c..24920e9b5faa7 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -7,7 +7,7 @@
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
-from pandas._typing import ArrayLike, Label
+from pandas._typing import ArrayLike, Dtype, Label
from pandas.util._decorators import Appender, doc
from pandas.core.dtypes.common import (
@@ -180,7 +180,13 @@ def _engine_type(self):
# Constructors
def __new__(
- cls, data=None, categories=None, ordered=None, dtype=None, copy=False, name=None
+ cls,
+ data=None,
+ categories=None,
+ ordered=None,
+ dtype: Optional[Dtype] = None,
+ copy=False,
+ name=None,
):
name = maybe_extract_name(name, data, cls)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index d176b6a5d8e6d..f82ee27aef534 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -14,7 +14,7 @@
to_offset,
)
from pandas._libs.tslibs.offsets import prefix_mapping
-from pandas._typing import DtypeObj
+from pandas._typing import Dtype, DtypeObj
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly, doc
@@ -289,7 +289,7 @@ def __new__(
ambiguous="raise",
dayfirst=False,
yearfirst=False,
- dtype=None,
+ dtype: Optional[Dtype] = None,
copy=False,
name=None,
):
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 054b21d2857ff..aabc3e741641f 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -11,7 +11,7 @@
from pandas._libs import lib
from pandas._libs.interval import Interval, IntervalMixin, IntervalTree
from pandas._libs.tslibs import BaseOffset, Timedelta, Timestamp, to_offset
-from pandas._typing import DtypeObj, Label
+from pandas._typing import Dtype, DtypeObj, Label
from pandas.errors import InvalidIndexError
from pandas.util._decorators import Appender, cache_readonly
from pandas.util._exceptions import rewrite_exception
@@ -192,7 +192,7 @@ def __new__(
cls,
data,
closed=None,
- dtype=None,
+ dtype: Optional[Dtype] = None,
copy: bool = False,
name=None,
verify_integrity: bool = True,
@@ -249,7 +249,12 @@ def _simple_new(cls, array: IntervalArray, name: Label = None):
}
)
def from_breaks(
- cls, breaks, closed: str = "right", name=None, copy: bool = False, dtype=None
+ cls,
+ breaks,
+ closed: str = "right",
+ name=None,
+ copy: bool = False,
+ dtype: Optional[Dtype] = None,
):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(
@@ -281,7 +286,7 @@ def from_arrays(
closed: str = "right",
name=None,
copy: bool = False,
- dtype=None,
+ dtype: Optional[Dtype] = None,
):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_arrays(
@@ -307,7 +312,12 @@ def from_arrays(
}
)
def from_tuples(
- cls, data, closed: str = "right", name=None, copy: bool = False, dtype=None
+ cls,
+ data,
+ closed: str = "right",
+ name=None,
+ copy: bool = False,
+ dtype: Optional[Dtype] = None,
):
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 2c2888e1c6f72..59793d1a63813 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -1,4 +1,4 @@
-from typing import Any
+from typing import Any, Optional
import warnings
import numpy as np
@@ -45,7 +45,7 @@ class NumericIndex(Index):
_is_numeric_dtype = True
_can_hold_strings = False
- def __new__(cls, data=None, dtype=None, copy=False, name=None):
+ def __new__(cls, data=None, dtype: Optional[Dtype] = None, copy=False, name=None):
name = maybe_extract_name(name, data, cls)
subarr = cls._ensure_array(data, dtype, copy)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 7746d7e617f8b..7762198246603 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -1,5 +1,5 @@
from datetime import datetime, timedelta
-from typing import Any
+from typing import Any, Optional
import warnings
import numpy as np
@@ -7,7 +7,7 @@
from pandas._libs import index as libindex, lib
from pandas._libs.tslibs import BaseOffset, Period, Resolution, Tick
from pandas._libs.tslibs.parsing import DateParseError, parse_time_string
-from pandas._typing import DtypeObj
+from pandas._typing import Dtype, DtypeObj
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly, doc
@@ -190,7 +190,7 @@ def __new__(
data=None,
ordinal=None,
freq=None,
- dtype=None,
+ dtype: Optional[Dtype] = None,
copy=False,
name=None,
**fields,
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 029c4a30a6b22..4256ad93695e9 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -8,7 +8,7 @@
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
-from pandas._typing import Label
+from pandas._typing import Dtype, Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import cache_readonly, doc
@@ -83,7 +83,13 @@ class RangeIndex(Int64Index):
# Constructors
def __new__(
- cls, start=None, stop=None, step=None, dtype=None, copy=False, name=None
+ cls,
+ start=None,
+ stop=None,
+ step=None,
+ dtype: Optional[Dtype] = None,
+ copy=False,
+ name=None,
):
cls._validate_dtype(dtype)
@@ -113,7 +119,9 @@ def __new__(
return cls._simple_new(rng, name=name)
@classmethod
- def from_range(cls, data: range, name=None, dtype=None) -> "RangeIndex":
+ def from_range(
+ cls, data: range, name=None, dtype: Optional[Dtype] = None
+ ) -> "RangeIndex":
"""
Create RangeIndex from a range object.
@@ -405,7 +413,7 @@ def _shallow_copy(self, values=None, name: Label = no_default):
return result
@doc(Int64Index.copy)
- def copy(self, name=None, deep=False, dtype=None, names=None):
+ def copy(self, name=None, deep=False, dtype: Optional[Dtype] = None, names=None):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._shallow_copy(name=name)
diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py
index a29d84edd3a77..471f1e521b991 100644
--- a/pandas/core/strings/object_array.py
+++ b/pandas/core/strings/object_array.py
@@ -1,6 +1,6 @@
import re
import textwrap
-from typing import Pattern, Set, Union, cast
+from typing import Optional, Pattern, Set, Union, cast
import unicodedata
import warnings
@@ -9,7 +9,7 @@
import pandas._libs.lib as lib
import pandas._libs.missing as libmissing
import pandas._libs.ops as libops
-from pandas._typing import Scalar
+from pandas._typing import Dtype, Scalar
from pandas.core.dtypes.common import is_re, is_scalar
from pandas.core.dtypes.missing import isna
@@ -28,7 +28,7 @@ def __len__(self):
# For typing, _str_map relies on the object being sized.
raise NotImplementedError
- def _str_map(self, f, na_value=None, dtype=None):
+ def _str_map(self, f, na_value=None, dtype: Optional[Dtype] = None):
"""
Map a callable over valid element of the array.
| Follow on PR for #38808
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38890 | 2021-01-01T23:22:05Z | 2021-01-04T01:11:37Z | 2021-01-04T01:11:37Z | 2021-01-04T01:11:41Z |
DOC: create shared includes for comparison docs, take III | diff --git a/doc/source/getting_started/comparison/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst
index c6f508aae0e21..eb11b75027909 100644
--- a/doc/source/getting_started/comparison/comparison_with_sas.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sas.rst
@@ -8,7 +8,7 @@ For potential users coming from `SAS <https://en.wikipedia.org/wiki/SAS_(softwar
this page is meant to demonstrate how different SAS operations would be
performed in pandas.
-.. include:: comparison_boilerplate.rst
+.. include:: includes/introduction.rst
.. note::
@@ -93,16 +93,7 @@ specifying the column names.
;
run;
-A pandas ``DataFrame`` can be constructed in many different ways,
-but for a small number of values, it is often convenient to specify it as
-a Python dictionary, where the keys are the column names
-and the values are the data.
-
-.. ipython:: python
-
- df = pd.DataFrame({"x": [1, 3, 5], "y": [2, 4, 6]})
- df
-
+.. include:: includes/construct_dataframe.rst
Reading external data
~~~~~~~~~~~~~~~~~~~~~
@@ -217,12 +208,7 @@ or more columns.
DATA step begins and can also be used in PROC statements */
run;
-DataFrames can be filtered in multiple ways; the most intuitive of which is using
-:ref:`boolean indexing <indexing.boolean>`
-
-.. ipython:: python
-
- tips[tips["total_bill"] > 10].head()
+.. include:: includes/filtering.rst
If/then logic
~~~~~~~~~~~~~
@@ -239,18 +225,7 @@ In SAS, if/then logic can be used to create new columns.
else bucket = 'high';
run;
-The same operation in pandas can be accomplished using
-the ``where`` method from ``numpy``.
-
-.. ipython:: python
-
- tips["bucket"] = np.where(tips["total_bill"] < 10, "low", "high")
- tips.head()
-
-.. ipython:: python
- :suppress:
-
- tips = tips.drop("bucket", axis=1)
+.. include:: includes/if_then.rst
Date functionality
~~~~~~~~~~~~~~~~~~
@@ -278,28 +253,7 @@ functions pandas supports other Time Series features
not available in Base SAS (such as resampling and custom offsets) -
see the :ref:`timeseries documentation<timeseries>` for more details.
-.. ipython:: python
-
- tips["date1"] = pd.Timestamp("2013-01-15")
- tips["date2"] = pd.Timestamp("2015-02-15")
- tips["date1_year"] = tips["date1"].dt.year
- tips["date2_month"] = tips["date2"].dt.month
- tips["date1_next"] = tips["date1"] + pd.offsets.MonthBegin()
- tips["months_between"] = tips["date2"].dt.to_period("M") - tips[
- "date1"
- ].dt.to_period("M")
-
- tips[
- ["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"]
- ].head()
-
-.. ipython:: python
- :suppress:
-
- tips = tips.drop(
- ["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"],
- axis=1,
- )
+.. include:: includes/time_date.rst
Selection of columns
~~~~~~~~~~~~~~~~~~~~
@@ -349,14 +303,7 @@ Sorting in SAS is accomplished via ``PROC SORT``
by sex total_bill;
run;
-pandas objects have a :meth:`~DataFrame.sort_values` method, which
-takes a list of columns to sort by.
-
-.. ipython:: python
-
- tips = tips.sort_values(["sex", "total_bill"])
- tips.head()
-
+.. include:: includes/sorting.rst
String processing
-----------------
@@ -377,14 +324,7 @@ functions. ``LENGTHN`` excludes trailing blanks and ``LENGTHC`` includes trailin
put(LENGTHC(time));
run;
-Python determines the length of a character string with the ``len`` function.
-``len`` includes trailing blanks. Use ``len`` and ``rstrip`` to exclude
-trailing blanks.
-
-.. ipython:: python
-
- tips["time"].str.len().head()
- tips["time"].str.rstrip().str.len().head()
+.. include:: includes/length.rst
Find
diff --git a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
index 73645d429cc66..7b779b02e20f8 100644
--- a/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
+++ b/doc/source/getting_started/comparison/comparison_with_spreadsheets.rst
@@ -14,7 +14,7 @@ terminology and link to documentation for Excel, but much will be the same/simil
`Apple Numbers <https://www.apple.com/mac/numbers/compatibility/functions.html>`_, and other
Excel-compatible spreadsheet software.
-.. include:: comparison_boilerplate.rst
+.. include:: includes/introduction.rst
Data structures
---------------
diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst
index 4fe7b7e96cf50..52799442d6118 100644
--- a/doc/source/getting_started/comparison/comparison_with_sql.rst
+++ b/doc/source/getting_started/comparison/comparison_with_sql.rst
@@ -8,7 +8,7 @@ Since many potential pandas users have some familiarity with
`SQL <https://en.wikipedia.org/wiki/SQL>`_, this page is meant to provide some examples of how
various SQL operations would be performed using pandas.
-.. include:: comparison_boilerplate.rst
+.. include:: includes/introduction.rst
Most of the examples will utilize the ``tips`` dataset found within pandas tests. We'll read
the data into a DataFrame called ``tips`` and assume we have a database table of the same name and
@@ -65,24 +65,9 @@ Filtering in SQL is done via a WHERE clause.
SELECT *
FROM tips
- WHERE time = 'Dinner'
- LIMIT 5;
-
-DataFrames can be filtered in multiple ways; the most intuitive of which is using
-:ref:`boolean indexing <indexing.boolean>`
-
-.. ipython:: python
-
- tips[tips["time"] == "Dinner"].head(5)
-
-The above statement is simply passing a ``Series`` of True/False objects to the DataFrame,
-returning all rows with True.
-
-.. ipython:: python
+ WHERE time = 'Dinner';
- is_dinner = tips["time"] == "Dinner"
- is_dinner.value_counts()
- tips[is_dinner].head(5)
+.. include:: includes/filtering.rst
Just like SQL's OR and AND, multiple conditions can be passed to a DataFrame using | (OR) and &
(AND).
diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst
index b3ed9b1ba630f..d1ad18bddb0a7 100644
--- a/doc/source/getting_started/comparison/comparison_with_stata.rst
+++ b/doc/source/getting_started/comparison/comparison_with_stata.rst
@@ -8,7 +8,7 @@ For potential users coming from `Stata <https://en.wikipedia.org/wiki/Stata>`__
this page is meant to demonstrate how different Stata operations would be
performed in pandas.
-.. include:: comparison_boilerplate.rst
+.. include:: includes/introduction.rst
.. note::
@@ -89,16 +89,7 @@ specifying the column names.
5 6
end
-A pandas ``DataFrame`` can be constructed in many different ways,
-but for a small number of values, it is often convenient to specify it as
-a Python dictionary, where the keys are the column names
-and the values are the data.
-
-.. ipython:: python
-
- df = pd.DataFrame({"x": [1, 3, 5], "y": [2, 4, 6]})
- df
-
+.. include:: includes/construct_dataframe.rst
Reading external data
~~~~~~~~~~~~~~~~~~~~~
@@ -210,12 +201,7 @@ Filtering in Stata is done with an ``if`` clause on one or more columns.
list if total_bill > 10
-DataFrames can be filtered in multiple ways; the most intuitive of which is using
-:ref:`boolean indexing <indexing.boolean>`.
-
-.. ipython:: python
-
- tips[tips["total_bill"] > 10].head()
+.. include:: includes/filtering.rst
If/then logic
~~~~~~~~~~~~~
@@ -227,18 +213,7 @@ In Stata, an ``if`` clause can also be used to create new columns.
generate bucket = "low" if total_bill < 10
replace bucket = "high" if total_bill >= 10
-The same operation in pandas can be accomplished using
-the ``where`` method from ``numpy``.
-
-.. ipython:: python
-
- tips["bucket"] = np.where(tips["total_bill"] < 10, "low", "high")
- tips.head()
-
-.. ipython:: python
- :suppress:
-
- tips = tips.drop("bucket", axis=1)
+.. include:: includes/if_then.rst
Date functionality
~~~~~~~~~~~~~~~~~~
@@ -266,28 +241,7 @@ functions, pandas supports other Time Series features
not available in Stata (such as time zone handling and custom offsets) --
see the :ref:`timeseries documentation<timeseries>` for more details.
-.. ipython:: python
-
- tips["date1"] = pd.Timestamp("2013-01-15")
- tips["date2"] = pd.Timestamp("2015-02-15")
- tips["date1_year"] = tips["date1"].dt.year
- tips["date2_month"] = tips["date2"].dt.month
- tips["date1_next"] = tips["date1"] + pd.offsets.MonthBegin()
- tips["months_between"] = tips["date2"].dt.to_period("M") - tips[
- "date1"
- ].dt.to_period("M")
-
- tips[
- ["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"]
- ].head()
-
-.. ipython:: python
- :suppress:
-
- tips = tips.drop(
- ["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"],
- axis=1,
- )
+.. include:: includes/time_date.rst
Selection of columns
~~~~~~~~~~~~~~~~~~~~
@@ -327,14 +281,7 @@ Sorting in Stata is accomplished via ``sort``
sort sex total_bill
-pandas objects have a :meth:`DataFrame.sort_values` method, which
-takes a list of columns to sort by.
-
-.. ipython:: python
-
- tips = tips.sort_values(["sex", "total_bill"])
- tips.head()
-
+.. include:: includes/sorting.rst
String processing
-----------------
@@ -350,14 +297,7 @@ Stata determines the length of a character string with the :func:`strlen` and
generate strlen_time = strlen(time)
generate ustrlen_time = ustrlen(time)
-Python determines the length of a character string with the ``len`` function.
-In Python 3, all strings are Unicode strings. ``len`` includes trailing blanks.
-Use ``len`` and ``rstrip`` to exclude trailing blanks.
-
-.. ipython:: python
-
- tips["time"].str.len().head()
- tips["time"].str.rstrip().str.len().head()
+.. include:: includes/length.rst
Finding position of substring
diff --git a/doc/source/getting_started/comparison/includes/construct_dataframe.rst b/doc/source/getting_started/comparison/includes/construct_dataframe.rst
new file mode 100644
index 0000000000000..4d066c7962d98
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/construct_dataframe.rst
@@ -0,0 +1,9 @@
+A pandas ``DataFrame`` can be constructed in many different ways,
+but for a small number of values, it is often convenient to specify it as
+a Python dictionary, where the keys are the column names
+and the values are the data.
+
+.. ipython:: python
+
+ df = pd.DataFrame({"x": [1, 3, 5], "y": [2, 4, 6]})
+ df
diff --git a/doc/source/getting_started/comparison/includes/filtering.rst b/doc/source/getting_started/comparison/includes/filtering.rst
new file mode 100644
index 0000000000000..861a93d92c2c2
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/filtering.rst
@@ -0,0 +1,16 @@
+DataFrames can be filtered in multiple ways; the most intuitive of which is using
+:ref:`boolean indexing <indexing.boolean>`
+
+.. ipython:: python
+
+ tips[tips["total_bill"] > 10]
+
+The above statement is simply passing a ``Series`` of ``True``/``False`` objects to the DataFrame,
+returning all rows with ``True``.
+
+.. ipython:: python
+
+ is_dinner = tips["time"] == "Dinner"
+ is_dinner
+ is_dinner.value_counts()
+ tips[is_dinner]
diff --git a/doc/source/getting_started/comparison/includes/if_then.rst b/doc/source/getting_started/comparison/includes/if_then.rst
new file mode 100644
index 0000000000000..d7977366cfc33
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/if_then.rst
@@ -0,0 +1,12 @@
+The same operation in pandas can be accomplished using
+the ``where`` method from ``numpy``.
+
+.. ipython:: python
+
+ tips["bucket"] = np.where(tips["total_bill"] < 10, "low", "high")
+ tips.head()
+
+.. ipython:: python
+ :suppress:
+
+ tips = tips.drop("bucket", axis=1)
diff --git a/doc/source/getting_started/comparison/comparison_boilerplate.rst b/doc/source/getting_started/comparison/includes/introduction.rst
similarity index 100%
rename from doc/source/getting_started/comparison/comparison_boilerplate.rst
rename to doc/source/getting_started/comparison/includes/introduction.rst
diff --git a/doc/source/getting_started/comparison/includes/length.rst b/doc/source/getting_started/comparison/includes/length.rst
new file mode 100644
index 0000000000000..9581c661c0170
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/length.rst
@@ -0,0 +1,8 @@
+Python determines the length of a character string with the ``len`` function.
+In Python 3, all strings are Unicode strings. ``len`` includes trailing blanks.
+Use ``len`` and ``rstrip`` to exclude trailing blanks.
+
+.. ipython:: python
+
+ tips["time"].str.len().head()
+ tips["time"].str.rstrip().str.len().head()
diff --git a/doc/source/getting_started/comparison/includes/sorting.rst b/doc/source/getting_started/comparison/includes/sorting.rst
new file mode 100644
index 0000000000000..23f11ff485474
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/sorting.rst
@@ -0,0 +1,7 @@
+pandas objects have a :meth:`DataFrame.sort_values` method, which
+takes a list of columns to sort by.
+
+.. ipython:: python
+
+ tips = tips.sort_values(["sex", "total_bill"])
+ tips.head()
diff --git a/doc/source/getting_started/comparison/includes/time_date.rst b/doc/source/getting_started/comparison/includes/time_date.rst
new file mode 100644
index 0000000000000..12a00b36dc97d
--- /dev/null
+++ b/doc/source/getting_started/comparison/includes/time_date.rst
@@ -0,0 +1,22 @@
+.. ipython:: python
+
+ tips["date1"] = pd.Timestamp("2013-01-15")
+ tips["date2"] = pd.Timestamp("2015-02-15")
+ tips["date1_year"] = tips["date1"].dt.year
+ tips["date2_month"] = tips["date2"].dt.month
+ tips["date1_next"] = tips["date1"] + pd.offsets.MonthBegin()
+ tips["months_between"] = tips["date2"].dt.to_period("M") - tips[
+ "date1"
+ ].dt.to_period("M")
+
+ tips[
+ ["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"]
+ ].head()
+
+.. ipython:: python
+ :suppress:
+
+ tips = tips.drop(
+ ["date1", "date2", "date1_year", "date2_month", "date1_next", "months_between"],
+ axis=1,
+ )
diff --git a/setup.cfg b/setup.cfg
index 56b2fa190ac99..2138e87ae5988 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -49,7 +49,10 @@ ignore = E203, # space before : (needed for how black formats slicing)
E711, # comparison to none should be 'if cond is none:'
exclude =
- doc/source/development/contributing_docstring.rst
+ doc/source/development/contributing_docstring.rst,
+ # work around issue of undefined variable warnings
+ # https://github.com/pandas-dev/pandas/pull/38837#issuecomment-752884156
+ doc/source/getting_started/comparison/includes/*.rst
[tool:pytest]
# sync minversion with setup.cfg & install.rst
| From original pull request (https://github.com/pandas-dev/pandas/pull/38771):
> This will help ensure consistency between the examples.
- [ ] ~~closes #xxxx~~
- [x] tests added / passed
- [ ] ~~passes `black pandas`~~
- [ ] ~~passes `git diff upstream/master -u -- "*.py" | flake8 --diff`~~
- [ ] ~~whatsnew entry~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/38887 | 2021-01-01T22:35:15Z | 2021-01-03T17:26:02Z | 2021-01-03T17:26:02Z | 2021-01-04T08:43:07Z |
CLN: add typing for dtype arg in core/arrays (GH38808) | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 3391e2760187c..872f17b7f0770 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1,7 +1,7 @@
import operator
from operator import le, lt
import textwrap
-from typing import Sequence, Type, TypeVar
+from typing import Optional, Sequence, Type, TypeVar, cast
import numpy as np
@@ -14,7 +14,7 @@
intervals_to_interval_bounds,
)
from pandas._libs.missing import NA
-from pandas._typing import ArrayLike
+from pandas._typing import ArrayLike, Dtype, NpDtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
@@ -170,7 +170,7 @@ def __new__(
cls,
data,
closed=None,
- dtype=None,
+ dtype: Optional[Dtype] = None,
copy: bool = False,
verify_integrity: bool = True,
):
@@ -212,7 +212,13 @@ def __new__(
@classmethod
def _simple_new(
- cls, left, right, closed=None, copy=False, dtype=None, verify_integrity=True
+ cls,
+ left,
+ right,
+ closed=None,
+ copy=False,
+ dtype: Optional[Dtype] = None,
+ verify_integrity=True,
):
result = IntervalMixin.__new__(cls)
@@ -223,12 +229,14 @@ def _simple_new(
if dtype is not None:
# GH 19262: dtype must be an IntervalDtype to override inferred
dtype = pandas_dtype(dtype)
- if not is_interval_dtype(dtype):
+ if is_interval_dtype(dtype):
+ dtype = cast(IntervalDtype, dtype)
+ if dtype.subtype is not None:
+ left = left.astype(dtype.subtype)
+ right = right.astype(dtype.subtype)
+ else:
msg = f"dtype must be an IntervalDtype, got {dtype}"
raise TypeError(msg)
- elif dtype.subtype is not None:
- left = left.astype(dtype.subtype)
- right = right.astype(dtype.subtype)
# coerce dtypes to match if needed
if is_float_dtype(left) and is_integer_dtype(right):
@@ -279,7 +287,9 @@ def _simple_new(
return result
@classmethod
- def _from_sequence(cls, scalars, *, dtype=None, copy=False):
+ def _from_sequence(
+ cls, scalars, *, dtype: Optional[Dtype] = None, copy: bool = False
+ ):
return cls(scalars, dtype=dtype, copy=copy)
@classmethod
@@ -338,7 +348,9 @@ def _from_factorized(cls, values, original):
),
}
)
- def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
+ def from_breaks(
+ cls, breaks, closed="right", copy: bool = False, dtype: Optional[Dtype] = None
+ ):
breaks = maybe_convert_platform_interval(breaks)
return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)
@@ -407,7 +419,9 @@ def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
),
}
)
- def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
+ def from_arrays(
+ cls, left, right, closed="right", copy=False, dtype: Optional[Dtype] = None
+ ):
left = maybe_convert_platform_interval(left)
right = maybe_convert_platform_interval(right)
@@ -464,7 +478,9 @@ def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
),
}
)
- def from_tuples(cls, data, closed="right", copy=False, dtype=None):
+ def from_tuples(
+ cls, data, closed="right", copy=False, dtype: Optional[Dtype] = None
+ ):
if len(data):
left, right = [], []
else:
@@ -1277,7 +1293,7 @@ def is_non_overlapping_monotonic(self):
# ---------------------------------------------------------------------
# Conversion
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
"""
Return the IntervalArray's data as a numpy array of Interval
objects (with dtype='object')
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 3cf25847ed3d0..e4a98a54ee94c 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -5,7 +5,7 @@
import numpy as np
from pandas._libs import lib, missing as libmissing
-from pandas._typing import ArrayLike, Dtype, Scalar
+from pandas._typing import ArrayLike, Dtype, NpDtype, Scalar
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly, doc
@@ -147,7 +147,10 @@ def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
return type(self)(~self._data, self._mask)
def to_numpy(
- self, dtype=None, copy: bool = False, na_value: Scalar = lib.no_default
+ self,
+ dtype: Optional[NpDtype] = None,
+ copy: bool = False,
+ na_value: Scalar = lib.no_default,
) -> np.ndarray:
"""
Convert to a NumPy Array.
@@ -257,7 +260,7 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
__array_priority__ = 1000 # higher than ndarray so ops dispatch to us
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
"""
the array interface, return my values
We return an object array here to preserve our scalar values
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index ae131d8a51ba1..9ed6306e5b9bc 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -1,11 +1,11 @@
import numbers
-from typing import Tuple, Type, Union
+from typing import Optional, Tuple, Type, Union
import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin
from pandas._libs import lib
-from pandas._typing import Scalar
+from pandas._typing import Dtype, NpDtype, Scalar
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.dtypes import ExtensionDtype
@@ -38,7 +38,7 @@ class PandasDtype(ExtensionDtype):
_metadata = ("_dtype",)
- def __init__(self, dtype: object):
+ def __init__(self, dtype: Optional[NpDtype]):
self._dtype = np.dtype(dtype)
def __repr__(self) -> str:
@@ -173,7 +173,7 @@ def __init__(self, values: Union[np.ndarray, "PandasArray"], copy: bool = False)
@classmethod
def _from_sequence(
- cls, scalars, *, dtype=None, copy: bool = False
+ cls, scalars, *, dtype: Optional[Dtype] = None, copy: bool = False
) -> "PandasArray":
if isinstance(dtype, PandasDtype):
dtype = dtype._dtype
@@ -200,7 +200,7 @@ def dtype(self) -> PandasDtype:
# ------------------------------------------------------------------------
# NumPy Array Interface
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
return np.asarray(self._ndarray, dtype=dtype)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
@@ -311,7 +311,15 @@ def prod(self, *, axis=None, skipna=True, min_count=0, **kwargs) -> Scalar:
)
return self._wrap_reduction_result(axis, result)
- def mean(self, *, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
+ def mean(
+ self,
+ *,
+ axis=None,
+ dtype: Optional[NpDtype] = None,
+ out=None,
+ keepdims=False,
+ skipna=True,
+ ):
nv.validate_mean((), {"dtype": dtype, "out": out, "keepdims": keepdims})
result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
@@ -326,7 +334,14 @@ def median(
return self._wrap_reduction_result(axis, result)
def std(
- self, *, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True
+ self,
+ *,
+ axis=None,
+ dtype: Optional[NpDtype] = None,
+ out=None,
+ ddof=1,
+ keepdims=False,
+ skipna=True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std"
@@ -335,7 +350,14 @@ def std(
return self._wrap_reduction_result(axis, result)
def var(
- self, *, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True
+ self,
+ *,
+ axis=None,
+ dtype: Optional[NpDtype] = None,
+ out=None,
+ ddof=1,
+ keepdims=False,
+ skipna=True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="var"
@@ -344,7 +366,14 @@ def var(
return self._wrap_reduction_result(axis, result)
def sem(
- self, *, axis=None, dtype=None, out=None, ddof=1, keepdims=False, skipna=True
+ self,
+ *,
+ axis=None,
+ dtype: Optional[NpDtype] = None,
+ out=None,
+ ddof=1,
+ keepdims=False,
+ skipna=True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="sem"
@@ -352,14 +381,30 @@ def sem(
result = nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
return self._wrap_reduction_result(axis, result)
- def kurt(self, *, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
+ def kurt(
+ self,
+ *,
+ axis=None,
+ dtype: Optional[NpDtype] = None,
+ out=None,
+ keepdims=False,
+ skipna=True,
+ ):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="kurt"
)
result = nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
- def skew(self, *, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
+ def skew(
+ self,
+ *,
+ axis=None,
+ dtype: Optional[NpDtype] = None,
+ out=None,
+ keepdims=False,
+ skipna=True,
+ ):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="skew"
)
@@ -370,7 +415,10 @@ def skew(self, *, axis=None, dtype=None, out=None, keepdims=False, skipna=True):
# Additional Methods
def to_numpy(
- self, dtype=None, copy: bool = False, na_value=lib.no_default
+ self,
+ dtype: Optional[NpDtype] = None,
+ copy: bool = False,
+ na_value=lib.no_default,
) -> np.ndarray:
result = np.asarray(self._ndarray, dtype=dtype)
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index e0e40a666896d..e06315fbd4f78 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -26,7 +26,7 @@
get_period_field_arr,
period_asfreq_arr,
)
-from pandas._typing import AnyArrayLike, Dtype
+from pandas._typing import AnyArrayLike, Dtype, NpDtype
from pandas.util._decorators import cache_readonly, doc
from pandas.core.dtypes.common import (
@@ -159,7 +159,7 @@ class PeriodArray(PeriodMixin, dtl.DatelikeOps):
# --------------------------------------------------------------------
# Constructors
- def __init__(self, values, dtype=None, freq=None, copy=False):
+ def __init__(self, values, dtype: Optional[Dtype] = None, freq=None, copy=False):
freq = validate_dtype_freq(dtype, freq)
if freq is not None:
@@ -186,7 +186,10 @@ def __init__(self, values, dtype=None, freq=None, copy=False):
@classmethod
def _simple_new(
- cls, values: np.ndarray, freq: Optional[BaseOffset] = None, dtype=None
+ cls,
+ values: np.ndarray,
+ freq: Optional[BaseOffset] = None,
+ dtype: Optional[Dtype] = None,
) -> "PeriodArray":
# alias for PeriodArray.__init__
assertion_msg = "Should be numpy array of type i8"
@@ -220,7 +223,7 @@ def _from_sequence(
@classmethod
def _from_sequence_of_strings(
- cls, strings, *, dtype=None, copy=False
+ cls, strings, *, dtype: Optional[Dtype] = None, copy=False
) -> "PeriodArray":
return cls._from_sequence(strings, dtype=dtype, copy=copy)
@@ -301,7 +304,7 @@ def freq(self) -> BaseOffset:
"""
return self.dtype.freq
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
if dtype == "i8":
return self.asi8
elif dtype == bool:
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 26dbe5e0dba44..b4d4fd5cc7106 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -4,7 +4,7 @@
from collections import abc
import numbers
import operator
-from typing import Any, Callable, Sequence, Type, TypeVar, Union
+from typing import Any, Callable, Optional, Sequence, Type, TypeVar, Union
import warnings
import numpy as np
@@ -13,7 +13,7 @@
import pandas._libs.sparse as splib
from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
from pandas._libs.tslibs import NaT
-from pandas._typing import Scalar
+from pandas._typing import Dtype, NpDtype, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
@@ -174,7 +174,7 @@ def _sparse_array_op(
return _wrap_result(name, result, index, fill, dtype=result_dtype)
-def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
+def _wrap_result(name, data, sparse_index, fill_value, dtype: Optional[Dtype] = None):
"""
wrap op result to have correct dtype
"""
@@ -281,7 +281,7 @@ def __init__(
index=None,
fill_value=None,
kind="integer",
- dtype=None,
+ dtype: Optional[Dtype] = None,
copy=False,
):
@@ -454,7 +454,7 @@ def from_spmatrix(cls, data):
return cls._simple_new(arr, index, dtype)
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
fill_value = self.fill_value
if self.sp_index.ngaps == 0:
@@ -487,7 +487,7 @@ def __setitem__(self, key, value):
raise TypeError(msg)
@classmethod
- def _from_sequence(cls, scalars, *, dtype=None, copy=False):
+ def _from_sequence(cls, scalars, *, dtype: Optional[Dtype] = None, copy=False):
return cls(scalars, dtype=dtype)
@classmethod
@@ -998,7 +998,7 @@ def _concat_same_type(
return cls(data, sparse_index=sp_index, fill_value=fill_value)
- def astype(self, dtype=None, copy=True):
+ def astype(self, dtype: Optional[Dtype] = None, copy=True):
"""
Change the dtype of a SparseArray.
@@ -1461,7 +1461,9 @@ def _formatter(self, boxed=False):
return None
-def make_sparse(arr: np.ndarray, kind="block", fill_value=None, dtype=None):
+def make_sparse(
+ arr: np.ndarray, kind="block", fill_value=None, dtype: Optional[NpDtype] = None
+):
"""
Convert ndarray to sparse format
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index 74a41a0b64ff8..3d0ac3380ec39 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -1,9 +1,9 @@
-from typing import TYPE_CHECKING, Type, Union
+from typing import TYPE_CHECKING, Optional, Type, Union
import numpy as np
from pandas._libs import lib, missing as libmissing
-from pandas._typing import Scalar
+from pandas._typing import Dtype, Scalar
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype
@@ -206,7 +206,7 @@ def _validate(self):
)
@classmethod
- def _from_sequence(cls, scalars, *, dtype=None, copy=False):
+ def _from_sequence(cls, scalars, *, dtype: Optional[Dtype] = None, copy=False):
if dtype:
assert dtype == "string"
@@ -234,7 +234,9 @@ def _from_sequence(cls, scalars, *, dtype=None, copy=False):
return new_string_array
@classmethod
- def _from_sequence_of_strings(cls, strings, *, dtype=None, copy=False):
+ def _from_sequence_of_strings(
+ cls, strings, *, dtype: Optional[Dtype] = None, copy=False
+ ):
return cls._from_sequence(strings, dtype=dtype, copy=copy)
def __arrow_array__(self, type=None):
@@ -381,7 +383,7 @@ def _cmp_method(self, other, op):
# String methods interface
_str_na_value = StringDtype.na_value
- def _str_map(self, f, na_value=None, dtype=None):
+ def _str_map(self, f, na_value=None, dtype: Optional[Dtype] = None):
from pandas.arrays import BooleanArray, IntegerArray, StringArray
from pandas.core.arrays.string_ import StringDtype
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py
index 3a351bf497662..d37e91e55a9cf 100644
--- a/pandas/core/arrays/string_arrow.py
+++ b/pandas/core/arrays/string_arrow.py
@@ -1,11 +1,12 @@
from __future__ import annotations
from distutils.version import LooseVersion
-from typing import TYPE_CHECKING, Any, Sequence, Type, Union
+from typing import TYPE_CHECKING, Any, Optional, Sequence, Type, Union
import numpy as np
from pandas._libs import lib, missing as libmissing
+from pandas._typing import Dtype, NpDtype
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.base import ExtensionDtype
@@ -203,14 +204,16 @@ def _chk_pyarrow_available(cls) -> None:
raise ImportError(msg)
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy=False):
+ def _from_sequence(cls, scalars, dtype: Optional[Dtype] = None, copy=False):
cls._chk_pyarrow_available()
# convert non-na-likes to str, and nan-likes to ArrowStringDtype.na_value
scalars = lib.ensure_string_array(scalars, copy=False)
return cls(pa.array(scalars, type=pa.string(), from_pandas=True))
@classmethod
- def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
+ def _from_sequence_of_strings(
+ cls, strings, dtype: Optional[Dtype] = None, copy=False
+ ):
return cls._from_sequence(strings, dtype=dtype, copy=copy)
@property
@@ -220,7 +223,7 @@ def dtype(self) -> ArrowStringDtype:
"""
return self._dtype
- def __array__(self, dtype=None) -> np.ndarray:
+ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
"""Correctly construct numpy arrays when passed to `np.asarray()`."""
return self.to_numpy(dtype=dtype)
@@ -229,7 +232,10 @@ def __arrow_array__(self, type=None):
return self._data
def to_numpy(
- self, dtype=None, copy: bool = False, na_value=lib.no_default
+ self,
+ dtype: Optional[NpDtype] = None,
+ copy: bool = False,
+ na_value=lib.no_default,
) -> np.ndarray:
"""
Convert to a NumPy ndarray.
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 55136e0dedcf5..62d5a4d30563b 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -22,6 +22,7 @@
ints_to_pytimedelta,
parse_timedelta_unit,
)
+from pandas._typing import NpDtype
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.cast import astype_td64_unit_conversion
@@ -352,7 +353,7 @@ def sum(
self,
*,
axis=None,
- dtype=None,
+ dtype: Optional[NpDtype] = None,
out=None,
keepdims: bool = False,
initial=None,
@@ -372,7 +373,7 @@ def std(
self,
*,
axis=None,
- dtype=None,
+ dtype: Optional[NpDtype] = None,
out=None,
ddof: int = 1,
keepdims: bool = False,
| Follow on PR for #38808
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38886 | 2021-01-01T22:25:32Z | 2021-01-05T02:02:08Z | 2021-01-05T02:02:08Z | 2021-01-05T02:02:14Z |
TST: Add test_groupby_std_on_nullable_column. | diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 8d7fcbfcfe694..77f0ab1092b4b 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1121,3 +1121,18 @@ def test_groupby_sum_below_mincount_nullable_integer():
result = grouped.sum(min_count=2)
expected = DataFrame({"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx)
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("agg_func", ["std", "var"])
+def test_groupby_std_on_nullable_column(agg_func, any_numeric_dtype):
+ # GH 35516
+ df = DataFrame(
+ {
+ "A": [2, 1, 1, 1, 2, 2, 1],
+ "B": Series(np.full(7, np.nan), dtype=any_numeric_dtype),
+ }
+ )
+ result = df.groupby("A").agg(agg_func).astype("float64")
+ expected = DataFrame([np.nan, np.nan], index=[1, 2], columns=["B"])
+ expected.index.name = "A"
+ tm.assert_frame_equal(result, expected)
| - [x] closes #35516
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38883 | 2021-01-01T20:59:20Z | 2021-01-03T17:12:36Z | null | 2023-05-11T01:20:39Z |
CLN: Use signed integers in khash maps for signed integer keys | diff --git a/pandas/_libs/khash.pxd b/pandas/_libs/khash.pxd
index 0d0c5ae058b21..e9f5766f78435 100644
--- a/pandas/_libs/khash.pxd
+++ b/pandas/_libs/khash.pxd
@@ -16,11 +16,11 @@ from numpy cimport (
cdef extern from "khash_python.h":
const int KHASH_TRACE_DOMAIN
- ctypedef uint32_t khint_t
- ctypedef khint_t khiter_t
+ ctypedef uint32_t khuint_t
+ ctypedef khuint_t khiter_t
ctypedef struct kh_pymap_t:
- khint_t n_buckets, size, n_occupied, upper_bound
+ khuint_t n_buckets, size, n_occupied, upper_bound
uint32_t *flags
PyObject **keys
size_t *vals
@@ -28,15 +28,15 @@ cdef extern from "khash_python.h":
kh_pymap_t* kh_init_pymap()
void kh_destroy_pymap(kh_pymap_t*)
void kh_clear_pymap(kh_pymap_t*)
- khint_t kh_get_pymap(kh_pymap_t*, PyObject*)
- void kh_resize_pymap(kh_pymap_t*, khint_t)
- khint_t kh_put_pymap(kh_pymap_t*, PyObject*, int*)
- void kh_del_pymap(kh_pymap_t*, khint_t)
+ khuint_t kh_get_pymap(kh_pymap_t*, PyObject*)
+ void kh_resize_pymap(kh_pymap_t*, khuint_t)
+ khuint_t kh_put_pymap(kh_pymap_t*, PyObject*, int*)
+ void kh_del_pymap(kh_pymap_t*, khuint_t)
bint kh_exist_pymap(kh_pymap_t*, khiter_t)
ctypedef struct kh_pyset_t:
- khint_t n_buckets, size, n_occupied, upper_bound
+ khuint_t n_buckets, size, n_occupied, upper_bound
uint32_t *flags
PyObject **keys
size_t *vals
@@ -44,17 +44,17 @@ cdef extern from "khash_python.h":
kh_pyset_t* kh_init_pyset()
void kh_destroy_pyset(kh_pyset_t*)
void kh_clear_pyset(kh_pyset_t*)
- khint_t kh_get_pyset(kh_pyset_t*, PyObject*)
- void kh_resize_pyset(kh_pyset_t*, khint_t)
- khint_t kh_put_pyset(kh_pyset_t*, PyObject*, int*)
- void kh_del_pyset(kh_pyset_t*, khint_t)
+ khuint_t kh_get_pyset(kh_pyset_t*, PyObject*)
+ void kh_resize_pyset(kh_pyset_t*, khuint_t)
+ khuint_t kh_put_pyset(kh_pyset_t*, PyObject*, int*)
+ void kh_del_pyset(kh_pyset_t*, khuint_t)
bint kh_exist_pyset(kh_pyset_t*, khiter_t)
ctypedef char* kh_cstr_t
ctypedef struct kh_str_t:
- khint_t n_buckets, size, n_occupied, upper_bound
+ khuint_t n_buckets, size, n_occupied, upper_bound
uint32_t *flags
kh_cstr_t *keys
size_t *vals
@@ -62,10 +62,10 @@ cdef extern from "khash_python.h":
kh_str_t* kh_init_str() nogil
void kh_destroy_str(kh_str_t*) nogil
void kh_clear_str(kh_str_t*) nogil
- khint_t kh_get_str(kh_str_t*, kh_cstr_t) nogil
- void kh_resize_str(kh_str_t*, khint_t) nogil
- khint_t kh_put_str(kh_str_t*, kh_cstr_t, int*) nogil
- void kh_del_str(kh_str_t*, khint_t) nogil
+ khuint_t kh_get_str(kh_str_t*, kh_cstr_t) nogil
+ void kh_resize_str(kh_str_t*, khuint_t) nogil
+ khuint_t kh_put_str(kh_str_t*, kh_cstr_t, int*) nogil
+ void kh_del_str(kh_str_t*, khuint_t) nogil
bint kh_exist_str(kh_str_t*, khiter_t) nogil
@@ -74,16 +74,16 @@ cdef extern from "khash_python.h":
int starts[256]
kh_str_starts_t* kh_init_str_starts() nogil
- khint_t kh_put_str_starts_item(kh_str_starts_t* table, char* key,
- int* ret) nogil
- khint_t kh_get_str_starts_item(kh_str_starts_t* table, char* key) nogil
+ khuint_t kh_put_str_starts_item(kh_str_starts_t* table, char* key,
+ int* ret) nogil
+ khuint_t kh_get_str_starts_item(kh_str_starts_t* table, char* key) nogil
void kh_destroy_str_starts(kh_str_starts_t*) nogil
- void kh_resize_str_starts(kh_str_starts_t*, khint_t) nogil
+ void kh_resize_str_starts(kh_str_starts_t*, khuint_t) nogil
# sweep factorize
ctypedef struct kh_strbox_t:
- khint_t n_buckets, size, n_occupied, upper_bound
+ khuint_t n_buckets, size, n_occupied, upper_bound
uint32_t *flags
kh_cstr_t *keys
PyObject **vals
@@ -91,10 +91,10 @@ cdef extern from "khash_python.h":
kh_strbox_t* kh_init_strbox() nogil
void kh_destroy_strbox(kh_strbox_t*) nogil
void kh_clear_strbox(kh_strbox_t*) nogil
- khint_t kh_get_strbox(kh_strbox_t*, kh_cstr_t) nogil
- void kh_resize_strbox(kh_strbox_t*, khint_t) nogil
- khint_t kh_put_strbox(kh_strbox_t*, kh_cstr_t, int*) nogil
- void kh_del_strbox(kh_strbox_t*, khint_t) nogil
+ khuint_t kh_get_strbox(kh_strbox_t*, kh_cstr_t) nogil
+ void kh_resize_strbox(kh_strbox_t*, khuint_t) nogil
+ khuint_t kh_put_strbox(kh_strbox_t*, kh_cstr_t, int*) nogil
+ void kh_del_strbox(kh_strbox_t*, khuint_t) nogil
bint kh_exist_strbox(kh_strbox_t*, khiter_t) nogil
diff --git a/pandas/_libs/khash_for_primitive_helper.pxi.in b/pandas/_libs/khash_for_primitive_helper.pxi.in
index db8d3e0b19417..9073d87aa91cc 100644
--- a/pandas/_libs/khash_for_primitive_helper.pxi.in
+++ b/pandas/_libs/khash_for_primitive_helper.pxi.in
@@ -24,7 +24,7 @@ primitive_types = [('int64', 'int64_t'),
cdef extern from "khash_python.h":
ctypedef struct kh_{{name}}_t:
- khint_t n_buckets, size, n_occupied, upper_bound
+ khuint_t n_buckets, size, n_occupied, upper_bound
uint32_t *flags
{{c_type}} *keys
size_t *vals
@@ -32,10 +32,10 @@ cdef extern from "khash_python.h":
kh_{{name}}_t* kh_init_{{name}}() nogil
void kh_destroy_{{name}}(kh_{{name}}_t*) nogil
void kh_clear_{{name}}(kh_{{name}}_t*) nogil
- khint_t kh_get_{{name}}(kh_{{name}}_t*, {{c_type}}) nogil
- void kh_resize_{{name}}(kh_{{name}}_t*, khint_t) nogil
- khint_t kh_put_{{name}}(kh_{{name}}_t*, {{c_type}}, int*) nogil
- void kh_del_{{name}}(kh_{{name}}_t*, khint_t) nogil
+ khuint_t kh_get_{{name}}(kh_{{name}}_t*, {{c_type}}) nogil
+ void kh_resize_{{name}}(kh_{{name}}_t*, khuint_t) nogil
+ khuint_t kh_put_{{name}}(kh_{{name}}_t*, {{c_type}}, int*) nogil
+ void kh_del_{{name}}(kh_{{name}}_t*, khuint_t) nogil
bint kh_exist_{{name}}(kh_{{name}}_t*, khiter_t) nogil
diff --git a/pandas/_libs/src/klib/khash.h b/pandas/_libs/src/klib/khash.h
index bb56b2fe2d145..03b11f77580a5 100644
--- a/pandas/_libs/src/klib/khash.h
+++ b/pandas/_libs/src/klib/khash.h
@@ -134,32 +134,39 @@ int main() {
#if UINT_MAX == 0xffffffffu
-typedef unsigned int khint32_t;
+typedef unsigned int khuint32_t;
+typedef signed int khint32_t;
#elif ULONG_MAX == 0xffffffffu
-typedef unsigned long khint32_t;
+typedef unsigned long khuint32_t;
+typedef signed long khint32_t;
#endif
#if ULONG_MAX == ULLONG_MAX
-typedef unsigned long khint64_t;
+typedef unsigned long khuint64_t;
+typedef signed long khint64_t;
#else
-typedef unsigned long long khint64_t;
+typedef unsigned long long khuint64_t;
+typedef signed long long khint64_t;
#endif
#if UINT_MAX == 0xffffu
-typedef unsigned int khint16_t;
+typedef unsigned int khuint16_t;
+typedef signed int khint16_t;
#elif USHRT_MAX == 0xffffu
-typedef unsigned short khint16_t;
+typedef unsigned short khuint16_t;
+typedef signed short khint16_t;
#endif
#if UCHAR_MAX == 0xffu
-typedef unsigned char khint8_t;
+typedef unsigned char khuint8_t;
+typedef signed char khint8_t;
#endif
typedef double khfloat64_t;
typedef float khfloat32_t;
-typedef khint32_t khint_t;
-typedef khint_t khiter_t;
+typedef khuint32_t khuint_t;
+typedef khuint_t khiter_t;
#define __ac_isempty(flag, i) ((flag[i>>5]>>(i&0x1fU))&1)
#define __ac_isdel(flag, i) (0)
@@ -172,15 +179,15 @@ typedef khint_t khiter_t;
// specializations of https://github.com/aappleby/smhasher/blob/master/src/MurmurHash2.cpp
-khint32_t PANDAS_INLINE murmur2_32to32(khint32_t k){
- const khint32_t SEED = 0xc70f6907UL;
+khuint32_t PANDAS_INLINE murmur2_32to32(khuint32_t k){
+ const khuint32_t SEED = 0xc70f6907UL;
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
- const khint32_t M_32 = 0x5bd1e995;
+ const khuint32_t M_32 = 0x5bd1e995;
const int R_32 = 24;
// Initialize the hash to a 'random' value
- khint32_t h = SEED ^ 4;
+ khuint32_t h = SEED ^ 4;
//handle 4 bytes:
k *= M_32;
@@ -204,15 +211,15 @@ khint32_t PANDAS_INLINE murmur2_32to32(khint32_t k){
// - the same case for 32bit and 64bit builds
// - no performance difference could be measured compared to a possible x64-version
-khint32_t PANDAS_INLINE murmur2_32_32to32(khint32_t k1, khint32_t k2){
- const khint32_t SEED = 0xc70f6907UL;
+khuint32_t PANDAS_INLINE murmur2_32_32to32(khuint32_t k1, khuint32_t k2){
+ const khuint32_t SEED = 0xc70f6907UL;
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
- const khint32_t M_32 = 0x5bd1e995;
+ const khuint32_t M_32 = 0x5bd1e995;
const int R_32 = 24;
// Initialize the hash to a 'random' value
- khint32_t h = SEED ^ 4;
+ khuint32_t h = SEED ^ 4;
//handle first 4 bytes:
k1 *= M_32;
@@ -238,9 +245,9 @@ khint32_t PANDAS_INLINE murmur2_32_32to32(khint32_t k1, khint32_t k2){
return h;
}
-khint32_t PANDAS_INLINE murmur2_64to32(khint64_t k){
- khint32_t k1 = (khint32_t)k;
- khint32_t k2 = (khint32_t)(k >> 32);
+khuint32_t PANDAS_INLINE murmur2_64to32(khuint64_t k){
+ khuint32_t k1 = (khuint32_t)k;
+ khuint32_t k2 = (khuint32_t)(k >> 32);
return murmur2_32_32to32(k1, k2);
}
@@ -262,23 +269,23 @@ static const double __ac_HASH_UPPER = 0.77;
#define KHASH_DECLARE(name, khkey_t, khval_t) \
typedef struct { \
- khint_t n_buckets, size, n_occupied, upper_bound; \
- khint32_t *flags; \
+ khuint_t n_buckets, size, n_occupied, upper_bound; \
+ khuint32_t *flags; \
khkey_t *keys; \
khval_t *vals; \
} kh_##name##_t; \
extern kh_##name##_t *kh_init_##name(); \
extern void kh_destroy_##name(kh_##name##_t *h); \
extern void kh_clear_##name(kh_##name##_t *h); \
- extern khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \
- extern void kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \
- extern khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \
- extern void kh_del_##name(kh_##name##_t *h, khint_t x);
+ extern khuint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \
+ extern void kh_resize_##name(kh_##name##_t *h, khuint_t new_n_buckets); \
+ extern khuint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \
+ extern void kh_del_##name(kh_##name##_t *h, khuint_t x);
#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
typedef struct { \
- khint_t n_buckets, size, n_occupied, upper_bound; \
- khint32_t *flags; \
+ khuint_t n_buckets, size, n_occupied, upper_bound; \
+ khuint32_t *flags; \
khkey_t *keys; \
khval_t *vals; \
} kh_##name##_t; \
@@ -296,14 +303,14 @@ static const double __ac_HASH_UPPER = 0.77;
SCOPE void kh_clear_##name(kh_##name##_t *h) \
{ \
if (h && h->flags) { \
- memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khint32_t)); \
+ memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khuint32_t)); \
h->size = h->n_occupied = 0; \
} \
} \
- SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \
+ SCOPE khuint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \
{ \
if (h->n_buckets) { \
- khint_t inc, k, i, last, mask; \
+ khuint_t inc, k, i, last, mask; \
mask = h->n_buckets - 1; \
k = __hash_func(key); i = k & mask; \
inc = __ac_inc(k, mask); last = i; /* inc==1 for linear probing */ \
@@ -314,17 +321,17 @@ static const double __ac_HASH_UPPER = 0.77;
return __ac_iseither(h->flags, i)? h->n_buckets : i; \
} else return 0; \
} \
- SCOPE void kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \
+ SCOPE void kh_resize_##name(kh_##name##_t *h, khuint_t new_n_buckets) \
{ /* This function uses 0.25*n_bucktes bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets. */ \
- khint32_t *new_flags = 0; \
- khint_t j = 1; \
+ khuint32_t *new_flags = 0; \
+ khuint_t j = 1; \
{ \
kroundup32(new_n_buckets); \
if (new_n_buckets < 4) new_n_buckets = 4; \
- if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; /* requested size is too small */ \
+ if (h->size >= (khuint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; /* requested size is too small */ \
else { /* hash table size to be changed (shrink or expand); rehash */ \
- new_flags = (khint32_t*)KHASH_MALLOC(__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
- memset(new_flags, 0xff, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
+ new_flags = (khuint32_t*)KHASH_MALLOC(__ac_fsize(new_n_buckets) * sizeof(khuint32_t)); \
+ memset(new_flags, 0xff, __ac_fsize(new_n_buckets) * sizeof(khuint32_t)); \
if (h->n_buckets < new_n_buckets) { /* expand */ \
h->keys = (khkey_t*)KHASH_REALLOC(h->keys, new_n_buckets * sizeof(khkey_t)); \
if (kh_is_map) h->vals = (khval_t*)KHASH_REALLOC(h->vals, new_n_buckets * sizeof(khval_t)); \
@@ -336,12 +343,12 @@ static const double __ac_HASH_UPPER = 0.77;
if (__ac_iseither(h->flags, j) == 0) { \
khkey_t key = h->keys[j]; \
khval_t val; \
- khint_t new_mask; \
+ khuint_t new_mask; \
new_mask = new_n_buckets - 1; \
if (kh_is_map) val = h->vals[j]; \
__ac_set_isempty_true(h->flags, j); \
while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \
- khint_t inc, k, i; \
+ khuint_t inc, k, i; \
k = __hash_func(key); \
i = k & new_mask; \
inc = __ac_inc(k, new_mask); \
@@ -367,18 +374,18 @@ static const double __ac_HASH_UPPER = 0.77;
h->flags = new_flags; \
h->n_buckets = new_n_buckets; \
h->n_occupied = h->size; \
- h->upper_bound = (khint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \
+ h->upper_bound = (khuint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \
} \
} \
- SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
+ SCOPE khuint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
{ \
- khint_t x; \
+ khuint_t x; \
if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \
if (h->n_buckets > (h->size<<1)) kh_resize_##name(h, h->n_buckets - 1); /* clear "deleted" elements */ \
else kh_resize_##name(h, h->n_buckets + 1); /* expand the hash table */ \
} /* TODO: to implement automatically shrinking; resize() already support shrinking */ \
{ \
- khint_t inc, k, i, site, last, mask = h->n_buckets - 1; \
+ khuint_t inc, k, i, site, last, mask = h->n_buckets - 1; \
x = site = h->n_buckets; k = __hash_func(key); i = k & mask; \
if (__ac_isempty(h->flags, i)) x = i; /* for speed up */ \
else { \
@@ -407,7 +414,7 @@ static const double __ac_HASH_UPPER = 0.77;
} else *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \
return x; \
} \
- SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \
+ SCOPE void kh_del_##name(kh_##name##_t *h, khuint_t x) \
{ \
if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \
__ac_set_isdel_true(h->flags, x); \
@@ -422,20 +429,23 @@ static const double __ac_HASH_UPPER = 0.77;
/*! @function
@abstract Integer hash function
- @param key The integer [khint32_t]
- @return The hash value [khint_t]
+ @param key The integer [khuint32_t]
+ @return The hash value [khuint_t]
*/
-#define kh_int_hash_func(key) (khint32_t)(key)
+#define kh_int_hash_func(key) (khuint32_t)(key)
/*! @function
@abstract Integer comparison function
*/
#define kh_int_hash_equal(a, b) ((a) == (b))
/*! @function
@abstract 64-bit integer hash function
- @param key The integer [khint64_t]
- @return The hash value [khint_t]
+ @param key The integer [khuint64_t]
+ @return The hash value [khuint_t]
*/
-#define kh_int64_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11)
+PANDAS_INLINE khuint_t kh_int64_hash_func(khuint64_t key)
+{
+ return (khuint_t)((key)>>33^(key)^(key)<<11);
+}
/*! @function
@abstract 64-bit integer comparison function
*/
@@ -446,16 +456,16 @@ static const double __ac_HASH_UPPER = 0.77;
@param s Pointer to a null terminated string
@return The hash value
*/
-PANDAS_INLINE khint_t __ac_X31_hash_string(const char *s)
+PANDAS_INLINE khuint_t __ac_X31_hash_string(const char *s)
{
- khint_t h = *s;
+ khuint_t h = *s;
if (h) for (++s ; *s; ++s) h = (h << 5) - h + *s;
return h;
}
/*! @function
@abstract Another interface to const char* hash function
@param key Pointer to a null terminated string [const char*]
- @return The hash value [khint_t]
+ @return The hash value [khuint_t]
*/
#define kh_str_hash_func(key) __ac_X31_hash_string(key)
/*! @function
@@ -463,7 +473,7 @@ PANDAS_INLINE khint_t __ac_X31_hash_string(const char *s)
*/
#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0)
-PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
+PANDAS_INLINE khuint_t __ac_Wang_hash(khuint_t key)
{
key += ~(key << 15);
key ^= (key >> 10);
@@ -473,7 +483,7 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
key ^= (key >> 16);
return key;
}
-#define kh_int_hash_func2(k) __ac_Wang_hash((khint_t)key)
+#define kh_int_hash_func2(k) __ac_Wang_hash((khuint_t)key)
/* --- END OF HASH FUNCTIONS --- */
@@ -510,7 +520,7 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
@abstract Resize a hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
- @param s New size [khint_t]
+ @param s New size [khuint_t]
*/
#define kh_resize(name, h, s) kh_resize_##name(h, s)
@@ -522,7 +532,7 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
@param r Extra return code: 0 if the key is present in the hash table;
1 if the bucket is empty (never used); 2 if the element in
the bucket has been deleted [int*]
- @return Iterator to the inserted element [khint_t]
+ @return Iterator to the inserted element [khuint_t]
*/
#define kh_put(name, h, k, r) kh_put_##name(h, k, r)
@@ -531,7 +541,7 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
@param k Key [type of keys]
- @return Iterator to the found element, or kh_end(h) is the element is absent [khint_t]
+ @return Iterator to the found element, or kh_end(h) is the element is absent [khuint_t]
*/
#define kh_get(name, h, k) kh_get_##name(h, k)
@@ -539,14 +549,14 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
@abstract Remove a key from the hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
- @param k Iterator to the element to be deleted [khint_t]
+ @param k Iterator to the element to be deleted [khuint_t]
*/
#define kh_del(name, h, k) kh_del_##name(h, k)
/*! @function
@abstract Test whether a bucket contains data.
@param h Pointer to the hash table [khash_t(name)*]
- @param x Iterator to the bucket [khint_t]
+ @param x Iterator to the bucket [khuint_t]
@return 1 if containing data; 0 otherwise [int]
*/
#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x)))
@@ -554,7 +564,7 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
/*! @function
@abstract Get key given an iterator
@param h Pointer to the hash table [khash_t(name)*]
- @param x Iterator to the bucket [khint_t]
+ @param x Iterator to the bucket [khuint_t]
@return Key [type of keys]
*/
#define kh_key(h, x) ((h)->keys[x])
@@ -562,7 +572,7 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
/*! @function
@abstract Get value given an iterator
@param h Pointer to the hash table [khash_t(name)*]
- @param x Iterator to the bucket [khint_t]
+ @param x Iterator to the bucket [khuint_t]
@return Value [type of values]
@discussion For hash sets, calling this results in segfault.
*/
@@ -576,28 +586,28 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
/*! @function
@abstract Get the start iterator
@param h Pointer to the hash table [khash_t(name)*]
- @return The start iterator [khint_t]
+ @return The start iterator [khuint_t]
*/
-#define kh_begin(h) (khint_t)(0)
+#define kh_begin(h) (khuint_t)(0)
/*! @function
@abstract Get the end iterator
@param h Pointer to the hash table [khash_t(name)*]
- @return The end iterator [khint_t]
+ @return The end iterator [khuint_t]
*/
#define kh_end(h) ((h)->n_buckets)
/*! @function
@abstract Get the number of elements in the hash table
@param h Pointer to the hash table [khash_t(name)*]
- @return Number of elements in the hash table [khint_t]
+ @return Number of elements in the hash table [khuint_t]
*/
#define kh_size(h) ((h)->size)
/*! @function
@abstract Get the number of buckets in the hash table
@param h Pointer to the hash table [khash_t(name)*]
- @return Number of buckets in the hash table [khint_t]
+ @return Number of buckets in the hash table [khuint_t]
*/
#define kh_n_buckets(h) ((h)->n_buckets)
@@ -615,25 +625,18 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
@param name Name of the hash table [symbol]
@param khval_t Type of values [type]
*/
-
-// we implicitly convert signed int to unsigned int, thus potential overflows
-// for operations (<<,*,+) don't trigger undefined behavior, also >>-operator
-// is implementation defined for signed ints if sign-bit is set.
-// because we never really "get" the keys, there will be no convertion from
-// unsigend int to (signed) int (which would be implementation defined behavior)
-// this holds also for 64-, 16- and 8-bit integers
#define KHASH_MAP_INIT_INT(name, khval_t) \
KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
#define KHASH_MAP_INIT_UINT(name, khval_t) \
- KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
+ KHASH_INIT(name, khuint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
/*! @function
@abstract Instantiate a hash map containing 64-bit integer keys
@param name Name of the hash table [symbol]
*/
#define KHASH_SET_INIT_UINT64(name) \
- KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)
+ KHASH_INIT(name, khuint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)
#define KHASH_SET_INIT_INT64(name) \
KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)
@@ -644,7 +647,7 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
@param khval_t Type of values [type]
*/
#define KHASH_MAP_INIT_UINT64(name, khval_t) \
- KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)
+ KHASH_INIT(name, khuint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)
#define KHASH_MAP_INIT_INT64(name, khval_t) \
KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)
@@ -658,7 +661,7 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
KHASH_INIT(name, khint16_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
#define KHASH_MAP_INIT_UINT16(name, khval_t) \
- KHASH_INIT(name, khint16_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
+ KHASH_INIT(name, khuint16_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
/*! @function
@abstract Instantiate a hash map containing 8bit-integer keys
@@ -669,7 +672,7 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
KHASH_INIT(name, khint8_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
#define KHASH_MAP_INIT_UINT8(name, khval_t) \
- KHASH_INIT(name, khint8_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
+ KHASH_INIT(name, khuint8_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
diff --git a/pandas/_libs/src/klib/khash_python.h b/pandas/_libs/src/klib/khash_python.h
index 8e4e61b4f3077..c67eff21a1ab1 100644
--- a/pandas/_libs/src/klib/khash_python.h
+++ b/pandas/_libs/src/klib/khash_python.h
@@ -75,14 +75,14 @@ void traced_free(void* ptr){
// predisposed to superlinear running times (see GH 36729 for comparison)
-khint64_t PANDAS_INLINE asint64(double key) {
- khint64_t val;
+khuint64_t PANDAS_INLINE asuint64(double key) {
+ khuint64_t val;
memcpy(&val, &key, sizeof(double));
return val;
}
-khint32_t PANDAS_INLINE asint32(float key) {
- khint32_t val;
+khuint32_t PANDAS_INLINE asuint32(float key) {
+ khuint32_t val;
memcpy(&val, &key, sizeof(float));
return val;
}
@@ -90,7 +90,7 @@ khint32_t PANDAS_INLINE asint32(float key) {
#define ZERO_HASH 0
#define NAN_HASH 0
-khint32_t PANDAS_INLINE kh_float64_hash_func(double val){
+khuint32_t PANDAS_INLINE kh_float64_hash_func(double val){
// 0.0 and -0.0 should have the same hash:
if (val == 0.0){
return ZERO_HASH;
@@ -99,11 +99,11 @@ khint32_t PANDAS_INLINE kh_float64_hash_func(double val){
if ( val!=val ){
return NAN_HASH;
}
- khint64_t as_int = asint64(val);
+ khuint64_t as_int = asuint64(val);
return murmur2_64to32(as_int);
}
-khint32_t PANDAS_INLINE kh_float32_hash_func(float val){
+khuint32_t PANDAS_INLINE kh_float32_hash_func(float val){
// 0.0 and -0.0 should have the same hash:
if (val == 0.0f){
return ZERO_HASH;
@@ -112,7 +112,7 @@ khint32_t PANDAS_INLINE kh_float32_hash_func(float val){
if ( val!=val ){
return NAN_HASH;
}
- khint32_t as_int = asint32(val);
+ khuint32_t as_int = asuint32(val);
return murmur2_32to32(as_int);
}
@@ -186,15 +186,15 @@ p_kh_str_starts_t PANDAS_INLINE kh_init_str_starts(void) {
return result;
}
-khint_t PANDAS_INLINE kh_put_str_starts_item(kh_str_starts_t* table, char* key, int* ret) {
- khint_t result = kh_put_str(table->table, key, ret);
+khuint_t PANDAS_INLINE kh_put_str_starts_item(kh_str_starts_t* table, char* key, int* ret) {
+ khuint_t result = kh_put_str(table->table, key, ret);
if (*ret != 0) {
table->starts[(unsigned char)key[0]] = 1;
}
return result;
}
-khint_t PANDAS_INLINE kh_get_str_starts_item(const kh_str_starts_t* table, const char* key) {
+khuint_t PANDAS_INLINE kh_get_str_starts_item(const kh_str_starts_t* table, const char* key) {
unsigned char ch = *key;
if (table->starts[ch]) {
if (ch == '\0' || kh_get_str(table->table, key) != table->table->n_buckets) return 1;
@@ -207,6 +207,6 @@ void PANDAS_INLINE kh_destroy_str_starts(kh_str_starts_t* table) {
KHASH_FREE(table);
}
-void PANDAS_INLINE kh_resize_str_starts(kh_str_starts_t* table, khint_t val) {
+void PANDAS_INLINE kh_resize_str_starts(kh_str_starts_t* table, khuint_t val) {
kh_resize_str(table->table, val);
}
|
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
The goal of this PR is to make this comment obsolete: https://github.com/pandas-dev/pandas/blob/1fc5efd76ea1f85979fa2364a292d85482e338aa/pandas/_libs/src/klib/khash.h#L619-L624
see also [this discussion]( https://github.com/pandas-dev/pandas/pull/37920#discussion_r527191378) for even more details.
The issue is:
- the current way (inherited from khash-original) is too clever (and suprising)
- it is wrong as the (unsigned) keys are converted back to signed values (which is implementation defined behavior) for example here: https://github.com/pandas-dev/pandas/blob/1fc5efd76ea1f85979fa2364a292d85482e338aa/pandas/_libs/hashtable_func_helper.pxi.in#L110
Using signed integer, one must take into account that hash-function with signed integers might have undefined/implementation defined behavior, thus we cast signed integers to unsigned counterpart in the (64bit-) hash function now. | https://api.github.com/repos/pandas-dev/pandas/pulls/38882 | 2021-01-01T20:56:10Z | 2021-01-03T16:56:24Z | 2021-01-03T16:56:24Z | 2021-01-03T16:56:28Z |
TST: strictly xfail mid-test | diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index 72637400ff023..b66e1137830d8 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -158,10 +158,12 @@ def test_transform_broadcast(tsframe, ts):
assert_fp_equal(res.xs(idx), agged[idx])
-def test_transform_axis_1(transformation_func):
+def test_transform_axis_1(request, transformation_func):
# GH 36308
if transformation_func == "tshift":
- pytest.xfail("tshift is deprecated")
+ request.applymarker(pytest.mark.xfail(reason="tshift is deprecated"))
+ if transformation_func == "fillna":
+ request.applymarker(pytest.mark.xfail(reason="whoops, this works"))
args = ("ffill",) if transformation_func == "fillna" else ()
df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"])
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Using `pytest.xfail` when a test is running will immediately stop the test and will not xpass if test would otherwise succeed. It doesn't seem to be documented anywhere, but an alternative would be to use the request fixture. In this demo, I've added an xfail that runs successfully, making the test fail (xpass).
If this looks like a good idea, I can make a tracking issue to replace `pytest.xfail` with the method used here.
| https://api.github.com/repos/pandas-dev/pandas/pulls/38881 | 2021-01-01T19:07:34Z | 2021-01-02T15:41:46Z | null | 2021-01-02T15:41:53Z |
TST: add missing loc label indexing test | diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 89315b16937b1..6c5cd0f335faa 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -56,9 +56,13 @@ def test_loc_getitem_label_out_of_range(self):
self.check_result("loc", 20, typs=["floats"], axes=0, fails=KeyError)
def test_loc_getitem_label_list(self):
- # TODO: test something here?
# list of labels
- pass
+ self.check_result(
+ "loc", [0, 1, 2], typs=["ints", "uints", "floats"], fails=KeyError
+ )
+ self.check_result(
+ "loc", [1, 3.0, "A"], typs=["ints", "uints", "floats"], fails=KeyError
+ )
def test_loc_getitem_label_list_with_missing(self):
self.check_result("loc", [0, 1, 2], typs=["empty"], fails=KeyError)
| - [x] xref #38824
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38880 | 2021-01-01T19:06:49Z | 2021-01-03T17:17:59Z | 2021-01-03T17:17:59Z | 2023-09-21T16:53:33Z |
BUG: silently ignoring dtype kwarg in Index.__new__ | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index 560f4fccd44fc..2b80fcdcdd0be 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -313,7 +313,7 @@ ExtensionArray
Other
^^^^^
-
+- Bug in :class:`Index` constructor sometimes silently ignorning a a specified ``dtype`` (:issue:`38879`)
-
-
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 5869b2cf22516..46aff11835cec 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1529,6 +1529,19 @@ def is_extension_array_dtype(arr_or_dtype) -> bool:
return isinstance(dtype, ExtensionDtype) or registry.find(dtype) is not None
+def is_ea_or_datetimelike_dtype(dtype: Optional[DtypeObj]) -> bool:
+ """
+ Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype.
+
+ Notes
+ -----
+ Checks only for dtype objects, not dtype-castable strings or types.
+ """
+ return isinstance(dtype, ExtensionDtype) or (
+ isinstance(dtype, np.dtype) and dtype.kind in ["m", "M"]
+ )
+
+
def is_complex_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a complex dtype.
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 17ed42a188b4e..b0c89000a53a9 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -44,8 +44,8 @@
ensure_platform_int,
is_bool_dtype,
is_categorical_dtype,
- is_datetime64_any_dtype,
is_dtype_equal,
+ is_ea_or_datetimelike_dtype,
is_extension_array_dtype,
is_float,
is_float_dtype,
@@ -56,10 +56,8 @@
is_iterator,
is_list_like,
is_object_dtype,
- is_period_dtype,
is_scalar,
is_signed_integer_dtype,
- is_timedelta64_dtype,
is_unsigned_integer_dtype,
needs_i8_conversion,
pandas_dtype,
@@ -69,6 +67,7 @@
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
+ ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
@@ -87,6 +86,7 @@
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.datetimes import tz_to_dtype, validate_tz_from_dtype
+from pandas.core.arrays.sparse import SparseDtype
from pandas.core.base import IndexOpsMixin, PandasObject
import pandas.core.common as com
from pandas.core.construction import extract_array
@@ -286,44 +286,32 @@ def __new__(
# range
if isinstance(data, RangeIndex):
- return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
+ result = RangeIndex(start=data, copy=copy, name=name)
+ if dtype is not None:
+ return result.astype(dtype, copy=False)
+ return result
elif isinstance(data, range):
- return RangeIndex.from_range(data, dtype=dtype, name=name)
-
- # categorical
- elif is_categorical_dtype(data_dtype) or is_categorical_dtype(dtype):
- # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
- from pandas.core.indexes.category import CategoricalIndex
-
- return _maybe_asobject(dtype, CategoricalIndex, data, copy, name, **kwargs)
-
- # interval
- elif is_interval_dtype(data_dtype) or is_interval_dtype(dtype):
- # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
- from pandas.core.indexes.interval import IntervalIndex
-
- return _maybe_asobject(dtype, IntervalIndex, data, copy, name, **kwargs)
-
- elif is_datetime64_any_dtype(data_dtype) or is_datetime64_any_dtype(dtype):
- # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
- from pandas import DatetimeIndex
-
- return _maybe_asobject(dtype, DatetimeIndex, data, copy, name, **kwargs)
-
- elif is_timedelta64_dtype(data_dtype) or is_timedelta64_dtype(dtype):
- # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
- from pandas import TimedeltaIndex
-
- return _maybe_asobject(dtype, TimedeltaIndex, data, copy, name, **kwargs)
-
- elif is_period_dtype(data_dtype) or is_period_dtype(dtype):
- # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
- from pandas import PeriodIndex
+ result = RangeIndex.from_range(data, name=name)
+ if dtype is not None:
+ return result.astype(dtype, copy=False)
+ return result
- return _maybe_asobject(dtype, PeriodIndex, data, copy, name, **kwargs)
+ if is_ea_or_datetimelike_dtype(dtype):
+ # non-EA dtype indexes have special casting logic, so we punt here
+ klass = cls._dtype_to_subclass(dtype)
+ if klass is not Index:
+ return klass(data, dtype=dtype, copy=copy, name=name, **kwargs)
+
+ if is_ea_or_datetimelike_dtype(data_dtype):
+ klass = cls._dtype_to_subclass(data_dtype)
+ if klass is not Index:
+ result = klass(data, copy=copy, name=name, **kwargs)
+ if dtype is not None:
+ return result.astype(dtype, copy=False)
+ return result
# extension dtype
- elif is_extension_array_dtype(data_dtype) or is_extension_array_dtype(dtype):
+ if is_extension_array_dtype(data_dtype) or is_extension_array_dtype(dtype):
if not (dtype is None or is_object_dtype(dtype)):
# coerce to the provided dtype
ea_cls = dtype.construct_array_type()
@@ -407,26 +395,38 @@ def _ensure_array(cls, data, dtype, copy: bool):
def _dtype_to_subclass(cls, dtype: DtypeObj):
# Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
- if isinstance(dtype, DatetimeTZDtype) or dtype == np.dtype("M8[ns]"):
+ if isinstance(dtype, ExtensionDtype):
+ if isinstance(dtype, DatetimeTZDtype):
+ from pandas import DatetimeIndex
+
+ return DatetimeIndex
+ elif isinstance(dtype, CategoricalDtype):
+ from pandas import CategoricalIndex
+
+ return CategoricalIndex
+ elif isinstance(dtype, IntervalDtype):
+ from pandas import IntervalIndex
+
+ return IntervalIndex
+ elif isinstance(dtype, PeriodDtype):
+ from pandas import PeriodIndex
+
+ return PeriodIndex
+
+ elif isinstance(dtype, SparseDtype):
+ return cls._dtype_to_subclass(dtype.subtype)
+
+ return Index
+
+ if dtype.kind == "M":
from pandas import DatetimeIndex
return DatetimeIndex
- elif dtype == "m8[ns]":
+
+ elif dtype.kind == "m":
from pandas import TimedeltaIndex
return TimedeltaIndex
- elif isinstance(dtype, CategoricalDtype):
- from pandas import CategoricalIndex
-
- return CategoricalIndex
- elif isinstance(dtype, IntervalDtype):
- from pandas import IntervalIndex
-
- return IntervalIndex
- elif isinstance(dtype, PeriodDtype):
- from pandas import PeriodIndex
-
- return PeriodIndex
elif is_float_dtype(dtype):
from pandas import Float64Index
@@ -445,6 +445,9 @@ def _dtype_to_subclass(cls, dtype: DtypeObj):
# NB: assuming away MultiIndex
return Index
+ elif issubclass(dtype.type, (str, bool, np.bool_)):
+ return Index
+
raise NotImplementedError(dtype)
"""
@@ -6253,43 +6256,6 @@ def _try_convert_to_int_array(
raise ValueError
-def _maybe_asobject(dtype, klass, data, copy: bool, name: Label, **kwargs):
- """
- If an object dtype was specified, create the non-object Index
- and then convert it to object.
-
- Parameters
- ----------
- dtype : np.dtype, ExtensionDtype, str
- klass : Index subclass
- data : list-like
- copy : bool
- name : hashable
- **kwargs
-
- Returns
- -------
- Index
-
- Notes
- -----
- We assume that calling .astype(object) on this klass will make a copy.
- """
-
- # GH#23524 passing `dtype=object` to DatetimeIndex is invalid,
- # will raise in the where `data` is already tz-aware. So
- # we leave it out of this step and cast to object-dtype after
- # the DatetimeIndex construction.
-
- if is_dtype_equal(_o_dtype, dtype):
- # Note we can pass copy=False because the .astype below
- # will always make a copy
- index = klass(data, copy=False, name=name, **kwargs)
- return index.astype(object)
-
- return klass(data, dtype=dtype, copy=copy, name=name, **kwargs)
-
-
def get_unanimous_names(*indexes: Index) -> Tuple[Label, ...]:
"""
Return common name if all indices agree, otherwise None (level-by-level).
diff --git a/pandas/tests/indexes/ranges/test_constructors.py b/pandas/tests/indexes/ranges/test_constructors.py
index 7dd893bd16720..f83c885a7850b 100644
--- a/pandas/tests/indexes/ranges/test_constructors.py
+++ b/pandas/tests/indexes/ranges/test_constructors.py
@@ -114,11 +114,6 @@ def test_constructor_range(self):
expected = RangeIndex(1, 5, 2)
tm.assert_index_equal(result, expected, exact=True)
- with pytest.raises(
- ValueError,
- match="Incorrect `dtype` passed: expected signed integer, received float64",
- ):
- Index(range(1, 5, 2), dtype="float64")
msg = r"^from_range\(\) got an unexpected keyword argument"
with pytest.raises(TypeError, match=msg):
RangeIndex.from_range(range(10), copy=True)
diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py
index c8f580babc0b2..de0850d37034d 100644
--- a/pandas/tests/indexes/test_index_new.py
+++ b/pandas/tests/indexes/test_index_new.py
@@ -8,10 +8,12 @@
from pandas import (
NA,
+ Categorical,
CategoricalIndex,
DatetimeIndex,
Index,
Int64Index,
+ IntervalIndex,
MultiIndex,
NaT,
PeriodIndex,
@@ -19,7 +21,9 @@
TimedeltaIndex,
Timestamp,
UInt64Index,
+ date_range,
period_range,
+ timedelta_range,
)
import pandas._testing as tm
@@ -122,6 +126,80 @@ def test_constructor_mixed_nat_objs_infers_object(self, swap_objs):
tm.assert_index_equal(Index(np.array(data, dtype=object)), expected)
+class TestDtypeEnforced:
+ # check we don't silently ignore the dtype keyword
+
+ @pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"])
+ def test_constructor_range_values_mismatched_dtype(self, dtype):
+ rng = Index(range(5))
+
+ result = Index(rng, dtype=dtype)
+ assert result.dtype == dtype
+
+ result = Index(range(5), dtype=dtype)
+ assert result.dtype == dtype
+
+ @pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"])
+ def test_constructor_categorical_values_mismatched_non_ea_dtype(self, dtype):
+ cat = Categorical([1, 2, 3])
+
+ result = Index(cat, dtype=dtype)
+ assert result.dtype == dtype
+
+ def test_constructor_categorical_values_mismatched_dtype(self):
+ dti = date_range("2016-01-01", periods=3)
+ cat = Categorical(dti)
+ result = Index(cat, dti.dtype)
+ tm.assert_index_equal(result, dti)
+
+ dti2 = dti.tz_localize("Asia/Tokyo")
+ cat2 = Categorical(dti2)
+ result = Index(cat2, dti2.dtype)
+ tm.assert_index_equal(result, dti2)
+
+ ii = IntervalIndex.from_breaks(range(5))
+ cat3 = Categorical(ii)
+ result = Index(cat3, dtype=ii.dtype)
+ tm.assert_index_equal(result, ii)
+
+ def test_constructor_ea_values_mismatched_categorical_dtype(self):
+ dti = date_range("2016-01-01", periods=3)
+ result = Index(dti, dtype="category")
+ expected = CategoricalIndex(dti)
+ tm.assert_index_equal(result, expected)
+
+ dti2 = date_range("2016-01-01", periods=3, tz="US/Pacific")
+ result = Index(dti2, dtype="category")
+ expected = CategoricalIndex(dti2)
+ tm.assert_index_equal(result, expected)
+
+ def test_constructor_period_values_mismatched_dtype(self):
+ pi = period_range("2016-01-01", periods=3, freq="D")
+ result = Index(pi, dtype="category")
+ expected = CategoricalIndex(pi)
+ tm.assert_index_equal(result, expected)
+
+ def test_constructor_timedelta64_values_mismatched_dtype(self):
+ # check we don't silently ignore the dtype keyword
+ tdi = timedelta_range("4 Days", periods=5)
+ result = Index(tdi, dtype="category")
+ expected = CategoricalIndex(tdi)
+ tm.assert_index_equal(result, expected)
+
+ def test_constructor_interval_values_mismatched_dtype(self):
+ dti = date_range("2016-01-01", periods=3)
+ ii = IntervalIndex.from_breaks(dti)
+ result = Index(ii, dtype="category")
+ expected = CategoricalIndex(ii)
+ tm.assert_index_equal(result, expected)
+
+ def test_constructor_datetime64_values_mismatched_period_dtype(self):
+ dti = date_range("2016-01-01", periods=3)
+ result = Index(dti, dtype="Period[D]")
+ expected = dti.to_period("D")
+ tm.assert_index_equal(result, expected)
+
+
class TestIndexConstructorUnwrapping:
# Test passing different arraylike values to pd.Index
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
ATM we handle object but not others, xref #21311 | https://api.github.com/repos/pandas-dev/pandas/pulls/38879 | 2021-01-01T18:37:36Z | 2021-01-01T23:02:18Z | 2021-01-01T23:02:18Z | 2021-01-02T01:37:47Z |
TST: GH30999 TESTING PR do not merge or review | diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py
index 9fc3ed4800d09..f5103564bb7eb 100644
--- a/pandas/tests/computation/test_compat.py
+++ b/pandas/tests/computation/test_compat.py
@@ -41,8 +41,9 @@ def testit():
pytest.skip("no numexpr")
else:
if LooseVersion(ne.__version__) < LooseVersion(VERSIONS["numexpr"]):
- with pytest.raises(ImportError):
- testit()
+ # TODO comment this back in once we know the exception message
+ # with pytest.raises(ImportError):
+ testit()
else:
testit()
else:
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 52869f3f2fd42..16e6ce9389eda 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -2755,9 +2755,9 @@ def clean_up(test_table_to_drop):
@pytest.mark.single
@pytest.mark.db
-@pytest.mark.skip(
- reason="gh-13611: there is no support for MySQL if SQLAlchemy is not installed"
-)
+# @pytest.mark.skip(
+# reason="gh-13611: there is no support for MySQL if SQLAlchemy is not installed"
+# )
class TestXMySQL(MySQLMixIn):
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
@@ -2896,8 +2896,9 @@ def test_execute_fail(self):
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
- with pytest.raises(Exception):
- sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
+ # TODO comment this back in once we know the exception message
+ # with pytest.raises(Exception):
+ sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self, request, datapath):
drop_sql = "DROP TABLE IF EXISTS test"
@@ -2917,8 +2918,9 @@ def test_execute_closed_connection(self, request, datapath):
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
- with pytest.raises(Exception):
- tquery("select * from test", con=self.conn)
+ # TODO comment this back in once we know the exception message
+ # with pytest.raises(Exception):
+ tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
self.setup_method(request, datapath)
| There are 3 tests that don't run on my dev machine even with tweaks. They should fail in the CI and I can get the messages from there. So for now this is just a PR designed to run (and fail) the CI pipelines.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38876 | 2021-01-01T09:47:17Z | 2021-01-03T10:31:13Z | null | 2021-01-03T10:31:13Z |
TST: Series.update with categorical | diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 8ba8562affb67..410731820dc73 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -218,6 +218,12 @@ def test_repr_range_categories(self):
expected = "CategoricalDtype(categories=range(0, 3), ordered=False)"
assert result == expected
+ def test_update_dtype(self):
+ # GH 27338
+ result = CategoricalDtype(["a"]).update_dtype(Categorical(["b"], ordered=True))
+ expected = CategoricalDtype(["b"], ordered=True)
+ assert result == expected
+
class TestDatetimeTZDtype(Base):
@pytest.fixture
diff --git a/pandas/tests/series/methods/test_update.py b/pandas/tests/series/methods/test_update.py
index d00a4299cb690..51760c451ebca 100644
--- a/pandas/tests/series/methods/test_update.py
+++ b/pandas/tests/series/methods/test_update.py
@@ -108,3 +108,13 @@ def test_update_from_non_series(self, series, other, expected):
def test_update_extension_array_series(self, result, target, expected):
result.update(target)
tm.assert_series_equal(result, expected)
+
+ def test_update_with_categorical_type(self):
+ # GH 25744
+ dtype = CategoricalDtype(["a", "b", "c", "d"])
+ s1 = Series(["a", "b", "c"], index=[1, 2, 3], dtype=dtype)
+ s2 = Series(["b", "a"], index=[1, 2], dtype=dtype)
+ s1.update(s2)
+ result = s1
+ expected = Series(["b", "a", "c"], index=[1, 2, 3], dtype=dtype)
+ tm.assert_series_equal(result, expected)
| - [ ] closes #25744, closes #27338
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38873 | 2021-01-01T08:35:16Z | 2021-01-03T17:10:42Z | 2021-01-03T17:10:41Z | 2021-01-03T17:10:45Z |
TST: move generic/methods/ files to frame/methods/ | diff --git a/doc/source/development/test_writing.rst b/doc/source/development/test_writing.rst
index d9e24bb76eed8..76eae505471b7 100644
--- a/doc/source/development/test_writing.rst
+++ b/doc/source/development/test_writing.rst
@@ -149,13 +149,6 @@ be located.
``frame_or_series`` fixture, by convention it goes in the
``tests.frame`` file.
- - tests.generic.methods.test_mymethod
-
- .. note::
-
- The generic/methods/ directory is only for methods with tests
- that are fully parametrized over Series/DataFrame
-
7. Is your test for an Index method, not depending on Series/DataFrame?
This test likely belongs in one of:
diff --git a/pandas/tests/generic/methods/test_dot.py b/pandas/tests/frame/methods/test_dot.py
similarity index 100%
rename from pandas/tests/generic/methods/test_dot.py
rename to pandas/tests/frame/methods/test_dot.py
diff --git a/pandas/tests/generic/methods/test_first_valid_index.py b/pandas/tests/frame/methods/test_first_valid_index.py
similarity index 100%
rename from pandas/tests/generic/methods/test_first_valid_index.py
rename to pandas/tests/frame/methods/test_first_valid_index.py
diff --git a/pandas/tests/generic/methods/test_pipe.py b/pandas/tests/frame/methods/test_pipe.py
similarity index 100%
rename from pandas/tests/generic/methods/test_pipe.py
rename to pandas/tests/frame/methods/test_pipe.py
diff --git a/pandas/tests/generic/methods/test_reorder_levels.py b/pandas/tests/frame/methods/test_reorder_levels.py
similarity index 100%
rename from pandas/tests/generic/methods/test_reorder_levels.py
rename to pandas/tests/frame/methods/test_reorder_levels.py
diff --git a/pandas/tests/generic/methods/test_sample.py b/pandas/tests/frame/methods/test_sample.py
similarity index 100%
rename from pandas/tests/generic/methods/test_sample.py
rename to pandas/tests/frame/methods/test_sample.py
diff --git a/pandas/tests/generic/methods/test_set_axis.py b/pandas/tests/frame/methods/test_set_axis.py
similarity index 100%
rename from pandas/tests/generic/methods/test_set_axis.py
rename to pandas/tests/frame/methods/test_set_axis.py
diff --git a/pandas/tests/frame/methods/test_to_records.py b/pandas/tests/frame/methods/test_to_records.py
index e83882be9c680..2c96cf291c154 100644
--- a/pandas/tests/frame/methods/test_to_records.py
+++ b/pandas/tests/frame/methods/test_to_records.py
@@ -15,6 +15,15 @@
class TestDataFrameToRecords:
+ def test_to_records_timeseries(self):
+ index = date_range("1/1/2000", periods=10)
+ df = DataFrame(np.random.randn(10, 3), index=index, columns=["a", "b", "c"])
+
+ result = df.to_records()
+ assert result["index"].dtype == "M8[ns]"
+
+ result = df.to_records(index=False)
+
def test_to_records_dt64(self):
df = DataFrame(
[["one", "two", "three"], ["four", "five", "six"]],
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index a16dca256541d..4d57b43df2387 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2221,15 +2221,6 @@ def test_frame_datetime64_mixed_index_ctor_1681(self):
d = DataFrame({"A": "foo", "B": ts}, index=dr)
assert d["B"].isna().all()
- def test_frame_timeseries_to_records(self):
- index = date_range("1/1/2000", periods=10)
- df = DataFrame(np.random.randn(10, 3), index=index, columns=["a", "b", "c"])
-
- result = df.to_records()
- result["index"].dtype == "M8[ns]"
-
- result = df.to_records(index=False)
-
def test_frame_timeseries_column(self):
# GH19157
dr = date_range(start="20130101T10:00:00", periods=3, freq="T", tz="US/Eastern")
diff --git a/pandas/tests/generic/methods/__init__.py b/pandas/tests/generic/methods/__init__.py
deleted file mode 100644
index 5d18f97b8a55e..0000000000000
--- a/pandas/tests/generic/methods/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""
-Tests for methods shared by DataFrame and Series.
-"""
| https://api.github.com/repos/pandas-dev/pandas/pulls/38871 | 2021-01-01T03:04:28Z | 2021-01-01T20:38:59Z | 2021-01-01T20:38:59Z | 2021-01-01T21:11:38Z | |
TST/REF: implement tests.frame.constructors | diff --git a/pandas/tests/frame/constructors/__init__.py b/pandas/tests/frame/constructors/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/frame/constructors/test_from_dict.py b/pandas/tests/frame/constructors/test_from_dict.py
new file mode 100644
index 0000000000000..6c9e6751fd9a4
--- /dev/null
+++ b/pandas/tests/frame/constructors/test_from_dict.py
@@ -0,0 +1,186 @@
+from collections import OrderedDict
+
+import numpy as np
+import pytest
+
+from pandas import DataFrame, Index, MultiIndex, Series
+import pandas._testing as tm
+from pandas.core.construction import create_series_with_explicit_dtype
+
+
+class TestFromDict:
+ # Note: these tests are specific to the from_dict method, not for
+ # passing dictionaries to DataFrame.__init__
+
+ def test_from_dict_scalars_requires_index(self):
+ msg = "If using all scalar values, you must pass an index"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)]))
+
+ def test_constructor_list_of_odicts(self):
+ data = [
+ OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
+ OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
+ OrderedDict([["a", 1.5], ["d", 6]]),
+ OrderedDict(),
+ OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
+ OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
+ ]
+
+ result = DataFrame(data)
+ expected = DataFrame.from_dict(
+ dict(zip(range(len(data)), data)), orient="index"
+ )
+ tm.assert_frame_equal(result, expected.reindex(result.index))
+
+ def test_constructor_single_row(self):
+ data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])]
+
+ result = DataFrame(data)
+ expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex(
+ result.index
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_constructor_list_of_series(self):
+ data = [
+ OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
+ OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
+ ]
+ sdict = OrderedDict(zip(["x", "y"], data))
+ idx = Index(["a", "b", "c"])
+
+ # all named
+ data2 = [
+ Series([1.5, 3, 4], idx, dtype="O", name="x"),
+ Series([1.5, 3, 6], idx, name="y"),
+ ]
+ result = DataFrame(data2)
+ expected = DataFrame.from_dict(sdict, orient="index")
+ tm.assert_frame_equal(result, expected)
+
+ # some unnamed
+ data2 = [
+ Series([1.5, 3, 4], idx, dtype="O", name="x"),
+ Series([1.5, 3, 6], idx),
+ ]
+ result = DataFrame(data2)
+
+ sdict = OrderedDict(zip(["x", "Unnamed 0"], data))
+ expected = DataFrame.from_dict(sdict, orient="index")
+ tm.assert_frame_equal(result, expected)
+
+ # none named
+ data = [
+ OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
+ OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
+ OrderedDict([["a", 1.5], ["d", 6]]),
+ OrderedDict(),
+ OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
+ OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
+ ]
+ data = [
+ create_series_with_explicit_dtype(d, dtype_if_empty=object) for d in data
+ ]
+
+ result = DataFrame(data)
+ sdict = OrderedDict(zip(range(len(data)), data))
+ expected = DataFrame.from_dict(sdict, orient="index")
+ tm.assert_frame_equal(result, expected.reindex(result.index))
+
+ result2 = DataFrame(data, index=np.arange(6))
+ tm.assert_frame_equal(result, result2)
+
+ result = DataFrame([Series(dtype=object)])
+ expected = DataFrame(index=[0])
+ tm.assert_frame_equal(result, expected)
+
+ data = [
+ OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
+ OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
+ ]
+ sdict = OrderedDict(zip(range(len(data)), data))
+
+ idx = Index(["a", "b", "c"])
+ data2 = [Series([1.5, 3, 4], idx, dtype="O"), Series([1.5, 3, 6], idx)]
+ result = DataFrame(data2)
+ expected = DataFrame.from_dict(sdict, orient="index")
+ tm.assert_frame_equal(result, expected)
+
+ def test_constructor_orient(self, float_string_frame):
+ data_dict = float_string_frame.T._series
+ recons = DataFrame.from_dict(data_dict, orient="index")
+ expected = float_string_frame.reindex(index=recons.index)
+ tm.assert_frame_equal(recons, expected)
+
+ # dict of sequence
+ a = {"hi": [32, 3, 3], "there": [3, 5, 3]}
+ rs = DataFrame.from_dict(a, orient="index")
+ xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
+ tm.assert_frame_equal(rs, xp)
+
+ def test_constructor_from_ordered_dict(self):
+ # GH#8425
+ a = OrderedDict(
+ [
+ ("one", OrderedDict([("col_a", "foo1"), ("col_b", "bar1")])),
+ ("two", OrderedDict([("col_a", "foo2"), ("col_b", "bar2")])),
+ ("three", OrderedDict([("col_a", "foo3"), ("col_b", "bar3")])),
+ ]
+ )
+ expected = DataFrame.from_dict(a, orient="columns").T
+ result = DataFrame.from_dict(a, orient="index")
+ tm.assert_frame_equal(result, expected)
+
+ def test_from_dict_columns_parameter(self):
+ # GH#18529
+ # Test new columns parameter for from_dict that was added to make
+ # from_items(..., orient='index', columns=[...]) easier to replicate
+ result = DataFrame.from_dict(
+ OrderedDict([("A", [1, 2]), ("B", [4, 5])]),
+ orient="index",
+ columns=["one", "two"],
+ )
+ expected = DataFrame([[1, 2], [4, 5]], index=["A", "B"], columns=["one", "two"])
+ tm.assert_frame_equal(result, expected)
+
+ msg = "cannot use columns parameter with orient='columns'"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame.from_dict(
+ {"A": [1, 2], "B": [4, 5]},
+ orient="columns",
+ columns=["one", "two"],
+ )
+ with pytest.raises(ValueError, match=msg):
+ DataFrame.from_dict({"A": [1, 2], "B": [4, 5]}, columns=["one", "two"])
+
+ @pytest.mark.parametrize(
+ "data_dict, keys, orient",
+ [
+ ({}, [], "index"),
+ ([{("a",): 1}, {("a",): 2}], [("a",)], "columns"),
+ ([OrderedDict([(("a",), 1), (("b",), 2)])], [("a",), ("b",)], "columns"),
+ ([{("a", "b"): 1}], [("a", "b")], "columns"),
+ ],
+ )
+ def test_constructor_from_dict_tuples(self, data_dict, keys, orient):
+ # GH#16769
+ df = DataFrame.from_dict(data_dict, orient)
+
+ result = df.columns
+ expected = Index(keys, dtype="object", tupleize_cols=False)
+
+ tm.assert_index_equal(result, expected)
+
+ def test_frame_dict_constructor_empty_series(self):
+ s1 = Series(
+ [1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)])
+ )
+ s2 = Series(
+ [1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)])
+ )
+ s3 = Series(dtype=object)
+
+ # it works!
+ DataFrame({"foo": s1, "bar": s2, "baz": s3})
+ DataFrame.from_dict({"foo": s1, "baz": s3, "bar": s2})
diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py
new file mode 100644
index 0000000000000..ed6333bac1caa
--- /dev/null
+++ b/pandas/tests/frame/constructors/test_from_records.py
@@ -0,0 +1,440 @@
+from datetime import datetime
+from decimal import Decimal
+
+import numpy as np
+import pytest
+import pytz
+
+from pandas.compat import is_platform_little_endian
+
+from pandas import CategoricalIndex, DataFrame, Index, Interval, RangeIndex, Series
+import pandas._testing as tm
+
+
+class TestFromRecords:
+ def test_from_records_with_datetimes(self):
+
+ # this may fail on certain platforms because of a numpy issue
+ # related GH#6140
+ if not is_platform_little_endian():
+ pytest.skip("known failure of test on non-little endian")
+
+ # construction with a null in a recarray
+ # GH#6140
+ expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})
+
+ arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
+ dtypes = [("EXPIRY", "<M8[ns]")]
+
+ try:
+ recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
+ except (ValueError):
+ pytest.skip("known failure of numpy rec array creation")
+
+ result = DataFrame.from_records(recarray)
+ tm.assert_frame_equal(result, expected)
+
+ # coercion should work too
+ arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
+ dtypes = [("EXPIRY", "<M8[m]")]
+ recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
+ result = DataFrame.from_records(recarray)
+ tm.assert_frame_equal(result, expected)
+
+ def test_from_records_sequencelike(self):
+ df = DataFrame(
+ {
+ "A": np.array(np.random.randn(6), dtype=np.float64),
+ "A1": np.array(np.random.randn(6), dtype=np.float64),
+ "B": np.array(np.arange(6), dtype=np.int64),
+ "C": ["foo"] * 6,
+ "D": np.array([True, False] * 3, dtype=bool),
+ "E": np.array(np.random.randn(6), dtype=np.float32),
+ "E1": np.array(np.random.randn(6), dtype=np.float32),
+ "F": np.array(np.arange(6), dtype=np.int32),
+ }
+ )
+
+ # this is actually tricky to create the recordlike arrays and
+ # have the dtypes be intact
+ blocks = df._to_dict_of_blocks()
+ tuples = []
+ columns = []
+ dtypes = []
+ for dtype, b in blocks.items():
+ columns.extend(b.columns)
+ dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
+ for i in range(len(df.index)):
+ tup = []
+ for _, b in blocks.items():
+ tup.extend(b.iloc[i].values)
+ tuples.append(tuple(tup))
+
+ recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
+ recarray2 = df.to_records()
+ lists = [list(x) for x in tuples]
+
+ # tuples (lose the dtype info)
+ result = DataFrame.from_records(tuples, columns=columns).reindex(
+ columns=df.columns
+ )
+
+ # created recarray and with to_records recarray (have dtype info)
+ result2 = DataFrame.from_records(recarray, columns=columns).reindex(
+ columns=df.columns
+ )
+ result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
+ columns=df.columns
+ )
+
+ # list of tupels (no dtype info)
+ result4 = DataFrame.from_records(lists, columns=columns).reindex(
+ columns=df.columns
+ )
+
+ tm.assert_frame_equal(result, df, check_dtype=False)
+ tm.assert_frame_equal(result2, df)
+ tm.assert_frame_equal(result3, df)
+ tm.assert_frame_equal(result4, df, check_dtype=False)
+
+ # tuples is in the order of the columns
+ result = DataFrame.from_records(tuples)
+ tm.assert_index_equal(result.columns, RangeIndex(8))
+
+ # test exclude parameter & we are casting the results here (as we don't
+ # have dtype info to recover)
+ columns_to_test = [columns.index("C"), columns.index("E1")]
+
+ exclude = list(set(range(8)) - set(columns_to_test))
+ result = DataFrame.from_records(tuples, exclude=exclude)
+ result.columns = [columns[i] for i in sorted(columns_to_test)]
+ tm.assert_series_equal(result["C"], df["C"])
+ tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))
+
+ # empty case
+ result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
+ assert len(result) == 0
+ tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))
+
+ result = DataFrame.from_records([])
+ assert len(result) == 0
+ assert len(result.columns) == 0
+
+ def test_from_records_dictlike(self):
+
+ # test the dict methods
+ df = DataFrame(
+ {
+ "A": np.array(np.random.randn(6), dtype=np.float64),
+ "A1": np.array(np.random.randn(6), dtype=np.float64),
+ "B": np.array(np.arange(6), dtype=np.int64),
+ "C": ["foo"] * 6,
+ "D": np.array([True, False] * 3, dtype=bool),
+ "E": np.array(np.random.randn(6), dtype=np.float32),
+ "E1": np.array(np.random.randn(6), dtype=np.float32),
+ "F": np.array(np.arange(6), dtype=np.int32),
+ }
+ )
+
+ # columns is in a different order here than the actual items iterated
+ # from the dict
+ blocks = df._to_dict_of_blocks()
+ columns = []
+ for dtype, b in blocks.items():
+ columns.extend(b.columns)
+
+ asdict = {x: y for x, y in df.items()}
+ asdict2 = {x: y.values for x, y in df.items()}
+
+ # dict of series & dict of ndarrays (have dtype info)
+ results = []
+ results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
+ results.append(
+ DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
+ )
+ results.append(
+ DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
+ )
+
+ for r in results:
+ tm.assert_frame_equal(r, df)
+
+ def test_from_records_with_index_data(self):
+ df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
+
+ data = np.random.randn(10)
+ df1 = DataFrame.from_records(df, index=data)
+ tm.assert_index_equal(df1.index, Index(data))
+
+ def test_from_records_bad_index_column(self):
+ df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
+
+ # should pass
+ df1 = DataFrame.from_records(df, index=["C"])
+ tm.assert_index_equal(df1.index, Index(df.C))
+
+ df1 = DataFrame.from_records(df, index="C")
+ tm.assert_index_equal(df1.index, Index(df.C))
+
+ # should fail
+ msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame.from_records(df, index=[2])
+ with pytest.raises(KeyError, match=r"^2$"):
+ DataFrame.from_records(df, index=2)
+
+ def test_from_records_non_tuple(self):
+ class Record:
+ def __init__(self, *args):
+ self.args = args
+
+ def __getitem__(self, i):
+ return self.args[i]
+
+ def __iter__(self):
+ return iter(self.args)
+
+ recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
+ tups = [tuple(rec) for rec in recs]
+
+ result = DataFrame.from_records(recs)
+ expected = DataFrame.from_records(tups)
+ tm.assert_frame_equal(result, expected)
+
+ def test_from_records_len0_with_columns(self):
+ # GH#2633
+ result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
+ expected = Index(["bar"])
+
+ assert len(result) == 0
+ assert result.index.name == "foo"
+ tm.assert_index_equal(result.columns, expected)
+
+ def test_from_records_series_list_dict(self):
+ # GH#27358
+ expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
+ data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
+ result = DataFrame.from_records(data)
+ tm.assert_frame_equal(result, expected)
+
+ def test_from_records_series_categorical_index(self):
+ # GH#32805
+ index = CategoricalIndex(
+ [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
+ )
+ series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
+ frame = DataFrame.from_records(series_of_dicts, index=index)
+ expected = DataFrame(
+ {"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index
+ )
+ tm.assert_frame_equal(frame, expected)
+
+ def test_frame_from_records_utc(self):
+ rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)}
+
+ # it works
+ DataFrame.from_records([rec], index="begin_time")
+
+ def test_from_records_to_records(self):
+ # from numpy documentation
+ arr = np.zeros((2,), dtype=("i4,f4,a10"))
+ arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
+
+ # TODO(wesm): unused
+ frame = DataFrame.from_records(arr) # noqa
+
+ index = Index(np.arange(len(arr))[::-1])
+ indexed_frame = DataFrame.from_records(arr, index=index)
+ tm.assert_index_equal(indexed_frame.index, index)
+
+ # without names, it should go to last ditch
+ arr2 = np.zeros((2, 3))
+ tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
+
+ # wrong length
+ msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame.from_records(arr, index=index[:-1])
+
+ indexed_frame = DataFrame.from_records(arr, index="f1")
+
+ # what to do?
+ records = indexed_frame.to_records()
+ assert len(records.dtype.names) == 3
+
+ records = indexed_frame.to_records(index=False)
+ assert len(records.dtype.names) == 2
+ assert "index" not in records.dtype.names
+
+ def test_from_records_nones(self):
+ tuples = [(1, 2, None, 3), (1, 2, None, 3), (None, 2, 5, 3)]
+
+ df = DataFrame.from_records(tuples, columns=["a", "b", "c", "d"])
+ assert np.isnan(df["c"][0])
+
+ def test_from_records_iterator(self):
+ arr = np.array(
+ [(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5.0, 5.0, 6, 6), (7.0, 7.0, 8, 8)],
+ dtype=[
+ ("x", np.float64),
+ ("u", np.float32),
+ ("y", np.int64),
+ ("z", np.int32),
+ ],
+ )
+ df = DataFrame.from_records(iter(arr), nrows=2)
+ xp = DataFrame(
+ {
+ "x": np.array([1.0, 3.0], dtype=np.float64),
+ "u": np.array([1.0, 3.0], dtype=np.float32),
+ "y": np.array([2, 4], dtype=np.int64),
+ "z": np.array([2, 4], dtype=np.int32),
+ }
+ )
+ tm.assert_frame_equal(df.reindex_like(xp), xp)
+
+ # no dtypes specified here, so just compare with the default
+ arr = [(1.0, 2), (3.0, 4), (5.0, 6), (7.0, 8)]
+ df = DataFrame.from_records(iter(arr), columns=["x", "y"], nrows=2)
+ tm.assert_frame_equal(df, xp.reindex(columns=["x", "y"]), check_dtype=False)
+
+ def test_from_records_tuples_generator(self):
+ def tuple_generator(length):
+ for i in range(length):
+ letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ yield (i, letters[i % len(letters)], i / length)
+
+ columns_names = ["Integer", "String", "Float"]
+ columns = [
+ [i[j] for i in tuple_generator(10)] for j in range(len(columns_names))
+ ]
+ data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
+ expected = DataFrame(data, columns=columns_names)
+
+ generator = tuple_generator(10)
+ result = DataFrame.from_records(generator, columns=columns_names)
+ tm.assert_frame_equal(result, expected)
+
+ def test_from_records_lists_generator(self):
+ def list_generator(length):
+ for i in range(length):
+ letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ yield [i, letters[i % len(letters)], i / length]
+
+ columns_names = ["Integer", "String", "Float"]
+ columns = [
+ [i[j] for i in list_generator(10)] for j in range(len(columns_names))
+ ]
+ data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
+ expected = DataFrame(data, columns=columns_names)
+
+ generator = list_generator(10)
+ result = DataFrame.from_records(generator, columns=columns_names)
+ tm.assert_frame_equal(result, expected)
+
+ def test_from_records_columns_not_modified(self):
+ tuples = [(1, 2, 3), (1, 2, 3), (2, 5, 3)]
+
+ columns = ["a", "b", "c"]
+ original_columns = list(columns)
+
+ df = DataFrame.from_records(tuples, columns=columns, index="a") # noqa
+
+ assert columns == original_columns
+
+ def test_from_records_decimal(self):
+
+ tuples = [(Decimal("1.5"),), (Decimal("2.5"),), (None,)]
+
+ df = DataFrame.from_records(tuples, columns=["a"])
+ assert df["a"].dtype == object
+
+ df = DataFrame.from_records(tuples, columns=["a"], coerce_float=True)
+ assert df["a"].dtype == np.float64
+ assert np.isnan(df["a"].values[-1])
+
+ def test_from_records_duplicates(self):
+ result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])
+
+ expected = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_from_records_set_index_name(self):
+ def create_dict(order_id):
+ return {
+ "order_id": order_id,
+ "quantity": np.random.randint(1, 10),
+ "price": np.random.randint(1, 10),
+ }
+
+ documents = [create_dict(i) for i in range(10)]
+ # demo missing data
+ documents.append({"order_id": 10, "quantity": 5})
+
+ result = DataFrame.from_records(documents, index="order_id")
+ assert result.index.name == "order_id"
+
+ # MultiIndex
+ result = DataFrame.from_records(documents, index=["order_id", "quantity"])
+ assert result.index.names == ("order_id", "quantity")
+
+ def test_from_records_misc_brokenness(self):
+ # GH#2179
+
+ data = {1: ["foo"], 2: ["bar"]}
+
+ result = DataFrame.from_records(data, columns=["a", "b"])
+ exp = DataFrame(data, columns=["a", "b"])
+ tm.assert_frame_equal(result, exp)
+
+ # overlap in index/index_names
+
+ data = {"a": [1, 2, 3], "b": [4, 5, 6]}
+
+ result = DataFrame.from_records(data, index=["a", "b", "c"])
+ exp = DataFrame(data, index=["a", "b", "c"])
+ tm.assert_frame_equal(result, exp)
+
+ # GH#2623
+ rows = []
+ rows.append([datetime(2010, 1, 1), 1])
+ rows.append([datetime(2010, 1, 2), "hi"]) # test col upconverts to obj
+ df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
+ result = df2_obj.dtypes
+ expected = Series(
+ [np.dtype("datetime64[ns]"), np.dtype("object")], index=["date", "test"]
+ )
+ tm.assert_series_equal(result, expected)
+
+ rows = []
+ rows.append([datetime(2010, 1, 1), 1])
+ rows.append([datetime(2010, 1, 2), 1])
+ df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
+ result = df2_obj.dtypes
+ expected = Series(
+ [np.dtype("datetime64[ns]"), np.dtype("int64")], index=["date", "test"]
+ )
+ tm.assert_series_equal(result, expected)
+
+ def test_from_records_empty(self):
+ # GH#3562
+ result = DataFrame.from_records([], columns=["a", "b", "c"])
+ expected = DataFrame(columns=["a", "b", "c"])
+ tm.assert_frame_equal(result, expected)
+
+ result = DataFrame.from_records([], columns=["a", "b", "b"])
+ expected = DataFrame(columns=["a", "b", "b"])
+ tm.assert_frame_equal(result, expected)
+
+ def test_from_records_empty_with_nonempty_fields_gh3682(self):
+ a = np.array([(1, 2)], dtype=[("id", np.int64), ("value", np.int64)])
+ df = DataFrame.from_records(a, index="id")
+ tm.assert_index_equal(df.index, Index([1], name="id"))
+ assert df.index.name == "id"
+ tm.assert_index_equal(df.columns, Index(["value"]))
+
+ b = np.array([], dtype=[("id", np.int64), ("value", np.int64)])
+ df = DataFrame.from_records(b, index="id")
+ tm.assert_index_equal(df.index, Index([], name="id"))
+ assert df.index.name == "id"
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 94b2431650359..d8308e9988107 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -10,7 +10,6 @@
import pytest
import pytz
-from pandas.compat import is_platform_little_endian
from pandas.compat.numpy import _np_version_under1p19, _np_version_under1p20
from pandas.core.dtypes.common import is_integer_dtype
@@ -34,7 +33,6 @@
)
import pandas._testing as tm
from pandas.arrays import IntervalArray, PeriodArray, SparseArray
-from pandas.core.construction import create_series_with_explicit_dtype
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
@@ -1209,35 +1207,12 @@ def test_constructor_generator(self):
expected = DataFrame({0: range(10), 1: "a"})
tm.assert_frame_equal(result, expected, check_dtype=False)
- def test_constructor_list_of_odicts(self):
- data = [
- OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
- OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
- OrderedDict([["a", 1.5], ["d", 6]]),
- OrderedDict(),
- OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
- OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
- ]
-
- result = DataFrame(data)
- expected = DataFrame.from_dict(
- dict(zip(range(len(data)), data)), orient="index"
- )
- tm.assert_frame_equal(result, expected.reindex(result.index))
+ def test_constructor_list_of_dicts(self):
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
- def test_constructor_single_row(self):
- data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])]
-
- result = DataFrame(data)
- expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex(
- result.index
- )
- tm.assert_frame_equal(result, expected)
-
@pytest.mark.parametrize("dict_type", [dict, OrderedDict])
def test_constructor_ordered_dict_preserve_order(self, dict_type):
# see gh-13304
@@ -1279,71 +1254,6 @@ def test_constructor_ordered_dict_conflicting_orders(self, dict_type):
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
- def test_constructor_list_of_series(self):
- data = [
- OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
- OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
- ]
- sdict = OrderedDict(zip(["x", "y"], data))
- idx = Index(["a", "b", "c"])
-
- # all named
- data2 = [
- Series([1.5, 3, 4], idx, dtype="O", name="x"),
- Series([1.5, 3, 6], idx, name="y"),
- ]
- result = DataFrame(data2)
- expected = DataFrame.from_dict(sdict, orient="index")
- tm.assert_frame_equal(result, expected)
-
- # some unnamed
- data2 = [
- Series([1.5, 3, 4], idx, dtype="O", name="x"),
- Series([1.5, 3, 6], idx),
- ]
- result = DataFrame(data2)
-
- sdict = OrderedDict(zip(["x", "Unnamed 0"], data))
- expected = DataFrame.from_dict(sdict, orient="index")
- tm.assert_frame_equal(result, expected)
-
- # none named
- data = [
- OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
- OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
- OrderedDict([["a", 1.5], ["d", 6]]),
- OrderedDict(),
- OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
- OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
- ]
- data = [
- create_series_with_explicit_dtype(d, dtype_if_empty=object) for d in data
- ]
-
- result = DataFrame(data)
- sdict = OrderedDict(zip(range(len(data)), data))
- expected = DataFrame.from_dict(sdict, orient="index")
- tm.assert_frame_equal(result, expected.reindex(result.index))
-
- result2 = DataFrame(data, index=np.arange(6))
- tm.assert_frame_equal(result, result2)
-
- result = DataFrame([Series(dtype=object)])
- expected = DataFrame(index=[0])
- tm.assert_frame_equal(result, expected)
-
- data = [
- OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
- OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
- ]
- sdict = OrderedDict(zip(range(len(data)), data))
-
- idx = Index(["a", "b", "c"])
- data2 = [Series([1.5, 3, 4], idx, dtype="O"), Series([1.5, 3, 6], idx)]
- result = DataFrame(data2)
- expected = DataFrame.from_dict(sdict, orient="index")
- tm.assert_frame_equal(result, expected)
-
def test_constructor_list_of_series_aligned_index(self):
series = [Series(i, index=["b", "a", "c"], name=str(i)) for i in range(3)]
result = DataFrame(series)
@@ -1502,84 +1412,6 @@ def test_constructor_list_of_dict_order(self):
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
- def test_constructor_orient(self, float_string_frame):
- data_dict = float_string_frame.T._series
- recons = DataFrame.from_dict(data_dict, orient="index")
- expected = float_string_frame.reindex(index=recons.index)
- tm.assert_frame_equal(recons, expected)
-
- # dict of sequence
- a = {"hi": [32, 3, 3], "there": [3, 5, 3]}
- rs = DataFrame.from_dict(a, orient="index")
- xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
- tm.assert_frame_equal(rs, xp)
-
- def test_constructor_from_ordered_dict(self):
- # GH8425
- a = OrderedDict(
- [
- ("one", OrderedDict([("col_a", "foo1"), ("col_b", "bar1")])),
- ("two", OrderedDict([("col_a", "foo2"), ("col_b", "bar2")])),
- ("three", OrderedDict([("col_a", "foo3"), ("col_b", "bar3")])),
- ]
- )
- expected = DataFrame.from_dict(a, orient="columns").T
- result = DataFrame.from_dict(a, orient="index")
- tm.assert_frame_equal(result, expected)
-
- def test_from_dict_columns_parameter(self):
- # GH 18529
- # Test new columns parameter for from_dict that was added to make
- # from_items(..., orient='index', columns=[...]) easier to replicate
- result = DataFrame.from_dict(
- OrderedDict([("A", [1, 2]), ("B", [4, 5])]),
- orient="index",
- columns=["one", "two"],
- )
- expected = DataFrame([[1, 2], [4, 5]], index=["A", "B"], columns=["one", "two"])
- tm.assert_frame_equal(result, expected)
-
- msg = "cannot use columns parameter with orient='columns'"
- with pytest.raises(ValueError, match=msg):
- DataFrame.from_dict(
- {"A": [1, 2], "B": [4, 5]},
- orient="columns",
- columns=["one", "two"],
- )
- with pytest.raises(ValueError, match=msg):
- DataFrame.from_dict({"A": [1, 2], "B": [4, 5]}, columns=["one", "two"])
-
- @pytest.mark.parametrize(
- "data_dict, keys, orient",
- [
- ({}, [], "index"),
- ([{("a",): 1}, {("a",): 2}], [("a",)], "columns"),
- ([OrderedDict([(("a",), 1), (("b",), 2)])], [("a",), ("b",)], "columns"),
- ([{("a", "b"): 1}], [("a", "b")], "columns"),
- ],
- )
- def test_constructor_from_dict_tuples(self, data_dict, keys, orient):
- # GH 16769
- df = DataFrame.from_dict(data_dict, orient)
-
- result = df.columns
- expected = Index(keys, dtype="object", tupleize_cols=False)
-
- tm.assert_index_equal(result, expected)
-
- def test_frame_dict_constructor_empty_series(self):
- s1 = Series(
- [1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)])
- )
- s2 = Series(
- [1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)])
- )
- s3 = Series(dtype=object)
-
- # it works!
- DataFrame({"foo": s1, "bar": s2, "baz": s3})
- DataFrame.from_dict({"foo": s1, "baz": s3, "bar": s2})
-
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=["a", "b", "c"], name="x")
df = DataFrame(a)
@@ -1723,10 +1555,6 @@ def test_constructor_column_duplicates(self):
tm.assert_frame_equal(idf, edf)
- msg = "If using all scalar values, you must pass an index"
- with pytest.raises(ValueError, match=msg):
- DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)]))
-
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
@@ -1870,8 +1698,6 @@ def test_constructor_with_datetimes(self):
# GH 7594
# don't coerce tz-aware
- import pytz
-
tz = pytz.timezone("US/Eastern")
dt = tz.localize(datetime(2012, 1, 1))
@@ -2143,211 +1969,6 @@ def test_constructor_categorical_series(self):
df = DataFrame({"x": Series(["a", "b", "c"], dtype="category")}, index=index)
tm.assert_frame_equal(df, expected)
- def test_from_records_to_records(self):
- # from numpy documentation
- arr = np.zeros((2,), dtype=("i4,f4,a10"))
- arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
-
- # TODO(wesm): unused
- frame = DataFrame.from_records(arr) # noqa
-
- index = Index(np.arange(len(arr))[::-1])
- indexed_frame = DataFrame.from_records(arr, index=index)
- tm.assert_index_equal(indexed_frame.index, index)
-
- # without names, it should go to last ditch
- arr2 = np.zeros((2, 3))
- tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
-
- # wrong length
- msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
- with pytest.raises(ValueError, match=msg):
- DataFrame.from_records(arr, index=index[:-1])
-
- indexed_frame = DataFrame.from_records(arr, index="f1")
-
- # what to do?
- records = indexed_frame.to_records()
- assert len(records.dtype.names) == 3
-
- records = indexed_frame.to_records(index=False)
- assert len(records.dtype.names) == 2
- assert "index" not in records.dtype.names
-
- def test_from_records_nones(self):
- tuples = [(1, 2, None, 3), (1, 2, None, 3), (None, 2, 5, 3)]
-
- df = DataFrame.from_records(tuples, columns=["a", "b", "c", "d"])
- assert np.isnan(df["c"][0])
-
- def test_from_records_iterator(self):
- arr = np.array(
- [(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5.0, 5.0, 6, 6), (7.0, 7.0, 8, 8)],
- dtype=[
- ("x", np.float64),
- ("u", np.float32),
- ("y", np.int64),
- ("z", np.int32),
- ],
- )
- df = DataFrame.from_records(iter(arr), nrows=2)
- xp = DataFrame(
- {
- "x": np.array([1.0, 3.0], dtype=np.float64),
- "u": np.array([1.0, 3.0], dtype=np.float32),
- "y": np.array([2, 4], dtype=np.int64),
- "z": np.array([2, 4], dtype=np.int32),
- }
- )
- tm.assert_frame_equal(df.reindex_like(xp), xp)
-
- # no dtypes specified here, so just compare with the default
- arr = [(1.0, 2), (3.0, 4), (5.0, 6), (7.0, 8)]
- df = DataFrame.from_records(iter(arr), columns=["x", "y"], nrows=2)
- tm.assert_frame_equal(df, xp.reindex(columns=["x", "y"]), check_dtype=False)
-
- def test_from_records_tuples_generator(self):
- def tuple_generator(length):
- for i in range(length):
- letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- yield (i, letters[i % len(letters)], i / length)
-
- columns_names = ["Integer", "String", "Float"]
- columns = [
- [i[j] for i in tuple_generator(10)] for j in range(len(columns_names))
- ]
- data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
- expected = DataFrame(data, columns=columns_names)
-
- generator = tuple_generator(10)
- result = DataFrame.from_records(generator, columns=columns_names)
- tm.assert_frame_equal(result, expected)
-
- def test_from_records_lists_generator(self):
- def list_generator(length):
- for i in range(length):
- letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- yield [i, letters[i % len(letters)], i / length]
-
- columns_names = ["Integer", "String", "Float"]
- columns = [
- [i[j] for i in list_generator(10)] for j in range(len(columns_names))
- ]
- data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
- expected = DataFrame(data, columns=columns_names)
-
- generator = list_generator(10)
- result = DataFrame.from_records(generator, columns=columns_names)
- tm.assert_frame_equal(result, expected)
-
- def test_from_records_columns_not_modified(self):
- tuples = [(1, 2, 3), (1, 2, 3), (2, 5, 3)]
-
- columns = ["a", "b", "c"]
- original_columns = list(columns)
-
- df = DataFrame.from_records(tuples, columns=columns, index="a") # noqa
-
- assert columns == original_columns
-
- def test_from_records_decimal(self):
- from decimal import Decimal
-
- tuples = [(Decimal("1.5"),), (Decimal("2.5"),), (None,)]
-
- df = DataFrame.from_records(tuples, columns=["a"])
- assert df["a"].dtype == object
-
- df = DataFrame.from_records(tuples, columns=["a"], coerce_float=True)
- assert df["a"].dtype == np.float64
- assert np.isnan(df["a"].values[-1])
-
- def test_from_records_duplicates(self):
- result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])
-
- expected = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])
-
- tm.assert_frame_equal(result, expected)
-
- def test_from_records_set_index_name(self):
- def create_dict(order_id):
- return {
- "order_id": order_id,
- "quantity": np.random.randint(1, 10),
- "price": np.random.randint(1, 10),
- }
-
- documents = [create_dict(i) for i in range(10)]
- # demo missing data
- documents.append({"order_id": 10, "quantity": 5})
-
- result = DataFrame.from_records(documents, index="order_id")
- assert result.index.name == "order_id"
-
- # MultiIndex
- result = DataFrame.from_records(documents, index=["order_id", "quantity"])
- assert result.index.names == ("order_id", "quantity")
-
- def test_from_records_misc_brokenness(self):
- # #2179
-
- data = {1: ["foo"], 2: ["bar"]}
-
- result = DataFrame.from_records(data, columns=["a", "b"])
- exp = DataFrame(data, columns=["a", "b"])
- tm.assert_frame_equal(result, exp)
-
- # overlap in index/index_names
-
- data = {"a": [1, 2, 3], "b": [4, 5, 6]}
-
- result = DataFrame.from_records(data, index=["a", "b", "c"])
- exp = DataFrame(data, index=["a", "b", "c"])
- tm.assert_frame_equal(result, exp)
-
- # GH 2623
- rows = []
- rows.append([datetime(2010, 1, 1), 1])
- rows.append([datetime(2010, 1, 2), "hi"]) # test col upconverts to obj
- df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
- result = df2_obj.dtypes
- expected = Series(
- [np.dtype("datetime64[ns]"), np.dtype("object")], index=["date", "test"]
- )
- tm.assert_series_equal(result, expected)
-
- rows = []
- rows.append([datetime(2010, 1, 1), 1])
- rows.append([datetime(2010, 1, 2), 1])
- df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
- result = df2_obj.dtypes
- expected = Series(
- [np.dtype("datetime64[ns]"), np.dtype("int64")], index=["date", "test"]
- )
- tm.assert_series_equal(result, expected)
-
- def test_from_records_empty(self):
- # 3562
- result = DataFrame.from_records([], columns=["a", "b", "c"])
- expected = DataFrame(columns=["a", "b", "c"])
- tm.assert_frame_equal(result, expected)
-
- result = DataFrame.from_records([], columns=["a", "b", "b"])
- expected = DataFrame(columns=["a", "b", "b"])
- tm.assert_frame_equal(result, expected)
-
- def test_from_records_empty_with_nonempty_fields_gh3682(self):
- a = np.array([(1, 2)], dtype=[("id", np.int64), ("value", np.int64)])
- df = DataFrame.from_records(a, index="id")
- tm.assert_index_equal(df.index, Index([1], name="id"))
- assert df.index.name == "id"
- tm.assert_index_equal(df.columns, Index(["value"]))
-
- b = np.array([], dtype=[("id", np.int64), ("value", np.int64)])
- df = DataFrame.from_records(b, index="id")
- tm.assert_index_equal(df.index, Index([], name="id"))
- assert df.index.name == "id"
-
@pytest.mark.parametrize(
"dtype",
tm.ALL_INT_DTYPES
@@ -2375,229 +1996,6 @@ def test_check_dtype_empty_string_column(self, dtype):
assert data.b.dtype.name == "object"
- def test_from_records_with_datetimes(self):
-
- # this may fail on certain platforms because of a numpy issue
- # related GH6140
- if not is_platform_little_endian():
- pytest.skip("known failure of test on non-little endian")
-
- # construction with a null in a recarray
- # GH 6140
- expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})
-
- arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
- dtypes = [("EXPIRY", "<M8[ns]")]
-
- try:
- recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
- except (ValueError):
- pytest.skip("known failure of numpy rec array creation")
-
- result = DataFrame.from_records(recarray)
- tm.assert_frame_equal(result, expected)
-
- # coercion should work too
- arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
- dtypes = [("EXPIRY", "<M8[m]")]
- recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
- result = DataFrame.from_records(recarray)
- tm.assert_frame_equal(result, expected)
-
- def test_from_records_sequencelike(self):
- df = DataFrame(
- {
- "A": np.array(np.random.randn(6), dtype=np.float64),
- "A1": np.array(np.random.randn(6), dtype=np.float64),
- "B": np.array(np.arange(6), dtype=np.int64),
- "C": ["foo"] * 6,
- "D": np.array([True, False] * 3, dtype=bool),
- "E": np.array(np.random.randn(6), dtype=np.float32),
- "E1": np.array(np.random.randn(6), dtype=np.float32),
- "F": np.array(np.arange(6), dtype=np.int32),
- }
- )
-
- # this is actually tricky to create the recordlike arrays and
- # have the dtypes be intact
- blocks = df._to_dict_of_blocks()
- tuples = []
- columns = []
- dtypes = []
- for dtype, b in blocks.items():
- columns.extend(b.columns)
- dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
- for i in range(len(df.index)):
- tup = []
- for _, b in blocks.items():
- tup.extend(b.iloc[i].values)
- tuples.append(tuple(tup))
-
- recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
- recarray2 = df.to_records()
- lists = [list(x) for x in tuples]
-
- # tuples (lose the dtype info)
- result = DataFrame.from_records(tuples, columns=columns).reindex(
- columns=df.columns
- )
-
- # created recarray and with to_records recarray (have dtype info)
- result2 = DataFrame.from_records(recarray, columns=columns).reindex(
- columns=df.columns
- )
- result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
- columns=df.columns
- )
-
- # list of tupels (no dtype info)
- result4 = DataFrame.from_records(lists, columns=columns).reindex(
- columns=df.columns
- )
-
- tm.assert_frame_equal(result, df, check_dtype=False)
- tm.assert_frame_equal(result2, df)
- tm.assert_frame_equal(result3, df)
- tm.assert_frame_equal(result4, df, check_dtype=False)
-
- # tuples is in the order of the columns
- result = DataFrame.from_records(tuples)
- tm.assert_index_equal(result.columns, RangeIndex(8))
-
- # test exclude parameter & we are casting the results here (as we don't
- # have dtype info to recover)
- columns_to_test = [columns.index("C"), columns.index("E1")]
-
- exclude = list(set(range(8)) - set(columns_to_test))
- result = DataFrame.from_records(tuples, exclude=exclude)
- result.columns = [columns[i] for i in sorted(columns_to_test)]
- tm.assert_series_equal(result["C"], df["C"])
- tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))
-
- # empty case
- result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
- assert len(result) == 0
- tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))
-
- result = DataFrame.from_records([])
- assert len(result) == 0
- assert len(result.columns) == 0
-
- def test_from_records_dictlike(self):
-
- # test the dict methods
- df = DataFrame(
- {
- "A": np.array(np.random.randn(6), dtype=np.float64),
- "A1": np.array(np.random.randn(6), dtype=np.float64),
- "B": np.array(np.arange(6), dtype=np.int64),
- "C": ["foo"] * 6,
- "D": np.array([True, False] * 3, dtype=bool),
- "E": np.array(np.random.randn(6), dtype=np.float32),
- "E1": np.array(np.random.randn(6), dtype=np.float32),
- "F": np.array(np.arange(6), dtype=np.int32),
- }
- )
-
- # columns is in a different order here than the actual items iterated
- # from the dict
- blocks = df._to_dict_of_blocks()
- columns = []
- for dtype, b in blocks.items():
- columns.extend(b.columns)
-
- asdict = {x: y for x, y in df.items()}
- asdict2 = {x: y.values for x, y in df.items()}
-
- # dict of series & dict of ndarrays (have dtype info)
- results = []
- results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
- results.append(
- DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
- )
- results.append(
- DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
- )
-
- for r in results:
- tm.assert_frame_equal(r, df)
-
- def test_from_records_with_index_data(self):
- df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
-
- data = np.random.randn(10)
- df1 = DataFrame.from_records(df, index=data)
- tm.assert_index_equal(df1.index, Index(data))
-
- def test_from_records_bad_index_column(self):
- df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
-
- # should pass
- df1 = DataFrame.from_records(df, index=["C"])
- tm.assert_index_equal(df1.index, Index(df.C))
-
- df1 = DataFrame.from_records(df, index="C")
- tm.assert_index_equal(df1.index, Index(df.C))
-
- # should fail
- msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
- with pytest.raises(ValueError, match=msg):
- DataFrame.from_records(df, index=[2])
- with pytest.raises(KeyError, match=r"^2$"):
- DataFrame.from_records(df, index=2)
-
- def test_from_records_non_tuple(self):
- class Record:
- def __init__(self, *args):
- self.args = args
-
- def __getitem__(self, i):
- return self.args[i]
-
- def __iter__(self):
- return iter(self.args)
-
- recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
- tups = [tuple(rec) for rec in recs]
-
- result = DataFrame.from_records(recs)
- expected = DataFrame.from_records(tups)
- tm.assert_frame_equal(result, expected)
-
- def test_from_records_len0_with_columns(self):
- # #2633
- result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
- expected = Index(["bar"])
-
- assert len(result) == 0
- assert result.index.name == "foo"
- tm.assert_index_equal(result.columns, expected)
-
- def test_from_records_series_list_dict(self):
- # GH27358
- expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
- data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
- result = DataFrame.from_records(data)
- tm.assert_frame_equal(result, expected)
-
- def test_from_records_series_categorical_index(self):
- # GH 32805
- index = CategoricalIndex(
- [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
- )
- series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
- frame = DataFrame.from_records(series_of_dicts, index=index)
- expected = DataFrame(
- {"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index
- )
- tm.assert_frame_equal(frame, expected)
-
- def test_frame_from_records_utc(self):
- rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)}
-
- # it works
- DataFrame.from_records([rec], index="begin_time")
-
def test_to_frame_with_falsey_names(self):
# GH 16114
result = Series(name=0, dtype=object).to_frame().dtypes
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38869 | 2020-12-31T23:07:40Z | 2020-12-31T23:52:13Z | 2020-12-31T23:52:13Z | 2021-01-01T02:02:11Z |
REF: Move aggregation into apply | diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index c0c6c9084b560..edb6b97a73e7f 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -1,12 +1,18 @@
import abc
import inspect
-from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Tuple, Type
+from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple, Type, cast
import numpy as np
from pandas._config import option_context
-from pandas._typing import AggFuncType, Axis, FrameOrSeriesUnion
+from pandas._typing import (
+ AggFuncType,
+ AggFuncTypeBase,
+ AggFuncTypeDict,
+ Axis,
+ FrameOrSeriesUnion,
+)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
@@ -17,6 +23,7 @@
)
from pandas.core.dtypes.generic import ABCSeries
+from pandas.core.aggregation import agg_dict_like, agg_list_like
from pandas.core.construction import create_series_with_explicit_dtype
if TYPE_CHECKING:
@@ -27,6 +34,7 @@
def frame_apply(
obj: "DataFrame",
+ how: str,
func: AggFuncType,
axis: Axis = 0,
raw: bool = False,
@@ -44,6 +52,7 @@ def frame_apply(
return klass(
obj,
+ how,
func,
raw=raw,
result_type=result_type,
@@ -84,13 +93,16 @@ def wrap_results_for_axis(
def __init__(
self,
obj: "DataFrame",
+ how: str,
func,
raw: bool,
result_type: Optional[str],
args,
kwds,
):
+ assert how in ("apply", "agg")
self.obj = obj
+ self.how = how
self.raw = raw
self.args = args or ()
self.kwds = kwds or {}
@@ -104,7 +116,11 @@ def __init__(
self.result_type = result_type
# curry if needed
- if (kwds or args) and not isinstance(func, (np.ufunc, str)):
+ if (
+ (kwds or args)
+ and not isinstance(func, (np.ufunc, str))
+ and not is_list_like(func)
+ ):
def f(x):
return func(x, *args, **kwds)
@@ -112,7 +128,7 @@ def f(x):
else:
f = func
- self.f = f
+ self.f: AggFuncType = f
@property
def res_columns(self) -> "Index":
@@ -139,6 +155,54 @@ def agg_axis(self) -> "Index":
return self.obj._get_agg_axis(self.axis)
def get_result(self):
+ if self.how == "apply":
+ return self.apply()
+ else:
+ return self.agg()
+
+ def agg(self) -> Tuple[Optional[FrameOrSeriesUnion], Optional[bool]]:
+ """
+ Provide an implementation for the aggregators.
+
+ Returns
+ -------
+ tuple of result, how.
+
+ Notes
+ -----
+ how can be a string describe the required post-processing, or
+ None if not required.
+ """
+ obj = self.obj
+ arg = self.f
+ args = self.args
+ kwargs = self.kwds
+
+ _axis = kwargs.pop("_axis", None)
+ if _axis is None:
+ _axis = getattr(obj, "axis", 0)
+
+ if isinstance(arg, str):
+ return obj._try_aggregate_string_function(arg, *args, **kwargs), None
+ elif is_dict_like(arg):
+ arg = cast(AggFuncTypeDict, arg)
+ return agg_dict_like(obj, arg, _axis), True
+ elif is_list_like(arg):
+ # we require a list, but not a 'str'
+ arg = cast(List[AggFuncTypeBase], arg)
+ return agg_list_like(obj, arg, _axis=_axis), None
+ else:
+ result = None
+
+ if callable(arg):
+ f = obj._get_cython_func(arg)
+ if f and not args and not kwargs:
+ return getattr(obj, f)(), None
+
+ # caller can react
+ return result, True
+
+ def apply(self) -> FrameOrSeriesUnion:
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
@@ -191,6 +255,8 @@ def apply_empty_result(self):
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
+ assert callable(self.f)
+
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ["reduce", None]:
@@ -246,6 +312,8 @@ def wrapper(*args, **kwargs):
return self.obj._constructor_sliced(result, index=self.agg_axis)
def apply_broadcast(self, target: "DataFrame") -> "DataFrame":
+ assert callable(self.f)
+
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
@@ -279,6 +347,8 @@ def apply_standard(self):
return self.wrap_results(results, res_index)
def apply_series_generator(self) -> Tuple[ResType, "Index"]:
+ assert callable(self.f)
+
series_gen = self.series_generator
res_index = self.result_index
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index cc89823cd7817..6e9a8ab972abc 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -121,12 +121,7 @@
from pandas.core import algorithms, common as com, generic, nanops, ops
from pandas.core.accessor import CachedAccessor
-from pandas.core.aggregation import (
- aggregate,
- reconstruct_func,
- relabel_result,
- transform,
-)
+from pandas.core.aggregation import reconstruct_func, relabel_result, transform
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.sparse import SparseFrameAccessor
@@ -7623,13 +7618,24 @@ def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):
return result
def _aggregate(self, arg, axis: Axis = 0, *args, **kwargs):
+ from pandas.core.apply import frame_apply
+
+ op = frame_apply(
+ self if axis == 0 else self.T,
+ how="agg",
+ func=arg,
+ axis=0,
+ args=args,
+ kwds=kwargs,
+ )
+ result, how = op.get_result()
+
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
- result, how = aggregate(self.T, arg, *args, **kwargs)
result = result.T if result is not None else result
- return result, how
- return aggregate(self, arg, *args, **kwargs)
+
+ return result, how
agg = aggregate
@@ -7789,6 +7795,7 @@ def apply(
op = frame_apply(
self,
+ how="apply",
func=func,
axis=axis,
raw=raw,
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This is a step toward #35725. We can't simply ban agg from not aggregating because it's used by apply for series, dataframe, groupby, resampler, and rolling. There are two other related issues. First, apply, agg, and transform are all doing similar things but with different implementations leading to unintended inconsistencies. Second, we would also like to ban mutation in apply (and presumably agg and transform for the same reasons).
It seems best to me to handle all of these by first combining the implementation into apply, cleaning up and sharing code between the implementations, and then we would be in a good place to ban agg from not aggregating.
This starts the process by moving aggregation.aggregate into apply, but it is only utilized for frames. Once this is done for the remaining objects (series, groupby, resampler, rolling), the rest of the functions in aggregation can be moved into apply as well. | https://api.github.com/repos/pandas-dev/pandas/pulls/38867 | 2020-12-31T21:51:41Z | 2021-01-03T16:56:06Z | 2021-01-03T16:56:06Z | 2021-01-28T23:02:00Z |
ENH: implement FloatingArray.round() | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index d6fd017ff0897..ffacd82b35056 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -54,6 +54,7 @@ Other enhancements
- Add support for dict-like names in :class:`MultiIndex.set_names` and :class:`MultiIndex.rename` (:issue:`20421`)
- :func:`pandas.read_excel` can now auto detect .xlsb files (:issue:`35416`)
- :meth:`.Rolling.sum`, :meth:`.Expanding.sum`, :meth:`.Rolling.mean`, :meth:`.Expanding.mean`, :meth:`.Rolling.median`, :meth:`.Expanding.median`, :meth:`.Rolling.max`, :meth:`.Expanding.max`, :meth:`.Rolling.min`, and :meth:`.Expanding.min` now support ``Numba`` execution with the ``engine`` keyword (:issue:`38895`)
+- Added :meth:`NumericArray.round` (:issue:`38844`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/arrays/numeric.py b/pandas/core/arrays/numeric.py
index 3c115ec42f6ec..0325b008d88ee 100644
--- a/pandas/core/arrays/numeric.py
+++ b/pandas/core/arrays/numeric.py
@@ -1,10 +1,12 @@
import datetime
-from typing import TYPE_CHECKING, Union
+from typing import TYPE_CHECKING, Callable, Union
import numpy as np
from pandas._libs import Timedelta, missing as libmissing
+from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
+from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
is_float,
@@ -56,6 +58,32 @@ def __from_arrow__(
return array_class._concat_same_type(results)
+_round_doc = """
+Round each value in NumericArray a to the given number of decimals.
+
+Parameters
+----------
+decimals : int, default 0
+ Number of decimal places to round to. If decimals is negative,
+ it specifies the number of positions to the left of the decimal point.
+*args, **kwargs
+ Additional arguments and keywords have no effect but might be
+ accepted for compatibility with NumPy.
+
+Returns
+-------
+NumericArray
+ Rounded values of the NumericArray.
+
+See Also
+--------
+numpy.around : Round values of an np.array.
+DataFrame.round : Round values of a DataFrame.
+Series.round : Round values of a Series.
+
+"""
+
+
class NumericArray(BaseMaskedArray):
"""
Base class for IntegerArray and FloatingArray.
@@ -130,3 +158,16 @@ def _arith_method(self, other, op):
)
return self._maybe_mask_result(result, mask, other, op_name)
+
+ def _apply(self, func: Callable, **kwargs) -> "NumericArray":
+ values = self._data[~self._mask]
+ values = np.round(values, **kwargs)
+
+ data = np.zeros(self._data.shape)
+ data[~self._mask] = values
+ return type(self)(data, self._mask)
+
+ @doc(_round_doc)
+ def round(self, decimals: int = 0, *args, **kwargs) -> "NumericArray":
+ nv.validate_round(args, kwargs)
+ return self._apply(np.round, decimals=decimals, **kwargs)
diff --git a/pandas/tests/series/methods/test_round.py b/pandas/tests/series/methods/test_round.py
index 88d5c428712dc..0f0a4e9adaa80 100644
--- a/pandas/tests/series/methods/test_round.py
+++ b/pandas/tests/series/methods/test_round.py
@@ -35,15 +35,23 @@ def test_round_numpy_with_nan(self):
expected = Series([2.0, np.nan, 0.0])
tm.assert_series_equal(result, expected)
- def test_round_builtin(self):
- ser = Series([1.123, 2.123, 3.123], index=range(3))
- result = round(ser)
- expected_rounded0 = Series([1.0, 2.0, 3.0], index=range(3))
+ def test_round_builtin(self, any_float_allowed_nullable_dtype):
+ ser = Series(
+ [1.123, 2.123, 3.123],
+ index=range(3),
+ dtype=any_float_allowed_nullable_dtype,
+ )
+ result = round(ser).astype(any_float_allowed_nullable_dtype)
+ expected_rounded0 = Series(
+ [1.0, 2.0, 3.0], index=range(3), dtype=any_float_allowed_nullable_dtype
+ )
tm.assert_series_equal(result, expected_rounded0)
decimals = 2
- expected_rounded = Series([1.12, 2.12, 3.12], index=range(3))
- result = round(ser, decimals)
+ expected_rounded = Series(
+ [1.12, 2.12, 3.12], index=range(3), dtype=any_float_allowed_nullable_dtype
+ )
+ result = round(ser, decimals).astype(any_float_allowed_nullable_dtype)
tm.assert_series_equal(result, expected_rounded)
@pytest.mark.parametrize("method", ["round", "floor", "ceil"])
| - [x] closes #38844
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/38866 | 2020-12-31T21:43:06Z | 2021-02-11T01:30:19Z | null | 2021-02-11T01:30:19Z |
TST: GH30999 add match=msg to 2 pytest.raises in pandas/tests/io/json/test_normalize.py | diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index 46f6367a7227f..b232c827f5ece 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -1,4 +1,3 @@
-from contextlib import nullcontext as does_not_raise
import json
import numpy as np
@@ -170,19 +169,21 @@ def test_empty_array(self):
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
- "data, record_path, error",
+ "data, record_path, exception_type",
[
- ([{"a": 0}, {"a": 1}], None, does_not_raise()),
- ({"a": [{"a": 0}, {"a": 1}]}, "a", does_not_raise()),
- ('{"a": [{"a": 0}, {"a": 1}]}', None, pytest.raises(NotImplementedError)),
- (None, None, pytest.raises(NotImplementedError)),
+ ([{"a": 0}, {"a": 1}], None, None),
+ ({"a": [{"a": 0}, {"a": 1}]}, "a", None),
+ ('{"a": [{"a": 0}, {"a": 1}]}', None, NotImplementedError),
+ (None, None, NotImplementedError),
],
)
- def test_accepted_input(self, data, record_path, error):
- with error:
+ def test_accepted_input(self, data, record_path, exception_type):
+ if exception_type is not None:
+ with pytest.raises(exception_type, match=tm.EMPTY_STRING_PATTERN):
+ json_normalize(data, record_path=record_path)
+ else:
result = json_normalize(data, record_path=record_path)
expected = DataFrame([0, 1], columns=["a"])
-
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
| xref #30999 for pandas/tests/io/json/test_normalize.py
And with this my work on #30999 is done. Will head over to the issue to explain why I am not fixing the remaining 15 instances of `pytest.raise`
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/38864 | 2020-12-31T20:06:22Z | 2021-01-01T20:43:19Z | 2021-01-01T20:43:19Z | 2021-01-01T20:43:23Z |
BUG: additional keys in groupby indices when NAs are present | diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst
index f66098633b45e..5e6d5c8c7466d 100644
--- a/doc/source/whatsnew/v1.3.0.rst
+++ b/doc/source/whatsnew/v1.3.0.rst
@@ -285,6 +285,7 @@ Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
- Bug in :meth:`SeriesGroupBy.value_counts` where unobserved categories in a grouped categorical series were not tallied (:issue:`38672`)
+- Bug in :meth:`.GroupBy.indices` would contain non-existent indices when null values were present in the groupby keys (:issue:`9304`)
-
Reshaping
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 4e451fc33b055..0f796b2b65dff 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -888,12 +888,17 @@ def indices_fast(ndarray index, const int64_t[:] labels, list keys,
k = len(keys)
- if n == 0:
+ # Start at the first non-null entry
+ j = 0
+ for j in range(0, n):
+ if labels[j] != -1:
+ break
+ else:
return result
+ cur = labels[j]
+ start = j
- start = 0
- cur = labels[0]
- for i in range(1, n):
+ for i in range(j+1, n):
lab = labels[i]
if lab != cur:
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 90396f1be0755..9417b626386fc 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -542,8 +542,7 @@ def get_indexer_dict(
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
if np.all(group_index == -1):
- # When all keys are nan and dropna=True, indices_fast can't handle this
- # and the return is empty anyway
+ # Short-circuit, lib.indices_fast will return the same
return {}
ngroups = (
((group_index.size and group_index.max()) + 1)
diff --git a/pandas/tests/groupby/test_missing.py b/pandas/tests/groupby/test_missing.py
index 56cf400258f0f..e2ca63d9ab922 100644
--- a/pandas/tests/groupby/test_missing.py
+++ b/pandas/tests/groupby/test_missing.py
@@ -126,3 +126,12 @@ def test_min_count(func, min_count, value):
result = getattr(df.groupby("a"), func)(min_count=min_count)
expected = DataFrame({"b": [value], "c": [np.nan]}, index=Index([1], name="a"))
tm.assert_frame_equal(result, expected)
+
+
+def test_indicies_with_missing():
+ # GH 9304
+ df = DataFrame({"a": [1, 1, np.nan], "b": [2, 3, 4], "c": [5, 6, 7]})
+ g = df.groupby(["a", "b"])
+ result = g.indices
+ expected = {(1.0, 2): np.array([0]), (1.0, 3): np.array([1])}
+ assert result == expected
| - [x] closes #9304
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
`lib.indices_fast` included logic to skip over null values (-1's), but did not include the case where the first index is null. Currently this function is only used in `sorting.get_indexer_dict` where the case of all null group_index is handled separately. | https://api.github.com/repos/pandas-dev/pandas/pulls/38861 | 2020-12-31T18:06:26Z | 2021-01-01T20:41:44Z | 2021-01-01T20:41:44Z | 2021-01-02T20:52:46Z |
DOC/CI: Fix building docs with --no-api | diff --git a/doc/make.py b/doc/make.py
index 40ce9ea3bbcd2..d90cd2428360d 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -46,6 +46,7 @@ def __init__(
warnings_are_errors=False,
):
self.num_jobs = num_jobs
+ self.include_api = include_api
self.verbosity = verbosity
self.warnings_are_errors = warnings_are_errors
@@ -188,7 +189,14 @@ def _add_redirects(self):
if not row or row[0].strip().startswith("#"):
continue
- path = os.path.join(BUILD_PATH, "html", *row[0].split("/")) + ".html"
+ html_path = os.path.join(BUILD_PATH, "html")
+ path = os.path.join(html_path, *row[0].split("/")) + ".html"
+
+ if not self.include_api and (
+ os.path.join(html_path, "reference") in path
+ or os.path.join(html_path, "generated") in path
+ ):
+ continue
try:
title = self._get_page_title(row[1])
@@ -198,11 +206,6 @@ def _add_redirects(self):
# sphinx specific stuff
title = "this page"
- if os.path.exists(path):
- raise RuntimeError(
- f"Redirection would overwrite an existing file: {path}"
- )
-
with open(path, "w") as moved_page_fd:
html = f"""\
<html>
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 951a6d4043786..3508dc2edd84c 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -72,13 +72,13 @@
try:
import nbconvert
except ImportError:
- logger.warn("nbconvert not installed. Skipping notebooks.")
+ logger.warning("nbconvert not installed. Skipping notebooks.")
exclude_patterns.append("**/*.ipynb")
else:
try:
nbconvert.utils.pandoc.get_pandoc_version()
except nbconvert.utils.pandoc.PandocMissing:
- logger.warn("Pandoc not installed. Skipping notebooks.")
+ logger.warning("Pandoc not installed. Skipping notebooks.")
exclude_patterns.append("**/*.ipynb")
# sphinx_pattern can be '-api' to exclude the API pages,
@@ -86,15 +86,18 @@
# (e.g. '10min.rst' or 'pandas.DataFrame.head')
source_path = os.path.dirname(os.path.abspath(__file__))
pattern = os.environ.get("SPHINX_PATTERN")
+single_doc = pattern is not None and pattern != "-api"
+include_api = pattern != "-api"
if pattern:
for dirname, dirs, fnames in os.walk(source_path):
+ reldir = os.path.relpath(dirname, source_path)
for fname in fnames:
if os.path.splitext(fname)[-1] in (".rst", ".ipynb"):
fname = os.path.relpath(os.path.join(dirname, fname), source_path)
if fname == "index.rst" and os.path.abspath(dirname) == source_path:
continue
- elif pattern == "-api" and dirname == "reference":
+ elif pattern == "-api" and reldir.startswith("reference"):
exclude_patterns.append(fname)
elif pattern != "-api" and fname != pattern:
exclude_patterns.append(fname)
@@ -104,11 +107,11 @@
with open(os.path.join(source_path, "index.rst"), "w") as f:
f.write(
t.render(
- include_api=pattern is None,
- single_doc=(pattern if pattern is not None and pattern != "-api" else None),
+ include_api=include_api,
+ single_doc=(pattern if single_doc else None),
)
)
-autosummary_generate = True if pattern is None else ["index"]
+autosummary_generate = True if include_api else ["index"]
autodoc_typehints = "none"
# numpydoc
@@ -310,7 +313,7 @@
# ... and each of its public methods
moved_api_pages.append((f"{old}.{method}", f"{new}.{method}"))
-if pattern is None:
+if include_api:
html_additional_pages = {
"generated/" + page[0]: "api_redirect.html" for page in moved_api_pages
}
@@ -406,7 +409,7 @@
# latex_use_modindex = True
-if pattern is None:
+if include_api:
intersphinx_mapping = {
"dateutil": ("https://dateutil.readthedocs.io/en/latest/", None),
"matplotlib": ("https://matplotlib.org/", None),
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Fixes running docs via
python make.py --no-api
A few notes:
- The removed check `if os.path.exists(path):`; will incorrectly raise if docs are being rebuilt.
- The value `dirname` returned by `os.walk` is an absolute path, but the check was treating it as relative.
- If docs are built with api and then rebuilt with --no-api, we want to exclude everything in the `reference` directory; in particular `reference/api`. | https://api.github.com/repos/pandas-dev/pandas/pulls/38858 | 2020-12-31T17:28:16Z | 2021-01-13T14:30:21Z | 2021-01-13T14:30:21Z | 2021-01-13T15:03:03Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.