title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
CLN: Clean nanops.get_corr_func | diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 822ab775e7e46..9494248a423a8 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -1332,30 +1332,33 @@ def nancorr(
def get_corr_func(method):
- if method in ["kendall", "spearman"]:
- from scipy.stats import kendalltau, spearmanr
- elif method in ["pearson"]:
- pass
- elif callable(method):
- return method
- else:
- raise ValueError(
- f"Unknown method '{method}', expected one of 'kendall', 'spearman'"
- )
+ if method == "kendall":
+ from scipy.stats import kendalltau
+
+ def func(a, b):
+ return kendalltau(a, b)[0]
- def _pearson(a, b):
- return np.corrcoef(a, b)[0, 1]
+ return func
+ elif method == "spearman":
+ from scipy.stats import spearmanr
- def _kendall(a, b):
- # kendallttau returns a tuple of the tau statistic and pvalue
- rs = kendalltau(a, b)
- return rs[0]
+ def func(a, b):
+ return spearmanr(a, b)[0]
- def _spearman(a, b):
- return spearmanr(a, b)[0]
+ return func
+ elif method == "pearson":
- _cor_methods = {"pearson": _pearson, "kendall": _kendall, "spearman": _spearman}
- return _cor_methods[method]
+ def func(a, b):
+ return np.corrcoef(a, b)[0, 1]
+
+ return func
+ elif callable(method):
+ return method
+
+ raise ValueError(
+ f"Unknown method '{method}', expected one of "
+ "'kendall', 'spearman', 'pearson', or callable"
+ )
@disallow("M8", "m8")
| Small cleaning (instead of creating a dictionary of functions and then returning only one, just return the one) | https://api.github.com/repos/pandas-dev/pandas/pulls/33244 | 2020-04-02T18:06:58Z | 2020-04-07T00:18:58Z | 2020-04-07T00:18:58Z | 2020-04-07T01:25:31Z |
DOC: Contributing - escaping backslash. | diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 88782701b096c..31241287c61cb 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -295,7 +295,7 @@ Below is a brief overview on how to set-up a virtual environment with Powershell
under Windows. For details please refer to the
`official virtualenv user guide <https://virtualenv.pypa.io/en/stable/userguide/#activate-script>`__
-Use an ENV_DIR of your choice. We'll use ~\virtualenvs\pandas-dev where
+Use an ENV_DIR of your choice. We'll use ~\\virtualenvs\\pandas-dev where
'~' is the folder pointed to by either $env:USERPROFILE (Powershell) or
%USERPROFILE% (cmd.exe) environment variable. Any parent directories
should already exist.
| Non-escaped backslash disappeared in resulting doc.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33243 | 2020-04-02T18:05:19Z | 2020-04-02T23:25:28Z | 2020-04-02T23:25:28Z | 2020-04-02T23:25:34Z |
tostring->tobytes | diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx
index 091d76df26a17..2d5b31d7ccbcf 100644
--- a/pandas/_libs/writers.pyx
+++ b/pandas/_libs/writers.pyx
@@ -112,7 +112,7 @@ def convert_json_to_lines(arr: object) -> str:
if not in_quotes:
num_open_brackets_seen -= 1
- return narr.tostring().decode('utf-8')
+ return narr.tobytes().decode('utf-8')
# stata, pytables
diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx
index 11171af1e0c82..0038e39e2ffcc 100644
--- a/pandas/io/sas/sas.pyx
+++ b/pandas/io/sas/sas.pyx
@@ -431,7 +431,7 @@ cdef class Parser:
elif column_types[j] == column_type_string:
# string
string_chunk[js, current_row] = np.array(source[start:(
- start + lngt)]).tostring().rstrip(b"\x00 ")
+ start + lngt)]).tobytes().rstrip(b"\x00 ")
js += 1
self.current_row_on_page_index += 1
| - [x] closes #33238
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33241 | 2020-04-02T17:28:22Z | 2020-04-02T21:13:16Z | 2020-04-02T21:13:16Z | 2020-05-05T09:10:25Z |
CLN: remove Block.merge | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 8e2592a603716..de4e3b76420af 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -307,9 +307,6 @@ def shape(self):
def dtype(self):
return self.values.dtype
- def merge(self, other):
- return _merge_blocks([self, other])
-
def concat_same_type(self, to_concat):
"""
Concatenate list of single blocks of the same type.
@@ -2903,32 +2900,6 @@ def _block_shape(values, ndim=1, shape=None):
return values
-def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
-
- if len(blocks) == 1:
- return blocks[0]
-
- if _can_consolidate:
-
- if dtype is None:
- if len({b.dtype for b in blocks}) != 1:
- raise AssertionError("_merge_blocks are invalid!")
-
- # FIXME: optimization potential in case all mgrs contain slices and
- # combination of those slices is a slice, too.
- new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
- new_values = np.vstack([b.values for b in blocks])
-
- argsort = np.argsort(new_mgr_locs)
- new_values = new_values[argsort]
- new_mgr_locs = new_mgr_locs[argsort]
-
- return make_block(new_values, placement=new_mgr_locs)
-
- # no merge
- return blocks
-
-
def _safe_reshape(arr, new_shape):
"""
If possible, reshape `arr` to have shape `new_shape`,
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index ebb4899c1ba9a..45027bde58f14 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -43,7 +43,6 @@
ExtensionBlock,
ObjectValuesExtensionBlock,
_extend_blocks,
- _merge_blocks,
_safe_reshape,
get_block_type,
make_block,
@@ -1891,12 +1890,40 @@ def _consolidate(blocks):
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(
- list(group_blocks), dtype=dtype, _can_consolidate=_can_consolidate
+ list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate
)
new_blocks = _extend_blocks(merged_blocks, new_blocks)
return new_blocks
+def _merge_blocks(
+ blocks: List[Block], dtype: DtypeObj, can_consolidate: bool
+) -> List[Block]:
+
+ if len(blocks) == 1:
+ return blocks
+
+ if can_consolidate:
+
+ if dtype is None:
+ if len({b.dtype for b in blocks}) != 1:
+ raise AssertionError("_merge_blocks are invalid!")
+
+ # TODO: optimization potential in case all mgrs contain slices and
+ # combination of those slices is a slice, too.
+ new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
+ new_values = np.vstack([b.values for b in blocks])
+
+ argsort = np.argsort(new_mgr_locs)
+ new_values = new_values[argsort]
+ new_mgr_locs = new_mgr_locs[argsort]
+
+ return [make_block(new_values, placement=new_mgr_locs)]
+
+ # can't consolidate --> no merge
+ return blocks
+
+
def _compare_or_regex_search(a, b, regex=False):
"""
Compare two array_like inputs of the same shape or two scalar values
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 91ec1c29873cf..657849874f091 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -232,21 +232,6 @@ def test_attrs(self):
assert self.fblock.dtype == self.fblock.values.dtype
assert len(self.fblock) == len(self.fblock.values)
- def test_merge(self):
- avals = tm.randn(2, 10)
- bvals = tm.randn(2, 10)
-
- ref_cols = Index(["e", "a", "b", "d", "f"])
-
- ablock = make_block(avals, ref_cols.get_indexer(["e", "b"]))
- bblock = make_block(bvals, ref_cols.get_indexer(["a", "d"]))
- merged = ablock.merge(bblock)
- tm.assert_numpy_array_equal(
- merged.mgr_locs.as_array, np.array([0, 1, 2, 3], dtype=np.int64)
- )
- tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))
- tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))
-
def test_copy(self):
cop = self.fblock.copy()
assert cop is not self.fblock
| Then move _merge_blocks to the one module where it is used, and add annotations | https://api.github.com/repos/pandas-dev/pandas/pulls/33240 | 2020-04-02T17:22:50Z | 2020-04-03T17:37:51Z | 2020-04-03T17:37:51Z | 2020-04-03T17:39:09Z |
ENH: provide standard BaseIndexers in pandas.api.indexers | diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index 5133bbd285b50..f85dc83ab8605 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -165,4 +165,26 @@ def peakmem_fixed(self):
self.roll.max()
+class ForwardWindowMethods:
+ params = (
+ ["DataFrame", "Series"],
+ [10, 1000],
+ ["int", "float"],
+ ["median", "mean", "max", "min", "kurt", "sum"],
+ )
+ param_names = ["constructor", "window_size", "dtype", "method"]
+
+ def setup(self, constructor, window_size, dtype, method):
+ N = 10 ** 5
+ arr = np.random.random(N).astype(dtype)
+ indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=window_size)
+ self.roll = getattr(pd, constructor)(arr).rolling(window=indexer)
+
+ def time_rolling(self, constructor, window_size, dtype, method):
+ getattr(self.roll, method)()
+
+ def peakmem_rolling(self, constructor, window_size, dtype, method):
+ getattr(self.roll, method)()
+
+
from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/doc/source/reference/window.rst b/doc/source/reference/window.rst
index 570a0607ebd21..fb60a0d387ca2 100644
--- a/doc/source/reference/window.rst
+++ b/doc/source/reference/window.rst
@@ -85,3 +85,4 @@ Base class for defining custom window boundaries.
:toctree: api/
api.indexers.BaseIndexer
+ api.indexers.FixedForwardWindowIndexer
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index 0b7106aa127e5..af2f02a09428b 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -571,6 +571,20 @@ and we want to use an expanding window where ``use_expanding`` is ``True`` other
3 3.0
4 10.0
+.. versionadded:: 1.1
+
+For some problems knowledge of the future is available for analysis. For example, this occurs when
+each data point is a full time series read from an experiment, and the task is to extract underlying
+conditions. In these cases it can be useful to perform forward-looking rolling window computations.
+:func:`FixedForwardWindowIndexer <pandas.api.indexers.FixedForwardWindowIndexer>` class is available for this purpose.
+This :func:`BaseIndexer <pandas.api.indexers.BaseIndexer>` subclass implements a closed fixed-width
+forward-looking rolling window, and we can use it as follows:
+
+.. ipython:: ipython
+
+ from pandas.api.indexers import FixedForwardWindowIndexer
+ indexer = FixedForwardWindowIndexer(window_size=2)
+ df.rolling(indexer, min_periods=1).sum()
.. _stats.rolling_window.endpoints:
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 6f2b9b4f946c7..310dd0be4cde3 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -104,6 +104,7 @@ Other API changes
- ``loc`` lookups with an object-dtype :class:`Index` and an integer key will now raise ``KeyError`` instead of ``TypeError`` when key is missing (:issue:`31905`)
- Using a :func:`pandas.api.indexers.BaseIndexer` with ``std``, ``var``, ``count``, ``skew``, ``cov``, ``corr`` will now raise a ``NotImplementedError`` (:issue:`32865`)
- Using a :func:`pandas.api.indexers.BaseIndexer` with ``min``, ``max`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`)
+- Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations.
-
Backwards incompatible API changes
diff --git a/pandas/api/indexers/__init__.py b/pandas/api/indexers/__init__.py
index 826297e6b498f..0b36b53675e23 100644
--- a/pandas/api/indexers/__init__.py
+++ b/pandas/api/indexers/__init__.py
@@ -3,6 +3,6 @@
"""
from pandas.core.indexers import check_array_indexer
-from pandas.core.window.indexers import BaseIndexer
+from pandas.core.window.indexers import BaseIndexer, FixedForwardWindowIndexer
-__all__ = ["check_array_indexer", "BaseIndexer"]
+__all__ = ["check_array_indexer", "BaseIndexer", "FixedForwardWindowIndexer"]
diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py
index 921cdb3c2523f..9a02c5231c151 100644
--- a/pandas/core/window/indexers.py
+++ b/pandas/core/window/indexers.py
@@ -120,3 +120,53 @@ def get_window_bounds(
np.zeros(num_values, dtype=np.int64),
np.arange(1, num_values + 1, dtype=np.int64),
)
+
+
+class FixedForwardWindowIndexer(BaseIndexer):
+ """
+ Creates window boundaries for fixed-length windows that include the
+ current row.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+ >>> df
+ B
+ 0 0.0
+ 1 1.0
+ 2 2.0
+ 3 NaN
+ 4 4.0
+
+ >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
+ >>> df.rolling(window=indexer, min_periods=1).sum()
+ B
+ 0 1.0
+ 1 3.0
+ 2 2.0
+ 3 4.0
+ 4 4.0
+ """
+
+ @Appender(get_window_bounds_doc)
+ def get_window_bounds(
+ self,
+ num_values: int = 0,
+ min_periods: Optional[int] = None,
+ center: Optional[bool] = None,
+ closed: Optional[str] = None,
+ ) -> Tuple[np.ndarray, np.ndarray]:
+
+ if center:
+ raise ValueError("Forward-looking windows can't have center=True")
+ if closed is not None:
+ raise ValueError(
+ "Forward-looking windows don't support setting the closed argument"
+ )
+
+ start = np.arange(num_values, dtype="int64")
+ end_s = start[: -self.window_size] + self.window_size
+ end_e = np.full(self.window_size, num_values, dtype="int64")
+ end = np.concatenate([end_s, end_e])
+
+ return start, end
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 729e4069b1309..3fdf81c4bb570 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -900,6 +900,17 @@ class Window(_Window):
3 2.0
4 4.0
+ Same as above, but with forward-looking windows
+
+ >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
+ >>> df.rolling(window=indexer, min_periods=1).sum()
+ B
+ 0 1.0
+ 1 3.0
+ 2 2.0
+ 3 4.0
+ 4 4.0
+
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index 25d575e0ad0b6..bb93c70b8a597 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -3,7 +3,7 @@
from pandas import DataFrame, Series
import pandas._testing as tm
-from pandas.api.indexers import BaseIndexer
+from pandas.api.indexers import BaseIndexer, FixedForwardWindowIndexer
from pandas.core.window.indexers import ExpandingIndexer
@@ -105,19 +105,21 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
)
def test_rolling_forward_window(constructor, func, alt_func, expected):
# GH 32865
- class ForwardIndexer(BaseIndexer):
- def get_window_bounds(self, num_values, min_periods, center, closed):
- start = np.arange(num_values, dtype="int64")
- end_s = start[: -self.window_size] + self.window_size
- end_e = np.full(self.window_size, num_values, dtype="int64")
- end = np.concatenate([end_s, end_e])
-
- return start, end
-
values = np.arange(10)
values[5] = 100.0
- indexer = ForwardIndexer(window_size=3)
+ indexer = FixedForwardWindowIndexer(window_size=3)
+
+ match = "Forward-looking windows can't have center=True"
+ with pytest.raises(ValueError, match=match):
+ rolling = constructor(values).rolling(window=indexer, center=True)
+ result = getattr(rolling, func)()
+
+ match = "Forward-looking windows don't support setting the closed argument"
+ with pytest.raises(ValueError, match=match):
+ rolling = constructor(values).rolling(window=indexer, closed="right")
+ result = getattr(rolling, func)()
+
rolling = constructor(values).rolling(window=indexer, min_periods=2)
result = getattr(rolling, func)()
expected = constructor(expected)
| - [X] closes #33201
- [X] 1 test modified / 1 passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
This PR adds a forward-looking `pandas.api.indexers.BaseIndexer` subclass implementation and exposes it to users as discussed in #33180 .
### Exposing other indexers
It is also possible to expose a backward-looking indexer simply by exposing `pandas.core.window.indexers.FixedWindowIndexer` through the API. Seems redundant to me as this is basically the same as supplying an integer as the window argument when creating a rolling object but it can be done for consistency. I'd be grateful for a discussion.
### Note on tests
The class is incorporated into `test_rolling_forward_window` in `test_base_indexer.py` and this serves as a test that the new indexer is working correctly.
### Notes on performance
I've tested the implementation and it shows comparable performance to `pandas.core.window.indexers.FixedWindowIndexer`. In my tests the new indexer was slightly faster, either by luck, or because I streamlined the backward-looking indexer a bit when making the forward-looking one. | https://api.github.com/repos/pandas-dev/pandas/pulls/33236 | 2020-04-02T12:36:58Z | 2020-04-08T17:20:51Z | 2020-04-08T17:20:50Z | 2020-04-09T06:34:57Z |
TST: Use try/except block to properly catch and handle the exception | diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index 3ce3bc519b311..c251c92cb072a 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -12,9 +12,6 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
from pandas._libs.tslibs.util cimport get_c_string
from pandas._libs.missing cimport C_NA
-cdef extern from "Python.h":
- void PyErr_Clear()
-
{{py:
# name, dtype, c_type
@@ -792,9 +789,9 @@ cdef class StringHashTable(HashTable):
labels[i] = na_sentinel
else:
# if ignore_na is False, we also stringify NaN/None/etc.
- v = get_c_string(<str>val)
- if v == NULL:
- PyErr_Clear()
+ try:
+ v = get_c_string(<str>val)
+ except UnicodeEncodeError:
v = get_c_string(<str>repr(val))
vecs[i] = v
diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd
index e7f6b3334eb65..cc98781dc73cf 100644
--- a/pandas/_libs/tslibs/util.pxd
+++ b/pandas/_libs/tslibs/util.pxd
@@ -219,7 +219,7 @@ cdef inline bint is_nan(object val):
cdef inline const char* get_c_string_buf_and_size(str py_string,
- Py_ssize_t *length):
+ Py_ssize_t *length) except NULL:
"""
Extract internal char* buffer of unicode or bytes object `py_string` with
getting length of this internal buffer saved in `length`.
@@ -238,12 +238,8 @@ cdef inline const char* get_c_string_buf_and_size(str py_string,
-------
buf : const char*
"""
- cdef:
- const char *buf
+ return PyUnicode_AsUTF8AndSize(py_string, length)
- buf = PyUnicode_AsUTF8AndSize(py_string, length)
- return buf
-
-cdef inline const char* get_c_string(str py_string):
+cdef inline const char* get_c_string(str py_string) except NULL:
return get_c_string_buf_and_size(py_string, NULL)
| Now no more warnings
- [x] closes #32951
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/33235 | 2020-04-02T11:58:29Z | 2020-04-28T19:40:32Z | 2020-04-28T19:40:31Z | 2020-05-28T22:10:39Z |
Ods loses spaces 32207 | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 19e8acdaa7384..329aa3200d43a 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -408,6 +408,7 @@ I/O
- Bug in :meth:`read_csv` was raising a misleading exception on a permissions issue (:issue:`23784`)
- Bug in :meth:`read_csv` was raising an ``IndexError`` when header=None and 2 extra data columns
- Bug in :meth:`DataFrame.to_sql` where an ``AttributeError`` was raised when saving an out of bounds date (:issue:`26761`)
+- Bug in :meth:`read_excel` did not correctly handle multiple embedded spaces in OpenDocument text cells. (:issue:`32207`)
Plotting
^^^^^^^^
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 7af776dc1a10f..739c77d1c0b99 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -171,7 +171,7 @@ def _get_cell_value(self, cell, convert_float: bool) -> Scalar:
cell_value = cell.attributes.get((OFFICENS, "value"))
return float(cell_value)
elif cell_type == "string":
- return str(cell)
+ return self._get_cell_string_value(cell)
elif cell_type == "currency":
cell_value = cell.attributes.get((OFFICENS, "value"))
return float(cell_value)
@@ -182,3 +182,28 @@ def _get_cell_value(self, cell, convert_float: bool) -> Scalar:
return pd.to_datetime(str(cell)).time()
else:
raise ValueError(f"Unrecognized type {cell_type}")
+
+ def _get_cell_string_value(self, cell) -> str:
+ """
+ Find and decode OpenDocument text:s tags that represent
+ a run length encoded sequence of space characters.
+ """
+ from odf.element import Text, Element
+ from odf.text import S, P
+ from odf.namespaces import TEXTNS
+
+ text_p = P().qname
+ text_s = S().qname
+
+ p = cell.childNodes[0]
+
+ value = []
+ if p.qname == text_p:
+ for k, fragment in enumerate(p.childNodes):
+ if isinstance(fragment, Text):
+ value.append(fragment.data)
+ elif isinstance(fragment, Element):
+ if fragment.qname == text_s:
+ spaces = int(fragment.attributes.get((TEXTNS, "c"), 1))
+ value.append(" " * spaces)
+ return "".join(value)
diff --git a/pandas/tests/io/data/excel/test_spaces.ods b/pandas/tests/io/data/excel/test_spaces.ods
new file mode 100644
index 0000000000000..375e839c8c221
Binary files /dev/null and b/pandas/tests/io/data/excel/test_spaces.ods differ
diff --git a/pandas/tests/io/data/excel/test_spaces.xls b/pandas/tests/io/data/excel/test_spaces.xls
new file mode 100644
index 0000000000000..316db172360d0
Binary files /dev/null and b/pandas/tests/io/data/excel/test_spaces.xls differ
diff --git a/pandas/tests/io/data/excel/test_spaces.xlsb b/pandas/tests/io/data/excel/test_spaces.xlsb
new file mode 100644
index 0000000000000..e38b6c2d8f170
Binary files /dev/null and b/pandas/tests/io/data/excel/test_spaces.xlsb differ
diff --git a/pandas/tests/io/data/excel/test_spaces.xlsm b/pandas/tests/io/data/excel/test_spaces.xlsm
new file mode 100644
index 0000000000000..a41ebe5bb0e65
Binary files /dev/null and b/pandas/tests/io/data/excel/test_spaces.xlsm differ
diff --git a/pandas/tests/io/data/excel/test_spaces.xlsx b/pandas/tests/io/data/excel/test_spaces.xlsx
new file mode 100644
index 0000000000000..9071543c4739b
Binary files /dev/null and b/pandas/tests/io/data/excel/test_spaces.xlsx differ
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index b1502ed3f3c09..99447c03e89af 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -464,6 +464,24 @@ def test_reader_dtype_str(self, read_ext, dtype, expected):
actual = pd.read_excel(basename + read_ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
+ def test_reader_spaces(self, read_ext):
+ # see gh-32207
+ basename = "test_spaces"
+
+ actual = pd.read_excel(basename + read_ext)
+ expected = DataFrame(
+ {
+ "testcol": [
+ "this is great",
+ "4 spaces",
+ "1 trailing ",
+ " 1 leading",
+ "2 spaces multiple times",
+ ]
+ }
+ )
+ tm.assert_frame_equal(actual, expected)
+
def test_reading_all_sheets(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
| - [X] closes #32207
- [X] tests added / passed tests.io.excel.test_reader_spaces
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` (I had to use git diff master though)
- [ ] whatsnew entry. It's only a bug fix does it need a whatsnew?
I'm not sure how you make xlsxb files so that test wasn't implement when checking for the different types of space.
There's quite possibly many other ways this parser doesn't handle the odf specification.
| https://api.github.com/repos/pandas-dev/pandas/pulls/33233 | 2020-04-02T06:10:54Z | 2020-04-06T23:26:49Z | 2020-04-06T23:26:48Z | 2020-04-06T23:26:52Z |
REF: put concatenate_block_managers in internals.concat | diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py
index bc45b7c74ecc1..1090f862acb8a 100644
--- a/pandas/core/internals/__init__.py
+++ b/pandas/core/internals/__init__.py
@@ -14,11 +14,8 @@
_safe_reshape,
make_block,
)
-from pandas.core.internals.managers import (
- BlockManager,
- SingleBlockManager,
- concatenate_block_managers,
-)
+from pandas.core.internals.concat import concatenate_block_managers
+from pandas.core.internals.managers import BlockManager, SingleBlockManager
__all__ = [
"Block",
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 6839d138fbf73..720e6799a3bf3 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -23,9 +23,57 @@
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
+from pandas.core.internals.blocks import make_block
+from pandas.core.internals.managers import BlockManager
-def get_mgr_concatenation_plan(mgr, indexers):
+def concatenate_block_managers(
+ mgrs_indexers, axes, concat_axis: int, copy: bool
+) -> BlockManager:
+ """
+ Concatenate block managers into one.
+
+ Parameters
+ ----------
+ mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
+ axes : list of Index
+ concat_axis : int
+ copy : bool
+
+ Returns
+ -------
+ BlockManager
+ """
+ concat_plans = [
+ _get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
+ ]
+ concat_plan = _combine_concat_plans(concat_plans, concat_axis)
+ blocks = []
+
+ for placement, join_units in concat_plan:
+
+ if len(join_units) == 1 and not join_units[0].indexers:
+ b = join_units[0].block
+ values = b.values
+ if copy:
+ values = values.copy()
+ else:
+ values = values.view()
+ b = b.make_block_same_class(values, placement=placement)
+ elif _is_uniform_join_units(join_units):
+ b = join_units[0].block.concat_same_type([ju.block for ju in join_units])
+ b.mgr_locs = placement
+ else:
+ b = make_block(
+ _concatenate_join_units(join_units, concat_axis, copy=copy),
+ placement=placement,
+ )
+ blocks.append(b)
+
+ return BlockManager(blocks, axes)
+
+
+def _get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
@@ -232,7 +280,7 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
return values
-def concatenate_join_units(join_units, concat_axis, copy):
+def _concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
@@ -371,11 +419,11 @@ def _get_empty_dtype_and_na(join_units):
raise AssertionError(msg)
-def is_uniform_join_units(join_units) -> bool:
+def _is_uniform_join_units(join_units) -> bool:
"""
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
- concatenate_join_units (which uses `concat_compat`).
+ _concatenate_join_units (which uses `concat_compat`).
"""
return (
@@ -429,7 +477,7 @@ def _trim_join_unit(join_unit, length):
return JoinUnit(block=extra_block, indexers=extra_indexers, shape=extra_shape)
-def combine_concat_plans(plans, concat_axis):
+def _combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 2f1206e800d9b..49da0cd69012e 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -47,12 +47,6 @@
get_block_type,
make_block,
)
-from pandas.core.internals.concat import ( # all for concatenate_block_managers
- combine_concat_plans,
- concatenate_join_units,
- get_mgr_concatenation_plan,
- is_uniform_join_units,
-)
from pandas.io.formats.printing import pprint_thing
@@ -2002,44 +1996,3 @@ def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return "fancy", indexer, len(indexer)
-
-
-def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
- """
- Concatenate block managers into one.
-
- Parameters
- ----------
- mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
- axes : list of Index
- concat_axis : int
- copy : bool
-
- """
- concat_plans = [
- get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
- ]
- concat_plan = combine_concat_plans(concat_plans, concat_axis)
- blocks = []
-
- for placement, join_units in concat_plan:
-
- if len(join_units) == 1 and not join_units[0].indexers:
- b = join_units[0].block
- values = b.values
- if copy:
- values = values.copy()
- else:
- values = values.view()
- b = b.make_block_same_class(values, placement=placement)
- elif is_uniform_join_units(join_units):
- b = join_units[0].block.concat_same_type([ju.block for ju in join_units])
- b.mgr_locs = placement
- else:
- b = make_block(
- concatenate_join_units(join_units, concat_axis, copy=copy),
- placement=placement,
- )
- blocks.append(b)
-
- return BlockManager(blocks, axes)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33231 | 2020-04-02T03:23:23Z | 2020-04-06T23:20:24Z | 2020-04-06T23:20:24Z | 2020-04-06T23:28:14Z | |
DOC: Fixed examples in `pandas/core/groupby/` | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 0454150f61045..6f14c8183f0c4 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -288,10 +288,6 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
pytest -q --doctest-modules pandas/core/generic.py
RET=$(($RET + $?)) ; echo $MSG "DONE"
- MSG='Doctests groupby.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/groupby/groupby.py -k"-cumcount -describe -pipe"
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
MSG='Doctests series.py' ; echo $MSG
pytest -q --doctest-modules pandas/core/series.py
RET=$(($RET + $?)) ; echo $MSG "DONE"
@@ -314,6 +310,10 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
pytest -q --doctest-modules pandas/core/dtypes/
RET=$(($RET + $?)) ; echo $MSG "DONE"
+ MSG='Doctests groupby' ; echo $MSG
+ pytest -q --doctest-modules pandas/core/groupby/
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+
MSG='Doctests indexes' ; echo $MSG
pytest -q --doctest-modules pandas/core/indexes/
RET=$(($RET + $?)) ; echo $MSG "DONE"
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 208cbfc5b06d6..91839d8393f8c 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -833,10 +833,13 @@ class DataFrameGroupBy(GroupBy[DataFrame]):
"""
Examples
--------
-
- >>> df = pd.DataFrame({'A': [1, 1, 2, 2],
- ... 'B': [1, 2, 3, 4],
- ... 'C': np.random.randn(4)})
+ >>> df = pd.DataFrame(
+ ... {
+ ... "A": [1, 1, 2, 2],
+ ... "B": [1, 2, 3, 4],
+ ... "C": [0.362838, 0.227877, 1.267767, -0.562860],
+ ... }
+ ... )
>>> df
A B C
@@ -876,7 +879,7 @@ class DataFrameGroupBy(GroupBy[DataFrame]):
B C
min max sum
A
- 1 1 2 0.590716
+ 1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
@@ -887,8 +890,9 @@ class DataFrameGroupBy(GroupBy[DataFrame]):
... c_sum=pd.NamedAgg(column="C", aggfunc="sum"))
b_min c_sum
A
- 1 1 -1.956929
- 2 3 -0.322183
+ 1 1 0.590715
+ 2 3 0.704907
+
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 1474e173b4f8c..ac5bdfe1ba042 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -200,14 +200,14 @@ class providing the base-class of operations.
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
->>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)
+>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
-... .pipe(h, arg2=b, arg3=c))
+... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
@@ -2011,7 +2011,9 @@ def cumcount(self, ascending: bool = True):
Essentially this is equivalent to
- >>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
+ .. code-block:: python
+
+ self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 2f50845fda4dc..9bd098d1d49a3 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -79,16 +79,51 @@ class Grouper:
--------
Syntactic sugar for ``df.groupby('A')``
- >>> df.groupby(Grouper(key='A'))
-
- Specify a resample operation on the column 'date'
-
- >>> df.groupby(Grouper(key='date', freq='60s'))
-
- Specify a resample operation on the level 'date' on the columns axis
- with a frequency of 60s
-
- >>> df.groupby(Grouper(level='date', freq='60s', axis=1))
+ >>> df = pd.DataFrame(
+ ... {
+ ... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
+ ... "Speed": [100, 5, 200, 300, 15],
+ ... }
+ ... )
+ >>> df
+ Animal Speed
+ 0 Falcon 100
+ 1 Parrot 5
+ 2 Falcon 200
+ 3 Falcon 300
+ 4 Parrot 15
+ >>> df.groupby(pd.Grouper(key="Animal")).mean()
+ Speed
+ Animal
+ Falcon 200
+ Parrot 10
+
+ Specify a resample operation on the column 'Publish date'
+
+ >>> df = pd.DataFrame(
+ ... {
+ ... "Publish date": [
+ ... pd.Timestamp("2000-01-02"),
+ ... pd.Timestamp("2000-01-02"),
+ ... pd.Timestamp("2000-01-09"),
+ ... pd.Timestamp("2000-01-16")
+ ... ],
+ ... "ID": [0, 1, 2, 3],
+ ... "Price": [10, 20, 30, 40]
+ ... }
+ ... )
+ >>> df
+ Publish date ID Price
+ 0 2000-01-02 0 10
+ 1 2000-01-02 1 20
+ 2 2000-01-09 2 30
+ 3 2000-01-16 3 40
+ >>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()
+ ID Price
+ Publish date
+ 2000-01-02 0.5 15.0
+ 2000-01-09 2.0 30.0
+ 2000-01-16 3.0 40.0
"""
_attributes: Tuple[str, ...] = ("key", "level", "freq", "axis", "sort")
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33230 | 2020-04-02T02:08:28Z | 2020-04-10T19:20:22Z | 2020-04-10T19:20:21Z | 2020-04-10T19:22:18Z |
REF: sql insert_data operate column-wise to avoid internals | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index a2e66e9ab8e30..c657a925a5eab 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -692,37 +692,25 @@ def insert_data(self):
column_names = list(map(str, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
- blocks = temp._mgr.blocks
-
- for b in blocks:
- if b.is_datetime:
- # return datetime.datetime objects
- if b.is_datetimetz:
- # GH 9086: Ensure we return datetimes with timezone info
- # Need to return 2-D data; DatetimeIndex is 1D
- d = b.values.to_pydatetime()
- d = np.atleast_2d(d)
- else:
- # convert to microsecond resolution for datetime.datetime
- d = b.values.astype("M8[us]").astype(object)
- elif b.is_timedelta:
- # numpy converts this to an object array of integers,
- # whereas b.astype(object).values would convert to
- # object array of Timedeltas
- d = b.values.astype(object)
+
+ for i, (_, ser) in enumerate(temp.items()):
+ vals = ser._values
+ if vals.dtype.kind == "M":
+ d = vals.to_pydatetime()
+ elif vals.dtype.kind == "m":
+ # store as integers, see GH#6921, GH#7076
+ d = vals.view("i8").astype(object)
else:
- # TODO(2DEA): astype-first can be avoided with 2D EAs
- # astype on the block instead of values to ensure we
- # get the right shape
- d = b.astype(object).values
+ d = vals.astype(object)
+
+ assert isinstance(d, np.ndarray), type(d)
- # replace NaN with None
- if b._can_hold_na:
+ if ser._can_hold_na:
+ # Note: this will miss timedeltas since they are converted to int
mask = isna(d)
d[mask] = None
- for col_loc, col in zip(b.mgr_locs, d):
- data_list[col_loc] = col
+ data_list[i] = d
return column_names, data_list
| https://api.github.com/repos/pandas-dev/pandas/pulls/33229 | 2020-04-02T01:11:32Z | 2020-04-07T18:19:41Z | 2020-04-07T18:19:41Z | 2020-04-07T18:26:37Z | |
Clean Up Categorical Test for JSON | diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index b74abc965f7fa..d9071a80b5db7 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -15,16 +15,6 @@
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
-_seriesd = tm.getSeriesData()
-
-_frame = DataFrame(_seriesd)
-
-_cat_frame = _frame.copy()
-cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
-_cat_frame.index = pd.CategoricalIndex(cat, name="E")
-_cat_frame["E"] = list(reversed(cat))
-_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
-
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
@@ -36,12 +26,6 @@ def assert_json_roundtrip_equal(result, expected, orient):
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
- @pytest.fixture(autouse=True)
- def setup(self):
- self.categorical = _cat_frame.copy()
-
- yield
-
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
@@ -183,25 +167,21 @@ def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
- # TODO: create a better frame to test with and improve coverage
- if orient in ("index", "columns"):
- pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
+ cats = ["a", "b"]
+ df = pd.DataFrame(
+ pd.Categorical(cats), index=pd.CategoricalIndex(cats), columns=["cat"]
+ )
- data = self.categorical.to_json(orient=orient)
- if numpy and orient in ("records", "values"):
+ data = df.to_json(orient=orient)
+ if numpy and orient != "split":
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
- expected = self.categorical.copy()
- expected.index = expected.index.astype(str) # Categorical not preserved
- expected.index.name = None # index names aren't preserved in JSON
-
- if not numpy and orient == "index":
- expected = expected.sort_index()
-
+ # Categorical dtypes are not preserved on round trip
+ expected = pd.DataFrame(cats, index=cats, columns=["cat"])
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
| This is a little wonky in its current state, bolting categoricals onto an existing object / fixture
Instead just localized this to the one test that covers it for now | https://api.github.com/repos/pandas-dev/pandas/pulls/33228 | 2020-04-02T00:55:27Z | 2020-04-03T03:20:19Z | 2020-04-03T03:20:19Z | 2020-04-04T15:25:12Z |
CLN: De-privatize names | diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 6fa9159c469c2..5a8d0a0ec1670 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -44,8 +44,8 @@ from pandas._libs.tslibs.tzconversion cimport (
# ----------------------------------------------------------------------
# Constants
-NS_DTYPE = np.dtype('M8[ns]')
-TD_DTYPE = np.dtype('m8[ns]')
+DT64NS_DTYPE = np.dtype('M8[ns]')
+TD64NS_DTYPE = np.dtype('m8[ns]')
# ----------------------------------------------------------------------
@@ -105,11 +105,11 @@ def ensure_datetime64ns(arr: ndarray, copy: bool=True):
ivalues = arr.view(np.int64).ravel()
- result = np.empty(shape, dtype=NS_DTYPE)
+ result = np.empty(shape, dtype=DT64NS_DTYPE)
iresult = result.ravel().view(np.int64)
if len(iresult) == 0:
- result = arr.view(NS_DTYPE)
+ result = arr.view(DT64NS_DTYPE)
if copy:
result = result.copy()
return result
@@ -145,7 +145,7 @@ def ensure_timedelta64ns(arr: ndarray, copy: bool=True):
result : ndarray with dtype timedelta64[ns]
"""
- return arr.astype(TD_DTYPE, copy=copy)
+ return arr.astype(TD64NS_DTYPE, copy=copy)
# TODO: check for overflows when going from a lower-resolution to nanos
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index f283b6fd3b4b3..55c42f59f865e 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -378,7 +378,7 @@ def __init__(
old_codes = (
values._values.codes if isinstance(values, ABCSeries) else values.codes
)
- codes = _recode_for_categories(
+ codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
@@ -572,13 +572,13 @@ def _from_inferred_categories(
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
- codes = _recode_for_categories(inferred_codes, cats, categories)
+ codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
- codes = _recode_for_categories(inferred_codes, unsorted, categories)
+ codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
@@ -727,7 +727,7 @@ def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
- codes = _recode_for_categories(self.codes, self.categories, dtype.categories)
+ codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
@@ -849,7 +849,7 @@ def set_categories(self, new_categories, ordered=None, rename=False, inplace=Fal
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
- codes = _recode_for_categories(
+ codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
cat._codes = codes
@@ -2034,7 +2034,7 @@ def __setitem__(self, key, value):
"without identical categories"
)
if not self.categories.equals(value.categories):
- new_codes = _recode_for_categories(
+ new_codes = recode_for_categories(
value.codes, value.categories, self.categories
)
value = Categorical.from_codes(new_codes, dtype=self.dtype)
@@ -2298,7 +2298,7 @@ def equals(self, other):
# fastpath to avoid re-coding
other_codes = other._codes
else:
- other_codes = _recode_for_categories(
+ other_codes = recode_for_categories(
other.codes, other.categories, self.categories
)
return np.array_equal(self._codes, other_codes)
@@ -2667,7 +2667,7 @@ def _get_codes_for_values(values, categories):
return coerce_indexer_dtype(t.lookup(vals), cats)
-def _recode_for_categories(codes: np.ndarray, old_categories, new_categories):
+def recode_for_categories(codes: np.ndarray, old_categories, new_categories):
"""
Convert a set of codes for to a new set of categories
@@ -2685,7 +2685,7 @@ def _recode_for_categories(codes: np.ndarray, old_categories, new_categories):
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
- >>> _recode_for_categories(codes, old_cat, new_cat)
+ >>> recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1], dtype=int8)
"""
if len(old_categories) == 0:
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index e6a17491e9378..741290a4908a5 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -22,7 +22,7 @@
from pandas.core.dtypes.common import (
_INT64_DTYPE,
- _NS_DTYPE,
+ DT64NS_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
@@ -66,7 +66,7 @@ def tz_to_dtype(tz):
np.dtype or Datetime64TZDType
"""
if tz is None:
- return _NS_DTYPE
+ return DT64NS_DTYPE
else:
return DatetimeTZDtype(tz=tz)
@@ -209,7 +209,7 @@ class DatetimeArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps
_dtype: Union[np.dtype, DatetimeTZDtype]
_freq = None
- def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
+ def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy=False):
if isinstance(values, (ABCSeries, ABCIndexClass)):
values = values._values
@@ -246,9 +246,9 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
- values = values.view(_NS_DTYPE)
+ values = values.view(DT64NS_DTYPE)
- if values.dtype != _NS_DTYPE:
+ if values.dtype != DT64NS_DTYPE:
raise ValueError(
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
f"Got {values.dtype} instead."
@@ -282,11 +282,11 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
type(self)._validate_frequency(self, freq)
@classmethod
- def _simple_new(cls, values, freq=None, dtype=_NS_DTYPE):
+ def _simple_new(cls, values, freq=None, dtype=DT64NS_DTYPE):
assert isinstance(values, np.ndarray)
- if values.dtype != _NS_DTYPE:
+ if values.dtype != DT64NS_DTYPE:
assert values.dtype == "i8"
- values = values.view(_NS_DTYPE)
+ values = values.view(DT64NS_DTYPE)
result = object.__new__(cls)
result._data = values
@@ -970,7 +970,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"):
new_dates = conversion.tz_localize_to_utc(
self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
- new_dates = new_dates.view(_NS_DTYPE)
+ new_dates = new_dates.view(DT64NS_DTYPE)
dtype = tz_to_dtype(tz)
return self._simple_new(new_dates, dtype=dtype, freq=self.freq)
@@ -1751,7 +1751,7 @@ def sequence_to_dt64ns(
elif is_datetime64_dtype(data):
# tz-naive DatetimeArray or ndarray[datetime64]
data = getattr(data, "_data", data)
- if data.dtype != _NS_DTYPE:
+ if data.dtype != DT64NS_DTYPE:
data = conversion.ensure_datetime64ns(data)
if tz is not None:
@@ -1760,9 +1760,9 @@ def sequence_to_dt64ns(
data = conversion.tz_localize_to_utc(
data.view("i8"), tz, ambiguous=ambiguous
)
- data = data.view(_NS_DTYPE)
+ data = data.view(DT64NS_DTYPE)
- assert data.dtype == _NS_DTYPE, data.dtype
+ assert data.dtype == DT64NS_DTYPE, data.dtype
result = data
else:
@@ -1773,7 +1773,7 @@ def sequence_to_dt64ns(
if data.dtype != _INT64_DTYPE:
data = data.astype(np.int64, copy=False)
- result = data.view(_NS_DTYPE)
+ result = data.view(DT64NS_DTYPE)
if copy:
# TODO: should this be deepcopy?
@@ -1897,7 +1897,7 @@ def maybe_convert_dtype(data, copy):
if is_float_dtype(data.dtype):
# Note: we must cast to datetime64[ns] here in order to treat these
# as wall-times instead of UTC timestamps.
- data = data.astype(_NS_DTYPE)
+ data = data.astype(DT64NS_DTYPE)
copy = False
# TODO: deprecate this behavior to instead treat symmetrically
# with integer dtypes. See discussion in GH#23675
@@ -1994,7 +1994,7 @@ def _validate_dt64_dtype(dtype):
)
raise ValueError(msg)
- if (isinstance(dtype, np.dtype) and dtype != _NS_DTYPE) or not isinstance(
+ if (isinstance(dtype, np.dtype) and dtype != DT64NS_DTYPE) or not isinstance(
dtype, (np.dtype, DatetimeTZDtype)
):
raise ValueError(
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index d9bd567f88845..39a3b553b3cf4 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -23,7 +23,7 @@
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
- _TD_DTYPE,
+ TD64NS_DTYPE,
ensure_object,
is_datetime64_dtype,
is_float_dtype,
@@ -718,10 +718,10 @@ def _check_timedeltalike_freq_compat(self, other):
elif isinstance(other, np.ndarray):
# numpy timedelta64 array; all entries must be compatible
assert other.dtype.kind == "m"
- if other.dtype != _TD_DTYPE:
+ if other.dtype != TD64NS_DTYPE:
# i.e. non-nano unit
# TODO: disallow unit-less timedelta64
- other = other.astype(_TD_DTYPE)
+ other = other.astype(TD64NS_DTYPE)
nanos = other.view("i8")
else:
# TimedeltaArray/Index
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index a25426c5c99cc..a9c8977991740 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -14,8 +14,8 @@
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
- _NS_DTYPE,
- _TD_DTYPE,
+ DT64NS_DTYPE,
+ TD64NS_DTYPE,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
@@ -136,12 +136,12 @@ def dtype(self):
-------
numpy.dtype
"""
- return _TD_DTYPE
+ return TD64NS_DTYPE
# ----------------------------------------------------------------
# Constructors
- def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):
+ def __init__(self, values, dtype=TD64NS_DTYPE, freq=None, copy=False):
values = extract_array(values)
inferred_freq = getattr(values, "_freq", None)
@@ -167,7 +167,7 @@ def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
- values = values.view(_TD_DTYPE)
+ values = values.view(TD64NS_DTYPE)
_validate_td64_dtype(values.dtype)
dtype = _validate_td64_dtype(dtype)
@@ -192,21 +192,21 @@ def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):
type(self)._validate_frequency(self, freq)
@classmethod
- def _simple_new(cls, values, freq=None, dtype=_TD_DTYPE):
- assert dtype == _TD_DTYPE, dtype
+ def _simple_new(cls, values, freq=None, dtype=TD64NS_DTYPE):
+ assert dtype == TD64NS_DTYPE, dtype
assert isinstance(values, np.ndarray), type(values)
- if values.dtype != _TD_DTYPE:
+ if values.dtype != TD64NS_DTYPE:
assert values.dtype == "i8"
- values = values.view(_TD_DTYPE)
+ values = values.view(TD64NS_DTYPE)
result = object.__new__(cls)
result._data = values
result._freq = to_offset(freq)
- result._dtype = _TD_DTYPE
+ result._dtype = TD64NS_DTYPE
return result
@classmethod
- def _from_sequence(cls, data, dtype=_TD_DTYPE, copy=False, freq=None, unit=None):
+ def _from_sequence(cls, data, dtype=TD64NS_DTYPE, copy=False, freq=None, unit=None):
if dtype:
_validate_td64_dtype(dtype)
freq, freq_infer = dtl.maybe_infer_freq(freq)
@@ -428,7 +428,7 @@ def _add_datetimelike_scalar(self, other):
i8 = self.asi8
result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result)
- dtype = DatetimeTZDtype(tz=other.tz) if other.tz else _NS_DTYPE
+ dtype = DatetimeTZDtype(tz=other.tz) if other.tz else DT64NS_DTYPE
return DatetimeArray(result, dtype=dtype, freq=self.freq)
def _addsub_object_array(self, other, op):
@@ -950,10 +950,10 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"):
copy = False
elif is_timedelta64_dtype(data.dtype):
- if data.dtype != _TD_DTYPE:
+ if data.dtype != TD64NS_DTYPE:
# non-nano unit
# TODO: watch out for overflows
- data = data.astype(_TD_DTYPE)
+ data = data.astype(TD64NS_DTYPE)
copy = False
else:
@@ -1051,7 +1051,7 @@ def _validate_td64_dtype(dtype):
)
raise ValueError(msg)
- if not is_dtype_equal(dtype, _TD_DTYPE):
+ if not is_dtype_equal(dtype, TD64NS_DTYPE):
raise ValueError(f"dtype {dtype} cannot be converted to timedelta64[ns]")
return dtype
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index da9646aa8c46f..57c17f48e01ce 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -21,9 +21,9 @@
from pandas.core.dtypes.common import (
_INT64_DTYPE,
- _NS_DTYPE,
_POSSIBLY_CAST_DTYPES,
- _TD_DTYPE,
+ DT64NS_DTYPE,
+ TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
@@ -874,9 +874,9 @@ def coerce_to_dtypes(result, dtypes):
def conv(r, dtype):
if np.any(isna(r)):
pass
- elif dtype == _NS_DTYPE:
+ elif dtype == DT64NS_DTYPE:
r = tslibs.Timestamp(r)
- elif dtype == _TD_DTYPE:
+ elif dtype == TD64NS_DTYPE:
r = tslibs.Timedelta(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
@@ -944,7 +944,7 @@ def astype_nansafe(arr, dtype, copy: bool = True, skipna: bool = False):
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
- if dtype not in [_INT64_DTYPE, _TD_DTYPE]:
+ if dtype not in [_INT64_DTYPE, TD64NS_DTYPE]:
# allow frequency conversions
# we return a float here!
@@ -953,8 +953,8 @@ def astype_nansafe(arr, dtype, copy: bool = True, skipna: bool = False):
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
- elif dtype == _TD_DTYPE:
- return arr.astype(_TD_DTYPE, copy=copy)
+ elif dtype == TD64NS_DTYPE:
+ return arr.astype(TD64NS_DTYPE, copy=copy)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")
@@ -1326,14 +1326,14 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"):
f"Please pass in '{dtype.name}[ns]' instead."
)
- if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE):
+ if is_datetime64 and not is_dtype_equal(dtype, DT64NS_DTYPE):
# pandas supports dtype whose granularity is less than [ns]
# e.g., [ps], [fs], [as]
if dtype <= np.dtype("M8[ns]"):
if dtype.name == "datetime64":
raise ValueError(msg)
- dtype = _NS_DTYPE
+ dtype = DT64NS_DTYPE
else:
raise TypeError(f"cannot convert datetimelike to dtype [{dtype}]")
elif is_datetime64tz:
@@ -1344,14 +1344,14 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"):
if is_scalar(value) and isna(value):
value = [value]
- elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE):
+ elif is_timedelta64 and not is_dtype_equal(dtype, TD64NS_DTYPE):
# pandas supports dtype whose granularity is less than [ns]
# e.g., [ps], [fs], [as]
if dtype <= np.dtype("m8[ns]"):
if dtype.name == "timedelta64":
raise ValueError(msg)
- dtype = _TD_DTYPE
+ dtype = TD64NS_DTYPE
else:
raise TypeError(f"cannot convert timedeltalike to dtype [{dtype}]")
@@ -1399,8 +1399,8 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"):
# coerce datetimelike to object
elif is_datetime64_dtype(value) and not is_datetime64_dtype(dtype):
if is_object_dtype(dtype):
- if value.dtype != _NS_DTYPE:
- value = value.astype(_NS_DTYPE)
+ if value.dtype != DT64NS_DTYPE:
+ value = value.astype(DT64NS_DTYPE)
ints = np.asarray(value).view("i8")
return tslib.ints_to_pydatetime(ints)
@@ -1416,10 +1416,10 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"):
if is_array and value.dtype.kind in ["M", "m"]:
dtype = value.dtype
- if dtype.kind == "M" and dtype != _NS_DTYPE:
+ if dtype.kind == "M" and dtype != DT64NS_DTYPE:
value = tslibs.conversion.ensure_datetime64ns(value)
- elif dtype.kind == "m" and dtype != _TD_DTYPE:
+ elif dtype.kind == "m" and dtype != TD64NS_DTYPE:
value = to_timedelta(value)
# only do this if we have an array and the dtype of the array is not
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index b4b7fb36ee4d0..16373bd697c1f 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -58,8 +58,8 @@
]
}
-_NS_DTYPE = conversion.NS_DTYPE
-_TD_DTYPE = conversion.TD_DTYPE
+DT64NS_DTYPE = conversion.DT64NS_DTYPE
+TD64NS_DTYPE = conversion.TD64NS_DTYPE
_INT64_DTYPE = np.dtype(np.int64)
# oh the troubles to reduce import time
@@ -981,7 +981,7 @@ def is_datetime64_ns_dtype(arr_or_dtype) -> bool:
tipo = _get_dtype(arr_or_dtype.dtype)
else:
return False
- return tipo == _NS_DTYPE or getattr(tipo, "base", None) == _NS_DTYPE
+ return tipo == DT64NS_DTYPE or getattr(tipo, "base", None) == DT64NS_DTYPE
def is_timedelta64_ns_dtype(arr_or_dtype) -> bool:
@@ -1012,7 +1012,7 @@ def is_timedelta64_ns_dtype(arr_or_dtype) -> bool:
>>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))
False
"""
- return _is_dtype(arr_or_dtype, lambda dtype: dtype == _TD_DTYPE)
+ return _is_dtype(arr_or_dtype, lambda dtype: dtype == TD64NS_DTYPE)
def is_datetime_or_timedelta_dtype(arr_or_dtype) -> bool:
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index ecfaac2210807..301c9bb7b3f5c 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -7,8 +7,8 @@
from pandas._libs import tslib, tslibs
from pandas.core.dtypes.common import (
- _NS_DTYPE,
- _TD_DTYPE,
+ DT64NS_DTYPE,
+ TD64NS_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_dtype,
@@ -293,7 +293,7 @@ def union_categoricals(
Categories (3, object): [b, c, a]
"""
from pandas import Index, Categorical
- from pandas.core.arrays.categorical import _recode_for_categories
+ from pandas.core.arrays.categorical import recode_for_categories
if len(to_union) == 0:
raise ValueError("No Categoricals to union")
@@ -325,7 +325,7 @@ def _maybe_unwrap(x):
new_codes = np.concatenate([c.codes for c in to_union])
else:
codes = [first.codes] + [
- _recode_for_categories(other.codes, other.categories, first.categories)
+ recode_for_categories(other.codes, other.categories, first.categories)
for other in to_union[1:]
]
new_codes = np.concatenate(codes)
@@ -348,7 +348,7 @@ def _maybe_unwrap(x):
categories = categories.sort_values()
new_codes = [
- _recode_for_categories(c.codes, c.categories, categories) for c in to_union
+ recode_for_categories(c.codes, c.categories, categories) for c in to_union
]
new_codes = np.concatenate(new_codes)
else:
@@ -401,7 +401,7 @@ def concat_datetime(to_concat, axis=0, typs=None):
if "datetime" in typs:
to_concat = [x.astype(np.int64, copy=False) for x in to_concat]
- return _concatenate_2d(to_concat, axis=axis).view(_NS_DTYPE)
+ return _concatenate_2d(to_concat, axis=axis).view(DT64NS_DTYPE)
else:
# when to_concat has different tz, len(typs) > 1.
# thus no need to care
@@ -409,7 +409,7 @@ def concat_datetime(to_concat, axis=0, typs=None):
elif "timedelta" in typs:
return _concatenate_2d([x.view(np.int64) for x in to_concat], axis=axis).view(
- _TD_DTYPE
+ TD64NS_DTYPE
)
elif any(typ.startswith("period") for typ in typs):
@@ -423,7 +423,7 @@ def _convert_datetimelike_to_object(x):
# coerce datetimelike array to object dtype
# if dtype is of datetimetz or timezone
- if x.dtype.kind == _NS_DTYPE.kind:
+ if x.dtype.kind == DT64NS_DTYPE.kind:
if getattr(x, "tz", None) is not None:
x = np.asarray(x.astype(object))
else:
@@ -431,7 +431,7 @@ def _convert_datetimelike_to_object(x):
x = tslib.ints_to_pydatetime(x.view(np.int64).ravel(), box="timestamp")
x = x.reshape(shape)
- elif x.dtype == _TD_DTYPE:
+ elif x.dtype == TD64NS_DTYPE:
shape = x.shape
x = tslibs.ints_to_pytimedelta(x.view(np.int64).ravel(), box=True)
x = x.reshape(shape)
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index ef681cb204598..d7ba150e3ec9d 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -467,7 +467,7 @@ def _hash_categories(categories, ordered: Ordered = True) -> int:
_combine_hash_arrays,
hash_tuples,
)
- from pandas.core.dtypes.common import is_datetime64tz_dtype, _NS_DTYPE
+ from pandas.core.dtypes.common import is_datetime64tz_dtype, DT64NS_DTYPE
if len(categories) and isinstance(categories[0], tuple):
# assumes if any individual category is a tuple, then all our. ATM
@@ -487,7 +487,7 @@ def _hash_categories(categories, ordered: Ordered = True) -> int:
if is_datetime64tz_dtype(categories.dtype):
# Avoid future warning.
- categories = categories.astype(_NS_DTYPE)
+ categories = categories.astype(DT64NS_DTYPE)
cat_array = hash_array(np.asarray(categories), categorize=False)
if ordered:
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index f7b0615366ba0..08a6d42042c1c 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -11,8 +11,8 @@
from pandas._typing import DtypeObj
from pandas.core.dtypes.common import (
- _NS_DTYPE,
- _TD_DTYPE,
+ DT64NS_DTYPE,
+ TD64NS_DTYPE,
ensure_object,
is_bool_dtype,
is_complex_dtype,
@@ -482,9 +482,9 @@ def _infer_fill_value(val):
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(ensure_object(val), skipna=False)
if dtype in ["datetime", "datetime64"]:
- return np.array("NaT", dtype=_NS_DTYPE)
+ return np.array("NaT", dtype=DT64NS_DTYPE)
elif dtype in ["timedelta", "timedelta64"]:
- return np.array("NaT", dtype=_TD_DTYPE)
+ return np.array("NaT", dtype=TD64NS_DTYPE)
return np.nan
diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py
index c71ebee397bbd..db734bb2f0c07 100644
--- a/pandas/core/groupby/categorical.py
+++ b/pandas/core/groupby/categorical.py
@@ -4,7 +4,7 @@
from pandas.core.arrays.categorical import (
Categorical,
CategoricalDtype,
- _recode_for_categories,
+ recode_for_categories,
)
@@ -51,7 +51,7 @@ def recode_for_groupby(c: Categorical, sort: bool, observed: bool):
# we recode according to the uniques
categories = c.categories.take(take_codes)
- codes = _recode_for_categories(c.codes, c.categories, categories)
+ codes = recode_for_categories(c.codes, c.categories, categories)
# return a new categorical that maps our new codes
# and categories
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 77c4e9e7a3330..f4814f2efb910 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -23,7 +23,7 @@
from pandas.core import accessor
from pandas.core.algorithms import take_1d
-from pandas.core.arrays.categorical import Categorical, _recode_for_categories, contains
+from pandas.core.arrays.categorical import Categorical, contains, recode_for_categories
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs, maybe_extract_name
@@ -540,7 +540,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
# we have the same codes
codes = target.codes
else:
- codes = _recode_for_categories(
+ codes = recode_for_categories(
target.codes, target.categories, self._values.categories
)
else:
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index ad6a3600752b6..cd6f1048d58c9 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -11,7 +11,7 @@
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
- _NS_DTYPE,
+ DT64NS_DTYPE,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
@@ -454,7 +454,7 @@ def snap(self, freq="S"):
# Superdumb, punting on any optimizing
freq = to_offset(freq)
- snapped = np.empty(len(self), dtype=_NS_DTYPE)
+ snapped = np.empty(len(self), dtype=DT64NS_DTYPE)
for i, v in enumerate(self):
s = v
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 6acf9562f9b80..62f063b4eed02 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -5,7 +5,7 @@
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
- _TD_DTYPE,
+ TD64NS_DTYPE,
is_float,
is_integer,
is_scalar,
@@ -134,7 +134,7 @@ def __new__(
unit=None,
freq=None,
closed=None,
- dtype=_TD_DTYPE,
+ dtype=TD64NS_DTYPE,
copy=False,
name=None,
):
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index bdfb44cdc2fa3..8e2592a603716 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -27,8 +27,8 @@
soft_convert_objects,
)
from pandas.core.dtypes.common import (
- _NS_DTYPE,
- _TD_DTYPE,
+ DT64NS_DTYPE,
+ TD64NS_DTYPE,
is_bool_dtype,
is_categorical,
is_categorical_dtype,
@@ -2081,7 +2081,7 @@ def _maybe_coerce_values(self, values):
Overridden by DatetimeTZBlock.
"""
- if values.dtype != _NS_DTYPE:
+ if values.dtype != DT64NS_DTYPE:
values = conversion.ensure_datetime64ns(values)
if isinstance(values, DatetimeArray):
@@ -2353,7 +2353,7 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
fill_value = np.timedelta64("NaT", "ns")
def __init__(self, values, placement, ndim=None):
- if values.dtype != _TD_DTYPE:
+ if values.dtype != TD64NS_DTYPE:
values = conversion.ensure_timedelta64ns(values)
if isinstance(values, TimedeltaArray):
values = values._data
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 182a5b14a1242..ebb4899c1ba9a 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -18,7 +18,7 @@
maybe_promote,
)
from pandas.core.dtypes.common import (
- _NS_DTYPE,
+ DT64NS_DTYPE,
is_datetimelike_v_numeric,
is_extension_array_dtype,
is_list_like,
@@ -1748,7 +1748,7 @@ def form_blocks(arrays, names, axes):
blocks.extend(int_blocks)
if len(items_dict["DatetimeBlock"]):
- datetime_blocks = _simple_blockify(items_dict["DatetimeBlock"], _NS_DTYPE)
+ datetime_blocks = _simple_blockify(items_dict["DatetimeBlock"], DT64NS_DTYPE)
blocks.extend(datetime_blocks)
if len(items_dict["DatetimeTZBlock"]):
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index e78d5ccaa30c7..c00da962d39a8 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -44,7 +44,7 @@
from pandas import Categorical, Index, MultiIndex
from pandas.core import groupby
import pandas.core.algorithms as algos
-from pandas.core.arrays.categorical import _recode_for_categories
+from pandas.core.arrays.categorical import recode_for_categories
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.core.frame import _merge_doc
@@ -1944,7 +1944,7 @@ def _factorize_keys(
rk = rk.codes
else:
# Same categories in different orders -> recode
- rk = _recode_for_categories(rk.codes, rk.categories, lk.categories)
+ rk = recode_for_categories(rk.codes, rk.categories, lk.categories)
lk = ensure_int64(lk.codes)
rk = ensure_int64(rk)
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 11fb8cc121fb8..66c2f5c9b927f 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -7,7 +7,7 @@
from pandas._libs.lib import infer_dtype
from pandas.core.dtypes.common import (
- _NS_DTYPE,
+ DT64NS_DTYPE,
ensure_int64,
is_bool_dtype,
is_categorical_dtype,
@@ -247,7 +247,7 @@ def cut(
else:
if is_datetime64tz_dtype(bins):
- bins = np.asarray(bins, dtype=_NS_DTYPE)
+ bins = np.asarray(bins, dtype=DT64NS_DTYPE)
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py
index b99e172674f66..691230620c2e8 100644
--- a/pandas/tests/arrays/categorical/test_api.py
+++ b/pandas/tests/arrays/categorical/test_api.py
@@ -5,7 +5,7 @@
from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series
import pandas._testing as tm
-from pandas.core.arrays.categorical import _recode_for_categories
+from pandas.core.arrays.categorical import recode_for_categories
from pandas.tests.arrays.categorical.common import TestCategorical
@@ -504,7 +504,7 @@ def test_recode_to_categories(self, codes, old, new, expected):
expected = np.asanyarray(expected, dtype=np.int8)
old = Index(old)
new = Index(new)
- result = _recode_for_categories(codes, old, new)
+ result = recode_for_categories(codes, old, new)
tm.assert_numpy_array_equal(result, expected)
def test_recode_to_categories_large(self):
@@ -513,5 +513,5 @@ def test_recode_to_categories_large(self):
old = Index(codes)
expected = np.arange(N - 1, -1, -1, dtype=np.int16)
new = Index(expected)
- result = _recode_for_categories(codes, old, new)
+ result = recode_for_categories(codes, old, new)
tm.assert_numpy_array_equal(result, expected)
| xref #32942, these are some of the more common offenders.
I'd actually prefer to not have the _NS_DTYPE and _TD_DTYPE objects and just use the pertinent strings, but if we are going to use the objects, might as well de-code-smell them. | https://api.github.com/repos/pandas-dev/pandas/pulls/33227 | 2020-04-01T22:58:54Z | 2020-04-02T14:20:30Z | 2020-04-02T14:20:30Z | 2020-04-02T15:23:16Z |
Fix typos in 09_timeseries.rst | diff --git a/doc/source/getting_started/intro_tutorials/09_timeseries.rst b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
index d7c1709ced51a..15bdf43543d9a 100644
--- a/doc/source/getting_started/intro_tutorials/09_timeseries.rst
+++ b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
@@ -96,7 +96,7 @@ objects. In pandas we call these datetime objects similar to
pd.read_csv("../data/air_quality_no2_long.csv", parse_dates=["datetime"])
-Why are these :class:`pandas.Timestamp` objects useful. Let’s illustrate the added
+Why are these :class:`pandas.Timestamp` objects useful? Let’s illustrate the added
value with some example cases.
What is the start and end date of the time series data set working
@@ -106,7 +106,7 @@ value with some example cases.
air_quality["datetime"].min(), air_quality["datetime"].max()
-Using :class:`pandas.Timestamp` for datetimes enable us to calculate with date
+Using :class:`pandas.Timestamp` for datetimes enables us to calculate with date
information and make them comparable. Hence, we can use this to get the
length of our time series:
@@ -122,7 +122,7 @@ from the standard Python library and defining a time duration.
<div class="d-flex flex-row gs-torefguide">
<span class="badge badge-info">To user guide</span>
-The different time concepts supported by pandas are explained in the user guide section on :ref:`time related concepts <timeseries.overview>`.
+The various time concepts supported by pandas are explained in the user guide section on :ref:`time related concepts <timeseries.overview>`.
.. raw:: html
@@ -157,7 +157,7 @@ accessible by the ``dt`` accessor.
An overview of the existing date properties is given in the
:ref:`time and date components overview table <timeseries.components>`. More details about the ``dt`` accessor
-to return datetime like properties is explained in a dedicated section on the :ref:`dt accessor <basics.dt_accessors>`.
+to return datetime like properties are explained in a dedicated section on the :ref:`dt accessor <basics.dt_accessors>`.
.. raw:: html
@@ -353,7 +353,7 @@ Make a plot of the daily mean :math:`NO_2` value in each of the stations.
<div class="d-flex flex-row gs-torefguide">
<span class="badge badge-info">To user guide</span>
-More details on the power of time series ``resampling`` is provided in the user gudie section on :ref:`resampling <timeseries.resampling>`.
+More details on the power of time series ``resampling`` is provided in the user guide section on :ref:`resampling <timeseries.resampling>`.
.. raw:: html
@@ -366,7 +366,7 @@ More details on the power of time series ``resampling`` is provided in the user
- Valid date strings can be converted to datetime objects using
``to_datetime`` function or as part of read functions.
-- Datetime objects in pandas supports calculations, logical operations
+- Datetime objects in pandas support calculations, logical operations
and convenient date-related properties using the ``dt`` accessor.
- A ``DatetimeIndex`` contains these date-related properties and
supports convenient slicing.
@@ -382,7 +382,7 @@ More details on the power of time series ``resampling`` is provided in the user
<div class="d-flex flex-row gs-torefguide">
<span class="badge badge-info">To user guide</span>
-A full overview on time series is given in the pages on :ref:`time series and date functionality <timeseries>`.
+A full overview on time series is given on the pages on :ref:`time series and date functionality <timeseries>`.
.. raw:: html
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33226 | 2020-04-01T22:48:58Z | 2020-04-02T16:18:02Z | 2020-04-02T16:18:02Z | 2020-04-02T16:18:08Z |
CI: Checking all the examples in `pandas/core/series.py` | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index d30785d675788..c8d08277e9a26 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -271,8 +271,7 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Doctests series.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/series.py \
- -k"-nonzero -reindex -searchsorted -to_dict"
+ pytest -q --doctest-modules pandas/core/series.py
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Doctests groupby.py' ; echo $MSG
| Since all the examples are passing, there is no reason not to check for all the examples (IMO).
| https://api.github.com/repos/pandas-dev/pandas/pulls/33225 | 2020-04-01T21:38:23Z | 2020-04-01T22:09:50Z | 2020-04-01T22:09:50Z | 2020-04-06T08:43:47Z |
REF: misplaced Index.__contains__ tests | diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py
index 1cbf64a1529c2..abfae189bb4d7 100644
--- a/pandas/tests/arrays/categorical/test_indexing.py
+++ b/pandas/tests/arrays/categorical/test_indexing.py
@@ -85,6 +85,15 @@ def test_setitem_same_ordered_rasies(self, other):
class TestCategoricalIndexing:
+ def test_getitem_slice(self):
+ cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
+ sliced = cat[3]
+ assert sliced == "d"
+
+ sliced = cat[3:5]
+ expected = Categorical(["d", "a"], categories=["a", "b", "c", "d"])
+ tm.assert_categorical_equal(sliced, expected)
+
def test_getitem_listlike(self):
# GH 9469
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index d9da059eb9e9c..d104c773227d5 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -528,6 +528,12 @@ def test_contains_td64_level(self):
assert "element_not_exit" not in idx
assert "0 day 09:30:00" in idx
+ @pytest.mark.slow
+ def test_large_mi_contains(self):
+ # GH#10645
+ result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])
+ assert not (10 ** 6, 0) in result
+
def test_timestamp_multiindex_indexer():
# https://github.com/pandas-dev/pandas/issues/26944
diff --git a/pandas/tests/indexes/test_indexing.py b/pandas/tests/indexes/test_indexing.py
new file mode 100644
index 0000000000000..a79bde9fd04e1
--- /dev/null
+++ b/pandas/tests/indexes/test_indexing.py
@@ -0,0 +1,84 @@
+"""
+test_indexing tests the following Index methods:
+ __getitem__
+ get_loc
+ get_value
+ __contains__
+ take
+ where
+ get_indexer
+ slice_locs
+ asof_locs
+
+The corresponding tests.indexes.[index_type].test_indexing files
+contain tests for the corresponding methods specific to those Index subclasses.
+"""
+import numpy as np
+import pytest
+
+from pandas import Float64Index, Index, Int64Index, UInt64Index
+
+
+class TestContains:
+ @pytest.mark.parametrize(
+ "index,val",
+ [
+ (Index([0, 1, 2]), 2),
+ (Index([0, 1, "2"]), "2"),
+ (Index([0, 1, 2, np.inf, 4]), 4),
+ (Index([0, 1, 2, np.nan, 4]), 4),
+ (Index([0, 1, 2, np.inf]), np.inf),
+ (Index([0, 1, 2, np.nan]), np.nan),
+ ],
+ )
+ def test_index_contains(self, index, val):
+ assert val in index
+
+ @pytest.mark.parametrize(
+ "index,val",
+ [
+ (Index([0, 1, 2]), "2"),
+ (Index([0, 1, "2"]), 2),
+ (Index([0, 1, 2, np.inf]), 4),
+ (Index([0, 1, 2, np.nan]), 4),
+ (Index([0, 1, 2, np.inf]), np.nan),
+ (Index([0, 1, 2, np.nan]), np.inf),
+ # Checking if np.inf in Int64Index should not cause an OverflowError
+ # Related to GH 16957
+ (Int64Index([0, 1, 2]), np.inf),
+ (Int64Index([0, 1, 2]), np.nan),
+ (UInt64Index([0, 1, 2]), np.inf),
+ (UInt64Index([0, 1, 2]), np.nan),
+ ],
+ )
+ def test_index_not_contains(self, index, val):
+ assert val not in index
+
+ @pytest.mark.parametrize(
+ "index,val", [(Index([0, 1, "2"]), 0), (Index([0, 1, "2"]), "2")]
+ )
+ def test_mixed_index_contains(self, index, val):
+ # GH#19860
+ assert val in index
+
+ @pytest.mark.parametrize(
+ "index,val", [(Index([0, 1, "2"]), "1"), (Index([0, 1, "2"]), 2)]
+ )
+ def test_mixed_index_not_contains(self, index, val):
+ # GH#19860
+ assert val not in index
+
+ def test_contains_with_float_index(self):
+ # GH#22085
+ integer_index = Int64Index([0, 1, 2, 3])
+ uinteger_index = UInt64Index([0, 1, 2, 3])
+ float_index = Float64Index([0.1, 1.1, 2.2, 3.3])
+
+ for index in (integer_index, uinteger_index):
+ assert 1.1 not in index
+ assert 1.0 in index
+ assert 1 in index
+
+ assert 1.1 in float_index
+ assert 1.0 not in float_index
+ assert 1 not in float_index
diff --git a/pandas/tests/indexing/multiindex/test_indexing_slow.py b/pandas/tests/indexing/multiindex/test_indexing_slow.py
index 8ea1cebd7bf7b..ea4453b8dd6eb 100644
--- a/pandas/tests/indexing/multiindex/test_indexing_slow.py
+++ b/pandas/tests/indexing/multiindex/test_indexing_slow.py
@@ -4,7 +4,7 @@
import pytest
import pandas as pd
-from pandas import DataFrame, MultiIndex, Series
+from pandas import DataFrame, Series
import pandas._testing as tm
@@ -83,10 +83,3 @@ def loop(mi, df, keys):
mi = df.set_index(cols[:-1])
assert not mi.index.lexsort_depth < i
loop(mi, df, keys)
-
-
-@pytest.mark.slow
-def test_large_mi_dataframe_indexing():
- # GH10645
- result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])
- assert not (10 ** 6, 0) in result
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index dcd2de3845cbc..829ee61197ff2 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -95,15 +95,6 @@ def test_getitem_scalar(self):
result = s[cats[0]]
assert result == expected
- def test_slicing_directly(self):
- cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
- sliced = cat[3]
- assert sliced == "d"
- sliced = cat[3:5]
- expected = Categorical(["d", "a"], categories=["a", "b", "c", "d"])
- tm.assert_numpy_array_equal(sliced._codes, expected._codes)
- tm.assert_index_equal(sliced.categories, expected.categories)
-
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index a8a21b0610c14..8bf0a72f2fb9d 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -621,69 +621,6 @@ def test_astype_assignment(self):
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
- @pytest.mark.parametrize(
- "index,val",
- [
- (Index([0, 1, 2]), 2),
- (Index([0, 1, "2"]), "2"),
- (Index([0, 1, 2, np.inf, 4]), 4),
- (Index([0, 1, 2, np.nan, 4]), 4),
- (Index([0, 1, 2, np.inf]), np.inf),
- (Index([0, 1, 2, np.nan]), np.nan),
- ],
- )
- def test_index_contains(self, index, val):
- assert val in index
-
- @pytest.mark.parametrize(
- "index,val",
- [
- (Index([0, 1, 2]), "2"),
- (Index([0, 1, "2"]), 2),
- (Index([0, 1, 2, np.inf]), 4),
- (Index([0, 1, 2, np.nan]), 4),
- (Index([0, 1, 2, np.inf]), np.nan),
- (Index([0, 1, 2, np.nan]), np.inf),
- # Checking if np.inf in Int64Index should not cause an OverflowError
- # Related to GH 16957
- (pd.Int64Index([0, 1, 2]), np.inf),
- (pd.Int64Index([0, 1, 2]), np.nan),
- (pd.UInt64Index([0, 1, 2]), np.inf),
- (pd.UInt64Index([0, 1, 2]), np.nan),
- ],
- )
- def test_index_not_contains(self, index, val):
- assert val not in index
-
- @pytest.mark.parametrize(
- "index,val", [(Index([0, 1, "2"]), 0), (Index([0, 1, "2"]), "2")]
- )
- def test_mixed_index_contains(self, index, val):
- # GH 19860
- assert val in index
-
- @pytest.mark.parametrize(
- "index,val", [(Index([0, 1, "2"]), "1"), (Index([0, 1, "2"]), 2)]
- )
- def test_mixed_index_not_contains(self, index, val):
- # GH 19860
- assert val not in index
-
- def test_contains_with_float_index(self):
- # GH#22085
- integer_index = pd.Int64Index([0, 1, 2, 3])
- uinteger_index = pd.UInt64Index([0, 1, 2, 3])
- float_index = pd.Float64Index([0.1, 1.1, 2.2, 3.3])
-
- for index in (integer_index, uinteger_index):
- assert 1.1 not in index
- assert 1.0 in index
- assert 1 in index
-
- assert 1.1 in float_index
- assert 1.0 not in float_index
- assert 1 not in float_index
-
def test_index_type_coercion(self):
# GH 11836
| https://api.github.com/repos/pandas-dev/pandas/pulls/33223 | 2020-04-01T21:15:02Z | 2020-04-02T14:19:51Z | 2020-04-02T14:19:51Z | 2020-04-02T15:32:22Z | |
CLN: Use C-API for datetime.date | diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index a48c3365947dc..ce4d3a4ef8e02 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -4,7 +4,6 @@ import time
import locale
import calendar
import re
-import datetime
from _thread import allocate_lock as _thread_allocate_lock
@@ -13,6 +12,7 @@ import pytz
import numpy as np
from numpy cimport int64_t
+cimport cpython.datetime as datetime
from pandas._libs.tslibs.np_datetime cimport (
check_dts_bounds, dtstruct_to_dt64, npy_datetimestruct)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33222 | 2020-04-01T21:00:37Z | 2020-04-02T01:38:15Z | 2020-04-02T01:38:15Z | 2020-04-03T11:38:33Z |
TST: misplaced validate_indices tests | diff --git a/pandas/tests/indexing/test_indexers.py b/pandas/tests/indexing/test_indexers.py
index 35c0c06e86099..744f9441e7376 100644
--- a/pandas/tests/indexing/test_indexers.py
+++ b/pandas/tests/indexing/test_indexers.py
@@ -1,7 +1,8 @@
# Tests aimed at pandas.core.indexers
import numpy as np
+import pytest
-from pandas.core.indexers import is_scalar_indexer, length_of_indexer
+from pandas.core.indexers import is_scalar_indexer, length_of_indexer, validate_indices
def test_length_of_indexer():
@@ -26,3 +27,25 @@ def test_is_scalar_indexer():
assert not is_scalar_indexer(indexer, 2)
assert not is_scalar_indexer(slice(None), 1)
+
+
+class TestValidateIndices:
+ def test_validate_indices_ok(self):
+ indices = np.asarray([0, 1])
+ validate_indices(indices, 2)
+ validate_indices(indices[:0], 0)
+ validate_indices(np.array([-1, -1]), 0)
+
+ def test_validate_indices_low(self):
+ indices = np.asarray([0, -2])
+ with pytest.raises(ValueError, match="'indices' contains"):
+ validate_indices(indices, 2)
+
+ def test_validate_indices_high(self):
+ indices = np.asarray([0, 1, 2])
+ with pytest.raises(IndexError, match="indices are out"):
+ validate_indices(indices, 2)
+
+ def test_validate_indices_empty(self):
+ with pytest.raises(IndexError, match="indices are out"):
+ validate_indices(np.array([0, 1]), 0)
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index a8a21b0610c14..7f892611281bc 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -12,7 +12,6 @@
import pandas as pd
from pandas import DataFrame, Index, NaT, Series
import pandas._testing as tm
-from pandas.core.indexers import validate_indices
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
from pandas.tests.indexing.common import _mklbl
@@ -1051,30 +1050,6 @@ def test_none_coercion_mixed_dtypes(self):
tm.assert_frame_equal(start_dataframe, exp)
-def test_validate_indices_ok():
- indices = np.asarray([0, 1])
- validate_indices(indices, 2)
- validate_indices(indices[:0], 0)
- validate_indices(np.array([-1, -1]), 0)
-
-
-def test_validate_indices_low():
- indices = np.asarray([0, -2])
- with pytest.raises(ValueError, match="'indices' contains"):
- validate_indices(indices, 2)
-
-
-def test_validate_indices_high():
- indices = np.asarray([0, 1, 2])
- with pytest.raises(IndexError, match="indices are out"):
- validate_indices(indices, 2)
-
-
-def test_validate_indices_empty():
- with pytest.raises(IndexError, match="indices are out"):
- validate_indices(np.array([0, 1]), 0)
-
-
def test_extension_array_cross_section():
# A cross-section of a homogeneous EA should be an EA
df = pd.DataFrame(
| https://api.github.com/repos/pandas-dev/pandas/pulls/33221 | 2020-04-01T20:42:28Z | 2020-04-02T14:23:10Z | 2020-04-02T14:23:10Z | 2020-04-02T15:22:43Z | |
ENH: Add isocalendar accessor to DatetimeIndex and Series.dt | diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 0d49a2d8db77c..a09a5576ca378 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -772,6 +772,7 @@ There are several time/date properties that one can access from ``Timestamp`` or
week,"The week ordinal of the year"
dayofweek,"The number of the day of the week with Monday=0, Sunday=6"
weekday,"The number of the day of the week with Monday=0, Sunday=6"
+ isocalendar,"The ISO 8601 year, week and day of the date"
quarter,"Quarter of the date: Jan-Mar = 1, Apr-Jun = 2, etc."
days_in_month,"The number of days in the month of the datetime"
is_month_start,"Logical indicating if first day of month (defined by frequency)"
@@ -786,6 +787,15 @@ Furthermore, if you have a ``Series`` with datetimelike values, then you can
access these properties via the ``.dt`` accessor, as detailed in the section
on :ref:`.dt accessors<basics.dt_accessors>`.
+.. versionadded:: 1.1.0
+
+You may obtain the year, week and day components of the ISO year from the ISO 8601 standard:
+
+.. ipython:: python
+
+ idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
+ idx.to_series().dt.isocalendar
+
.. _timeseries.offsets:
DateOffset objects
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 0cbc47539d759..502d1b37a025b 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -88,6 +88,7 @@ Other enhancements
- :class:`Series.str` now has a `fullmatch` method that matches a regular expression against the entire string in each row of the series, similar to `re.fullmatch` (:issue:`32806`).
- :meth:`DataFrame.sample` will now also allow array-like and BitGenerator objects to be passed to ``random_state`` as seeds (:issue:`32503`)
- :meth:`MultiIndex.union` will now raise `RuntimeWarning` if the object inside are unsortable, pass `sort=False` to suppress this warning (:issue:`33015`)
+- :class:`Series.dt` and :class:`DatatimeIndex` now have an `isocalendar` accessor that returns a :class:`DataFrame` with year, week, and day calculated according to the ISO 8601 calendar (:issue:`33206`).
- The :meth:`DataFrame.to_feather` method now supports additional keyword
arguments (e.g. to set the compression) that are added in pyarrow 0.17
(:issue:`33422`).
diff --git a/pandas/_libs/tslibs/ccalendar.pxd b/pandas/_libs/tslibs/ccalendar.pxd
index 59ecaaaf2266e..68ad1d1e68133 100644
--- a/pandas/_libs/tslibs/ccalendar.pxd
+++ b/pandas/_libs/tslibs/ccalendar.pxd
@@ -2,9 +2,11 @@ from cython cimport Py_ssize_t
from numpy cimport int64_t, int32_t
+ctypedef (int32_t, int32_t, int32_t) iso_calendar_t
cdef int dayofweek(int y, int m, int d) nogil
cdef bint is_leapyear(int64_t year) nogil
cpdef int32_t get_days_in_month(int year, Py_ssize_t month) nogil
cpdef int32_t get_week_of_year(int year, int month, int day) nogil
+cpdef iso_calendar_t get_iso_calendar(int year, int month, int day) nogil
cpdef int32_t get_day_of_year(int year, int month, int day) nogil
diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx
index 0588dfe20e2e2..0873084d29555 100644
--- a/pandas/_libs/tslibs/ccalendar.pyx
+++ b/pandas/_libs/tslibs/ccalendar.pyx
@@ -150,33 +150,65 @@ cpdef int32_t get_week_of_year(int year, int month, int day) nogil:
-------
week_of_year : int32_t
+ Notes
+ -----
+ Assumes the inputs describe a valid date.
+ """
+ return get_iso_calendar(year, month, day)[1]
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+cpdef iso_calendar_t get_iso_calendar(int year, int month, int day) nogil:
+ """
+ Return the year, week, and day of year corresponding to ISO 8601
+
+ Parameters
+ ----------
+ year : int
+ month : int
+ day : int
+
+ Returns
+ -------
+ year : int32_t
+ week : int32_t
+ day : int32_t
+
Notes
-----
Assumes the inputs describe a valid date.
"""
cdef:
int32_t doy, dow
- int woy
+ int32_t iso_year, iso_week
doy = get_day_of_year(year, month, day)
dow = dayofweek(year, month, day)
# estimate
- woy = (doy - 1) - dow + 3
- if woy >= 0:
- woy = woy // 7 + 1
+ iso_week = (doy - 1) - dow + 3
+ if iso_week >= 0:
+ iso_week = iso_week // 7 + 1
# verify
- if woy < 0:
- if (woy > -2) or (woy == -2 and is_leapyear(year - 1)):
- woy = 53
+ if iso_week < 0:
+ if (iso_week > -2) or (iso_week == -2 and is_leapyear(year - 1)):
+ iso_week = 53
else:
- woy = 52
- elif woy == 53:
+ iso_week = 52
+ elif iso_week == 53:
if 31 - day + dow < 3:
- woy = 1
+ iso_week = 1
+
+ iso_year = year
+ if iso_week == 1 and doy > 7:
+ iso_year += 1
+
+ elif iso_week >= 52 and doy < 7:
+ iso_year -= 1
- return woy
+ return iso_year, iso_week, dow + 1
@cython.wraparound(False)
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 50b7fba67e78f..184d368659714 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -8,14 +8,14 @@ from cython import Py_ssize_t
import numpy as np
cimport numpy as cnp
-from numpy cimport ndarray, int64_t, int32_t, int8_t
+from numpy cimport ndarray, int64_t, int32_t, int8_t, uint32_t
cnp.import_array()
from pandas._libs.tslibs.ccalendar import (
get_locale_names, MONTHS_FULL, DAYS_FULL, DAY_SECONDS)
from pandas._libs.tslibs.ccalendar cimport (
get_days_in_month, is_leapyear, dayofweek, get_week_of_year,
- get_day_of_year)
+ get_day_of_year, get_iso_calendar, iso_calendar_t)
from pandas._libs.tslibs.np_datetime cimport (
npy_datetimestruct, pandas_timedeltastruct, dt64_to_dtstruct,
td64_to_tdstruct)
@@ -670,3 +670,42 @@ cpdef isleapyear_arr(ndarray years):
np.logical_and(years % 4 == 0,
years % 100 > 0))] = 1
return out.view(bool)
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def build_isocalendar_sarray(const int64_t[:] dtindex):
+ """
+ Given a int64-based datetime array, return the ISO 8601 year, week, and day
+ as a structured array.
+ """
+ cdef:
+ Py_ssize_t i, count = len(dtindex)
+ npy_datetimestruct dts
+ ndarray[uint32_t] iso_years, iso_weeks, days
+ iso_calendar_t ret_val
+
+ sa_dtype = [
+ ("year", "u4"),
+ ("week", "u4"),
+ ("day", "u4"),
+ ]
+
+ out = np.empty(count, dtype=sa_dtype)
+
+ iso_years = out["year"]
+ iso_weeks = out["week"]
+ days = out["day"]
+
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT:
+ ret_val = 0, 0, 0
+ else:
+ dt64_to_dtstruct(dtindex[i], &dts)
+ ret_val = get_iso_calendar(dts.year, dts.month, dts.day)
+
+ iso_years[i] = ret_val[0]
+ iso_weeks[i] = ret_val[1]
+ days[i] = ret_val[2]
+ return out
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index b9f9edcebad5b..d6af11a442518 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -182,7 +182,7 @@ class DatetimeArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps
"microsecond",
"nanosecond",
]
- _other_ops = ["date", "time", "timetz"]
+ _other_ops = ["date", "time", "timetz", "isocalendar"]
_datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods = [
"to_period",
@@ -1234,6 +1234,50 @@ def date(self):
return tslib.ints_to_pydatetime(timestamps, box="date")
+ @property
+ def isocalendar(self):
+ """
+ Returns a DataFrame with the year, week, and day calculated according to
+ the ISO 8601 standard.
+
+ .. versionadded:: 1.1.0
+
+ Returns
+ -------
+ DataFrame
+ with columns year, week and day
+
+ See Also
+ --------
+ Timestamp.isocalendar
+ datetime.date.isocalendar
+
+ Examples
+ --------
+ >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
+ >>> idx.isocalendar
+ year week day
+ 0 2019 52 7
+ 1 2020 1 1
+ 2 2020 1 2
+ 3 2020 1 3
+ >>> idx.isocalendar.week
+ 0 52
+ 1 1
+ 2 1
+ 3 1
+ Name: week, dtype: UInt32
+ """
+ from pandas import DataFrame
+
+ sarray = fields.build_isocalendar_sarray(self.asi8)
+ iso_calendar_df = DataFrame(
+ sarray, columns=["year", "week", "day"], dtype="UInt32"
+ )
+ if self._hasnans:
+ iso_calendar_df.iloc[self._isnan] = None
+ return iso_calendar_df
+
year = _field_accessor(
"year",
"Y",
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index d2cee5d94422c..d44fed9e097e7 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -219,6 +219,38 @@ def to_pydatetime(self) -> np.ndarray:
def freq(self):
return self._get_values().inferred_freq
+ @property
+ def isocalendar(self):
+ """
+ Returns a DataFrame with the year, week, and day calculated according to
+ the ISO 8601 standard.
+
+ .. versionadded:: 1.1.0
+
+ Returns
+ -------
+ DataFrame
+ with columns year, week and day
+
+ See Also
+ --------
+ Timestamp.isocalendar
+ datetime.date.isocalendar
+
+ Examples
+ --------
+ >>> ser = pd.to_datetime(pd.Series(["2010-01-01", pd.NaT]))
+ >>> ser.dt.isocalendar
+ year week day
+ 0 2009 53 5
+ 1 <NA> <NA> <NA>
+ >>> ser.dt.isocalendar.week
+ 0 53
+ 1 <NA>
+ Name: week, dtype: UInt32
+ """
+ return self._get_values().isocalendar.set_index(self._parent.index)
+
@delegate_names(
delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ="property"
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 68d6229e798f5..1ec6cf8fd7b4e 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -89,6 +89,7 @@ def _new_DatetimeIndex(cls, d):
"date",
"time",
"timetz",
+ "isocalendar",
]
+ DatetimeArray._bool_ops,
DatetimeArray,
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index d22dc72eaaadd..515e75b82371a 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -65,7 +65,7 @@ def get_expected(s, name):
if isinstance(result, np.ndarray):
if is_integer_dtype(result):
result = result.astype("int64")
- elif not is_list_like(result):
+ elif not is_list_like(result) or isinstance(result, pd.DataFrame):
return result
return Series(result, index=s.index, name=s.name)
@@ -74,6 +74,8 @@ def compare(s, name):
b = get_expected(s, prop)
if not (is_list_like(a) and is_list_like(b)):
assert a == b
+ elif isinstance(a, pd.DataFrame):
+ tm.assert_frame_equal(a, b)
else:
tm.assert_series_equal(a, b)
@@ -665,3 +667,19 @@ def test_setitem_with_different_tz(self):
dtype=object,
)
tm.assert_series_equal(ser, expected)
+
+ @pytest.mark.parametrize(
+ "input_series, expected_output",
+ [
+ [["2020-01-01"], [[2020, 1, 3]]],
+ [[pd.NaT], [[np.NaN, np.NaN, np.NaN]]],
+ [["2019-12-31", "2019-12-29"], [[2020, 1, 2], [2019, 52, 7]]],
+ [["2010-01-01", pd.NaT], [[2009, 53, 5], [np.NaN, np.NaN, np.NaN]]],
+ ],
+ )
+ def test_isocalendar(self, input_series, expected_output):
+ result = pd.to_datetime(pd.Series(input_series)).dt.isocalendar
+ expected_frame = pd.DataFrame(
+ expected_output, columns=["year", "week", "day"], dtype="UInt32"
+ )
+ tm.assert_frame_equal(result, expected_frame)
diff --git a/pandas/tests/tslibs/test_ccalendar.py b/pandas/tests/tslibs/test_ccalendar.py
index 6f6e32411a784..aab86d3a2df69 100644
--- a/pandas/tests/tslibs/test_ccalendar.py
+++ b/pandas/tests/tslibs/test_ccalendar.py
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import date, datetime
import numpy as np
import pytest
@@ -25,3 +25,26 @@ def test_get_day_of_year_dt():
expected = (dt - dt.replace(month=1, day=1)).days + 1
assert result == expected
+
+
+@pytest.mark.parametrize(
+ "input_date_tuple, expected_iso_tuple",
+ [
+ [(2020, 1, 1), (2020, 1, 3)],
+ [(2019, 12, 31), (2020, 1, 2)],
+ [(2019, 12, 30), (2020, 1, 1)],
+ [(2009, 12, 31), (2009, 53, 4)],
+ [(2010, 1, 1), (2009, 53, 5)],
+ [(2010, 1, 3), (2009, 53, 7)],
+ [(2010, 1, 4), (2010, 1, 1)],
+ [(2006, 1, 1), (2005, 52, 7)],
+ [(2005, 12, 31), (2005, 52, 6)],
+ [(2008, 12, 28), (2008, 52, 7)],
+ [(2008, 12, 29), (2009, 1, 1)],
+ ],
+)
+def test_dt_correct_iso_8601_year_week_and_day(input_date_tuple, expected_iso_tuple):
+ result = ccalendar.get_iso_calendar(*input_date_tuple)
+ expected_from_date_isocalendar = date(*input_date_tuple).isocalendar()
+ assert result == expected_from_date_isocalendar
+ assert result == expected_iso_tuple
| This PR adds the the isocalendar property to `DatetimeIndex` and the corresponding `Series.dt` accessor. The property returns a DataFrame, e.g.:
```python
>>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
>>> idx.isocalendar
year week day
0 2019 52 7
1 2020 1 1
2 2020 1 2
3 2020 1 3
```
and
```python
>>> pandas.to_datetime(pandas.Series(["2020-01-01"])).dt.isocalendar
year week day
0 2020 1
>>> pandas.to_datetime(pandas.Series(["2019-12-31"])).dt.isocalendar.week
0 1
Name: week, dtype: int32
```
The behavior is consistent with `Timestamp.isocalendar` and `datetime.date.isocalendar`.
For more information about ISO 8601 calendar, see e.g. https://en.wikipedia.org/wiki/ISO_week_date.
Address GH33206
- [x] closes #33206
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Note that I am very happy to rename the field, but I wanted to go ahead and open up the PR.
| https://api.github.com/repos/pandas-dev/pandas/pulls/33220 | 2020-04-01T20:35:15Z | 2020-04-12T21:34:32Z | 2020-04-12T21:34:32Z | 2020-04-13T20:01:43Z |
REF: DataFrame.mask tests | diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 9e6d41a8886b3..406a01efb84e5 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -392,16 +392,6 @@ def test_getitem_boolean_casting(self, datetime_frame):
)
tm.assert_series_equal(result, expected)
- # where dtype conversions
- # GH 3733
- df = DataFrame(data=np.random.randn(100, 50))
- df = df.where(df > 0) # create nans
- bools = df > 0
- mask = isna(df)
- expected = bools.astype(float).mask(mask)
- result = bools.mask(mask)
- tm.assert_frame_equal(result, expected)
-
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
@@ -2066,69 +2056,6 @@ def test_boolean_indexing_mixed(self):
with pytest.raises(TypeError, match=msg):
df[df > 0.3] = 1
- def test_mask(self):
- df = DataFrame(np.random.randn(5, 3))
- cond = df > 0
-
- rs = df.where(cond, np.nan)
- tm.assert_frame_equal(rs, df.mask(df <= 0))
- tm.assert_frame_equal(rs, df.mask(~cond))
-
- other = DataFrame(np.random.randn(5, 3))
- rs = df.where(cond, other)
- tm.assert_frame_equal(rs, df.mask(df <= 0, other))
- tm.assert_frame_equal(rs, df.mask(~cond, other))
-
- # see gh-21891
- df = DataFrame([1, 2])
- res = df.mask([[True], [False]])
-
- exp = DataFrame([np.nan, 2])
- tm.assert_frame_equal(res, exp)
-
- def test_mask_inplace(self):
- # GH8801
- df = DataFrame(np.random.randn(5, 3))
- cond = df > 0
-
- rdf = df.copy()
-
- rdf.where(cond, inplace=True)
- tm.assert_frame_equal(rdf, df.where(cond))
- tm.assert_frame_equal(rdf, df.mask(~cond))
-
- rdf = df.copy()
- rdf.where(cond, -df, inplace=True)
- tm.assert_frame_equal(rdf, df.where(cond, -df))
- tm.assert_frame_equal(rdf, df.mask(~cond, -df))
-
- def test_mask_edge_case_1xN_frame(self):
- # GH4071
- df = DataFrame([[1, 2]])
- res = df.mask(DataFrame([[True, False]]))
- expec = DataFrame([[np.nan, 2]])
- tm.assert_frame_equal(res, expec)
-
- def test_mask_callable(self):
- # GH 12533
- df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- result = df.mask(lambda x: x > 4, lambda x: x + 1)
- exp = DataFrame([[1, 2, 3], [4, 6, 7], [8, 9, 10]])
- tm.assert_frame_equal(result, exp)
- tm.assert_frame_equal(result, df.mask(df > 4, df + 1))
-
- # return ndarray and scalar
- result = df.mask(lambda x: (x % 2 == 0).values, lambda x: 99)
- exp = DataFrame([[1, 99, 3], [99, 5, 99], [7, 99, 9]])
- tm.assert_frame_equal(result, exp)
- tm.assert_frame_equal(result, df.mask(df % 2 == 0, 99))
-
- # chain
- result = (df + 2).mask(lambda x: x > 8, lambda x: x + 10)
- exp = DataFrame([[3, 4, 5], [6, 7, 8], [19, 20, 21]])
- tm.assert_frame_equal(result, exp)
- tm.assert_frame_equal(result, (df + 2).mask((df + 2) > 8, (df + 2) + 10))
-
def test_type_error_multiindex(self):
# See gh-12218
df = DataFrame(
diff --git a/pandas/tests/frame/indexing/test_mask.py b/pandas/tests/frame/indexing/test_mask.py
new file mode 100644
index 0000000000000..30db6110efc80
--- /dev/null
+++ b/pandas/tests/frame/indexing/test_mask.py
@@ -0,0 +1,83 @@
+"""
+Tests for DataFrame.mask; tests DataFrame.where as a side-effect.
+"""
+
+import numpy as np
+
+from pandas import DataFrame, isna
+import pandas._testing as tm
+
+
+class TestDataFrameMask:
+ def test_mask(self):
+ df = DataFrame(np.random.randn(5, 3))
+ cond = df > 0
+
+ rs = df.where(cond, np.nan)
+ tm.assert_frame_equal(rs, df.mask(df <= 0))
+ tm.assert_frame_equal(rs, df.mask(~cond))
+
+ other = DataFrame(np.random.randn(5, 3))
+ rs = df.where(cond, other)
+ tm.assert_frame_equal(rs, df.mask(df <= 0, other))
+ tm.assert_frame_equal(rs, df.mask(~cond, other))
+
+ # see GH#21891
+ df = DataFrame([1, 2])
+ res = df.mask([[True], [False]])
+
+ exp = DataFrame([np.nan, 2])
+ tm.assert_frame_equal(res, exp)
+
+ def test_mask_inplace(self):
+ # GH#8801
+ df = DataFrame(np.random.randn(5, 3))
+ cond = df > 0
+
+ rdf = df.copy()
+
+ rdf.where(cond, inplace=True)
+ tm.assert_frame_equal(rdf, df.where(cond))
+ tm.assert_frame_equal(rdf, df.mask(~cond))
+
+ rdf = df.copy()
+ rdf.where(cond, -df, inplace=True)
+ tm.assert_frame_equal(rdf, df.where(cond, -df))
+ tm.assert_frame_equal(rdf, df.mask(~cond, -df))
+
+ def test_mask_edge_case_1xN_frame(self):
+ # GH#4071
+ df = DataFrame([[1, 2]])
+ res = df.mask(DataFrame([[True, False]]))
+ expec = DataFrame([[np.nan, 2]])
+ tm.assert_frame_equal(res, expec)
+
+ def test_mask_callable(self):
+ # GH#12533
+ df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ result = df.mask(lambda x: x > 4, lambda x: x + 1)
+ exp = DataFrame([[1, 2, 3], [4, 6, 7], [8, 9, 10]])
+ tm.assert_frame_equal(result, exp)
+ tm.assert_frame_equal(result, df.mask(df > 4, df + 1))
+
+ # return ndarray and scalar
+ result = df.mask(lambda x: (x % 2 == 0).values, lambda x: 99)
+ exp = DataFrame([[1, 99, 3], [99, 5, 99], [7, 99, 9]])
+ tm.assert_frame_equal(result, exp)
+ tm.assert_frame_equal(result, df.mask(df % 2 == 0, 99))
+
+ # chain
+ result = (df + 2).mask(lambda x: x > 8, lambda x: x + 10)
+ exp = DataFrame([[3, 4, 5], [6, 7, 8], [19, 20, 21]])
+ tm.assert_frame_equal(result, exp)
+ tm.assert_frame_equal(result, (df + 2).mask((df + 2) > 8, (df + 2) + 10))
+
+ def test_mask_dtype_conversion(self):
+ # GH#3733
+ df = DataFrame(data=np.random.randn(100, 50))
+ df = df.where(df > 0) # create nans
+ bools = df > 0
+ mask = isna(df)
+ expected = bools.astype(float).mask(mask)
+ result = bools.mask(mask)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_timedelta.py b/pandas/tests/indexing/test_timedelta.py
index dd4750123c0b5..7da368e4bb321 100644
--- a/pandas/tests/indexing/test_timedelta.py
+++ b/pandas/tests/indexing/test_timedelta.py
@@ -6,7 +6,7 @@
class TestTimedeltaIndexing:
- def test_boolean_indexing(self):
+ def test_loc_setitem_bool_mask(self):
# GH 14946
df = pd.DataFrame({"x": range(10)})
df.index = pd.to_timedelta(range(10), unit="s")
@@ -17,7 +17,9 @@ def test_boolean_indexing(self):
[10, 10, 10, 3, 4, 5, 6, 7, 8, 9],
]
for cond, data in zip(conditions, expected_data):
- result = df.assign(x=df.mask(cond, 10).astype("int64"))
+ result = df.copy()
+ result.loc[cond, "x"] = 10
+
expected = pd.DataFrame(
data,
index=pd.to_timedelta(range(10), unit="s"),
@@ -58,7 +60,7 @@ def test_string_indexing(self):
tm.assert_series_equal(sliced, expected)
@pytest.mark.parametrize("value", [None, pd.NaT, np.nan])
- def test_masked_setitem(self, value):
+ def test_setitem_mask_na_value_td64(self, value):
# issue (#18586)
series = pd.Series([0, 1, 2], dtype="timedelta64[ns]")
series[series == series[0]] = value
| we already did this for Series.mask | https://api.github.com/repos/pandas-dev/pandas/pulls/33219 | 2020-04-01T20:27:39Z | 2020-04-02T14:18:32Z | 2020-04-02T14:18:31Z | 2020-04-02T15:25:48Z |
32380 deprecate squeeze in groupby | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 19db7dcb4b83e..41d519e0765dc 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -590,6 +590,7 @@ Deprecations
- :func:`pandas.api.types.is_categorical` is deprecated and will be removed in a future version; use `:func:pandas.api.types.is_categorical_dtype` instead (:issue:`33385`)
- :meth:`Index.get_value` is deprecated and will be removed in a future version (:issue:`19728`)
- :meth:`DateOffset.__call__` is deprecated and will be removed in a future version, use ``offset + other`` instead (:issue:`34171`)
+- The ``squeeze`` keyword in the ``groupby`` function is deprecated and will be removed in a future version (:issue:`32380`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2d181e826c2a9..01170320e5e31 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -42,6 +42,7 @@
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib, properties
+from pandas._libs.lib import no_default
from pandas._typing import (
ArrayLike,
Axes,
@@ -6253,12 +6254,24 @@ def groupby(
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
- squeeze: bool = False,
+ squeeze: bool = no_default,
observed: bool = False,
dropna: bool = True,
) -> "DataFrameGroupBy":
from pandas.core.groupby.generic import DataFrameGroupBy
+ if squeeze is not no_default:
+ warnings.warn(
+ (
+ "The `squeeze` parameter is deprecated and "
+ "will be removed in a future version."
+ ),
+ FutureWarning,
+ stacklevel=2,
+ )
+ else:
+ squeeze = False
+
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5c7d0eae24cee..8aa8f8bb60654 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7472,6 +7472,9 @@ def clip(
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
+
+ .. deprecated:: 1.1.0
+
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
diff --git a/pandas/core/series.py b/pandas/core/series.py
index e107b66d33b1c..bc13d5376ec96 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -23,6 +23,7 @@
from pandas._config import get_option
from pandas._libs import lib, properties, reshape, tslibs
+from pandas._libs.lib import no_default
from pandas._typing import ArrayLike, Axis, DtypeObj, IndexKeyFunc, Label, ValueKeyFunc
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, doc
@@ -1642,12 +1643,24 @@ def groupby(
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
- squeeze: bool = False,
+ squeeze: bool = no_default,
observed: bool = False,
dropna: bool = True,
) -> "SeriesGroupBy":
from pandas.core.groupby.generic import SeriesGroupBy
+ if squeeze is not no_default:
+ warnings.warn(
+ (
+ "The `squeeze` parameter is deprecated and "
+ "will be removed in a future version."
+ ),
+ FutureWarning,
+ stacklevel=2,
+ )
+ else:
+ squeeze = False
+
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index c88d16e34eab8..a0d059fc8f83b 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -109,7 +109,8 @@ def test_groupby_return_type():
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
- result = df1.groupby("val1", squeeze=True).apply(func)
+ with tm.assert_produces_warning(FutureWarning):
+ result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
@@ -124,12 +125,14 @@ def func(dataf):
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
- result = df2.groupby("val1", squeeze=True).apply(func)
+ with tm.assert_produces_warning(FutureWarning):
+ result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
- result = df.groupby("X", squeeze=False).count()
+ with tm.assert_produces_warning(FutureWarning):
+ result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
| - [x] closes #32380
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I deprecated the squeeze keyword. I think I got every points this keyword was used.
I hope that I considered every point necessary to deprecate a keyword. I tried to use other points were a keyword was deprecated as example.
| https://api.github.com/repos/pandas-dev/pandas/pulls/33218 | 2020-04-01T20:02:44Z | 2020-05-22T23:49:12Z | 2020-05-22T23:49:11Z | 2020-05-23T19:59:10Z |
BUG: Block.setitem GH#32395 | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index bdfb44cdc2fa3..0827bc21cd75d 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -854,8 +854,10 @@ def setitem(self, indexer, value):
if is_extension_array_dtype(getattr(value, "dtype", None)):
# We need to be careful not to allow through strings that
# can be parsed to EADtypes
+ is_ea_value = True
arr_value = value
else:
+ is_ea_value = False
arr_value = np.array(value)
if transpose:
@@ -883,6 +885,11 @@ def setitem(self, indexer, value):
values[indexer] = value
return self.make_block(Categorical(self.values, dtype=arr_value.dtype))
+ elif exact_match and is_ea_value:
+ # GH#?32395 if we're going to replace the values entirely, just
+ # substitute in the new array
+ return self.make_block(arr_value)
+
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif exact_match:
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index d1f67981b1ec5..9e37255dbf6d4 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1106,3 +1106,38 @@ def test_loc_with_period_index_indexer():
tm.assert_frame_equal(df, df.loc[list(idx)])
tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
tm.assert_frame_equal(df, df.loc[list(idx)])
+
+
+def test_loc_setitem_df_datetime64tz_column_with_index():
+ df = pd.DataFrame(
+ pd.date_range("2020-01-01", "2020-01-06", 6, tz="UTC"), columns=["data"]
+ )
+ df2 = pd.DataFrame(index=df.index)
+ df2.loc[df.index, "data"] = df["data"]
+
+ tm.assert_frame_equal(df, df2)
+
+
+def test_loc_setitem_df_datetime64tz_column_without_index():
+ df = pd.DataFrame(
+ pd.date_range("2020-01-01", "2020-01-06", 6, tz="UTC"), columns=["data"]
+ )
+ df2 = pd.DataFrame(index=df.index)
+ df2.loc[:, "data"] = df["data"]
+ tm.assert_series_equal(df.data, df2.data)
+
+
+def test_loc_setitem_series_datetime64tz_with_index():
+ s1 = pd.Series(pd.date_range("2020-01-01", "2020-01-06", 6, tz="UTC"), name="data")
+ s2 = pd.Series(index=s1.index, dtype=np.object, name="data")
+ s2.loc[s1.index] = s1
+
+ tm.assert_series_equal(s2, s1)
+
+
+def test_loc_setitem_series_datetime64tz_without_index():
+ s1 = pd.Series(pd.date_range("2020-01-01", "2020-01-06", 6, tz="UTC"), name="data")
+ s2 = pd.Series(index=s1.index, dtype=np.object, name="data")
+ s2.loc[:] = s1
+
+ tm.assert_series_equal(s2, s1)
| @h-vishal this is based on what you have in #32479. Can you adapt parts of this to address comments over there? (that should be merged, not this, as you did the real work) | https://api.github.com/repos/pandas-dev/pandas/pulls/33217 | 2020-04-01T19:12:18Z | 2020-04-03T16:53:41Z | null | 2020-04-23T22:38:33Z |
Added option to include/ignore file extensions in `scripts/validate_unwanted/patterns.py` | diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py
index b476ab5a818c5..193fef026a96b 100755
--- a/scripts/validate_unwanted_patterns.py
+++ b/scripts/validate_unwanted_patterns.py
@@ -16,9 +16,8 @@
import sys
import token
import tokenize
-from typing import IO, Callable, Iterable, List, Tuple
+from typing import IO, Callable, FrozenSet, Iterable, List, Tuple
-FILE_EXTENSIONS_TO_CHECK: Tuple[str, ...] = (".py", ".pyx", ".pxi.ini", ".pxd")
PATHS_TO_IGNORE: Tuple[str, ...] = ("asv_bench/env",)
@@ -293,6 +292,7 @@ def main(
function: Callable[[IO[str]], Iterable[Tuple[int, str]]],
source_path: str,
output_format: str,
+ file_extensions_to_check: str,
) -> bool:
"""
Main entry point of the script.
@@ -322,6 +322,10 @@ def main(
is_failed: bool = False
file_path: str = ""
+ FILE_EXTENSIONS_TO_CHECK: FrozenSet[str] = frozenset(
+ file_extensions_to_check.split(",")
+ )
+
if os.path.isfile(source_path):
file_path = source_path
with open(file_path, "r") as file_obj:
@@ -370,7 +374,7 @@ def main(
parser.add_argument(
"--format",
"-f",
- default="{source_path}:{line_number}:{msg}.",
+ default="{source_path}:{line_number}:{msg}",
help="Output format of the error message.",
)
parser.add_argument(
@@ -380,6 +384,11 @@ def main(
required=True,
help="Validation test case to check.",
)
+ parser.add_argument(
+ "--included-file-extensions",
+ default="py,pyx,pxd,pxi",
+ help="Coma seperated file extensions to check.",
+ )
args = parser.parse_args()
@@ -388,5 +397,6 @@ def main(
function=globals().get(args.validation_type), # type: ignore
source_path=args.path,
output_format=args.format,
+ file_extensions_to_check=args.included_file_extensions,
)
)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
---
This is needed for the check in #32942 to ignore cython files. | https://api.github.com/repos/pandas-dev/pandas/pulls/33216 | 2020-04-01T19:04:40Z | 2020-04-10T21:08:17Z | 2020-04-10T21:08:17Z | 2020-04-11T12:59:39Z |
REF: .dot tests | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 75c935cdf2e60..80573f32b936e 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -8,7 +8,7 @@
from pandas._libs import NaT, algos as libalgos, lib, writers
import pandas._libs.internals as libinternals
-from pandas._libs.tslibs import Timedelta, conversion
+from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import ArrayLike
from pandas.util._validators import validate_bool_kwarg
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index e1fc7e9d7c5b8..0255759513e28 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1147,59 +1147,6 @@ def test_any_all_level_axis_none_raises(self, method):
# ---------------------------------------------------------------------
# Matrix-like
- def test_dot(self):
- a = DataFrame(
- np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"]
- )
- b = DataFrame(
- np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["one", "two"]
- )
-
- result = a.dot(b)
- expected = DataFrame(
- np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"]
- )
- # Check alignment
- b1 = b.reindex(index=reversed(b.index))
- result = a.dot(b)
- tm.assert_frame_equal(result, expected)
-
- # Check series argument
- result = a.dot(b["one"])
- tm.assert_series_equal(result, expected["one"], check_names=False)
- assert result.name is None
-
- result = a.dot(b1["one"])
- tm.assert_series_equal(result, expected["one"], check_names=False)
- assert result.name is None
-
- # can pass correct-length arrays
- row = a.iloc[0].values
-
- result = a.dot(row)
- expected = a.dot(a.iloc[0])
- tm.assert_series_equal(result, expected)
-
- with pytest.raises(ValueError, match="Dot product shape mismatch"):
- a.dot(row[:-1])
-
- a = np.random.rand(1, 5)
- b = np.random.rand(5, 1)
- A = DataFrame(a)
-
- # TODO(wesm): unused
- B = DataFrame(b) # noqa
-
- # it works
- result = A.dot(b)
-
- # unaligned
- df = DataFrame(np.random.randn(3, 4), index=[1, 2, 3], columns=range(4))
- df2 = DataFrame(np.random.randn(5, 3), index=range(5), columns=[1, 2, 3])
-
- with pytest.raises(ValueError, match="aligned"):
- df.dot(df2)
-
def test_matmul(self):
# matmul test is for GH 10259
a = DataFrame(
diff --git a/pandas/tests/generic/methods/test_dot.py b/pandas/tests/generic/methods/test_dot.py
new file mode 100644
index 0000000000000..ecbec6b06e923
--- /dev/null
+++ b/pandas/tests/generic/methods/test_dot.py
@@ -0,0 +1,128 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame, Series
+import pandas._testing as tm
+
+
+class DotSharedTests:
+ @pytest.fixture
+ def obj(self):
+ raise NotImplementedError
+
+ @pytest.fixture
+ def other(self) -> DataFrame:
+ """
+ other is a DataFrame that is indexed so that obj.dot(other) is valid
+ """
+ raise NotImplementedError
+
+ @pytest.fixture
+ def expected(self, obj, other) -> DataFrame:
+ """
+ The expected result of obj.dot(other)
+ """
+ raise NotImplementedError
+
+ @classmethod
+ def reduced_dim_assert(cls, result, expected):
+ """
+ Assertion about results with 1 fewer dimension that self.obj
+ """
+ raise NotImplementedError
+
+ def test_dot_equiv_values_dot(self, obj, other, expected):
+ # `expected` is constructed from obj.values.dot(other.values)
+ result = obj.dot(other)
+ tm.assert_equal(result, expected)
+
+ def test_dot_2d_ndarray(self, obj, other, expected):
+ # Check ndarray argument; in this case we get matching values,
+ # but index/columns may not match
+ result = obj.dot(other.values)
+ assert np.all(result == expected.values)
+
+ def test_dot_1d_ndarray(self, obj, expected):
+ # can pass correct-length array
+ row = obj.iloc[0] if obj.ndim == 2 else obj
+
+ result = obj.dot(row.values)
+ expected = obj.dot(row)
+ self.reduced_dim_assert(result, expected)
+
+ def test_dot_series(self, obj, other, expected):
+ # Check series argument
+ result = obj.dot(other["1"])
+ self.reduced_dim_assert(result, expected["1"])
+
+ def test_dot_series_alignment(self, obj, other, expected):
+ result = obj.dot(other.iloc[::-1]["1"])
+ self.reduced_dim_assert(result, expected["1"])
+
+ def test_dot_aligns(self, obj, other, expected):
+ # Check index alignment
+ other2 = other.iloc[::-1]
+ result = obj.dot(other2)
+ tm.assert_equal(result, expected)
+
+ def test_dot_shape_mismatch(self, obj):
+ msg = "Dot product shape mismatch"
+ # exception raised is of type Exception
+ with pytest.raises(Exception, match=msg):
+ obj.dot(obj.values[:3])
+
+ def test_dot_misaligned(self, obj, other):
+ msg = "matrices are not aligned"
+ with pytest.raises(ValueError, match=msg):
+ obj.dot(other.T)
+
+
+class TestSeriesDot(DotSharedTests):
+ @pytest.fixture
+ def obj(self):
+ return Series(np.random.randn(4), index=["p", "q", "r", "s"])
+
+ @pytest.fixture
+ def other(self):
+ return DataFrame(
+ np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"]
+ ).T
+
+ @pytest.fixture
+ def expected(self, obj, other):
+ return Series(np.dot(obj.values, other.values), index=other.columns)
+
+ @classmethod
+ def reduced_dim_assert(cls, result, expected):
+ """
+ Assertion about results with 1 fewer dimension that self.obj
+ """
+ tm.assert_almost_equal(result, expected)
+
+
+class TestDataFrameDot(DotSharedTests):
+ @pytest.fixture
+ def obj(self):
+ return DataFrame(
+ np.random.randn(3, 4), index=["a", "b", "c"], columns=["p", "q", "r", "s"]
+ )
+
+ @pytest.fixture
+ def other(self):
+ return DataFrame(
+ np.random.randn(4, 2), index=["p", "q", "r", "s"], columns=["1", "2"]
+ )
+
+ @pytest.fixture
+ def expected(self, obj, other):
+ return DataFrame(
+ np.dot(obj.values, other.values), index=obj.index, columns=other.columns
+ )
+
+ @classmethod
+ def reduced_dim_assert(cls, result, expected):
+ """
+ Assertion about results with 1 fewer dimension that self.obj
+ """
+ tm.assert_series_equal(result, expected, check_names=False)
+ assert result.name is None
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 149d0aae8ab99..ab8618eb0a7d4 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -17,38 +17,6 @@ def test_prod_numpy16_bug(self):
assert not isinstance(result, Series)
- def test_dot(self):
- a = Series(np.random.randn(4), index=["p", "q", "r", "s"])
- b = DataFrame(
- np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"]
- ).T
-
- result = a.dot(b)
- expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"])
- tm.assert_series_equal(result, expected)
-
- # Check index alignment
- b2 = b.reindex(index=reversed(b.index))
- result = a.dot(b)
- tm.assert_series_equal(result, expected)
-
- # Check ndarray argument
- result = a.dot(b.values)
- assert np.all(result == expected.values)
- tm.assert_almost_equal(a.dot(b["2"].values), expected["2"])
-
- # Check series argument
- tm.assert_almost_equal(a.dot(b["1"]), expected["1"])
- tm.assert_almost_equal(a.dot(b2["1"]), expected["1"])
-
- msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)"
- # exception raised is of type Exception
- with pytest.raises(Exception, match=msg):
- a.dot(a.values[:3])
- msg = "matrices are not aligned"
- with pytest.raises(ValueError, match=msg):
- a.dot(b.T)
-
def test_matmul(self):
# matmul test is for GH #10259
a = Series(np.random.randn(4), index=["p", "q", "r", "s"])
| Fully parametrized over Series/DataFrame | https://api.github.com/repos/pandas-dev/pandas/pulls/33214 | 2020-04-01T18:21:13Z | 2020-04-10T20:53:21Z | 2020-04-10T20:53:21Z | 2020-04-10T21:13:43Z |
DOC: Added check for standard pandas reference | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index be6c076952ca1..73b3314e58969 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -237,6 +237,10 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
invgrep -RI --exclude=\*.{svg,c,cpp,html,js} --exclude-dir=env "\s$" *
RET=$(($RET + $?)) ; echo $MSG "DONE"
unset INVGREP_APPEND
+
+ MSG='Check if the pandas word reference is always used in lowercase (pandas) NOT Pandas or PANDAS'; echo $MSG
+ invgrep -R '*pandas*|Pandas|PANDAS' web/* doc/
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
### CODE ###
| - [x] xref #32316
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
As suggested by @datapythonista , I've added checks in `ci/code_checks.sh` to look if the pandas is being referenced in a standardized way i.e pandas, not *pandas* or Pandas | https://api.github.com/repos/pandas-dev/pandas/pulls/33213 | 2020-04-01T18:00:21Z | 2020-04-16T23:11:21Z | null | 2020-04-16T23:11:21Z |
CI: Make `isort` to check the "scripts" folder as well | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index be6c076952ca1..da878d3343233 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -121,7 +121,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
# Imports - Check formatting using isort see setup.cfg for settings
MSG='Check import format using isort' ; echo $MSG
- ISORT_CMD="isort --quiet --recursive --check-only pandas asv_bench"
+ ISORT_CMD="isort --quiet --recursive --check-only pandas asv_bench scripts"
if [[ "$GITHUB_ACTIONS" == "true" ]]; then
eval $ISORT_CMD | awk '{print "##[error]" $0}'; RET=$(($RET + ${PIPESTATUS[0]}))
else
diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py
index 17752134e5049..3e0ae90e26527 100755
--- a/scripts/validate_rst_title_capitalization.py
+++ b/scripts/validate_rst_title_capitalization.py
@@ -10,12 +10,11 @@
"""
import argparse
-import sys
-import re
-import os
-from typing import Tuple, Generator, List
import glob
-
+import os
+import re
+import sys
+from typing import Generator, List, Tuple
CAPITALIZATION_EXCEPTIONS = {
"pandas",
| https://api.github.com/repos/pandas-dev/pandas/pulls/33212 | 2020-04-01T17:38:43Z | 2020-04-01T18:42:20Z | 2020-04-01T18:42:20Z | 2020-04-01T18:50:11Z | |
CLN: remove Block.is_categorical_astype | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 778e2866f1eff..bdfb44cdc2fa3 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -50,7 +50,7 @@
pandas_dtype,
)
from pandas.core.dtypes.concat import concat_categorical, concat_datetime
-from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype
+from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCExtensionArray,
@@ -183,21 +183,6 @@ def is_datelike(self) -> bool:
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
- def is_categorical_astype(self, dtype) -> bool:
- """
- validate that we have a astypeable to categorical,
- returns a boolean if we are a categorical
- """
- if dtype is Categorical or dtype is CategoricalDtype:
- # this is a pd.Categorical, but is not
- # a valid type for astypeing
- raise TypeError(f"invalid type {dtype} for astype")
-
- elif is_categorical_dtype(dtype):
- return True
-
- return False
-
def external_values(self):
"""
The array that Series.values returns (public attribute).
@@ -565,7 +550,7 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"):
raise TypeError(msg)
# may need to convert to categorical
- if self.is_categorical_astype(dtype):
+ if is_categorical_dtype(dtype):
if is_categorical_dtype(self.values):
# GH 10696/18593: update an existing categorical efficiently
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 31f17be2fac7b..2f2a663d559d0 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -311,9 +311,10 @@ def cmp(a, b):
# invalid conversion (these are NOT a dtype)
msg = (
- r"invalid type <class 'pandas\.core\.arrays\.categorical\."
- "Categorical'> for astype"
+ "dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
+ "not understood"
)
+
for invalid in [
lambda x: x.astype(Categorical),
lambda x: x.astype("object").astype(Categorical),
| https://api.github.com/repos/pandas-dev/pandas/pulls/33211 | 2020-04-01T17:10:17Z | 2020-04-01T18:33:51Z | 2020-04-01T18:33:51Z | 2020-04-01T18:38:16Z | |
DOC: Fixed examples in pandas/core/indexes/ | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 732f9c5181b97..067d88a666bb3 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -278,6 +278,10 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
pytest -q --doctest-modules pandas/core/groupby/groupby.py -k"-cumcount -describe -pipe"
RET=$(($RET + $?)) ; echo $MSG "DONE"
+ MSG='Doctests indexes' ; echo $MSG
+ pytest -q --doctest-modules pandas/core/indexes/
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+
MSG='Doctests tools' ; echo $MSG
pytest -q --doctest-modules pandas/core/tools/
RET=$(($RET + $?)) ; echo $MSG "DONE"
@@ -286,10 +290,6 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
pytest -q --doctest-modules pandas/core/reshape/
RET=$(($RET + $?)) ; echo $MSG "DONE"
- MSG='Doctests interval classes' ; echo $MSG
- pytest -q --doctest-modules pandas/core/indexes/interval.py
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
MSG='Doctests arrays'; echo $MSG
pytest -q --doctest-modules pandas/core/arrays/
RET=$(($RET + $?)) ; echo $MSG "DONE"
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index feb9881ffdb81..2908d468bcae0 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -129,9 +129,41 @@ class DatetimeProperties(Properties):
Examples
--------
- >>> s.dt.hour
- >>> s.dt.second
- >>> s.dt.quarter
+ >>> seconds_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="s"))
+ >>> seconds_series
+ 0 2000-01-01 00:00:00
+ 1 2000-01-01 00:00:01
+ 2 2000-01-01 00:00:02
+ dtype: datetime64[ns]
+ >>> seconds_series.dt.second
+ 0 0
+ 1 1
+ 2 2
+ dtype: int64
+
+ >>> hours_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="h"))
+ >>> hours_series
+ 0 2000-01-01 00:00:00
+ 1 2000-01-01 01:00:00
+ 2 2000-01-01 02:00:00
+ dtype: datetime64[ns]
+ >>> hours_series.dt.hour
+ 0 0
+ 1 1
+ 2 2
+ dtype: int64
+
+ >>> quarters_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="q"))
+ >>> quarters_series
+ 0 2000-03-31
+ 1 2000-06-30
+ 2 2000-09-30
+ dtype: datetime64[ns]
+ >>> quarters_series.dt.quarter
+ 0 1
+ 1 2
+ 2 3
+ dtype: int64
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
@@ -200,13 +232,24 @@ class TimedeltaProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
- Examples
- --------
- >>> s.dt.hours
- >>> s.dt.seconds
-
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
+
+ Examples
+ --------
+ >>> seconds_series = pd.Series(
+ ... pd.timedelta_range(start="1 second", periods=3, freq="S")
+ ... )
+ >>> seconds_series
+ 0 00:00:01
+ 1 00:00:02
+ 2 00:00:03
+ dtype: timedelta64[ns]
+ >>> seconds_series.dt.seconds
+ 0 1
+ 1 2
+ 2 3
+ dtype: int64
"""
def to_pytimedelta(self) -> np.ndarray:
@@ -229,7 +272,7 @@ def to_pytimedelta(self) -> np.ndarray:
Examples
--------
- >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))
+ >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="d"))
>>> s
0 0 days
1 1 days
@@ -239,9 +282,9 @@ def to_pytimedelta(self) -> np.ndarray:
dtype: timedelta64[ns]
>>> s.dt.to_pytimedelta()
- array([datetime.timedelta(0), datetime.timedelta(1),
- datetime.timedelta(2), datetime.timedelta(3),
- datetime.timedelta(4)], dtype=object)
+ array([datetime.timedelta(0), datetime.timedelta(days=1),
+ datetime.timedelta(days=2), datetime.timedelta(days=3),
+ datetime.timedelta(days=4)], dtype=object)
"""
return self._get_values().to_pytimedelta()
@@ -289,14 +332,60 @@ class PeriodProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
- Examples
- --------
- >>> s.dt.hour
- >>> s.dt.second
- >>> s.dt.quarter
-
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
+
+ Examples
+ --------
+ >>> seconds_series = pd.Series(
+ ... pd.period_range(
+ ... start="2000-01-01 00:00:00", end="2000-01-01 00:00:03", freq="s"
+ ... )
+ ... )
+ >>> seconds_series
+ 0 2000-01-01 00:00:00
+ 1 2000-01-01 00:00:01
+ 2 2000-01-01 00:00:02
+ 3 2000-01-01 00:00:03
+ dtype: period[S]
+ >>> seconds_series.dt.second
+ 0 0
+ 1 1
+ 2 2
+ 3 3
+ dtype: int64
+
+ >>> hours_series = pd.Series(
+ ... pd.period_range(start="2000-01-01 00:00", end="2000-01-01 03:00", freq="h")
+ ... )
+ >>> hours_series
+ 0 2000-01-01 00:00
+ 1 2000-01-01 01:00
+ 2 2000-01-01 02:00
+ 3 2000-01-01 03:00
+ dtype: period[H]
+ >>> hours_series.dt.hour
+ 0 0
+ 1 1
+ 2 2
+ 3 3
+ dtype: int64
+
+ >>> quarters_series = pd.Series(
+ ... pd.period_range(start="2000-01-01", end="2000-12-31", freq="Q-DEC")
+ ... )
+ >>> quarters_series
+ 0 2000Q1
+ 1 2000Q2
+ 2 2000Q3
+ 3 2000Q4
+ dtype: period[Q-DEC]
+ >>> quarters_series.dt.quarter
+ 0 1
+ 1 2
+ 2 3
+ 3 4
+ dtype: int64
"""
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 5db860a02865b..5fec68d257167 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -1841,7 +1841,7 @@ def is_object(self) -> bool:
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
- >>> idx.object()
+ >>> idx.is_object()
False
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
@@ -2053,7 +2053,7 @@ def isna(self):
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.isna()
- array([False, False, True], dtype=bool)
+ array([False, False, True])
Empty strings are not considered NA values. None is considered an NA
value.
@@ -2062,7 +2062,7 @@ def isna(self):
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
- array([False, False, False, True], dtype=bool)
+ array([False, False, False, True])
For datetimes, `NaT` (Not a Time) is considered as an NA value.
@@ -2072,7 +2072,7 @@ def isna(self):
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
>>> idx.isna()
- array([False, True, True, True], dtype=bool)
+ array([False, True, True, True])
"""
return self._isnan
@@ -4790,8 +4790,9 @@ def isin(self, values, level=None):
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
- MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
- codes=[[0, 1, 2], [2, 0, 1]],
+ MultiIndex([(1, 'red'),
+ (2, 'blue'),
+ (3, 'green')],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
@@ -4859,11 +4860,11 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None):
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_indexer(start='b', end='c')
- slice(1, 3)
+ slice(1, 3, None)
>>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
>>> idx.slice_indexer(start='b', end=('c', 'g'))
- slice(1, 3)
+ slice(1, 3, None)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step, kind=kind)
@@ -5434,11 +5435,10 @@ def ensure_index_from_sequences(sequences, names=None):
Examples
--------
- >>> ensure_index_from_sequences([[1, 2, 3]], names=['name'])
+ >>> ensure_index_from_sequences([[1, 2, 3]], names=["name"])
Int64Index([1, 2, 3], dtype='int64', name='name')
- >>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']],
- names=['L1', 'L2'])
+ >>> ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"])
MultiIndex([('a', 'a'),
('a', 'b')],
names=['L1', 'L2'])
@@ -5471,6 +5471,10 @@ def ensure_index(index_like, copy=False):
-------
index : Index or MultiIndex
+ See Also
+ --------
+ ensure_index_from_sequences
+
Examples
--------
>>> ensure_index(['a', 'b'])
@@ -5481,13 +5485,8 @@ def ensure_index(index_like, copy=False):
>>> ensure_index([['a', 'a'], ['b', 'c']])
MultiIndex([('a', 'b'),
- ('a', 'c')],
- dtype='object')
- )
-
- See Also
- --------
- ensure_index_from_sequences
+ ('a', 'c')],
+ )
"""
if isinstance(index_like, Index):
if copy:
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index f4814f2efb910..073e1967678ec 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -138,21 +138,25 @@ class CategoricalIndex(ExtensionIndex, accessor.PandasDelegate):
Examples
--------
- >>> pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])
- CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') # noqa
+ >>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"])
+ CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
+ categories=['a', 'b', 'c'], ordered=False, dtype='category')
``CategoricalIndex`` can also be instantiated from a ``Categorical``:
- >>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
+ >>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"])
>>> pd.CategoricalIndex(c)
- CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') # noqa
+ CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
+ categories=['a', 'b', 'c'], ordered=False, dtype='category')
Ordered ``CategoricalIndex`` can have a min and max value.
- >>> ci = pd.CategoricalIndex(['a','b','c','a','b','c'], ordered=True,
- ... categories=['c', 'b', 'a'])
+ >>> ci = pd.CategoricalIndex(
+ ... ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]
+ ... )
>>> ci
- CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['c', 'b', 'a'], ordered=True, dtype='category') # noqa
+ CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
+ categories=['c', 'b', 'a'], ordered=True, dtype='category')
>>> ci.min()
'c'
"""
@@ -652,7 +656,7 @@ def map(self, mapper):
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
- ordered=False, dtype='category')
+ ordered=False, dtype='category')
>>> idx.map(lambda x: x.upper())
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
ordered=False, dtype='category')
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 10d62b522a255..4e2d07ddf9225 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -762,10 +762,26 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
Examples
--------
- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
- (2, 'one'), (2, 'two'),
- (3, 'one'), (3, 'two')],
- names=['foo', 'bar'])
+ >>> idx = pd.MultiIndex.from_tuples(
+ ... [
+ ... (1, "one"),
+ ... (1, "two"),
+ ... (2, "one"),
+ ... (2, "two"),
+ ... (3, "one"),
+ ... (3, "two")
+ ... ],
+ ... names=["foo", "bar"]
+ ... )
+ >>> idx
+ MultiIndex([(1, 'one'),
+ (1, 'two'),
+ (2, 'one'),
+ (2, 'two'),
+ (3, 'one'),
+ (3, 'two')],
+ names=['foo', 'bar'])
+
>>> idx.set_levels([['a', 'b', 'c'], [1, 2]])
MultiIndex([('a', 1),
('a', 2),
@@ -798,10 +814,12 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])
MultiIndex([('a', 1),
- ('a', 2),
- ('b', 1),
- ('b', 2)],
- names=['foo', 'bar'])
+ ('a', 2),
+ ('b', 1),
+ ('b', 2),
+ ('c', 1),
+ ('c', 2)],
+ names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
"""
@@ -907,11 +925,16 @@ def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
Examples
--------
- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'),
- (1, 'two'),
- (2, 'one'),
- (2, 'two')],
- names=['foo', 'bar'])
+ >>> idx = pd.MultiIndex.from_tuples(
+ ... [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"]
+ ... )
+ >>> idx
+ MultiIndex([(1, 'one'),
+ (1, 'two'),
+ (2, 'one'),
+ (2, 'two')],
+ names=['foo', 'bar'])
+
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
MultiIndex([(2, 'one'),
(1, 'one'),
@@ -2751,8 +2774,7 @@ def get_loc_level(self, key, level=0, drop_level: bool = True):
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
- (array([False, True, False], dtype=bool),
- Index(['b'], dtype='object', name='A'))
+ (array([False, True, False]), Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
@@ -3275,7 +3297,46 @@ def union(self, other, sort=None):
-------
Index
- >>> index.union(index2)
+ Examples
+ --------
+ >>> idx1 = pd.MultiIndex.from_arrays(
+ ... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
+ ... )
+ >>> idx1
+ MultiIndex([(1, 'Red'),
+ (1, 'Blue'),
+ (2, 'Red'),
+ (2, 'Blue')],
+ )
+ >>> idx2 = pd.MultiIndex.from_arrays(
+ ... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]]
+ ... )
+ >>> idx2
+ MultiIndex([(3, 'Red'),
+ (3, 'Green'),
+ (2, 'Red'),
+ (2, 'Green')],
+ )
+
+ >>> idx1.union(idx2)
+ MultiIndex([(1, 'Blue'),
+ (1, 'Red'),
+ (2, 'Blue'),
+ (2, 'Green'),
+ (2, 'Red'),
+ (3, 'Green'),
+ (3, 'Red')],
+ )
+
+ >>> idx1.union(idx2, sort=False)
+ MultiIndex([(1, 'Red'),
+ (1, 'Blue'),
+ (2, 'Red'),
+ (2, 'Blue'),
+ (3, 'Red'),
+ (3, 'Green'),
+ (2, 'Green')],
+ )
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 8aaf828787179..1f565828ec7a5 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -138,7 +138,9 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
Examples
--------
- >>> idx = pd.PeriodIndex(year=year_arr, quarter=q_arr)
+ >>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
+ >>> idx
+ PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]', freq='Q-DEC')
"""
_typ = "periodindex"
@@ -775,10 +777,10 @@ def period_range(
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
- PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05',
- '2017-06', '2017-06', '2017-07', '2017-08', '2017-09',
- '2017-10', '2017-11', '2017-12', '2018-01'],
- dtype='period[M]', freq='M')
+ PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
+ '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
+ '2018-01'],
+ dtype='period[M]', freq='M')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 62f063b4eed02..765b948f13e96 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -351,7 +351,7 @@ def timedelta_range(
>>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
'5 days 00:00:00'],
- dtype='timedelta64[ns]', freq=None)
+ dtype='timedelta64[ns]', freq='32H')
"""
if freq is None and com.any_none(periods, start, end):
freq = "D"
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33208 | 2020-04-01T14:56:35Z | 2020-04-03T10:50:32Z | 2020-04-03T10:50:32Z | 2020-04-03T10:57:32Z |
Add test for #24615 (Accept PandasArray (with correct dtype) in DatetimeArray constructor) | diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 7d80ad3d8c6be..e5d679950d860 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -374,6 +374,13 @@ def test_searchsorted_invalid_types(self, other, index):
with pytest.raises(TypeError, match=msg):
arr.searchsorted(other)
+ def test_pandasarray_in_datetimearray(self):
+ # GH 24615
+ expected = pd.array([1, 2], dtype="datetime64[ns]")
+ result = pd.arrays.DatetimeArray(pd.array([1, 2], dtype="datetime64[ns]"))
+
+ tm.assert_datetime_array_equal(expected, result)
+
class TestSequenceToDT64NS:
def test_tz_dtype_mismatch_raises(self):
| issue #24615
I added a test for the issue 24615. | https://api.github.com/repos/pandas-dev/pandas/pulls/33204 | 2020-04-01T13:32:47Z | 2020-04-04T01:01:23Z | null | 2020-04-04T01:01:23Z |
DOC: Fix Error in pandas.Series.last | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 143e4543e7ab8..0ecfbce460b3a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8062,15 +8062,21 @@ def first(self: FrameOrSeries, offset) -> FrameOrSeries:
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
- Method to subset final periods of time series data based on a date offset.
+ Select final periods of time series data based on a date offset.
+
+ When having a DataFrame with dates as index, this function can
+ select the last few rows based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
+ The offset length of the data that will be selected. For instance,
+ '3D' will display all the rows having their index within the last 3 days.
Returns
-------
- subset : same type as caller
+ Series or DataFrame
+ A subset of the caller.
Raises
------
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Related to #27977.
output of `python scripts/validate_docstrings.py pandas.Series.last`:
```
################################################################################
################################## Validation ##################################
################################################################################
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/33199 | 2020-04-01T06:13:23Z | 2020-04-03T14:42:31Z | 2020-04-03T14:42:31Z | 2020-04-03T14:42:43Z |
Requested ASV | diff --git a/asv_bench/benchmarks/arithmetic.py b/asv_bench/benchmarks/arithmetic.py
index 5a8b109c21858..2745db58e83e3 100644
--- a/asv_bench/benchmarks/arithmetic.py
+++ b/asv_bench/benchmarks/arithmetic.py
@@ -50,6 +50,23 @@ def time_frame_op_with_scalar(self, dtype, scalar, op):
op(self.df, scalar)
+class OpWithFillValue:
+ def setup(self):
+ # GH#31300
+ arr = np.arange(10 ** 6)
+ df = DataFrame({"A": arr})
+ ser = df["A"]
+
+ self.df = df
+ self.ser = ser
+
+ def time_frame_op_with_fill_value_no_nas(self):
+ self.df.add(self.df, fill_value=4)
+
+ def time_series_op_with_fill_value_no_nas(self):
+ self.ser.add(self.ser, fill_value=4)
+
+
class MixedFrameWithSeriesAxis0:
params = [
[
| I think this was requested a while ago and has been sitting in a local branch | https://api.github.com/repos/pandas-dev/pandas/pulls/33197 | 2020-04-01T03:19:34Z | 2020-04-02T00:06:13Z | 2020-04-02T00:06:13Z | 2020-04-02T00:13:49Z |
Added test for #5091 (Missing Periods for some DateOffsets) | diff --git a/pandas/tests/indexes/period/test_to_timestamp.py b/pandas/tests/indexes/period/test_to_timestamp.py
index 23787586cb3d3..0df7d873dcb7d 100644
--- a/pandas/tests/indexes/period/test_to_timestamp.py
+++ b/pandas/tests/indexes/period/test_to_timestamp.py
@@ -10,6 +10,7 @@
Timedelta,
Timestamp,
date_range,
+ offsets,
period_range,
)
import pandas._testing as tm
@@ -99,3 +100,11 @@ def test_to_timestamp_1703(self):
result = index.to_timestamp()
assert result[0] == Timestamp("1/1/2012")
+
+ def test_period_dateoffset_conversion(self):
+ # GH5091
+ expected = date_range("1/1/2012", periods=4, freq=offsets.BQuarterEnd())
+ per = expected.to_period()
+ result = per.to_timestamp()
+
+ tm.assert_index_equal(result, expected)
| - [x] closes #5091
| https://api.github.com/repos/pandas-dev/pandas/pulls/33195 | 2020-04-01T01:39:05Z | 2020-04-01T03:51:35Z | null | 2020-04-01T03:51:35Z |
REF: test_mutate_columns -> test_setitem | diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/indexing/test_setitem.py
similarity index 87%
rename from pandas/tests/frame/test_mutate_columns.py
rename to pandas/tests/frame/indexing/test_setitem.py
index e3f2a67c2f469..c12643f413490 100644
--- a/pandas/tests/frame/test_mutate_columns.py
+++ b/pandas/tests/frame/indexing/test_setitem.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Series
+from pandas import DataFrame, Index, Series
import pandas._testing as tm
# Column add, remove, delete.
@@ -12,14 +12,17 @@ def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
- {"foo": ["a", "b", "c"], "bar": [1, 2, 3], "baz": ["d", "e", "f"]}
- ).set_index("foo")
- s = DataFrame(
- {"foo": ["a", "b", "c", "a"], "fiz": ["g", "h", "i", "j"]}
- ).set_index("foo")
+ {"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
+ index=Index(["a", "b", "c"], name="foo"),
+ )
+ ser = Series(
+ ["g", "h", "i", "j"],
+ index=Index(["a", "b", "c", "a"], name="foo"),
+ name="fiz",
+ )
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
- df["newcol"] = s
+ df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
| https://api.github.com/repos/pandas-dev/pandas/pulls/33193 | 2020-04-01T00:55:59Z | 2020-04-01T17:55:37Z | 2020-04-01T17:55:36Z | 2020-04-01T17:56:25Z | |
CLN: Rename ordered_fixture --> ordered | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 0b14c12f2356f..e1088dae3925a 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -182,7 +182,7 @@ def observed(request):
@pytest.fixture(params=[True, False, None])
-def ordered_fixture(request):
+def ordered(request):
"""
Boolean 'ordered' parameter for Categorical.
"""
diff --git a/pandas/tests/arrays/categorical/test_algos.py b/pandas/tests/arrays/categorical/test_algos.py
index 835aa87a7c21b..10c454f7c479a 100644
--- a/pandas/tests/arrays/categorical/test_algos.py
+++ b/pandas/tests/arrays/categorical/test_algos.py
@@ -140,23 +140,21 @@ def test_take_empty(self, allow_fill):
with pytest.raises(IndexError, match=msg):
cat.take([0], allow_fill=allow_fill)
- def test_positional_take(self, ordered_fixture):
+ def test_positional_take(self, ordered):
cat = pd.Categorical(
- ["a", "a", "b", "b"], categories=["b", "a"], ordered=ordered_fixture
+ ["a", "a", "b", "b"], categories=["b", "a"], ordered=ordered
)
result = cat.take([0, 1, 2], allow_fill=False)
expected = pd.Categorical(
- ["a", "a", "b"], categories=cat.categories, ordered=ordered_fixture
+ ["a", "a", "b"], categories=cat.categories, ordered=ordered
)
tm.assert_categorical_equal(result, expected)
- def test_positional_take_unobserved(self, ordered_fixture):
- cat = pd.Categorical(
- ["a", "b"], categories=["a", "b", "c"], ordered=ordered_fixture
- )
+ def test_positional_take_unobserved(self, ordered):
+ cat = pd.Categorical(["a", "b"], categories=["a", "b", "c"], ordered=ordered)
result = cat.take([1, 0], allow_fill=False)
expected = pd.Categorical(
- ["b", "a"], categories=cat.categories, ordered=ordered_fixture
+ ["b", "a"], categories=cat.categories, ordered=ordered
)
tm.assert_categorical_equal(result, expected)
diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py
index 0ff7d3e59abb3..c470f677b5386 100644
--- a/pandas/tests/arrays/categorical/test_analytics.py
+++ b/pandas/tests/arrays/categorical/test_analytics.py
@@ -114,14 +114,14 @@ def test_mode(self, values, categories, exp_mode):
exp = Categorical(exp_mode, categories=categories, ordered=True)
tm.assert_categorical_equal(res, exp)
- def test_searchsorted(self, ordered_fixture):
+ def test_searchsorted(self, ordered):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
cat = Categorical(
["cheese", "milk", "apple", "bread", "bread"],
categories=["cheese", "milk", "apple", "bread"],
- ordered=ordered_fixture,
+ ordered=ordered,
)
ser = Series(cat)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 658d27160e3e1..d0831ea514a64 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -730,10 +730,10 @@ class TestCategoricalDtypeParametrized:
pd.date_range("2017", periods=4),
],
)
- def test_basic(self, categories, ordered_fixture):
- c1 = CategoricalDtype(categories, ordered=ordered_fixture)
+ def test_basic(self, categories, ordered):
+ c1 = CategoricalDtype(categories, ordered=ordered)
tm.assert_index_equal(c1.categories, pd.Index(categories))
- assert c1.ordered is ordered_fixture
+ assert c1.ordered is ordered
def test_order_matters(self):
categories = ["a", "b"]
@@ -754,7 +754,7 @@ def test_categories(self):
tm.assert_index_equal(result.categories, pd.Index(["a", "b", "c"]))
assert result.ordered is False
- def test_equal_but_different(self, ordered_fixture):
+ def test_equal_but_different(self, ordered):
c1 = CategoricalDtype([1, 2, 3])
c2 = CategoricalDtype([1.0, 2.0, 3.0])
assert c1 is not c2
@@ -818,8 +818,8 @@ def test_categorical_equality(self, ordered1, ordered2):
@pytest.mark.parametrize("categories", [list("abc"), None])
@pytest.mark.parametrize("other", ["category", "not a category"])
- def test_categorical_equality_strings(self, categories, ordered_fixture, other):
- c1 = CategoricalDtype(categories, ordered_fixture)
+ def test_categorical_equality_strings(self, categories, ordered, other):
+ c1 = CategoricalDtype(categories, ordered)
result = c1 == other
expected = other == "category"
assert result is expected
@@ -862,12 +862,12 @@ def test_from_categorical_dtype_both(self):
)
assert result == CategoricalDtype([1, 2], ordered=False)
- def test_str_vs_repr(self, ordered_fixture):
- c1 = CategoricalDtype(["a", "b"], ordered=ordered_fixture)
+ def test_str_vs_repr(self, ordered):
+ c1 = CategoricalDtype(["a", "b"], ordered=ordered)
assert str(c1) == "category"
# Py2 will have unicode prefixes
pat = r"CategoricalDtype\(categories=\[.*\], ordered={ordered}\)"
- assert re.match(pat.format(ordered=ordered_fixture), repr(c1))
+ assert re.match(pat.format(ordered=ordered), repr(c1))
def test_categorical_categories(self):
# GH17884
@@ -880,9 +880,9 @@ def test_categorical_categories(self):
"new_categories", [list("abc"), list("cba"), list("wxyz"), None]
)
@pytest.mark.parametrize("new_ordered", [True, False, None])
- def test_update_dtype(self, ordered_fixture, new_categories, new_ordered):
+ def test_update_dtype(self, ordered, new_categories, new_ordered):
original_categories = list("abc")
- dtype = CategoricalDtype(original_categories, ordered_fixture)
+ dtype = CategoricalDtype(original_categories, ordered)
new_dtype = CategoricalDtype(new_categories, new_ordered)
result = dtype.update_dtype(new_dtype)
@@ -892,8 +892,8 @@ def test_update_dtype(self, ordered_fixture, new_categories, new_ordered):
tm.assert_index_equal(result.categories, expected_categories)
assert result.ordered is expected_ordered
- def test_update_dtype_string(self, ordered_fixture):
- dtype = CategoricalDtype(list("abc"), ordered_fixture)
+ def test_update_dtype_string(self, ordered):
+ dtype = CategoricalDtype(list("abc"), ordered)
expected_categories = dtype.categories
expected_ordered = dtype.ordered
result = dtype.update_dtype("category")
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 9ea5252b91e13..e570ea201cc3a 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -1226,10 +1226,10 @@ def test_groupby_categorical_axis_1(code):
tm.assert_frame_equal(result, expected)
-def test_groupby_cat_preserves_structure(observed, ordered_fixture):
+def test_groupby_cat_preserves_structure(observed, ordered):
# GH 28787
df = DataFrame(
- {"Name": Categorical(["Bob", "Greg"], ordered=ordered_fixture), "Item": [1, 2]},
+ {"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]},
columns=["Name", "Item"],
)
expected = df.copy()
diff --git a/pandas/tests/indexes/categorical/test_map.py b/pandas/tests/indexes/categorical/test_map.py
index 943359a72e971..6cef555275444 100644
--- a/pandas/tests/indexes/categorical/test_map.py
+++ b/pandas/tests/indexes/categorical/test_map.py
@@ -15,12 +15,12 @@ class TestMap:
],
ids=["string", "interval"],
)
- def test_map_str(self, data, categories, ordered_fixture):
+ def test_map_str(self, data, categories, ordered):
# GH 31202 - override base class since we want to maintain categorical/ordered
- index = CategoricalIndex(data, categories=categories, ordered=ordered_fixture)
+ index = CategoricalIndex(data, categories=categories, ordered=ordered)
result = index.map(str)
expected = CategoricalIndex(
- map(str, data), categories=map(str, categories), ordered=ordered_fixture
+ map(str, data), categories=map(str, categories), ordered=ordered
)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py
index 0e5721bfd83fd..0e08a3f41b666 100644
--- a/pandas/tests/indexes/interval/test_indexing.py
+++ b/pandas/tests/indexes/interval/test_indexing.py
@@ -240,10 +240,10 @@ def test_get_indexer_length_one_interval(self, size, closed):
["foo", "foo", "bar", "baz"],
],
)
- def test_get_indexer_categorical(self, target, ordered_fixture):
+ def test_get_indexer_categorical(self, target, ordered):
# GH 30063: categorical and non-categorical results should be consistent
index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)])
- categorical_target = CategoricalIndex(target, ordered=ordered_fixture)
+ categorical_target = CategoricalIndex(target, ordered=ordered)
result = index.get_indexer(categorical_target)
expected = index.get_indexer(target)
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 8a8ac584c16c2..dcd2de3845cbc 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -778,9 +778,9 @@ def test_map_with_dict_or_series(self):
pd.timedelta_range(start="1d", periods=3).array,
],
)
- def test_loc_with_non_string_categories(self, idx_values, ordered_fixture):
+ def test_loc_with_non_string_categories(self, idx_values, ordered):
# GH-17569
- cat_idx = CategoricalIndex(idx_values, ordered=ordered_fixture)
+ cat_idx = CategoricalIndex(idx_values, ordered=ordered)
df = DataFrame({"A": ["foo", "bar", "baz"]}, index=cat_idx)
sl = slice(idx_values[0], idx_values[1])
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index cdb1a73abc431..e49b80e476003 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -1756,18 +1756,14 @@ def test_margins_casted_to_float(self, observed):
)
tm.assert_frame_equal(result, expected)
- def test_pivot_with_categorical(self, observed, ordered_fixture):
+ def test_pivot_with_categorical(self, observed, ordered):
# gh-21370
idx = [np.nan, "low", "high", "low", np.nan]
col = [np.nan, "A", "B", np.nan, "A"]
df = pd.DataFrame(
{
- "In": pd.Categorical(
- idx, categories=["low", "high"], ordered=ordered_fixture
- ),
- "Col": pd.Categorical(
- col, categories=["A", "B"], ordered=ordered_fixture
- ),
+ "In": pd.Categorical(idx, categories=["low", "high"], ordered=ordered),
+ "Col": pd.Categorical(col, categories=["A", "B"], ordered=ordered),
"Val": range(1, 6),
}
)
@@ -1776,16 +1772,14 @@ def test_pivot_with_categorical(self, observed, ordered_fixture):
index="In", columns="Col", values="Val", observed=observed
)
- expected_cols = pd.CategoricalIndex(
- ["A", "B"], ordered=ordered_fixture, name="Col"
- )
+ expected_cols = pd.CategoricalIndex(["A", "B"], ordered=ordered, name="Col")
expected = pd.DataFrame(
data=[[2.0, np.nan], [np.nan, 3.0]], columns=expected_cols
)
expected.index = Index(
pd.Categorical(
- ["low", "high"], categories=["low", "high"], ordered=ordered_fixture
+ ["low", "high"], categories=["low", "high"], ordered=ordered
),
name="In",
)
diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py
index 54f32f979232d..a4532ebb3d8c5 100644
--- a/pandas/tests/series/methods/test_drop_duplicates.py
+++ b/pandas/tests/series/methods/test_drop_duplicates.py
@@ -69,12 +69,12 @@ class TestSeriesDropDuplicates:
"dtype",
["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"],
)
- def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture):
+ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered):
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
# Test case 1
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
- tc1 = Series(Categorical(input1, categories=cat_array, ordered=ordered_fixture))
+ tc1 = Series(Categorical(input1, categories=cat_array, ordered=ordered))
if dtype == "datetime64[D]":
# pre-empty flaky xfail, tc1 values are seemingly-random
if not (np.array(tc1) == input1).all():
@@ -103,7 +103,7 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture):
# Test case 2
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
- tc2 = Series(Categorical(input2, categories=cat_array, ordered=ordered_fixture))
+ tc2 = Series(Categorical(input2, categories=cat_array, ordered=ordered))
if dtype == "datetime64[D]":
# pre-empty flaky xfail, tc2 values are seemingly-random
if not (np.array(tc2) == input2).all():
@@ -130,12 +130,10 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered_fixture):
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
- def test_drop_duplicates_categorical_bool(self, ordered_fixture):
+ def test_drop_duplicates_categorical_bool(self, ordered):
tc = Series(
Categorical(
- [True, False, True, False],
- categories=[True, False],
- ordered=ordered_fixture,
+ [True, False, True, False], categories=[True, False], ordered=ordered,
)
)
| A pretty small and straightforward rename to make things more readable and consistent with our existing naming convention for fixtures. | https://api.github.com/repos/pandas-dev/pandas/pulls/33192 | 2020-04-01T00:47:49Z | 2020-04-01T15:44:42Z | 2020-04-01T15:44:42Z | 2020-04-01T15:44:47Z |
REF: misplaced sort_index test | diff --git a/pandas/tests/series/methods/test_sort_index.py b/pandas/tests/series/methods/test_sort_index.py
index d4ebc9062a0c9..2d4fdfd5a3950 100644
--- a/pandas/tests/series/methods/test_sort_index.py
+++ b/pandas/tests/series/methods/test_sort_index.py
@@ -8,6 +8,10 @@
class TestSeriesSortIndex:
+ def test_sort_index_name(self, datetime_series):
+ result = datetime_series.sort_index(ascending=False)
+ assert result.name == datetime_series.name
+
def test_sort_index(self, datetime_series):
rindex = list(datetime_series.index)
random.shuffle(rindex)
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 3e877cf2fc787..302ca8d1aa43e 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -110,10 +110,6 @@ def _pickle_roundtrip(self, obj):
unpickled = pd.read_pickle(path)
return unpickled
- def test_sort_index_name(self, datetime_series):
- result = datetime_series.sort_index(ascending=False)
- assert result.name == datetime_series.name
-
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33191 | 2020-03-31T23:25:51Z | 2020-04-01T00:23:39Z | 2020-04-01T00:23:39Z | 2020-04-01T00:53:42Z | |
CLN: Correct docstring to describe actual functionality. | diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index a66c9cd86d00c..306636278bcbe 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -462,7 +462,7 @@ class _BaseOffset:
def _validate_n(self, n):
"""
- Require that `n` be a nonzero integer.
+ Require that `n` be an integer.
Parameters
----------
| - [x] closes #20560
| https://api.github.com/repos/pandas-dev/pandas/pulls/33190 | 2020-03-31T22:37:05Z | 2020-04-01T00:25:17Z | 2020-04-01T00:25:16Z | 2020-04-01T00:25:23Z |
REF: set_axis tests | diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index 74fe3bfd41b8f..961c18749f055 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -776,35 +776,3 @@ def test_set_reset_index(self):
df = df.set_index("B")
df = df.reset_index()
-
- def test_set_axis_inplace(self):
- # GH14636
- df = DataFrame(
- {"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2], "C": [4.4, 5.5, 6.6]},
- index=[2010, 2011, 2012],
- )
-
- expected = {0: df.copy(), 1: df.copy()}
- expected[0].index = list("abc")
- expected[1].columns = list("abc")
- expected["index"] = expected[0]
- expected["columns"] = expected[1]
-
- for axis in expected:
- result = df.copy()
- result.set_axis(list("abc"), axis=axis, inplace=True)
- tm.assert_frame_equal(result, expected[axis])
-
- # inplace=False
- result = df.set_axis(list("abc"), axis=axis)
- tm.assert_frame_equal(expected[axis], result)
-
- # omitting the "axis" parameter
- with tm.assert_produces_warning(None):
- result = df.set_axis(list("abc"))
- tm.assert_frame_equal(result, expected[0])
-
- # wrong values for the "axis" parameter
- for axis in 3, "foo":
- with pytest.raises(ValueError, match="No axis named"):
- df.set_axis(list("abc"), axis=axis)
diff --git a/pandas/tests/generic/methods/test_set_axis.py b/pandas/tests/generic/methods/test_set_axis.py
new file mode 100644
index 0000000000000..278d43ef93d2f
--- /dev/null
+++ b/pandas/tests/generic/methods/test_set_axis.py
@@ -0,0 +1,75 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame, Series
+import pandas._testing as tm
+
+
+class SharedSetAxisTests:
+ @pytest.fixture
+ def obj(self):
+ raise NotImplementedError("Implemented by subclasses")
+
+ def test_set_axis(self, obj):
+ # GH14636; this tests setting index for both Series and DataFrame
+ new_index = list("abcd")[: len(obj)]
+
+ expected = obj.copy()
+ expected.index = new_index
+
+ # inplace=False
+ result = obj.set_axis(new_index, axis=0, inplace=False)
+ tm.assert_equal(expected, result)
+
+ @pytest.mark.parametrize("axis", [0, "index", 1, "columns"])
+ def test_set_axis_inplace_axis(self, axis, obj):
+ # GH#14636
+ if obj.ndim == 1 and axis in [1, "columns"]:
+ # Series only has [0, "index"]
+ return
+
+ new_index = list("abcd")[: len(obj)]
+
+ expected = obj.copy()
+ if axis in [0, "index"]:
+ expected.index = new_index
+ else:
+ expected.columns = new_index
+
+ result = obj.copy()
+ result.set_axis(new_index, axis=axis, inplace=True)
+ tm.assert_equal(result, expected)
+
+ def test_set_axis_unnamed_kwarg_warns(self, obj):
+ # omitting the "axis" parameter
+ new_index = list("abcd")[: len(obj)]
+
+ expected = obj.copy()
+ expected.index = new_index
+
+ with tm.assert_produces_warning(None):
+ result = obj.set_axis(new_index, inplace=False)
+ tm.assert_equal(result, expected)
+
+ @pytest.mark.parametrize("axis", [3, "foo"])
+ def test_set_axis_invalid_axis_name(self, axis, obj):
+ # wrong values for the "axis" parameter
+ with pytest.raises(ValueError, match="No axis named"):
+ obj.set_axis(list("abc"), axis=axis)
+
+
+class TestDataFrameSetAxis(SharedSetAxisTests):
+ @pytest.fixture
+ def obj(self):
+ df = DataFrame(
+ {"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2], "C": [4.4, 5.5, 6.6]},
+ index=[2010, 2011, 2012],
+ )
+ return df
+
+
+class TestSeriesSetAxis(SharedSetAxisTests):
+ @pytest.fixture
+ def obj(self):
+ ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64")
+ return ser
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index c2bb498df2be2..203750757e28d 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -53,39 +53,3 @@ def test_set_index_makes_timeseries(self):
s = Series(range(10))
s.index = idx
assert s.index.is_all_dates
-
- def test_set_axis_inplace_axes(self, axis_series):
- # GH14636
- ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64")
-
- expected = ser.copy()
- expected.index = list("abcd")
-
- # inplace=True
- # The FutureWarning comes from the fact that we would like to have
- # inplace default to False some day
- result = ser.copy()
- result.set_axis(list("abcd"), axis=axis_series, inplace=True)
- tm.assert_series_equal(result, expected)
-
- def test_set_axis_inplace(self):
- # GH14636
-
- s = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64")
-
- expected = s.copy()
- expected.index = list("abcd")
-
- # inplace=False
- result = s.set_axis(list("abcd"), axis=0, inplace=False)
- tm.assert_series_equal(expected, result)
-
- # omitting the "axis" parameter
- with tm.assert_produces_warning(None):
- result = s.set_axis(list("abcd"), inplace=False)
- tm.assert_series_equal(result, expected)
-
- # wrong values for the "axis" parameter
- for axis in [2, "foo"]:
- with pytest.raises(ValueError, match="No axis named"):
- s.set_axis(list("abcd"), axis=axis, inplace=False)
| Fully parametrized over Series/DataFrame | https://api.github.com/repos/pandas-dev/pandas/pulls/33189 | 2020-03-31T20:57:56Z | 2020-03-31T21:24:43Z | 2020-03-31T21:24:43Z | 2020-03-31T21:44:26Z |
ADMIN: Create separate issue templates for different usecases | diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index e33835c462511..0000000000000
--- a/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,29 +0,0 @@
-#### Code Sample, a copy-pastable example if possible
-
-```python
-# Your code here
-
-```
-#### Problem description
-
-[this should explain **why** the current behaviour is a problem and why the expected output is a better solution.]
-
-**Note**: We receive a lot of issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates!
-
-**Note**: Many problems can be resolved by simply upgrading `pandas` to the latest version. Before submitting, please check if that solution works for you. If possible, you may want to check if `master` addresses this issue, but that is not necessary.
-
-For documentation-related issues, you can check the latest versions of the docs on `master` here:
-
-https://pandas-docs.github.io/pandas-docs-travis/
-
-If the issue has not been resolved there, go ahead and file it in the issue tracker.
-
-#### Expected Output
-
-#### Output of ``pd.show_versions()``
-
-<details>
-
-[paste the output of ``pd.show_versions()`` here below this line]
-
-</details>
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000000..765c1b8bff62e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,39 @@
+---
+
+name: Bug Report
+about: Create a bug report to help us improve pandas
+title: "BUG:"
+labels: "Bug, Needs Triage"
+
+---
+
+- [ ] I have checked that this issue has not already been reported.
+
+- [ ] I have confirmed this bug exists on the latest version of pandas.
+
+- [ ] (optional) I have confirmed this bug exists on the master branch of pandas.
+
+---
+
+**Note**: Please read [this guide](https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) detailing how to provide the necessary information for us to reproduce your bug.
+
+#### Code Sample, a copy-pastable example
+
+```python
+# Your code here
+
+```
+
+#### Problem description
+
+[this should explain **why** the current behaviour is a problem and why the expected output is a better solution]
+
+#### Expected Output
+
+#### Output of ``pd.show_versions()``
+
+<details>
+
+[paste the output of ``pd.show_versions()`` here leaving a blank line after the details tag]
+
+</details>
diff --git a/.github/ISSUE_TEMPLATE/documentation_improvement.md b/.github/ISSUE_TEMPLATE/documentation_improvement.md
new file mode 100644
index 0000000000000..c6356ac1057c8
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documentation_improvement.md
@@ -0,0 +1,22 @@
+---
+
+name: Documentation Improvement
+about: Report wrong or missing documentation
+title: "DOC:"
+labels: "Docs, Needs Triage"
+
+---
+
+#### Location of the documentation
+
+[this should provide the location of the documentation, e.g. "pandas.read_csv" or the URL of the documentation, e.g. "https://dev.pandas.io/docs/reference/api/pandas.read_csv.html"]
+
+**Note**: You can check the latest versions of the docs on `master` [here](https://dev.pandas.io/docs).
+
+#### Documentation problem
+
+[this should provide a description of what documentation you believe needs to be fixed/improved]
+
+#### Suggested fix for documentation
+
+[this should explain the suggested fix and **why** it's better than the existing documentation]
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000000..0c30b941bc520
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,33 @@
+---
+
+name: Feature Request
+about: Suggest an idea for pandas
+title: "ENH:"
+labels: "Enhancement, Needs Triage"
+
+---
+
+#### Is your feature request related to a problem?
+
+[this should provide a description of what the problem is, e.g. "I wish I could use pandas to do [...]"]
+
+#### Describe the solution you'd like
+
+[this should provide a description of the feature request, e.g. "`DataFrame.foo` should get a new parameter `bar` that [...]", try to write a docstring for the desired feature]
+
+#### API breaking implications
+
+[this should provide a description of how this feature will affect the API]
+
+#### Describe alternatives you've considered
+
+[this should provide a description of any alternative solutions or features you've considered]
+
+#### Additional context
+
+[add any other context, code examples, or references to existing implementations about the feature request here]
+
+```python
+# Your code here, if applicable
+
+```
diff --git a/.github/ISSUE_TEMPLATE/submit_question.md b/.github/ISSUE_TEMPLATE/submit_question.md
new file mode 100644
index 0000000000000..9b48918ff2f6d
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/submit_question.md
@@ -0,0 +1,24 @@
+---
+
+name: Submit Question
+about: Ask a general question about pandas
+title: "QST:"
+labels: "Usage Question, Needs Triage"
+
+---
+
+- [ ] I have searched the [[pandas] tag](https://stackoverflow.com/questions/tagged/pandas) on StackOverflow for similar questions.
+
+- [ ] I have asked my usage related question on [StackOverflow](https://stackoverflow.com).
+
+---
+
+#### Question about pandas
+
+**Note**: If you'd still like to submit a question, please read [this guide](
+https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) detailing how to provide the necessary information for us to reproduce your question.
+
+```python
+# Your code here, if applicable
+
+```
| revival of #31551 originally opened by @jschendel
----
> I recall this being mentioned on one of the core dev calls a few months back, and took inspiration from [rapidsai/cudf](https://github.com/rapidsai/cudf) for the various templates.
>
> Added templates for the following use cases:
>
> * Bug Report
>
> * This is largely the same as the current issue template
>
> * Documentation Enhancement
>
> * Documentation Error
>
> * Feature Request
>
> * Submit Question
>
> * This attempts to direct users to StackOverflow for usage questions
>
>
> We could also add a Blank Template for opening an issue without any template provided, but opted not to do that for now, as I don't want to encourage people to bypass these templates. Could certainly add one if there's a consensus that we want this.
>
> I've created a [local repo](https://github.com/jschendel/pandas-templates/issues) where you can see these templates in action since I couldn't figure out another way to actually display these. Click the "New Issue" button to see what these changes would look like.
>
>
| https://api.github.com/repos/pandas-dev/pandas/pulls/33187 | 2020-03-31T17:54:33Z | 2020-04-03T19:43:12Z | 2020-04-03T19:43:12Z | 2020-04-06T08:24:06Z |
TST: cover search_sorted scalar mixed timezones case | diff --git a/pandas/tests/series/methods/test_searchsorted.py b/pandas/tests/series/methods/test_searchsorted.py
index fd6c6f74a9136..5a6ec0039c7cd 100644
--- a/pandas/tests/series/methods/test_searchsorted.py
+++ b/pandas/tests/series/methods/test_searchsorted.py
@@ -40,6 +40,14 @@ def test_searchsorted_datetime64_scalar(self):
assert is_scalar(res)
assert res == 1
+ def test_searchsorted_datetime64_scalar_mixed_timezones(self):
+ # GH 30086
+ ser = Series(date_range("20120101", periods=10, freq="2D", tz="UTC"))
+ val = Timestamp("20120102", tz="America/New_York")
+ res = ser.searchsorted(val)
+ assert is_scalar(res)
+ assert res == 1
+
def test_searchsorted_datetime64_list(self):
ser = Series(date_range("20120101", periods=10, freq="2D"))
vals = [Timestamp("20120102"), Timestamp("20120104")]
| - [X] closes #30086
- [X] tests added / passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Raises a `ValueError` on version 0.25.1 (reporter's version), as expected:
```
>>> import pandas as pd
>>> pd.__version__
'0.25.1'
>>> ser = pd.Series(date_range("20120101", periods=10, freq="2D", tz="UTC"))
>>> val = pd.Timestamp("20120102", tz="America/New_York")
>>> res = ser.searchsorted(val)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/jamescobonkerr/venv/lib/python3.7/site-packages/pandas/core/series.py", line 2694, in searchsorted
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
File "/Users/jamescobonkerr/venv/lib/python3.7/site-packages/pandas/core/algorithms.py", line 1887, in searchsorted
result = arr.searchsorted(value, side=side, sorter=sorter)
File "/Users/jamescobonkerr/venv/lib/python3.7/site-packages/pandas/core/arrays/datetimelike.py", line 666, in searchsorted
self._check_compatible_with(value)
File "/Users/jamescobonkerr/venv/lib/python3.7/site-packages/pandas/core/arrays/datetimes.py", line 591, in _check_compatible_with
own=self.tz, other=other.tz
ValueError: Timezones don't match. 'UTC != America/New_York'
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/33185 | 2020-03-31T17:17:12Z | 2020-03-31T18:24:23Z | 2020-03-31T18:24:22Z | 2020-03-31T18:24:29Z |
REF/CLN: test_get_dummies | diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_get_dummies.py
similarity index 84%
rename from pandas/tests/reshape/test_reshape.py
rename to pandas/tests/reshape/test_get_dummies.py
index 6113cfec48df9..c003bfa6a239a 100644
--- a/pandas/tests/reshape/test_reshape.py
+++ b/pandas/tests/reshape/test_get_dummies.py
@@ -6,7 +6,7 @@
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
-from pandas import Categorical, DataFrame, Index, Series, get_dummies
+from pandas import Categorical, CategoricalIndex, DataFrame, Series, get_dummies
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray, SparseDtype
@@ -31,11 +31,11 @@ def effective_dtype(self, dtype):
return np.uint8
return dtype
- def test_raises_on_dtype_object(self, df):
+ def test_get_dummies_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype="object")
- def test_basic(self, sparse, dtype):
+ def test_get_dummies_basic(self, sparse, dtype):
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
@@ -56,7 +56,7 @@ def test_basic(self, sparse, dtype):
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
- def test_basic_types(self, sparse, dtype):
+ def test_get_dummies_basic_types(self, sparse, dtype):
# GH 10531
s_list = list("abc")
s_series = Series(s_list)
@@ -106,7 +106,7 @@ def test_basic_types(self, sparse, dtype):
result = result.sort_index()
tm.assert_series_equal(result, expected)
- def test_just_na(self, sparse):
+ def test_get_dummies_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=["A"])
@@ -123,7 +123,7 @@ def test_just_na(self, sparse):
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ["A"]
- def test_include_na(self, sparse, dtype):
+ def test_get_dummies_include_na(self, sparse, dtype):
s = ["a", "b", np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame(
@@ -152,7 +152,7 @@ def test_include_na(self, sparse, dtype):
)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
- def test_unicode(self, sparse):
+ def test_get_dummies_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
@@ -175,7 +175,7 @@ def test_dataframe_dummies_all_obj(self, df, sparse):
dtype=np.uint8,
)
if sparse:
- expected = pd.DataFrame(
+ expected = DataFrame(
{
"A_a": SparseArray([1, 0, 1], dtype="uint8"),
"A_b": SparseArray([0, 1, 0], dtype="uint8"),
@@ -223,7 +223,7 @@ def test_dataframe_dummies_prefix_list(self, df, sparse):
cols = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected = expected[["C"] + cols]
- typ = SparseArray if sparse else pd.Series
+ typ = SparseArray if sparse else Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
tm.assert_frame_equal(result, expected)
@@ -242,11 +242,11 @@ def test_dataframe_dummies_prefix_str(self, df, sparse):
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat(
[
- pd.Series([1, 2, 3], name="C"),
- pd.Series([1, 0, 1], name="bad_a", dtype="Sparse[uint8]"),
- pd.Series([0, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
- pd.Series([1, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
- pd.Series([0, 0, 1], name="bad_c", dtype="Sparse[uint8]"),
+ Series([1, 2, 3], name="C"),
+ Series([1, 0, 1], name="bad_a", dtype="Sparse[uint8]"),
+ Series([0, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
+ Series([1, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
+ Series([0, 0, 1], name="bad_c", dtype="Sparse[uint8]"),
],
axis=1,
)
@@ -267,7 +267,7 @@ def test_dataframe_dummies_subset(self, df, sparse):
expected[["C"]] = df[["C"]]
if sparse:
cols = ["from_A_a", "from_A_b"]
- expected[cols] = expected[cols].astype(pd.SparseDtype("uint8", 0))
+ expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
@@ -286,7 +286,7 @@ def test_dataframe_dummies_prefix_sep(self, df, sparse):
expected = expected[["C", "A..a", "A..b", "B..b", "B..c"]]
if sparse:
cols = ["A..a", "A..b", "B..b", "B..c"]
- expected[cols] = expected[cols].astype(pd.SparseDtype("uint8", 0))
+ expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
@@ -323,7 +323,7 @@ def test_dataframe_dummies_prefix_dict(self, sparse):
columns = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
- expected[columns] = expected[columns].astype(pd.SparseDtype("uint8", 0))
+ expected[columns] = expected[columns].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
@@ -359,7 +359,7 @@ def test_dataframe_dummies_with_na(self, df, sparse, dtype):
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
- df["cat"] = pd.Categorical(["x", "y", "y"])
+ df["cat"] = Categorical(["x", "y", "y"])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
@@ -386,30 +386,30 @@ def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
"get_dummies_kwargs,expected",
[
(
- {"data": pd.DataFrame(({"ä": ["a"]}))},
- pd.DataFrame({"ä_a": [1]}, dtype=np.uint8),
+ {"data": DataFrame(({"ä": ["a"]}))},
+ DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
- {"data": pd.DataFrame({"x": ["ä"]})},
- pd.DataFrame({"x_ä": [1]}, dtype=np.uint8),
+ {"data": DataFrame({"x": ["ä"]})},
+ DataFrame({"x_ä": [1]}, dtype=np.uint8),
),
(
- {"data": pd.DataFrame({"x": ["a"]}), "prefix": "ä"},
- pd.DataFrame({"ä_a": [1]}, dtype=np.uint8),
+ {"data": DataFrame({"x": ["a"]}), "prefix": "ä"},
+ DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
- {"data": pd.DataFrame({"x": ["a"]}), "prefix_sep": "ä"},
- pd.DataFrame({"xäa": [1]}, dtype=np.uint8),
+ {"data": DataFrame({"x": ["a"]}), "prefix_sep": "ä"},
+ DataFrame({"xäa": [1]}, dtype=np.uint8),
),
],
)
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
- # GH22084 pd.get_dummies incorrectly encodes unicode characters
+ # GH22084 get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
tm.assert_frame_equal(result, expected)
- def test_basic_drop_first(self, sparse):
+ def test_get_dummies_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list("abc")
@@ -430,7 +430,7 @@ def test_basic_drop_first(self, sparse):
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
- def test_basic_drop_first_one_level(self, sparse):
+ def test_get_dummies_basic_drop_first_one_level(self, sparse):
# Test the case that categorical variable only has one level.
s_list = list("aaa")
s_series = Series(s_list)
@@ -448,7 +448,7 @@ def test_basic_drop_first_one_level(self, sparse):
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
- def test_basic_drop_first_NA(self, sparse):
+ def test_get_dummies_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ["a", "b", np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
@@ -481,7 +481,7 @@ def test_dataframe_dummies_drop_first(self, df, sparse):
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(self, df, sparse, dtype):
- df["cat"] = pd.Categorical(["x", "y", "y"])
+ df["cat"] = Categorical(["x", "y", "y"])
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame(
{"C": [1, 2, 3], "A_b": [0, 1, 0], "B_c": [0, 0, 1], "cat_y": [0, 1, 1]}
@@ -521,24 +521,24 @@ def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
expected = expected[["C", "A_b", "B_c"]]
tm.assert_frame_equal(result, expected)
- def test_int_int(self):
+ def test_get_dummies_int_int(self):
data = Series([1, 2, 1])
- result = pd.get_dummies(data)
+ result = get_dummies(data)
expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2], dtype=np.uint8)
tm.assert_frame_equal(result, expected)
- data = Series(pd.Categorical(["a", "b", "a"]))
- result = pd.get_dummies(data)
+ data = Series(Categorical(["a", "b", "a"]))
+ result = get_dummies(data)
expected = DataFrame(
- [[1, 0], [0, 1], [1, 0]], columns=pd.Categorical(["a", "b"]), dtype=np.uint8
+ [[1, 0], [0, 1], [1, 0]], columns=Categorical(["a", "b"]), dtype=np.uint8
)
tm.assert_frame_equal(result, expected)
- def test_int_df(self, dtype):
+ def test_get_dummies_int_df(self, dtype):
data = DataFrame(
{
"A": [1, 2, 1],
- "B": pd.Categorical(["a", "b", "a"]),
+ "B": Categorical(["a", "b", "a"]),
"C": [1, 2, 1],
"D": [1.0, 2.0, 1.0],
}
@@ -549,22 +549,22 @@ def test_int_df(self, dtype):
columns=columns,
)
expected[columns[2:]] = expected[columns[2:]].astype(dtype)
- result = pd.get_dummies(data, columns=["A", "B"], dtype=dtype)
+ result = get_dummies(data, columns=["A", "B"], dtype=dtype)
tm.assert_frame_equal(result, expected)
- def test_dataframe_dummies_preserve_categorical_dtype(self, dtype):
+ @pytest.mark.parametrize("ordered", [True, False])
+ def test_dataframe_dummies_preserve_categorical_dtype(self, dtype, ordered):
# GH13854
- for ordered in [False, True]:
- cat = pd.Categorical(list("xy"), categories=list("xyz"), ordered=ordered)
- result = get_dummies(cat, dtype=dtype)
+ cat = Categorical(list("xy"), categories=list("xyz"), ordered=ordered)
+ result = get_dummies(cat, dtype=dtype)
- data = np.array([[1, 0, 0], [0, 1, 0]], dtype=self.effective_dtype(dtype))
- cols = pd.CategoricalIndex(
- cat.categories, categories=cat.categories, ordered=ordered
- )
- expected = DataFrame(data, columns=cols, dtype=self.effective_dtype(dtype))
+ data = np.array([[1, 0, 0], [0, 1, 0]], dtype=self.effective_dtype(dtype))
+ cols = CategoricalIndex(
+ cat.categories, categories=cat.categories, ordered=ordered
+ )
+ expected = DataFrame(data, columns=cols, dtype=self.effective_dtype(dtype))
- tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("sparse", [True, False])
def test_get_dummies_dont_sparsify_all_columns(self, sparse):
@@ -593,10 +593,10 @@ def test_get_dummies_duplicate_columns(self, df):
tm.assert_frame_equal(result, expected)
def test_get_dummies_all_sparse(self):
- df = pd.DataFrame({"A": [1, 2]})
- result = pd.get_dummies(df, columns=["A"], sparse=True)
+ df = DataFrame({"A": [1, 2]})
+ result = get_dummies(df, columns=["A"], sparse=True)
dtype = SparseDtype("uint8", 0)
- expected = pd.DataFrame(
+ expected = DataFrame(
{
"A_1": SparseArray([1, 0], dtype=dtype),
"A_2": SparseArray([0, 1], dtype=dtype),
@@ -607,7 +607,7 @@ def test_get_dummies_all_sparse(self):
@pytest.mark.parametrize("values", ["baz"])
def test_get_dummies_with_string_values(self, values):
# issue #28383
- df = pd.DataFrame(
+ df = DataFrame(
{
"bar": [1, 2, 3, 4, 5, 6],
"foo": ["one", "one", "one", "two", "two", "two"],
@@ -619,26 +619,4 @@ def test_get_dummies_with_string_values(self, values):
msg = "Input must be a list-like for parameter `columns`"
with pytest.raises(TypeError, match=msg):
- pd.get_dummies(df, columns=values)
-
-
-class TestCategoricalReshape:
- def test_reshaping_multi_index_categorical(self):
-
- cols = ["ItemA", "ItemB", "ItemC"]
- data = {c: tm.makeTimeDataFrame() for c in cols}
- df = pd.concat({c: data[c].stack() for c in data}, axis="columns")
- df.index.names = ["major", "minor"]
- df["str"] = "foo"
-
- df["category"] = df["str"].astype("category")
- result = df["category"].unstack()
-
- dti = df.index.levels[0]
- c = Categorical(["foo"] * len(dti))
- expected = DataFrame(
- {"A": c.copy(), "B": c.copy(), "C": c.copy(), "D": c.copy()},
- columns=Index(list("ABCD"), name="minor"),
- index=dti.rename("major"),
- )
- tm.assert_frame_equal(result, expected)
+ get_dummies(df, columns=values)
diff --git a/pandas/tests/series/methods/test_unstack.py b/pandas/tests/series/methods/test_unstack.py
index 7645fb8759a54..cdf6a16e88ad0 100644
--- a/pandas/tests/series/methods/test_unstack.py
+++ b/pandas/tests/series/methods/test_unstack.py
@@ -118,3 +118,20 @@ def test_unstack_mixed_type_name_in_multiindex(
expected_values, columns=expected_columns, index=expected_index,
)
tm.assert_frame_equal(result, expected)
+
+
+def test_unstack_multi_index_categorical_values():
+
+ mi = tm.makeTimeDataFrame().stack().index.rename(["major", "minor"])
+ ser = pd.Series(["foo"] * len(mi), index=mi, name="category", dtype="category")
+
+ result = ser.unstack()
+
+ dti = ser.index.levels[0]
+ c = pd.Categorical(["foo"] * len(dti))
+ expected = DataFrame(
+ {"A": c.copy(), "B": c.copy(), "C": c.copy(), "D": c.copy()},
+ columns=pd.Index(list("ABCD"), name="minor"),
+ index=dti.rename("major"),
+ )
+ tm.assert_frame_equal(result, expected)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33184 | 2020-03-31T16:48:35Z | 2020-04-01T00:28:49Z | 2020-04-01T00:28:49Z | 2020-04-01T11:00:23Z | |
REF: fillna tests | diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index 954601ad423bf..543edc6b66ff2 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -442,18 +442,6 @@ def test_frame_repr(self):
expected = " A\na 1\nb 2\nc 3"
assert result == expected
- def test_fillna_categorical(self):
- # GH 11343
- idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name="x")
- # fill by value in categories
- exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name="x")
- tm.assert_index_equal(idx.fillna(1.0), exp)
-
- # fill by value not in categories raises ValueError
- msg = "fill value must be in categories"
- with pytest.raises(ValueError, match=msg):
- idx.fillna(2.0)
-
@pytest.mark.parametrize(
"dtype, engine_type",
[
diff --git a/pandas/tests/indexes/categorical/test_fillna.py b/pandas/tests/indexes/categorical/test_fillna.py
new file mode 100644
index 0000000000000..0d878249d3800
--- /dev/null
+++ b/pandas/tests/indexes/categorical/test_fillna.py
@@ -0,0 +1,19 @@
+import numpy as np
+import pytest
+
+from pandas import CategoricalIndex
+import pandas._testing as tm
+
+
+class TestFillNA:
+ def test_fillna_categorical(self):
+ # GH#11343
+ idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name="x")
+ # fill by value in categories
+ exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name="x")
+ tm.assert_index_equal(idx.fillna(1.0), exp)
+
+ # fill by value not in categories raises ValueError
+ msg = "fill value must be in categories"
+ with pytest.raises(ValueError, match=msg):
+ idx.fillna(2.0)
diff --git a/pandas/tests/indexes/datetimes/test_missing.py b/pandas/tests/indexes/datetimes/test_fillna.py
similarity index 98%
rename from pandas/tests/indexes/datetimes/test_missing.py
rename to pandas/tests/indexes/datetimes/test_fillna.py
index 3399c8eaf6750..5fbe60bb0c50f 100644
--- a/pandas/tests/indexes/datetimes/test_missing.py
+++ b/pandas/tests/indexes/datetimes/test_fillna.py
@@ -4,7 +4,7 @@
import pandas._testing as tm
-class TestDatetimeIndex:
+class TestDatetimeIndexFillNA:
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_fillna_datetime64(self, tz):
# GH 11343
diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py
index 54ffec2e03fd3..4c9d518778ceb 100644
--- a/pandas/tests/indexes/multi/test_missing.py
+++ b/pandas/tests/indexes/multi/test_missing.py
@@ -1,55 +1,16 @@
import numpy as np
import pytest
-from pandas._libs.tslib import iNaT
-
import pandas as pd
-from pandas import Int64Index, MultiIndex, PeriodIndex, UInt64Index
+from pandas import MultiIndex
import pandas._testing as tm
-from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
def test_fillna(idx):
# GH 11343
-
- # TODO: Remove or Refactor. Not Implemented for MultiIndex
- for name, index in [("idx", idx)]:
- if len(index) == 0:
- pass
- elif isinstance(index, MultiIndex):
- idx = index.copy()
- msg = "isna is not defined for MultiIndex"
- with pytest.raises(NotImplementedError, match=msg):
- idx.fillna(idx[0])
- else:
- idx = index.copy()
- result = idx.fillna(idx[0])
- tm.assert_index_equal(result, idx)
- assert result is not idx
-
- msg = "'value' must be a scalar, passed: "
- with pytest.raises(TypeError, match=msg):
- idx.fillna([idx[0]])
-
- idx = index.copy()
- values = idx.values
-
- if isinstance(index, DatetimeIndexOpsMixin):
- values[1] = iNaT
- elif isinstance(index, (Int64Index, UInt64Index)):
- continue
- else:
- values[1] = np.nan
-
- if isinstance(index, PeriodIndex):
- idx = type(index)(values, freq=index.freq)
- else:
- idx = type(index)(values)
-
- expected = np.array([False] * len(idx), dtype=bool)
- expected[1] = True
- tm.assert_numpy_array_equal(idx._isnan, expected)
- assert idx.hasnans is True
+ msg = "isna is not defined for MultiIndex"
+ with pytest.raises(NotImplementedError, match=msg):
+ idx.fillna(idx[0])
def test_dropna():
diff --git a/pandas/tests/indexes/period/test_fillna.py b/pandas/tests/indexes/period/test_fillna.py
new file mode 100644
index 0000000000000..602e87333a6c1
--- /dev/null
+++ b/pandas/tests/indexes/period/test_fillna.py
@@ -0,0 +1,36 @@
+from pandas import Index, NaT, Period, PeriodIndex
+import pandas._testing as tm
+
+
+class TestFillNA:
+ def test_fillna_period(self):
+ # GH#11343
+ idx = PeriodIndex(["2011-01-01 09:00", NaT, "2011-01-01 11:00"], freq="H")
+
+ exp = PeriodIndex(
+ ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], freq="H"
+ )
+ result = idx.fillna(Period("2011-01-01 10:00", freq="H"))
+ tm.assert_index_equal(result, exp)
+
+ exp = Index(
+ [
+ Period("2011-01-01 09:00", freq="H"),
+ "x",
+ Period("2011-01-01 11:00", freq="H"),
+ ],
+ dtype=object,
+ )
+ result = idx.fillna("x")
+ tm.assert_index_equal(result, exp)
+
+ exp = Index(
+ [
+ Period("2011-01-01 09:00", freq="H"),
+ Period("2011-01-01", freq="D"),
+ Period("2011-01-01 11:00", freq="H"),
+ ],
+ dtype=object,
+ )
+ result = idx.fillna(Period("2011-01-01", freq="D"))
+ tm.assert_index_equal(result, exp)
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index a62936655e09c..0ce10fb8779a1 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -67,35 +67,6 @@ def test_repeat_freqstr(self, index, use_numpy):
tm.assert_index_equal(result, expected)
assert result.freqstr == index.freqstr
- def test_fillna_period(self):
- # GH 11343
- idx = PeriodIndex(["2011-01-01 09:00", NaT, "2011-01-01 11:00"], freq="H")
-
- exp = PeriodIndex(
- ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], freq="H"
- )
- tm.assert_index_equal(idx.fillna(Period("2011-01-01 10:00", freq="H")), exp)
-
- exp = Index(
- [
- Period("2011-01-01 09:00", freq="H"),
- "x",
- Period("2011-01-01 11:00", freq="H"),
- ],
- dtype=object,
- )
- tm.assert_index_equal(idx.fillna("x"), exp)
-
- exp = Index(
- [
- Period("2011-01-01 09:00", freq="H"),
- Period("2011-01-01", freq="D"),
- Period("2011-01-01 11:00", freq="H"),
- ],
- dtype=object,
- )
- tm.assert_index_equal(idx.fillna(Period("2011-01-01", freq="D")), exp)
-
def test_no_millisecond_field(self):
msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
diff --git a/pandas/tests/indexes/timedeltas/test_fillna.py b/pandas/tests/indexes/timedeltas/test_fillna.py
new file mode 100644
index 0000000000000..47b2f2ff597f4
--- /dev/null
+++ b/pandas/tests/indexes/timedeltas/test_fillna.py
@@ -0,0 +1,17 @@
+from pandas import Index, NaT, Timedelta, TimedeltaIndex
+import pandas._testing as tm
+
+
+class TestFillNA:
+ def test_fillna_timedelta(self):
+ # GH#11343
+ idx = TimedeltaIndex(["1 day", NaT, "3 day"])
+
+ exp = TimedeltaIndex(["1 day", "2 day", "3 day"])
+ tm.assert_index_equal(idx.fillna(Timedelta("2 day")), exp)
+
+ exp = TimedeltaIndex(["1 day", "3 hour", "3 day"])
+ idx.fillna(Timedelta("3 hour"))
+
+ exp = Index([Timedelta("1 day"), "x", Timedelta("3 day")], dtype=object)
+ tm.assert_index_equal(idx.fillna("x"), exp)
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index fa00b870ca757..129bdef870a14 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -43,21 +43,6 @@ def test_shift(self):
def test_pickle_compat_construction(self):
pass
- def test_fillna_timedelta(self):
- # GH 11343
- idx = pd.TimedeltaIndex(["1 day", pd.NaT, "3 day"])
-
- exp = pd.TimedeltaIndex(["1 day", "2 day", "3 day"])
- tm.assert_index_equal(idx.fillna(pd.Timedelta("2 day")), exp)
-
- exp = pd.TimedeltaIndex(["1 day", "3 hour", "3 day"])
- idx.fillna(pd.Timedelta("3 hour"))
-
- exp = pd.Index(
- [pd.Timedelta("1 day"), "x", pd.Timedelta("3 day")], dtype=object
- )
- tm.assert_index_equal(idx.fillna("x"), exp)
-
def test_isin(self):
index = tm.makeTimedeltaIndex(4)
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 232b2a61f6268..f7c7457f3a703 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -629,11 +629,8 @@ def test_timedelta_assignment():
s = s.reindex(s.index.insert(0, "A"))
tm.assert_series_equal(s, Series([np.nan, Timedelta("1 days")], index=["A", "B"]))
- result = s.fillna(timedelta(1))
- expected = Series(Timedelta("1 days"), index=["A", "B"])
- tm.assert_series_equal(result, expected)
-
s.loc["A"] = timedelta(1)
+ expected = Series(Timedelta("1 days"), index=["A", "B"])
tm.assert_series_equal(s, expected)
# GH 14155
diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py
new file mode 100644
index 0000000000000..c34838be24fc1
--- /dev/null
+++ b/pandas/tests/series/methods/test_fillna.py
@@ -0,0 +1,176 @@
+from datetime import timedelta
+
+import numpy as np
+import pytest
+
+from pandas import Categorical, DataFrame, NaT, Period, Series, Timedelta, Timestamp
+import pandas._testing as tm
+
+
+class TestSeriesFillNA:
+ def test_fillna_pytimedelta(self):
+ # GH#8209
+ ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"])
+
+ result = ser.fillna(timedelta(1))
+ expected = Series(Timedelta("1 days"), index=["A", "B"])
+ tm.assert_series_equal(result, expected)
+
+ def test_fillna_period(self):
+ # GH#13737
+ ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
+
+ res = ser.fillna(Period("2012-01", freq="M"))
+ exp = Series([Period("2011-01", freq="M"), Period("2012-01", freq="M")])
+ tm.assert_series_equal(res, exp)
+ assert res.dtype == "Period[M]"
+
+ def test_fillna_dt64_timestamp(self):
+ ser = Series(
+ [
+ Timestamp("20130101"),
+ Timestamp("20130101"),
+ Timestamp("20130102"),
+ Timestamp("20130103 9:01:01"),
+ ]
+ )
+ ser[2] = np.nan
+
+ # reg fillna
+ result = ser.fillna(Timestamp("20130104"))
+ expected = Series(
+ [
+ Timestamp("20130101"),
+ Timestamp("20130101"),
+ Timestamp("20130104"),
+ Timestamp("20130103 9:01:01"),
+ ]
+ )
+ tm.assert_series_equal(result, expected)
+
+ result = ser.fillna(NaT)
+ expected = ser
+ tm.assert_series_equal(result, expected)
+
+ def test_fillna_dt64_non_nao(self):
+ # GH#27419
+ ser = Series([Timestamp("2010-01-01"), NaT, Timestamp("2000-01-01")])
+ val = np.datetime64("1975-04-05", "ms")
+
+ result = ser.fillna(val)
+ expected = Series(
+ [Timestamp("2010-01-01"), Timestamp("1975-04-05"), Timestamp("2000-01-01")]
+ )
+ tm.assert_series_equal(result, expected)
+
+ def test_fillna_numeric_inplace(self):
+ x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
+ y = x.copy()
+
+ y.fillna(value=0, inplace=True)
+
+ expected = x.fillna(value=0)
+ tm.assert_series_equal(y, expected)
+
+ # ---------------------------------------------------------------
+ # CategoricalDtype
+
+ @pytest.mark.parametrize(
+ "fill_value, expected_output",
+ [
+ ("a", ["a", "a", "b", "a", "a"]),
+ ({1: "a", 3: "b", 4: "b"}, ["a", "a", "b", "b", "b"]),
+ ({1: "a"}, ["a", "a", "b", np.nan, np.nan]),
+ ({1: "a", 3: "b"}, ["a", "a", "b", "b", np.nan]),
+ (Series("a"), ["a", np.nan, "b", np.nan, np.nan]),
+ (Series("a", index=[1]), ["a", "a", "b", np.nan, np.nan]),
+ (Series({1: "a", 3: "b"}), ["a", "a", "b", "b", np.nan]),
+ (Series(["a", "b"], index=[3, 4]), ["a", np.nan, "b", "a", "b"]),
+ ],
+ )
+ def test_fillna_categorical(self, fill_value, expected_output):
+ # GH#17033
+ # Test fillna for a Categorical series
+ data = ["a", np.nan, "b", np.nan, np.nan]
+ ser = Series(Categorical(data, categories=["a", "b"]))
+ exp = Series(Categorical(expected_output, categories=["a", "b"]))
+ result = ser.fillna(fill_value)
+ tm.assert_series_equal(result, exp)
+
+ @pytest.mark.parametrize(
+ "fill_value, expected_output",
+ [
+ (Series(["a", "b", "c", "d", "e"]), ["a", "b", "b", "d", "e"]),
+ (Series(["b", "d", "a", "d", "a"]), ["a", "d", "b", "d", "a"]),
+ (
+ Series(
+ Categorical(
+ ["b", "d", "a", "d", "a"], categories=["b", "c", "d", "e", "a"]
+ )
+ ),
+ ["a", "d", "b", "d", "a"],
+ ),
+ ],
+ )
+ def test_fillna_categorical_with_new_categories(self, fill_value, expected_output):
+ # GH#26215
+ data = ["a", np.nan, "b", np.nan, np.nan]
+ ser = Series(Categorical(data, categories=["a", "b", "c", "d", "e"]))
+ exp = Series(Categorical(expected_output, categories=["a", "b", "c", "d", "e"]))
+ result = ser.fillna(fill_value)
+ tm.assert_series_equal(result, exp)
+
+ def test_fillna_categorical_raises(self):
+ data = ["a", np.nan, "b", np.nan, np.nan]
+ ser = Series(Categorical(data, categories=["a", "b"]))
+
+ with pytest.raises(ValueError, match="fill value must be in categories"):
+ ser.fillna("d")
+
+ with pytest.raises(ValueError, match="fill value must be in categories"):
+ ser.fillna(Series("d"))
+
+ with pytest.raises(ValueError, match="fill value must be in categories"):
+ ser.fillna({1: "d", 3: "a"})
+
+ msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
+ with pytest.raises(TypeError, match=msg):
+ ser.fillna(["a", "b"])
+
+ msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
+ with pytest.raises(TypeError, match=msg):
+ ser.fillna(("a", "b"))
+
+ msg = (
+ '"value" parameter must be a scalar, dict '
+ 'or Series, but you passed a "DataFrame"'
+ )
+ with pytest.raises(TypeError, match=msg):
+ ser.fillna(DataFrame({1: ["a"], 3: ["b"]}))
+
+ # ---------------------------------------------------------------
+ # Invalid Usages
+
+ def test_fillna_listlike_invalid(self):
+ ser = Series(np.random.randint(-100, 100, 50))
+ msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
+ with pytest.raises(TypeError, match=msg):
+ ser.fillna([1, 2])
+
+ msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
+ with pytest.raises(TypeError, match=msg):
+ ser.fillna((1, 2))
+
+ def test_fillna_method_and_limit_invalid(self):
+
+ # related GH#9217, make sure limit is an int and greater than 0
+ ser = Series([1, 2, 3, None])
+ msg = (
+ r"Cannot specify both 'value' and 'method'\.|"
+ r"Limit must be greater than 0|"
+ "Limit must be an integer"
+ )
+ for limit in [-1, 0, 1.0, 2.0]:
+ for method in ["backfill", "bfill", "pad", "ffill", None]:
+ with pytest.raises(ValueError, match=msg):
+ ser.fillna(1, limit=limit, method=method)
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 1687f80e9f3ed..9e9b93a499487 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -122,22 +122,6 @@ def test_datetime64_fillna(self):
)
s[2] = np.nan
- # reg fillna
- result = s.fillna(Timestamp("20130104"))
- expected = Series(
- [
- Timestamp("20130101"),
- Timestamp("20130101"),
- Timestamp("20130104"),
- Timestamp("20130103 9:01:01"),
- ]
- )
- tm.assert_series_equal(result, expected)
-
- result = s.fillna(NaT)
- expected = s
- tm.assert_series_equal(result, expected)
-
# ffill
result = s.ffill()
expected = Series(
@@ -177,242 +161,228 @@ def test_datetime64_fillna(self):
result = s.fillna(method="backfill")
tm.assert_series_equal(result, expected)
- def test_datetime64_tz_fillna(self):
-
- for tz in ["US/Eastern", "Asia/Tokyo"]:
- # DatetimeBlock
- s = Series(
- [
- Timestamp("2011-01-01 10:00"),
- pd.NaT,
- Timestamp("2011-01-03 10:00"),
- pd.NaT,
- ]
- )
- null_loc = pd.Series([False, True, False, True])
-
- result = s.fillna(pd.Timestamp("2011-01-02 10:00"))
- expected = Series(
- [
- Timestamp("2011-01-01 10:00"),
- Timestamp("2011-01-02 10:00"),
- Timestamp("2011-01-03 10:00"),
- Timestamp("2011-01-02 10:00"),
- ]
- )
- tm.assert_series_equal(expected, result)
- # check s is not changed
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz))
- expected = Series(
- [
- Timestamp("2011-01-01 10:00"),
- Timestamp("2011-01-02 10:00", tz=tz),
- Timestamp("2011-01-03 10:00"),
- Timestamp("2011-01-02 10:00", tz=tz),
- ]
- )
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- result = s.fillna("AAA")
- expected = Series(
- [
- Timestamp("2011-01-01 10:00"),
- "AAA",
- Timestamp("2011-01-03 10:00"),
- "AAA",
- ],
- dtype=object,
- )
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- result = s.fillna(
- {
- 1: pd.Timestamp("2011-01-02 10:00", tz=tz),
- 3: pd.Timestamp("2011-01-04 10:00"),
- }
- )
- expected = Series(
- [
- Timestamp("2011-01-01 10:00"),
- Timestamp("2011-01-02 10:00", tz=tz),
- Timestamp("2011-01-03 10:00"),
- Timestamp("2011-01-04 10:00"),
- ]
- )
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- result = s.fillna(
- {
- 1: pd.Timestamp("2011-01-02 10:00"),
- 3: pd.Timestamp("2011-01-04 10:00"),
- }
- )
- expected = Series(
- [
- Timestamp("2011-01-01 10:00"),
- Timestamp("2011-01-02 10:00"),
- Timestamp("2011-01-03 10:00"),
- Timestamp("2011-01-04 10:00"),
- ]
- )
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- # DatetimeBlockTZ
- idx = pd.DatetimeIndex(
- ["2011-01-01 10:00", pd.NaT, "2011-01-03 10:00", pd.NaT], tz=tz
- )
- s = pd.Series(idx)
- assert s.dtype == f"datetime64[ns, {tz}]"
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- result = s.fillna(pd.Timestamp("2011-01-02 10:00"))
- expected = Series(
- [
- Timestamp("2011-01-01 10:00", tz=tz),
- Timestamp("2011-01-02 10:00"),
- Timestamp("2011-01-03 10:00", tz=tz),
- Timestamp("2011-01-02 10:00"),
- ]
- )
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz))
- idx = pd.DatetimeIndex(
- [
- "2011-01-01 10:00",
- "2011-01-02 10:00",
- "2011-01-03 10:00",
- "2011-01-02 10:00",
- ],
- tz=tz,
- )
- expected = Series(idx)
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
- idx = pd.DatetimeIndex(
- [
- "2011-01-01 10:00",
- "2011-01-02 10:00",
- "2011-01-03 10:00",
- "2011-01-02 10:00",
- ],
- tz=tz,
- )
- expected = Series(idx)
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- result = s.fillna("AAA")
- expected = Series(
- [
- Timestamp("2011-01-01 10:00", tz=tz),
- "AAA",
- Timestamp("2011-01-03 10:00", tz=tz),
- "AAA",
- ],
- dtype=object,
- )
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- result = s.fillna(
- {
- 1: pd.Timestamp("2011-01-02 10:00", tz=tz),
- 3: pd.Timestamp("2011-01-04 10:00"),
- }
- )
- expected = Series(
- [
- Timestamp("2011-01-01 10:00", tz=tz),
- Timestamp("2011-01-02 10:00", tz=tz),
- Timestamp("2011-01-03 10:00", tz=tz),
- Timestamp("2011-01-04 10:00"),
- ]
- )
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- result = s.fillna(
- {
- 1: pd.Timestamp("2011-01-02 10:00", tz=tz),
- 3: pd.Timestamp("2011-01-04 10:00", tz=tz),
- }
- )
- expected = Series(
- [
- Timestamp("2011-01-01 10:00", tz=tz),
- Timestamp("2011-01-02 10:00", tz=tz),
- Timestamp("2011-01-03 10:00", tz=tz),
- Timestamp("2011-01-04 10:00", tz=tz),
- ]
- )
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- # filling with a naive/other zone, coerce to object
- result = s.fillna(Timestamp("20130101"))
- expected = Series(
- [
- Timestamp("2011-01-01 10:00", tz=tz),
- Timestamp("2013-01-01"),
- Timestamp("2011-01-03 10:00", tz=tz),
- Timestamp("2013-01-01"),
- ]
- )
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
-
- result = s.fillna(Timestamp("20130101", tz="US/Pacific"))
- expected = Series(
- [
- Timestamp("2011-01-01 10:00", tz=tz),
- Timestamp("2013-01-01", tz="US/Pacific"),
- Timestamp("2011-01-03 10:00", tz=tz),
- Timestamp("2013-01-01", tz="US/Pacific"),
- ]
- )
- tm.assert_series_equal(expected, result)
- tm.assert_series_equal(pd.isna(s), null_loc)
+ @pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
+ def test_datetime64_tz_fillna(self, tz):
+ # DatetimeBlock
+ s = Series(
+ [
+ Timestamp("2011-01-01 10:00"),
+ pd.NaT,
+ Timestamp("2011-01-03 10:00"),
+ pd.NaT,
+ ]
+ )
+ null_loc = pd.Series([False, True, False, True])
+
+ result = s.fillna(pd.Timestamp("2011-01-02 10:00"))
+ expected = Series(
+ [
+ Timestamp("2011-01-01 10:00"),
+ Timestamp("2011-01-02 10:00"),
+ Timestamp("2011-01-03 10:00"),
+ Timestamp("2011-01-02 10:00"),
+ ]
+ )
+ tm.assert_series_equal(expected, result)
+ # check s is not changed
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz))
+ expected = Series(
+ [
+ Timestamp("2011-01-01 10:00"),
+ Timestamp("2011-01-02 10:00", tz=tz),
+ Timestamp("2011-01-03 10:00"),
+ Timestamp("2011-01-02 10:00", tz=tz),
+ ]
+ )
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ result = s.fillna("AAA")
+ expected = Series(
+ [
+ Timestamp("2011-01-01 10:00"),
+ "AAA",
+ Timestamp("2011-01-03 10:00"),
+ "AAA",
+ ],
+ dtype=object,
+ )
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ result = s.fillna(
+ {
+ 1: pd.Timestamp("2011-01-02 10:00", tz=tz),
+ 3: pd.Timestamp("2011-01-04 10:00"),
+ }
+ )
+ expected = Series(
+ [
+ Timestamp("2011-01-01 10:00"),
+ Timestamp("2011-01-02 10:00", tz=tz),
+ Timestamp("2011-01-03 10:00"),
+ Timestamp("2011-01-04 10:00"),
+ ]
+ )
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ result = s.fillna(
+ {1: pd.Timestamp("2011-01-02 10:00"), 3: pd.Timestamp("2011-01-04 10:00")}
+ )
+ expected = Series(
+ [
+ Timestamp("2011-01-01 10:00"),
+ Timestamp("2011-01-02 10:00"),
+ Timestamp("2011-01-03 10:00"),
+ Timestamp("2011-01-04 10:00"),
+ ]
+ )
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ # DatetimeBlockTZ
+ idx = pd.DatetimeIndex(
+ ["2011-01-01 10:00", pd.NaT, "2011-01-03 10:00", pd.NaT], tz=tz
+ )
+ s = pd.Series(idx)
+ assert s.dtype == f"datetime64[ns, {tz}]"
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ result = s.fillna(pd.Timestamp("2011-01-02 10:00"))
+ expected = Series(
+ [
+ Timestamp("2011-01-01 10:00", tz=tz),
+ Timestamp("2011-01-02 10:00"),
+ Timestamp("2011-01-03 10:00", tz=tz),
+ Timestamp("2011-01-02 10:00"),
+ ]
+ )
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz))
+ idx = pd.DatetimeIndex(
+ [
+ "2011-01-01 10:00",
+ "2011-01-02 10:00",
+ "2011-01-03 10:00",
+ "2011-01-02 10:00",
+ ],
+ tz=tz,
+ )
+ expected = Series(idx)
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+ result = s.fillna(pd.Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
+ idx = pd.DatetimeIndex(
+ [
+ "2011-01-01 10:00",
+ "2011-01-02 10:00",
+ "2011-01-03 10:00",
+ "2011-01-02 10:00",
+ ],
+ tz=tz,
+ )
+ expected = Series(idx)
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ result = s.fillna("AAA")
+ expected = Series(
+ [
+ Timestamp("2011-01-01 10:00", tz=tz),
+ "AAA",
+ Timestamp("2011-01-03 10:00", tz=tz),
+ "AAA",
+ ],
+ dtype=object,
+ )
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ result = s.fillna(
+ {
+ 1: pd.Timestamp("2011-01-02 10:00", tz=tz),
+ 3: pd.Timestamp("2011-01-04 10:00"),
+ }
+ )
+ expected = Series(
+ [
+ Timestamp("2011-01-01 10:00", tz=tz),
+ Timestamp("2011-01-02 10:00", tz=tz),
+ Timestamp("2011-01-03 10:00", tz=tz),
+ Timestamp("2011-01-04 10:00"),
+ ]
+ )
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ result = s.fillna(
+ {
+ 1: pd.Timestamp("2011-01-02 10:00", tz=tz),
+ 3: pd.Timestamp("2011-01-04 10:00", tz=tz),
+ }
+ )
+ expected = Series(
+ [
+ Timestamp("2011-01-01 10:00", tz=tz),
+ Timestamp("2011-01-02 10:00", tz=tz),
+ Timestamp("2011-01-03 10:00", tz=tz),
+ Timestamp("2011-01-04 10:00", tz=tz),
+ ]
+ )
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ # filling with a naive/other zone, coerce to object
+ result = s.fillna(Timestamp("20130101"))
+ expected = Series(
+ [
+ Timestamp("2011-01-01 10:00", tz=tz),
+ Timestamp("2013-01-01"),
+ Timestamp("2011-01-03 10:00", tz=tz),
+ Timestamp("2013-01-01"),
+ ]
+ )
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ result = s.fillna(Timestamp("20130101", tz="US/Pacific"))
+ expected = Series(
+ [
+ Timestamp("2011-01-01 10:00", tz=tz),
+ Timestamp("2013-01-01", tz="US/Pacific"),
+ Timestamp("2011-01-03 10:00", tz=tz),
+ Timestamp("2013-01-01", tz="US/Pacific"),
+ ]
+ )
+ tm.assert_series_equal(expected, result)
+ tm.assert_series_equal(pd.isna(s), null_loc)
+
+ def test_fillna_dt64tz_with_method(self):
# with timezone
# GH 15855
- df = pd.Series([pd.Timestamp("2012-11-11 00:00:00+01:00"), pd.NaT])
+ ser = pd.Series([pd.Timestamp("2012-11-11 00:00:00+01:00"), pd.NaT])
exp = pd.Series(
[
pd.Timestamp("2012-11-11 00:00:00+01:00"),
pd.Timestamp("2012-11-11 00:00:00+01:00"),
]
)
- tm.assert_series_equal(df.fillna(method="pad"), exp)
+ tm.assert_series_equal(ser.fillna(method="pad"), exp)
- df = pd.Series([pd.NaT, pd.Timestamp("2012-11-11 00:00:00+01:00")])
+ ser = pd.Series([pd.NaT, pd.Timestamp("2012-11-11 00:00:00+01:00")])
exp = pd.Series(
[
pd.Timestamp("2012-11-11 00:00:00+01:00"),
pd.Timestamp("2012-11-11 00:00:00+01:00"),
]
)
- tm.assert_series_equal(df.fillna(method="bfill"), exp)
-
- def test_datetime64_non_nano_fillna(self):
- # GH#27419
- ser = Series([Timestamp("2010-01-01"), pd.NaT, Timestamp("2000-01-01")])
- val = np.datetime64("1975-04-05", "ms")
-
- result = ser.fillna(val)
- expected = Series(
- [Timestamp("2010-01-01"), Timestamp("1975-04-05"), Timestamp("2000-01-01")]
- )
- tm.assert_series_equal(result, expected)
+ tm.assert_series_equal(ser.fillna(method="bfill"), exp)
def test_fillna_consistency(self):
# GH 16402
@@ -486,28 +456,6 @@ def test_fillna_int(self):
s.fillna(method="ffill", inplace=True)
tm.assert_series_equal(s.fillna(method="ffill", inplace=False), s)
- def test_fillna_raise(self):
- s = Series(np.random.randint(-100, 100, 50))
- msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
- with pytest.raises(TypeError, match=msg):
- s.fillna([1, 2])
-
- msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
- with pytest.raises(TypeError, match=msg):
- s.fillna((1, 2))
-
- # related GH 9217, make sure limit is an int and greater than 0
- s = Series([1, 2, 3, None])
- msg = (
- r"Cannot specify both 'value' and 'method'\.|"
- r"Limit must be greater than 0|"
- "Limit must be an integer"
- )
- for limit in [-1, 0, 1.0, 2.0]:
- for method in ["backfill", "bfill", "pad", "ffill", None]:
- with pytest.raises(ValueError, match=msg):
- s.fillna(1, limit=limit, method=method)
-
def test_categorical_nan_equality(self):
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
@@ -523,77 +471,6 @@ def test_categorical_nan_handling(self):
s.values.codes, np.array([0, 1, -1, 0], dtype=np.int8)
)
- @pytest.mark.parametrize(
- "fill_value, expected_output",
- [
- ("a", ["a", "a", "b", "a", "a"]),
- ({1: "a", 3: "b", 4: "b"}, ["a", "a", "b", "b", "b"]),
- ({1: "a"}, ["a", "a", "b", np.nan, np.nan]),
- ({1: "a", 3: "b"}, ["a", "a", "b", "b", np.nan]),
- (Series("a"), ["a", np.nan, "b", np.nan, np.nan]),
- (Series("a", index=[1]), ["a", "a", "b", np.nan, np.nan]),
- (Series({1: "a", 3: "b"}), ["a", "a", "b", "b", np.nan]),
- (Series(["a", "b"], index=[3, 4]), ["a", np.nan, "b", "a", "b"]),
- ],
- )
- def test_fillna_categorical(self, fill_value, expected_output):
- # GH 17033
- # Test fillna for a Categorical series
- data = ["a", np.nan, "b", np.nan, np.nan]
- s = Series(Categorical(data, categories=["a", "b"]))
- exp = Series(Categorical(expected_output, categories=["a", "b"]))
- tm.assert_series_equal(s.fillna(fill_value), exp)
-
- @pytest.mark.parametrize(
- "fill_value, expected_output",
- [
- (Series(["a", "b", "c", "d", "e"]), ["a", "b", "b", "d", "e"]),
- (Series(["b", "d", "a", "d", "a"]), ["a", "d", "b", "d", "a"]),
- (
- Series(
- Categorical(
- ["b", "d", "a", "d", "a"], categories=["b", "c", "d", "e", "a"]
- )
- ),
- ["a", "d", "b", "d", "a"],
- ),
- ],
- )
- def test_fillna_categorical_with_new_categories(self, fill_value, expected_output):
- # GH 26215
- data = ["a", np.nan, "b", np.nan, np.nan]
- s = Series(Categorical(data, categories=["a", "b", "c", "d", "e"]))
- exp = Series(Categorical(expected_output, categories=["a", "b", "c", "d", "e"]))
- tm.assert_series_equal(s.fillna(fill_value), exp)
-
- def test_fillna_categorical_raise(self):
- data = ["a", np.nan, "b", np.nan, np.nan]
- s = Series(Categorical(data, categories=["a", "b"]))
-
- with pytest.raises(ValueError, match="fill value must be in categories"):
- s.fillna("d")
-
- with pytest.raises(ValueError, match="fill value must be in categories"):
- s.fillna(Series("d"))
-
- with pytest.raises(ValueError, match="fill value must be in categories"):
- s.fillna({1: "d", 3: "a"})
-
- msg = '"value" parameter must be a scalar or dict, but you passed a "list"'
- with pytest.raises(TypeError, match=msg):
- s.fillna(["a", "b"])
-
- msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'
- with pytest.raises(TypeError, match=msg):
- s.fillna(("a", "b"))
-
- msg = (
- '"value" parameter must be a scalar, dict '
- 'or Series, but you passed a "DataFrame"'
- )
- with pytest.raises(TypeError, match=msg):
- s.fillna(DataFrame({1: ["a"], 3: ["b"]}))
-
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype="M8[ns]")
@@ -736,15 +613,6 @@ def test_fillna_bug(self):
expected = Series([1.0, 1.0, 3.0, 3.0, np.nan], x.index)
tm.assert_series_equal(filled, expected)
- def test_fillna_inplace(self):
- x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])
- y = x.copy()
-
- y.fillna(value=0, inplace=True)
-
- expected = x.fillna(value=0)
- tm.assert_series_equal(y, expected)
-
def test_fillna_invalid_method(self, datetime_series):
try:
datetime_series.fillna(method="ffil")
diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py
index d5a3efcf5757c..5c2c1db14e70f 100644
--- a/pandas/tests/series/test_period.py
+++ b/pandas/tests/series/test_period.py
@@ -38,15 +38,6 @@ def test_isna(self):
tm.assert_series_equal(s.isna(), Series([False, True]))
tm.assert_series_equal(s.notna(), Series([True, False]))
- def test_fillna(self):
- # GH 13737
- s = Series([pd.Period("2011-01", freq="M"), pd.Period("NaT", freq="M")])
-
- res = s.fillna(pd.Period("2012-01", freq="M"))
- exp = Series([pd.Period("2011-01", freq="M"), pd.Period("2012-01", freq="M")])
- tm.assert_series_equal(res, exp)
- assert res.dtype == "Period[M]"
-
def test_dropna(self):
# GH 13737
s = Series([pd.Period("2011-01", freq="M"), pd.Period("NaT", freq="M")])
| This gets them all for tests.indexes, a fraction for tests.series, and none for DataFrame. This will take multiple passes | https://api.github.com/repos/pandas-dev/pandas/pulls/33183 | 2020-03-31T16:06:42Z | 2020-03-31T16:38:07Z | 2020-03-31T16:38:07Z | 2020-03-31T17:34:41Z |
BUG: support min/max functions for rolling windows with custom BaseIndexer | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 19e8acdaa7384..15a2f3e8ea08c 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -102,7 +102,8 @@ Other API changes
- Added :meth:`DataFrame.value_counts` (:issue:`5377`)
- :meth:`Groupby.groups` now returns an abbreviated representation when called on large dataframes (:issue:`1135`)
- ``loc`` lookups with an object-dtype :class:`Index` and an integer key will now raise ``KeyError`` instead of ``TypeError`` when key is missing (:issue:`31905`)
-- Using a :func:`pandas.api.indexers.BaseIndexer` with ``min``, ``max``, ``std``, ``var``, ``count``, ``skew``, ``cov``, ``corr`` will now raise a ``NotImplementedError`` (:issue:`32865`)
+- Using a :func:`pandas.api.indexers.BaseIndexer` with ``std``, ``var``, ``count``, ``skew``, ``cov``, ``corr`` will now raise a ``NotImplementedError`` (:issue:`32865`)
+- Using a :func:`pandas.api.indexers.BaseIndexer` with ``min``, ``max`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`)
-
Backwards incompatible API changes
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 1d1963fb04818..f3889039c095e 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -1051,7 +1051,7 @@ cdef _roll_min_max_variable(ndarray[numeric] values,
bint is_max):
cdef:
numeric ai
- int64_t i, close_offset, curr_win_size
+ int64_t i, k, curr_win_size, start
Py_ssize_t nobs = 0, N = len(values)
deque Q[int64_t] # min/max always the front
deque W[int64_t] # track the whole window for nobs compute
@@ -1068,60 +1068,45 @@ cdef _roll_min_max_variable(ndarray[numeric] values,
# The original impl didn't deal with variable window sizes
# So the code was optimized for that
- for i in range(starti[0], endi[0]):
- ai = init_mm(values[i], &nobs, is_max)
-
- # Discard previous entries if we find new min or max
- if is_max:
- while not Q.empty() and ((ai >= values[Q.back()]) or
- values[Q.back()] != values[Q.back()]):
- Q.pop_back()
- else:
- while not Q.empty() and ((ai <= values[Q.back()]) or
- values[Q.back()] != values[Q.back()]):
- Q.pop_back()
- Q.push_back(i)
- W.push_back(i)
-
- # if right is open then the first window is empty
- close_offset = 0 if endi[0] > starti[0] else 1
# first window's size
curr_win_size = endi[0] - starti[0]
+ # GH 32865
+ # Anchor output index to values index to provide custom
+ # BaseIndexer support
+ for i in range(N):
- for i in range(endi[0], endi[N-1]):
- if not Q.empty() and curr_win_size > 0:
- output[i-1+close_offset] = calc_mm(
- minp, nobs, values[Q.front()])
- else:
- output[i-1+close_offset] = NaN
-
- ai = init_mm(values[i], &nobs, is_max)
-
- # Discard previous entries if we find new min or max
- if is_max:
- while not Q.empty() and ((ai >= values[Q.back()]) or
- values[Q.back()] != values[Q.back()]):
- Q.pop_back()
+ curr_win_size = endi[i] - starti[i]
+ if i == 0:
+ start = starti[i]
else:
- while not Q.empty() and ((ai <= values[Q.back()]) or
- values[Q.back()] != values[Q.back()]):
- Q.pop_back()
+ start = endi[i - 1]
- # Maintain window/nobs retention
- curr_win_size = endi[i + close_offset] - starti[i + close_offset]
- while not Q.empty() and Q.front() <= i - curr_win_size:
+ for k in range(start, endi[i]):
+ ai = init_mm(values[k], &nobs, is_max)
+ # Discard previous entries if we find new min or max
+ if is_max:
+ while not Q.empty() and ((ai >= values[Q.back()]) or
+ values[Q.back()] != values[Q.back()]):
+ Q.pop_back()
+ else:
+ while not Q.empty() and ((ai <= values[Q.back()]) or
+ values[Q.back()] != values[Q.back()]):
+ Q.pop_back()
+ Q.push_back(k)
+ W.push_back(k)
+
+ # Discard entries outside and left of current window
+ while not Q.empty() and Q.front() <= starti[i] - 1:
Q.pop_front()
- while not W.empty() and W.front() <= i - curr_win_size:
+ while not W.empty() and W.front() <= starti[i] - 1:
remove_mm(values[W.front()], &nobs)
W.pop_front()
- Q.push_back(i)
- W.push_back(i)
-
- if not Q.empty() and curr_win_size > 0:
- output[N-1] = calc_mm(minp, nobs, values[Q.front()])
- else:
- output[N-1] = NaN
+ # Save output based on index in input value array
+ if not Q.empty() and curr_win_size > 0:
+ output[i] = calc_mm(minp, nobs, values[Q.front()])
+ else:
+ output[i] = NaN
return output
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 8abc47886d261..05f19de19f9f7 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -327,7 +327,7 @@ def func(arg, window, min_periods=None):
def validate_baseindexer_support(func_name: Optional[str]) -> None:
# GH 32865: These functions work correctly with a BaseIndexer subclass
- BASEINDEXER_WHITELIST = {"mean", "sum", "median", "kurt", "quantile"}
+ BASEINDEXER_WHITELIST = {"min", "max", "mean", "sum", "median", "kurt", "quantile"}
if isinstance(func_name, str) and func_name not in BASEINDEXER_WHITELIST:
raise NotImplementedError(
f"{func_name} is not supported with using a BaseIndexer "
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index e9190dfde4fc4..25d575e0ad0b6 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -82,9 +82,7 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
df.rolling(indexer, win_type="boxcar")
-@pytest.mark.parametrize(
- "func", ["min", "max", "std", "var", "count", "skew", "cov", "corr"]
-)
+@pytest.mark.parametrize("func", ["std", "var", "count", "skew", "cov", "corr"])
def test_notimplemented_functions(func):
# GH 32865
class CustomIndexer(BaseIndexer):
@@ -95,3 +93,34 @@ def get_window_bounds(self, num_values, min_periods, center, closed):
indexer = CustomIndexer()
with pytest.raises(NotImplementedError, match=f"{func} is not supported"):
getattr(df.rolling(indexer), func)()
+
+
+@pytest.mark.parametrize("constructor", [Series, DataFrame])
+@pytest.mark.parametrize(
+ "func,alt_func,expected",
+ [
+ ("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan]),
+ ("max", np.max, [2.0, 3.0, 4.0, 100.0, 100.0, 100.0, 8.0, 9.0, 9.0, np.nan]),
+ ],
+)
+def test_rolling_forward_window(constructor, func, alt_func, expected):
+ # GH 32865
+ class ForwardIndexer(BaseIndexer):
+ def get_window_bounds(self, num_values, min_periods, center, closed):
+ start = np.arange(num_values, dtype="int64")
+ end_s = start[: -self.window_size] + self.window_size
+ end_e = np.full(self.window_size, num_values, dtype="int64")
+ end = np.concatenate([end_s, end_e])
+
+ return start, end
+
+ values = np.arange(10)
+ values[5] = 100.0
+
+ indexer = ForwardIndexer(window_size=3)
+ rolling = constructor(values).rolling(window=indexer, min_periods=2)
+ result = getattr(rolling, func)()
+ expected = constructor(expected)
+ tm.assert_equal(result, expected)
+ expected2 = constructor(rolling.apply(lambda x: alt_func(x)))
+ tm.assert_equal(result, expected2)
| - [X] xref #32865
- [X] 1 tests added / 1 passed
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
## The problem
We currently don't support several rolling window functions when building a rolling window object using a custom class descended from `pandas.api.indexers.Baseindexer`. The implementations were written with backward-looking windows in mind, and this led to these functions breaking.
Currently, using these functions returns a `NotImplemented` error thanks to #33057, but ideally we want to update the implementations, so that they will work without a performance hit. This is what I aim to do over a series of PRs.
## Scope of PR
This PR proposes an alternative implementation for the `min` and `max` rolling window functions implemented in the `_roll_min_max_variable` function in the `aggregations.pyx` file.
## Perf note
This implementation is slightly faster than the one on master branch (tested with a `Series` with 10e6 randomly generated values and `timeit`). If necessary, I can add a benchmark to the benchmark suite, although it must be noted that we need to benchmark with large arrays of data to minimize the effect of overhead. | https://api.github.com/repos/pandas-dev/pandas/pulls/33180 | 2020-03-31T13:48:19Z | 2020-04-03T19:58:23Z | 2020-04-03T19:58:23Z | 2020-04-09T06:35:39Z |
DOC: Fixed examples in pandas/core/arrays/ | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index be6c076952ca1..a9a8b9aa14c5e 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -288,26 +288,17 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Doctests interval classes' ; echo $MSG
- pytest -q --doctest-modules \
- pandas/core/indexes/interval.py \
- pandas/core/arrays/interval.py
+ pytest -q --doctest-modules pandas/core/indexes/interval.py
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Doctests arrays'; echo $MSG
- pytest -q --doctest-modules \
- pandas/core/arrays/string_.py \
- pandas/core/arrays/integer.py \
- pandas/core/arrays/boolean.py
+ pytest -q --doctest-modules pandas/core/arrays/
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Doctests dtypes'; echo $MSG
pytest -q --doctest-modules pandas/core/dtypes/
RET=$(($RET + $?)) ; echo $MSG "DONE"
- MSG='Doctests arrays/boolean.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/arrays/boolean.py
- RET=$(($RET + $?)) ; echo $MSG "DONE"
-
MSG='Doctests base.py' ; echo $MSG
pytest -q --doctest-modules pandas/core/base.py
RET=$(($RET + $?)) ; echo $MSG "DONE"
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index af897e86a14d4..6cb597ba75852 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -1161,7 +1161,7 @@ def _create_method(cls, op, coerce_to_dtype=True):
--------
Given an ExtensionArray subclass called MyExtensionArray, use
- >>> __add__ = cls._create_method(operator.add)
+ __add__ = cls._create_method(operator.add)
in the class definition of MyExtensionArray to create the operator
for addition, that will be based on the operator implementation
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index edc138574830d..f283b6fd3b4b3 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1598,19 +1598,19 @@ def sort_values(self, inplace=False, ascending=True, na_position="last"):
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
- [NaN, 2.0, 2.0, NaN, 5.0]
+ [NaN, 2, 2, NaN, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values()
- [2.0, 2.0, 5.0, NaN, NaN]
+ [2, 2, 5, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
- [5.0, 2.0, 2.0, NaN, NaN]
+ [5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
- [NaN, NaN, 2.0, 2.0, 5.0]
+ [NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
- [NaN, NaN, 5.0, 2.0, 2.0]
+ [NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
@@ -1835,7 +1835,7 @@ def take(self, indexer, allow_fill: bool = False, fill_value=None):
>>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
[a, a, a]
- Categories (3, object): [a, b]
+ Categories (2, object): [a, b]
Specifying a fill value that's not in ``self.categories``
will raise a ``TypeError``.
@@ -2231,33 +2231,32 @@ def unique(self):
-------
unique values : ``Categorical``
+ See Also
+ --------
+ pandas.unique
+ CategoricalIndex.unique
+ Series.unique
+
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
- >>> pd.Categorical(list('baabc'))
+ >>> pd.Categorical(list("baabc")).unique()
[b, a, c]
Categories (3, object): [b, a, c]
- >>> pd.Categorical(list('baabc'), categories=list('abc'))
+ >>> pd.Categorical(list("baabc"), categories=list("abc")).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
- >>> pd.Categorical(list('baabc'),
- ... categories=list('abc'),
- ... ordered=True)
+ >>> pd.Categorical(
+ ... list("baabc"), categories=list("abc"), ordered=True
+ ... ).unique()
[b, a, c]
Categories (3, object): [a < b < c]
-
- See Also
- --------
- unique
- CategoricalIndex.unique
- Series.unique
-
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
@@ -2438,7 +2437,7 @@ def replace(self, to_replace, value, inplace: bool = False):
--------
>>> s = pd.Categorical([1, 2, 1, 3])
>>> s.replace(1, 3)
- [3, 3, 2, 3]
+ [3, 2, 3, 3]
Categories (2, int64): [2, 3]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
@@ -2506,16 +2505,100 @@ class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
Examples
--------
+ >>> s = pd.Series(list("abbccc")).astype("category")
+ >>> s
+ 0 a
+ 1 b
+ 2 b
+ 3 c
+ 4 c
+ 5 c
+ dtype: category
+ Categories (3, object): [a, b, c]
+
>>> s.cat.categories
- >>> s.cat.categories = list('abc')
- >>> s.cat.rename_categories(list('cab'))
- >>> s.cat.reorder_categories(list('cab'))
- >>> s.cat.add_categories(['d','e'])
- >>> s.cat.remove_categories(['d'])
- >>> s.cat.remove_unused_categories()
- >>> s.cat.set_categories(list('abcde'))
+ Index(['a', 'b', 'c'], dtype='object')
+
+ >>> s.cat.rename_categories(list("cba"))
+ 0 c
+ 1 b
+ 2 b
+ 3 a
+ 4 a
+ 5 a
+ dtype: category
+ Categories (3, object): [c, b, a]
+
+ >>> s.cat.reorder_categories(list("cba"))
+ 0 a
+ 1 b
+ 2 b
+ 3 c
+ 4 c
+ 5 c
+ dtype: category
+ Categories (3, object): [c, b, a]
+
+ >>> s.cat.add_categories(["d", "e"])
+ 0 a
+ 1 b
+ 2 b
+ 3 c
+ 4 c
+ 5 c
+ dtype: category
+ Categories (5, object): [a, b, c, d, e]
+
+ >>> s.cat.remove_categories(["a", "c"])
+ 0 NaN
+ 1 b
+ 2 b
+ 3 NaN
+ 4 NaN
+ 5 NaN
+ dtype: category
+ Categories (1, object): [b]
+
+ >>> s1 = s.cat.add_categories(["d", "e"])
+ >>> s1.cat.remove_unused_categories()
+ 0 a
+ 1 b
+ 2 b
+ 3 c
+ 4 c
+ 5 c
+ dtype: category
+ Categories (3, object): [a, b, c]
+
+ >>> s.cat.set_categories(list("abcde"))
+ 0 a
+ 1 b
+ 2 b
+ 3 c
+ 4 c
+ 5 c
+ dtype: category
+ Categories (5, object): [a, b, c, d, e]
+
>>> s.cat.as_ordered()
+ 0 a
+ 1 b
+ 2 b
+ 3 c
+ 4 c
+ 5 c
+ dtype: category
+ Categories (3, object): [a < b < c]
+
>>> s.cat.as_unordered()
+ 0 a
+ 1 b
+ 2 b
+ 3 c
+ 4 c
+ 5 c
+ dtype: category
+ Categories (3, object): [a, b, c]
"""
def __init__(self, data):
@@ -2603,7 +2686,7 @@ def _recode_for_categories(codes: np.ndarray, old_categories, new_categories):
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
- array([ 1, 0, 0, -1])
+ array([ 1, 0, 0, -1], dtype=int8)
"""
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index a153b4e06157b..c0bbbebac7c33 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -181,7 +181,7 @@ def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) ->
Examples
--------
- >>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
+ >>> self._unbox_scalar(Timedelta("10s")) # doctest: +SKIP
10000000000
"""
raise AbstractMethodError(self)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index e2a13df069ae2..e6a17491e9378 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -922,9 +922,10 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"):
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
- 0 2015-03-29 03:00:00+02:00
- 1 2015-03-29 03:30:00+02:00
- dtype: datetime64[ns, Europe/Warsaw]
+ 0 2018-10-28 01:20:00+02:00
+ 1 2018-10-28 02:36:00+02:00
+ 2 2018-10-28 03:46:00+01:00
+ dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
@@ -935,15 +936,17 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"):
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
- dtype: datetime64[ns, 'Europe/Warsaw']
+ dtype: datetime64[ns, Europe/Warsaw]
+
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
- dtype: datetime64[ns, 'Europe/Warsaw']
+ dtype: datetime64[ns, Europe/Warsaw]
+
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
- dtype: datetime64[ns, 'Europe/Warsaw']
+ dtype: datetime64[ns, Europe/Warsaw]
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
@@ -1604,9 +1607,9 @@ def date(self):
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[ns]', freq='A-DEC')
>>> idx.is_leap_year
- array([ True, False, False], dtype=bool)
+ array([ True, False, False])
- >>> dates = pd.Series(idx)
+ >>> dates_series = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index cf6c16d4cad5d..d23d26d870f75 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -94,7 +94,7 @@ def to_numpy(
>>> a = pd.array([True, False, pd.NA], dtype="boolean")
>>> a.to_numpy()
- array([True, False, NA], dtype=object)
+ array([True, False, <NA>], dtype=object)
When no missing values are present, an equivalent dtype can be used.
@@ -110,7 +110,7 @@ def to_numpy(
>>> a = pd.array([True, False, pd.NA], dtype="boolean")
>>> a
<BooleanArray>
- [True, False, NA]
+ [True, False, <NA>]
Length: 3, dtype: boolean
>>> a.to_numpy(dtype="bool")
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index be9cc53d33d6f..d9bd567f88845 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -818,6 +818,7 @@ def period_array(
Integers that look like years are handled
>>> period_array([2000, 2001, 2002], freq='D')
+ <PeriodArray>
['2000-01-01', '2001-01-01', '2002-01-01']
Length: 3, dtype: period[D]
diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py
index 787407060c7f1..8a30d2b954b55 100644
--- a/pandas/core/arrays/sparse/accessor.py
+++ b/pandas/core/arrays/sparse/accessor.py
@@ -67,24 +67,25 @@ def from_coo(cls, A, dense_index=False):
Examples
--------
>>> from scipy import sparse
- >>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
- shape=(3, 4))
+
+ >>> A = sparse.coo_matrix(
+ ... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
+ ... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
- with 3 stored elements in COOrdinate format>
+ with 3 stored elements in COOrdinate format>
+
>>> A.todense()
- matrix([[ 0., 0., 1., 2.],
- [ 3., 0., 0., 0.],
- [ 0., 0., 0., 0.]])
+ matrix([[0., 0., 1., 2.],
+ [3., 0., 0., 0.],
+ [0., 0., 0., 0.]])
+
>>> ss = pd.Series.sparse.from_coo(A)
>>> ss
- 0 2 1
- 3 2
- 1 0 3
- dtype: float64
- BlockIndex
- Block locations: array([0], dtype=int32)
- Block lengths: array([3], dtype=int32)
+ 0 2 1.0
+ 3 2.0
+ 1 0 3.0
+ dtype: Sparse[float64, nan]
"""
from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
from pandas import Series
@@ -119,24 +120,49 @@ def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
Examples
--------
>>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
- >>> s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
- (1, 2, 'a', 1),
- (1, 1, 'b', 0),
- (1, 1, 'b', 1),
- (2, 1, 'b', 0),
- (2, 1, 'b', 1)],
- names=['A', 'B', 'C', 'D'])
+ >>> s.index = pd.MultiIndex.from_tuples(
+ ... [
+ ... (1, 2, "a", 0),
+ ... (1, 2, "a", 1),
+ ... (1, 1, "b", 0),
+ ... (1, 1, "b", 1),
+ ... (2, 1, "b", 0),
+ ... (2, 1, "b", 1)
+ ... ],
+ ... names=["A", "B", "C", "D"],
+ ... )
+ >>> s
+ A B C D
+ 1 2 a 0 3.0
+ 1 NaN
+ 1 b 0 1.0
+ 1 3.0
+ 2 1 b 0 NaN
+ 1 NaN
+ dtype: float64
+
>>> ss = s.astype("Sparse")
- >>> A, rows, columns = ss.sparse.to_coo(row_levels=['A', 'B'],
- ... column_levels=['C', 'D'],
- ... sort_labels=True)
+ >>> ss
+ A B C D
+ 1 2 a 0 3.0
+ 1 NaN
+ 1 b 0 1.0
+ 1 3.0
+ 2 1 b 0 NaN
+ 1 NaN
+ dtype: Sparse[float64, nan]
+
+ >>> A, rows, columns = ss.sparse.to_coo(
+ ... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
+ ... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
- with 3 stored elements in COOrdinate format>
+ with 3 stored elements in COOrdinate format>
>>> A.todense()
- matrix([[ 0., 0., 1., 3.],
- [ 3., 0., 0., 0.],
- [ 0., 0., 0., 0.]])
+ matrix([[0., 0., 1., 3.],
+ [3., 0., 0., 0.],
+ [0., 0., 0., 0.]])
+
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 8c09aa9176f31..a98875ace09aa 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -1048,7 +1048,7 @@ def astype(self, dtype=None, copy=True):
Examples
--------
- >>> arr = SparseArray([0, 0, 1, 2])
+ >>> arr = pd.arrays.SparseArray([0, 0, 1, 2])
>>> arr
[0, 0, 1, 2]
Fill: 0
@@ -1066,8 +1066,8 @@ def astype(self, dtype=None, copy=True):
>>> arr.astype(np.dtype('float64'))
... # doctest: +NORMALIZE_WHITESPACE
- [0, 0, 1.0, 2.0]
- Fill: 0
+ [0.0, 0.0, 1.0, 2.0]
+ Fill: 0.0
IntIndex
Indices: array([2, 3], dtype=int32)
@@ -1107,19 +1107,19 @@ def map(self, mapper):
Examples
--------
>>> arr = pd.arrays.SparseArray([0, 1, 2])
- >>> arr.apply(lambda x: x + 10)
+ >>> arr.map(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
- >>> arr.apply({0: 10, 1: 11, 2: 12})
+ >>> arr.map({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
- >>> arr.apply(pd.Series([10, 11, 12], index=[0, 1, 2]))
+ >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
index 135514e334920..afa11586fda04 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -347,7 +347,7 @@ def _subtype_with_str(self):
dtype('O')
>>> dtype._subtype_with_str
- str
+ <class 'str'>
"""
if isinstance(self.fill_value, str):
return type(self.fill_value)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33179 | 2020-03-31T12:52:44Z | 2020-04-01T19:07:42Z | 2020-04-01T19:07:42Z | 2020-04-01T19:09:11Z |
WEB: Moving pandas blog to the website | diff --git a/web/pandas/community/blog/2019-user-survey.md b/web/pandas/community/blog/2019-user-survey.md
new file mode 100644
index 0000000000000..73c426e7cbec9
--- /dev/null
+++ b/web/pandas/community/blog/2019-user-survey.md
@@ -0,0 +1,172 @@
+Title: 2019 pandas user survey
+Date: 2019-08-22
+
+<style type="text/css">
+table td {
+ background: none;
+}
+
+table tr.even td {
+ background: none;
+}
+
+table {
+ text-shadow: none;
+}
+
+</style>
+
+# 2019 pandas user survey
+
+Pandas recently conducted a user survey to help guide future development.
+Thanks to everyone who participated! This post presents the high-level results.
+
+This analysis and the raw data can be found [on GitHub](https://github.com/pandas-dev/pandas-user-surveys) and run on Binder
+
+[](https://mybinder.org/v2/gh/pandas-dev/pandas-user-surveys/master?filepath=2019.ipynb)
+
+
+We had about 1250 responses over the 15 days we ran the survey in the summer of 2019.
+
+## About the Respondents
+
+There was a fair amount of representation across pandas experience and frequency of use, though the majority of respondents are on the more experienced side.
+
+
+
+
+
+
+
+
+
+
+
+We included a few questions that were also asked in the [Python Developers Survey](https://www.jetbrains.com/research/python-developers-survey-2018/) so we could compare Pandas' population to Python's.
+
+90% of our respondents use Python as a primary language (compared with 84% from the PSF survey).
+
+
+
+
+
+ Yes 90.67%
+ No 9.33%
+ Name: Is Python your main language?, dtype: object
+
+
+
+Windows users are well represented (see [Steve Dower's talk](https://www.youtube.com/watch?v=uoI57uMdDD4) on this topic).
+
+
+
+
+
+ Linux 61.57%
+ Windows 60.21%
+ MacOS 42.75%
+ Name: What Operating Systems do you use?, dtype: object
+
+
+
+For environment isolation, [conda](https://conda.io/en/latest/) was the most popular.
+
+
+
+
+
+
+
+Most respondents are Python 3 only.
+
+
+
+
+
+ 3 92.39%
+ 2 & 3 6.80%
+ 2 0.81%
+ Name: Python 2 or 3?, dtype: object
+
+
+
+## Pandas APIs
+
+It can be hard for open source projects to know what features are actually used. We asked a few questions to get an idea.
+
+CSV and Excel are (for better or worse) the most popular formats.
+
+
+
+
+
+
+In preparation for a possible refactor of pandas internals, we wanted to get a sense for
+how common wide (100s of columns or more) DataFrames are.
+
+
+
+
+
+
+Pandas is slowly growing new extension types. Categoricals are the most popular,
+and the nullable integer type is already almost as popular as datetime with timezone.
+
+
+
+
+
+
+More and better examples seem to be a high-priority development item.
+Pandas recently received a NumFOCUS grant to improve our documentation,
+which we're using to write tutorial-style documentation, which should help
+meet this need.
+
+
+
+
+
+
+We also asked about specific, commonly-requested features.
+
+
+
+
+
+
+Of these, the clear standout is "scaling" to large datasets. A couple observations:
+
+1. Perhaps pandas' documentation should do a better job of promoting libraries that provide scalable dataframes (like [Dask](https://dask.org), [vaex](https://dask.org), and [modin](https://modin.readthedocs.io/en/latest/))
+2. Memory efficiency (perhaps from a native string data type, fewer internal copies, etc.) is a valuable goal.
+
+After that, the next-most critical improvement is integer missing values. Those were actually added in [Pandas 0.24](https://pandas.pydata.org/pandas-docs/stable/whatsnew/v0.24.0.html#optional-integer-na-support), but they're not the default, and there's still some incompatibilities with the rest of pandas API.
+
+Pandas is a less conservative library than, say, NumPy. We're approaching 1.0, but on the way we've made many deprecations and some outright API breaking changes. Fortunately, most people are OK with the tradeoff.
+
+
+
+
+
+ Yes 94.89%
+ No 5.11%
+ Name: Is Pandas stable enough for you?, dtype: object
+
+
+
+There's a perception (which is shared by many of the pandas maintainers) that the pandas API is too large. To measure that, we asked whether users thought that pandas' API was too large, too small, or just right.
+
+
+
+
+
+
+Finally, we asked for an overall satisfaction with the library, from 1 (not very unsatisfied) to 5 (very satisfied).
+
+
+
+
+
+
+Most people are very satisfied. The average response is 4.39. I look forward to tracking this number over time.
+
+If you're analyzing the raw data, be sure to share the results with us [@pandas_dev](https://twitter.com/pandas_dev).
diff --git a/web/pandas/community/blog/extension-arrays.md b/web/pandas/community/blog/extension-arrays.md
new file mode 100644
index 0000000000000..bc6179adfa719
--- /dev/null
+++ b/web/pandas/community/blog/extension-arrays.md
@@ -0,0 +1,218 @@
+Title: pandas extension arrays
+Date: 2019-01-04
+
+# pandas extension arrays
+
+Extensibility was a major theme in pandas development over the last couple of
+releases. This post introduces the pandas extension array interface: the
+motivation behind it and how it might affect you as a pandas user. Finally, we
+look at how extension arrays may shape the future of pandas.
+
+Extension Arrays are just one of the changes in pandas 0.24.0. See the
+[whatsnew][whatsnew] for a full changelog.
+
+## The Motivation
+
+Pandas is built on top of NumPy. You could roughly define a Series as a wrapper
+around a NumPy array, and a DataFrame as a collection of Series with a shared
+index. That's not entirely correct for several reasons, but I want to focus on
+the "wrapper around a NumPy array" part. It'd be more correct to say "wrapper
+around an array-like object".
+
+Pandas mostly uses NumPy's builtin data representation; we've restricted it in
+places and extended it in others. For example, pandas' early users cared greatly
+about timezone-aware datetimes, which NumPy doesn't support. So pandas
+internally defined a `DatetimeTZ` dtype (which mimics a NumPy dtype), and
+allowed you to use that dtype in `Index`, `Series`, and as a column in a
+`DataFrame`. That dtype carried around the tzinfo, but wasn't itself a valid
+NumPy dtype.
+
+As another example, consider `Categorical`. This actually composes *two* arrays:
+one for the `categories` and one for the `codes`. But it can be stored in a
+`DataFrame` like any other column.
+
+Each of these extension types pandas added is useful on its own, but carries a
+high maintenance cost. Large sections of the codebase need to be aware of how to
+handle a NumPy array or one of these other kinds of special arrays. This made
+adding new extension types to pandas very difficult.
+
+Anaconda, Inc. had a client who regularly dealt with datasets with IP addresses.
+They wondered if it made sense to add an [IPArray][IPArray] to pandas. In the
+end, we didn't think it passed the cost-benefit test for inclusion in pandas
+*itself*, but we were interested in defining an interface for third-party
+extensions to pandas. Any object implementing this interface would be allowed in
+pandas. I was able to write [cyberpandas][cyberpandas] outside of pandas, but it
+feels like using any other dtype built into pandas.
+
+## The Current State
+
+As of pandas 0.24.0, all of pandas' internal extension arrays (Categorical,
+Datetime with Timezone, Period, Interval, and Sparse) are now built on top of
+the ExtensionArray interface. Users shouldn't notice many changes. The main
+thing you'll notice is that things are cast to `object` dtype in fewer places,
+meaning your code will run faster and your types will be more stable. This
+includes storing `Period` and `Interval` data in `Series` (which were previously
+cast to object dtype).
+
+Additionally, we'll be able to add *new* extension arrays with relative ease.
+For example, 0.24.0 (optionally) solved one of pandas longest-standing pain
+points: missing values casting integer-dtype values to float.
+
+
+```python
+>>> int_ser = pd.Series([1, 2], index=[0, 2])
+>>> int_ser
+0 1
+2 2
+dtype: int64
+
+>>> int_ser.reindex([0, 1, 2])
+0 1.0
+1 NaN
+2 2.0
+dtype: float64
+```
+
+With the new [IntegerArray][IntegerArray] and nullable integer dtypes, we can
+natively represent integer data with missing values.
+
+```python
+>>> int_ser = pd.Series([1, 2], index=[0, 2], dtype=pd.Int64Dtype())
+>>> int_ser
+0 1
+2 2
+dtype: Int64
+
+>>> int_ser.reindex([0, 1, 2])
+0 1
+1 NaN
+2 2
+dtype: Int64
+```
+
+One thing that does change slightly is how you should access the raw (unlabeled)
+arrays stored inside a Series or Index, which is occasionally useful. Perhaps
+the method you're calling only works with NumPy arrays, or perhaps you want to
+disable automatic alignment.
+
+In the past, you'd hear things like "Use `.values` to extract the NumPy array
+from a Series or DataFrame." If it were a good resource, they'd tell you that's
+not *entirely* true, since there are some exceptions. I'd like to delve into
+those exceptions.
+
+The fundamental problem with `.values` is that it serves two purposes:
+
+1. Extracting the array backing a Series, Index, or DataFrame
+2. Converting the Series, Index, or DataFrame to a NumPy array
+
+As we saw above, the "array" backing a Series or Index might not be a NumPy
+array, it may instead be an extension array (from pandas or a third-party
+library). For example, consider `Categorical`,
+
+```python
+>>> cat = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
+>>> ser = pd.Series(cat)
+>>> ser
+0 a
+1 b
+2 a
+dtype: category
+Categories (3, object): [a, b, c]
+
+>>> ser.values
+[a, b, a]
+Categories (3, object): [a, b, c]
+```
+
+In this case `.values` is a Categorical, not a NumPy array. For period-dtype
+data, `.values` returns a NumPy array of `Period` objects, which is expensive to
+create. For timezone-aware data, `.values` converts to UTC and *drops* the
+timezone info. These kind of surprises (different types, or expensive or lossy
+conversions) stem from trying to shoehorn these extension arrays into a NumPy
+array. But the entire point of an extension array is for representing data NumPy
+*can't* natively represent.
+
+To solve the `.values` problem, we've split its roles into two dedicated methods:
+
+1. Use `.array` to get a zero-copy reference to the underlying data
+2. Use `.to_numpy()` to get a (potentially expensive, lossy) NumPy array of the
+ data.
+
+So with our Categorical example,
+
+```python
+>>> ser.array
+[a, b, a]
+Categories (3, object): [a, b, c]
+
+>>> ser.to_numpy()
+array(['a', 'b', 'a'], dtype=object)
+```
+
+To summarize:
+
+- `.array` will *always* be an ExtensionArray, and is always a zero-copy
+ reference back to the data.
+- `.to_numpy()` is *always* a NumPy array, so you can reliably call
+ ndarray-specific methods on it.
+
+You shouldn't ever need `.values` anymore.
+
+## Possible Future Paths
+
+Extension Arrays open up quite a few exciting opportunities. Currently, pandas
+represents string data using Python objects in a NumPy array, which is slow.
+Libraries like [Apache Arrow][arrow] provide native support for variable-length
+strings, and the [Fletcher][fletcher] library provides pandas extension arrays
+for Arrow arrays. It will allow [GeoPandas][geopandas] to store geometry data
+more efficiently. Pandas (or third-party libraries) will be able to support
+nested data, data with units, geo data, GPU arrays. Keep an eye on the
+[pandas ecosystem][eco] page, which will keep track of third-party extension
+arrays. It's an exciting time for pandas development.
+
+## Other Thoughts
+
+I'd like to emphasize that this is an *interface*, and not a concrete array
+implementation. We are *not* reimplementing NumPy here in pandas. Rather, this
+is a way to take any array-like data structure (one or more NumPy arrays, an
+Apache Arrow array, a CuPy array) and place it inside a DataFrame. I think
+getting pandas out of the array business, and instead thinking about
+higher-level tabular data things, is a healthy development for the project.
+
+This works perfectly with NumPy's [`__array_ufunc__`][ufunc] protocol and
+[NEP-18][nep18]. You'll be able to use the familiar NumPy API on objects that
+aren't backed by NumPy memory.
+
+## Upgrade
+
+These new goodies are all available in the recently released pandas 0.24.
+
+conda:
+
+ conda install -c conda-forge pandas
+
+pip:
+
+ pip install --upgrade pandas
+
+As always, we're happy to hear feedback on the [mailing list][ml],
+[@pandas-dev][twitter], or [issue tracker][tracker].
+
+Thanks to the many contributors, maintainers, and [institutional
+partners][partners] involved in the pandas community.
+
+
+[IPArray]: https://github.com/pandas-dev/pandas/issues/18767
+[cyberpandas]: https://cyberpandas.readthedocs.io
+[IntegerArray]: http://pandas.pydata.org/pandas-docs/version/0.24/reference/api/pandas.arrays.IntegerArray.html
+[fletcher]: https://github.com/xhochy/fletcher
+[arrow]: https://arrow.apache.org
+[ufunc]: https://docs.scipy.org/doc/numpy-1.13.0/neps/ufunc-overrides.html
+[nep18]: https://www.numpy.org/neps/nep-0018-array-function-protocol.html
+[ml]: https://mail.python.org/mailman/listinfo/pandas-dev
+[twitter]: https://twitter.com/pandas_dev
+[tracker]: https://github.com/pandas-dev/pandas/issues
+[partners]: https://github.com/pandas-dev/pandas-governance/blob/master/people.md
+[eco]: http://pandas.pydata.org/pandas-docs/stable/ecosystem.html#extension-data-types
+[whatsnew]: http://pandas.pydata.org/pandas-docs/version/0.24/whatsnew/v0.24.0.html
+[geopandas]: https://github.com/geopandas/geopandas
diff --git a/web/pandas/community/blog.html b/web/pandas/community/blog/index.html
similarity index 100%
rename from web/pandas/community/blog.html
rename to web/pandas/community/blog/index.html
diff --git a/web/pandas/community/blog/pandas-1.0.md b/web/pandas/community/blog/pandas-1.0.md
new file mode 100644
index 0000000000000..b07c34a4ab6b5
--- /dev/null
+++ b/web/pandas/community/blog/pandas-1.0.md
@@ -0,0 +1,31 @@
+Title: pandas 1.0
+Date: 2020-01-29
+
+# pandas 1.0
+
+Today pandas celebrates its 1.0.0 release. In many ways this is just a normal release with a host of new features, performance improvements, and bug fixes, which are documented in our [release notes](https://pandas.pydata.org/pandas-docs/version/1.0.0/whatsnew/v1.0.0.html). But it’s also something a bit more — a milestone for the project beyond just the commits. We wanted to take some time to reflect on where we've been and where we're going.
+
+## Reflections
+
+The world of scientific Python has changed a lot since pandas was started. In 2011, [the ecosystem was fragmented](https://wesmckinney.com/blog/a-roadmap-for-rich-scientific-data-structures-in-python/): a standard *rich* data structure for statistics and data science had yet to emerge. This echos a similar story for NumPy, which consolidated array efforts that were [previously fragmented](https://numpy.org/old_array_packages.html).
+
+Over the subsequent years, pandas emerged as a *de facto* standard. It’s used by data scientists and analysts and as a data structure for other libraries to build on top of. StackOverflow [cited pandas](https://stackoverflow.blog/2017/09/14/python-growing-quickly/) as one of the reasons for Python being the fastest growing major programming language.
+
+
+
+Today, the ecosystem is in another phase of exploration.
+Several new DataFrame implementations are cropping up to fill needs not met by pandas.
+We're [working with those projects](https://datapythonista.me/blog/dataframe-summit-at-euroscipy.html) to establish shared standards and semantics for rich data structures.
+
+## Community and Project Health
+
+This release cycle is the first to involve any kind of grant funding for pandas. [Pandas received funding](https://chanzuckerberg.com/eoss/proposals/) as part of the CZI’s [*Essential Open Source Software for Science*](https://medium.com/@cziscience/the-invisible-foundations-of-biomedicine-4ab7f8d4f5dd) [program](https://medium.com/@cziscience/the-invisible-foundations-of-biomedicine-4ab7f8d4f5dd). The pandas project relies overwhelmingly on volunteer contributors. These volunteer contributions are shepherded and augmented by some maintainers who are given time from their employers — our [institutional partners](https://github.com/pandas-dev/pandas-governance/blob/master/people.md#institutional-partners). The largest work item in our grant award was library maintenance, which specifically includes working with community members to address our large backlog of open issues and pull requests.
+
+While a “1.0.0” version might seem arbitrary or anti-climactic (given that pandas as a codebase is nearly 12 years old), we see it as a symbolic milestone celebrating the growth of our core developer team and depth of our contributor base. Few open source projects are ever truly “done” and pandas is no different. We recognize the essential role that pandas now occupies, and we intend to continue to evolve the project and adapt to the needs of the world’s data wranglers.
+
+## Going Forward
+
+Our [roadmap](https://pandas.pydata.org/pandas-docs/version/1.0.0/development/roadmap.html) contains an up-to-date listing of where we see the project heading over the next few years.
+Needless to say, there's still plenty to do.
+
+Check out the [release notes](https://pandas.pydata.org/pandas-docs/version/1.0.0/whatsnew/v1.0.0.html) and visit the [installation page](https://pandas.pydata.org/pandas-docs/version/1.0.0/getting_started/install.html) for instructions on updating to pandas 1.0.
diff --git a/web/pandas/config.yml b/web/pandas/config.yml
index d943ad3833b52..23575cc123050 100644
--- a/web/pandas/config.yml
+++ b/web/pandas/config.yml
@@ -15,6 +15,7 @@ main:
- toc
- tables
- fenced_code
+ - meta
static:
logo: /static/img/pandas_white.svg
css:
@@ -23,7 +24,7 @@ navbar:
- name: "About us"
target:
- name: "About pandas"
- target: /about/index.html
+ target: /about/
- name: "Project roadmap"
target: /about/roadmap.html
- name: "Team"
@@ -39,7 +40,7 @@ navbar:
- name: "Community"
target:
- name: "Blog"
- target: /community/blog.html
+ target: /community/blog/
- name: "Ask a question (StackOverflow)"
target: https://stackoverflow.com/questions/tagged/pandas
- name: "Code of conduct"
@@ -49,9 +50,11 @@ navbar:
- name: "Contribute"
target: /contribute.html
blog:
- num_posts: 8
+ num_posts: 50
+ posts_path: community/blog
+ author: "pandas team"
+ feed_name: "pandas blog"
feed:
- - https://dev.pandas.io/pandas-blog/feeds/all.atom.xml
- https://wesmckinney.com/feeds/pandas.atom.xml
- https://tomaugspurger.github.io/feed
- https://jorisvandenbossche.github.io/feeds/pandas.atom.xml
diff --git a/web/pandas/static/img/blog/2019-user-survey/2019_13_0.png b/web/pandas/static/img/blog/2019-user-survey/2019_13_0.png
new file mode 100644
index 0000000000000..9ce2ff483f2c2
Binary files /dev/null and b/web/pandas/static/img/blog/2019-user-survey/2019_13_0.png differ
diff --git a/web/pandas/static/img/blog/2019-user-survey/2019_18_0.png b/web/pandas/static/img/blog/2019-user-survey/2019_18_0.png
new file mode 100644
index 0000000000000..63b2c93b0573d
Binary files /dev/null and b/web/pandas/static/img/blog/2019-user-survey/2019_18_0.png differ
diff --git a/web/pandas/static/img/blog/2019-user-survey/2019_20_0.png b/web/pandas/static/img/blog/2019-user-survey/2019_20_0.png
new file mode 100644
index 0000000000000..1c7abb0434dad
Binary files /dev/null and b/web/pandas/static/img/blog/2019-user-survey/2019_20_0.png differ
diff --git a/web/pandas/static/img/blog/2019-user-survey/2019_22_0.png b/web/pandas/static/img/blog/2019-user-survey/2019_22_0.png
new file mode 100644
index 0000000000000..5ef3d69b48700
Binary files /dev/null and b/web/pandas/static/img/blog/2019-user-survey/2019_22_0.png differ
diff --git a/web/pandas/static/img/blog/2019-user-survey/2019_24_0.png b/web/pandas/static/img/blog/2019-user-survey/2019_24_0.png
new file mode 100644
index 0000000000000..1a15be05af92d
Binary files /dev/null and b/web/pandas/static/img/blog/2019-user-survey/2019_24_0.png differ
diff --git a/web/pandas/static/img/blog/2019-user-survey/2019_26_0.png b/web/pandas/static/img/blog/2019-user-survey/2019_26_0.png
new file mode 100644
index 0000000000000..4f8d9f2c439ae
Binary files /dev/null and b/web/pandas/static/img/blog/2019-user-survey/2019_26_0.png differ
diff --git a/web/pandas/static/img/blog/2019-user-survey/2019_31_0.png b/web/pandas/static/img/blog/2019-user-survey/2019_31_0.png
new file mode 100644
index 0000000000000..6c8b5f1108f79
Binary files /dev/null and b/web/pandas/static/img/blog/2019-user-survey/2019_31_0.png differ
diff --git a/web/pandas/static/img/blog/2019-user-survey/2019_33_0.png b/web/pandas/static/img/blog/2019-user-survey/2019_33_0.png
new file mode 100644
index 0000000000000..fd490d3e7255a
Binary files /dev/null and b/web/pandas/static/img/blog/2019-user-survey/2019_33_0.png differ
diff --git a/web/pandas/static/img/blog/2019-user-survey/2019_4_0.png b/web/pandas/static/img/blog/2019-user-survey/2019_4_0.png
new file mode 100644
index 0000000000000..5276ed359badb
Binary files /dev/null and b/web/pandas/static/img/blog/2019-user-survey/2019_4_0.png differ
diff --git a/web/pandas/static/img/blog/2019-user-survey/2019_5_0.png b/web/pandas/static/img/blog/2019-user-survey/2019_5_0.png
new file mode 100644
index 0000000000000..a252e1c9b3503
Binary files /dev/null and b/web/pandas/static/img/blog/2019-user-survey/2019_5_0.png differ
diff --git a/web/pandas_web.py b/web/pandas_web.py
index 38ab78f5690e7..e62deaa8cdc7f 100755
--- a/web/pandas_web.py
+++ b/web/pandas_web.py
@@ -78,6 +78,47 @@ def blog_add_posts(context):
"""
tag_expr = re.compile("<.*?>")
posts = []
+ # posts from the file system
+ if context["blog"]["posts_path"]:
+ posts_path = os.path.join(
+ context["source_path"], *context["blog"]["posts_path"].split("/")
+ )
+ for fname in os.listdir(posts_path):
+ if fname.startswith("index."):
+ continue
+ link = (
+ f"/{context['blog']['posts_path']}"
+ f"/{os.path.splitext(fname)[0]}.html"
+ )
+ md = markdown.Markdown(
+ extensions=context["main"]["markdown_extensions"]
+ )
+ with open(os.path.join(posts_path, fname)) as f:
+ html = md.convert(f.read())
+ title = md.Meta["title"][0]
+ summary = re.sub(tag_expr, "", html)
+ try:
+ body_position = summary.index(title) + len(title)
+ except ValueError:
+ raise ValueError(
+ f'Blog post "{fname}" should have a markdown header '
+ f'corresponding to its "Title" element "{title}"'
+ )
+ summary = " ".join(summary[body_position:].split(" ")[:30])
+ posts.append(
+ {
+ "title": title,
+ "author": context["blog"]["author"],
+ "published": datetime.datetime.strptime(
+ md.Meta["date"][0], "%Y-%m-%d"
+ ),
+ "feed": context["blog"]["feed_name"],
+ "link": link,
+ "description": summary,
+ "summary": summary,
+ }
+ )
+ # posts from rss feeds
for feed_url in context["blog"]["feed"]:
feed_data = feedparser.parse(feed_url)
for entry in feed_data.entries:
@@ -180,6 +221,7 @@ def get_context(config_fname: str, ignore_io_errors: bool, **kwargs):
with open(config_fname) as f:
context = yaml.safe_load(f)
+ context["source_path"] = os.path.dirname(config_fname)
context["ignore_io_errors"] = ignore_io_errors
context.update(kwargs)
| Moving the pandas blog posts (now in a separate repo/project) to the `web/community/blog/` in our website. The static site generator we're using for the web, renders them automatically using our layout:

The posts list doesn't have much visual change, just links to the new path:

The main change here, besides adding the posts markdown files (and images) is that the static site generator is now able to not only fetch from rss, but also from the file system.
CC: @TomAugspurger
Btw @TomAugspurger , I increased the number of posts we show in the list, and see that there are some about dask-ml and other things unrelated to pandas being fetched from your blog. Would be great if you can add a pandas tag to your blog, so we only fetch the relevant content. | https://api.github.com/repos/pandas-dev/pandas/pulls/33178 | 2020-03-31T12:35:55Z | 2020-04-07T20:04:50Z | 2020-04-07T20:04:50Z | 2020-04-07T20:04:50Z |
Issue #33161 - Use empty_frame fixture | diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 4fa5e4196ae5b..a7321104f2879 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -22,11 +22,13 @@
)
import pandas._testing as tm
from pandas.arrays import SparseArray
+from pandas.conftest import empty_frame
import pandas.core.common as com
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
+
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
@@ -842,7 +844,7 @@ def test_setitem_with_empty_listlike(self):
def test_setitem_scalars_no_index(self):
# GH16823 / 17894
- df = DataFrame()
+ df = empty_frame()
df["foo"] = 1
expected = DataFrame(columns=["foo"]).astype(np.int64)
tm.assert_frame_equal(df, expected)
@@ -1669,7 +1671,7 @@ def test_reindex_subclass(self):
class MyDataFrame(DataFrame):
pass
- expected = DataFrame()
+ expected = empty_frame()
df = MyDataFrame()
result = df.reindex_like(expected)
diff --git a/pandas/tests/frame/indexing/test_insert.py b/pandas/tests/frame/indexing/test_insert.py
index 622c93d1c2fdc..88fda6462a80f 100644
--- a/pandas/tests/frame/indexing/test_insert.py
+++ b/pandas/tests/frame/indexing/test_insert.py
@@ -8,6 +8,7 @@
from pandas import DataFrame, Index
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestDataFrameInsert:
@@ -58,7 +59,7 @@ def test_insert_column_bug_4032(self):
def test_insert_with_columns_dups(self):
# GH#14291
- df = DataFrame()
+ df = empty_frame()
df.insert(0, "A", ["g", "h", "i"], allow_duplicates=True)
df.insert(0, "A", ["d", "e", "f"], allow_duplicates=True)
df.insert(0, "A", ["a", "b", "c"], allow_duplicates=True)
diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py
index 9fc3629e794e2..3d203774c31a6 100644
--- a/pandas/tests/frame/methods/test_append.py
+++ b/pandas/tests/frame/methods/test_append.py
@@ -4,12 +4,13 @@
import pandas as pd
from pandas import DataFrame, Series, Timestamp
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestDataFrameAppend:
def test_append_empty_list(self):
# GH 28769
- df = DataFrame()
+ df = empty_frame()
result = df.append([])
expected = df
tm.assert_frame_equal(result, expected)
@@ -96,29 +97,29 @@ def test_append_missing_cols(self):
def test_append_empty_dataframe(self):
# Empty df append empty df
- df1 = DataFrame()
- df2 = DataFrame()
+ df1 = empty_frame()
+ df2 = empty_frame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Non-empty df append empty df
df1 = DataFrame(np.random.randn(5, 2))
- df2 = DataFrame()
+ df2 = empty_frame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Empty df with columns append empty df
df1 = DataFrame(columns=["bar", "foo"])
- df2 = DataFrame()
+ df2 = empty_frame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Non-Empty df with columns append empty df
df1 = DataFrame(np.random.randn(5, 2), columns=["bar", "foo"])
- df2 = DataFrame()
+ df2 = empty_frame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
@@ -130,7 +131,7 @@ def test_append_dtypes(self):
# can sometimes infer the correct type
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(5))
- df2 = DataFrame()
+ df2 = empty_frame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
index 7715cb1cb6eec..e8db28c31e3dc 100644
--- a/pandas/tests/frame/methods/test_combine_first.py
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -6,6 +6,7 @@
import pandas as pd
from pandas import DataFrame, Index, Series
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestDataFrameCombineFirst:
@@ -73,7 +74,7 @@ def test_combine_first(self, float_frame):
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
- comb = DataFrame().combine_first(float_frame)
+ comb = empty_frame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
diff --git a/pandas/tests/frame/methods/test_count.py b/pandas/tests/frame/methods/test_count.py
index 13a93e3efc48c..a3409e7fd8b8d 100644
--- a/pandas/tests/frame/methods/test_count.py
+++ b/pandas/tests/frame/methods/test_count.py
@@ -1,11 +1,12 @@
from pandas import DataFrame, Series
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestDataFrameCount:
def test_count(self):
# corner case
- frame = DataFrame()
+ frame = empty_frame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
@@ -23,7 +24,7 @@ def test_count(self):
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
- df = DataFrame()
+ df = empty_frame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_filter.py b/pandas/tests/frame/methods/test_filter.py
index 569b2fe21d1c2..9681fb7c17ce3 100644
--- a/pandas/tests/frame/methods/test_filter.py
+++ b/pandas/tests/frame/methods/test_filter.py
@@ -4,6 +4,7 @@
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestDataFrameFilter:
@@ -122,7 +123,7 @@ def test_filter_bytestring(self, name):
tm.assert_frame_equal(df.filter(regex=name), expected)
def test_filter_corner(self):
- empty = DataFrame()
+ empty = empty_frame()
result = empty.filter([])
tm.assert_frame_equal(result, empty)
diff --git a/pandas/tests/frame/methods/test_head_tail.py b/pandas/tests/frame/methods/test_head_tail.py
index 93763bc12ce0d..e5bd7e5da3d74 100644
--- a/pandas/tests/frame/methods/test_head_tail.py
+++ b/pandas/tests/frame/methods/test_head_tail.py
@@ -1,7 +1,7 @@
import numpy as np
-from pandas import DataFrame
import pandas._testing as tm
+from pandas.conftest import empty_frame
def test_head_tail(float_frame):
@@ -25,6 +25,6 @@ def test_head_tail(float_frame):
tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
# test empty dataframe
- empty_df = DataFrame()
+ empty_df = empty_frame()
tm.assert_frame_equal(empty_df.tail(), empty_df)
tm.assert_frame_equal(empty_df.head(), empty_df)
diff --git a/pandas/tests/frame/methods/test_isin.py b/pandas/tests/frame/methods/test_isin.py
index 6307738021f68..47a2b19939610 100644
--- a/pandas/tests/frame/methods/test_isin.py
+++ b/pandas/tests/frame/methods/test_isin.py
@@ -4,6 +4,7 @@
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestDataFrameIsIn:
@@ -176,7 +177,7 @@ def test_isin_empty_datetimelike(self):
df1_ts = DataFrame({"date": pd.to_datetime(["2014-01-01", "2014-01-02"])})
df1_td = DataFrame({"date": [pd.Timedelta(1, "s"), pd.Timedelta(2, "s")]})
df2 = DataFrame({"date": []})
- df3 = DataFrame()
+ df3 = empty_frame()
expected = DataFrame({"date": [False, False]})
diff --git a/pandas/tests/frame/methods/test_round.py b/pandas/tests/frame/methods/test_round.py
index 6dcdf49e93729..5889fe1e7288e 100644
--- a/pandas/tests/frame/methods/test_round.py
+++ b/pandas/tests/frame/methods/test_round.py
@@ -4,6 +4,7 @@
import pandas as pd
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestDataFrameRound:
@@ -11,7 +12,7 @@ def test_round(self):
# GH#2665
# Test that rounding an empty DataFrame does nothing
- df = DataFrame()
+ df = empty_frame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 6525e93d89fce..5497330c7edd8 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -21,6 +21,7 @@
to_timedelta,
)
import pandas._testing as tm
+from pandas.conftest import empty_frame
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
@@ -754,10 +755,10 @@ def test_operators_timedelta64(self):
assert df["off2"].dtype == "timedelta64[ns]"
def test_sum_corner(self):
- empty_frame = DataFrame()
+ empty_frame_ = empty_frame()
- axis0 = empty_frame.sum(0)
- axis1 = empty_frame.sum(1)
+ axis0 = empty_frame_.sum(0)
+ axis1 = empty_frame_.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 91627b46c2fee..26fa238be0e13 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -11,6 +11,7 @@
import pandas as pd
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestDataFrameMisc:
@@ -118,14 +119,14 @@ def test_tab_completion(self):
assert isinstance(df.__getitem__("A"), pd.DataFrame)
def test_not_hashable(self):
- empty_frame = DataFrame()
+ empty_frame_ = empty_frame()
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
- hash(empty_frame)
+ hash(empty_frame_)
def test_column_name_contains_unicode_surrogate(self):
# GH 25509
@@ -162,8 +163,8 @@ def test_get_agg_axis(self, float_frame):
float_frame._get_agg_axis(2)
def test_nonzero(self, float_frame, float_string_frame):
- empty_frame = DataFrame()
- assert empty_frame.empty
+ empty_frame_ = empty_frame()
+ assert empty_frame_.empty
assert not float_frame.empty
assert not float_string_frame.empty
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index e328523253144..5709eb1c72a12 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -12,6 +12,7 @@
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range, notna
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.core.apply import frame_apply
from pandas.core.base import SpecificationError
@@ -74,12 +75,12 @@ def test_apply_mixed_datetimelike(self):
def test_apply_empty(self, float_frame):
# empty
- empty_frame = DataFrame()
+ empty_frame_ = empty_frame()
- applied = empty_frame.apply(np.sqrt)
+ applied = empty_frame_.apply(np.sqrt)
assert applied.empty
- applied = empty_frame.apply(np.mean)
+ applied = empty_frame_.apply(np.mean)
assert applied.empty
no_rows = float_frame[:0]
@@ -99,7 +100,7 @@ def test_apply_empty(self, float_frame):
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
- empty_frame = DataFrame()
+ empty_frame = empty_frame()
x = []
result = empty_frame.apply(x.append, axis=1, result_type="expand")
@@ -760,7 +761,7 @@ def test_with_dictlike_columns(self):
tm.assert_series_equal(result, expected)
# GH 18775
- df = DataFrame()
+ df = empty_frame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 89f8bc433419b..df0ce9a7f86cc 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -10,6 +10,7 @@
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
+from pandas.conftest import empty_frame
import pandas.core.common as com
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
@@ -990,13 +991,13 @@ def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
# corner cases
# empty
- plus_empty = float_frame + DataFrame()
+ plus_empty = float_frame + empty_frame()
assert np.isnan(plus_empty.values).all()
- empty_plus = DataFrame() + float_frame
+ empty_plus = empty_frame() + float_frame
assert np.isnan(empty_plus.values).all()
- empty_empty = DataFrame() + DataFrame()
+ empty_empty = empty_frame() + empty_frame()
assert empty_empty.empty
# out of order
@@ -1116,7 +1117,7 @@ def test_combineFunc(self, float_frame, mixed_float_frame):
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
- result = DataFrame() * 2
+ result = empty_frame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index e67fef9efef6d..efed9e5ded789 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -16,10 +16,12 @@
option_context,
)
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.core.arrays import IntervalArray, integer_array
from pandas.core.internals import ObjectBlock
from pandas.core.internals.blocks import IntBlock
+
# Segregated collection of methods that require the BlockManager internal data
# structure
@@ -345,7 +347,7 @@ def test_copy(self, float_frame, float_string_frame):
assert copy._data is not float_string_frame._data
def test_pickle(self, float_string_frame, timezone_frame):
- empty_frame = DataFrame()
+ empty_frame_ = empty_frame()
unpickled = tm.round_trip_pickle(float_string_frame)
tm.assert_frame_equal(float_string_frame, unpickled)
@@ -354,7 +356,7 @@ def test_pickle(self, float_string_frame, timezone_frame):
float_string_frame._data.ndim
# empty
- unpickled = tm.round_trip_pickle(empty_frame)
+ unpickled = tm.round_trip_pickle(empty_frame_)
repr(unpickled)
# tz frame
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 9f40e8c6931c8..03a9ad2c57437 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -12,6 +12,7 @@
from pandas.compat import PY37, is_platform_little_endian
from pandas.compat.numpy import _is_numpy_dev
+from pandas.conftest import empty_frame
from pandas.core.dtypes.common import is_integer_dtype
@@ -78,7 +79,7 @@ def test_series_with_name_not_matching_column(self):
],
)
def test_empty_constructor(self, constructor):
- expected = DataFrame()
+ expected = empty_frame()
result = constructor()
assert len(result.index) == 0
assert len(result.columns) == 0
@@ -1774,7 +1775,7 @@ def test_constructor_with_datetimes(self):
i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern")
expected = DataFrame({"a": i.to_series().reset_index(drop=True)})
- df = DataFrame()
+ df = empty_frame()
df["a"] = i
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 7cb7115276f71..510fe30f1a665 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -7,6 +7,7 @@
import pandas as pd
from pandas import Categorical, DataFrame, Series, Timestamp, date_range
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.tests.frame.common import _check_mixed_float
@@ -167,7 +168,7 @@ def test_dropna_multiple_axes(self):
def test_dropna_tz_aware_datetime(self):
# GH13407
- df = DataFrame()
+ df = empty_frame()
dt1 = datetime.datetime(2015, 1, 1, tzinfo=dateutil.tz.tzutc())
dt2 = datetime.datetime(2015, 2, 2, tzinfo=dateutil.tz.tzutc())
df["Time"] = [dt1]
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index 9d3c40ce926d7..5bf3cf7906b10 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -7,6 +7,7 @@
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Period, Series, Timedelta, date_range
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestDataFrameReshape:
@@ -53,7 +54,7 @@ def test_pivot_duplicates(self):
def test_pivot_empty(self):
df = DataFrame(columns=["a", "b", "c"])
result = df.pivot("a", "b", "c")
- expected = DataFrame()
+ expected = empty_frame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index e860ea1a3d052..5c94c1440e2ae 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -11,6 +11,7 @@
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, concat
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.core.base import SpecificationError
from pandas.core.groupby.grouper import Grouping
@@ -223,7 +224,7 @@ def test_aggregate_item_by_item(df):
def aggfun(ser):
return ser.size
- result = DataFrame().groupby(df.A).agg(aggfun)
+ result = empty_frame().groupby(df.A).agg(aggfun)
assert isinstance(result, DataFrame)
assert len(result) == 0
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 9fbcced75c327..c69b4d0449fff 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -7,6 +7,7 @@
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, bdate_range
import pandas._testing as tm
+from pandas.conftest import empty_frame
def test_apply_issues():
@@ -658,7 +659,7 @@ def test_func(x):
pass
result = test_df.groupby("groups").apply(test_func)
- expected = DataFrame()
+ expected = empty_frame()
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py
index b4239d7d34a90..50883b525113d 100644
--- a/pandas/tests/groupby/test_counting.py
+++ b/pandas/tests/groupby/test_counting.py
@@ -5,6 +5,7 @@
from pandas import DataFrame, MultiIndex, Period, Series, Timedelta, Timestamp
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestCounting:
@@ -19,7 +20,7 @@ def test_cumcount(self):
tm.assert_series_equal(expected, sg.cumcount())
def test_cumcount_empty(self):
- ge = DataFrame().groupby(level=0)
+ ge = empty_frame().groupby(level=0)
se = Series(dtype=object).groupby(level=0)
# edge case, as this is usually considered float
@@ -94,7 +95,7 @@ def test_ngroup_one_group(self):
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_empty(self):
- ge = DataFrame().groupby(level=0)
+ ge = empty_frame().groupby(level=0)
se = Series(dtype=object).groupby(level=0)
# edge case, as this is usually considered float
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 1529a259c49af..2f0aea7de3772 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -7,6 +7,7 @@
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, NaT, Timestamp, date_range, offsets
import pandas._testing as tm
+from pandas.conftest import empty_frame
randn = np.random.randn
@@ -236,7 +237,7 @@ def test_groupby_function_tuple_1677(self):
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
- a = DataFrame()
+ a = empty_frame()
c = DataFrame({"A": "foo", "B": dr}, index=dr)
result = a.append(c)
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index fa00b870ca757..59c0222691982 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -16,6 +16,7 @@
timedelta_range,
)
import pandas._testing as tm
+from pandas.conftest import empty_frame
from ..datetimelike import DatetimeLike
@@ -171,7 +172,7 @@ def test_pass_TimedeltaIndex_to_index(self):
def test_append_numpy_bug_1681(self):
td = timedelta_range("1 days", "10 days", freq="2D")
- a = DataFrame()
+ a = empty_frame()
c = DataFrame({"A": "foo", "B": td}, index=td)
str(c)
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 9cc031001f81c..d7c76305d325c 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -5,6 +5,7 @@
from pandas import DataFrame, Float64Index, MultiIndex, Series, UInt64Index, date_range
import pandas._testing as tm
+from pandas.conftest import empty_frame
def _mklbl(prefix, n):
@@ -89,7 +90,7 @@ def setup_method(self, method):
self.series_ts_rev = Series(np.random.randn(4), index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev)
- self.frame_empty = DataFrame()
+ self.frame_empty = empty_frame()
self.series_empty = Series(dtype=object)
# form agglomerates
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index c8c2d1ed587cf..40c55b2371281 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -7,6 +7,7 @@
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestDatetimeIndex:
@@ -211,7 +212,7 @@ def test_loc_setitem_datetime(self):
lambda x: np.datetime64(x),
]:
- df = DataFrame()
+ df = empty_frame()
df.loc[conv(dt1), "one"] = 100
df.loc[conv(dt2), "one"] = 200
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index a8a21b0610c14..37d97fab97700 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -12,6 +12,7 @@
import pandas as pd
from pandas import DataFrame, Index, NaT, Series
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.core.indexers import validate_indices
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
from pandas.tests.indexing.common import _mklbl
@@ -169,7 +170,7 @@ def test_inf_upcast(self):
tm.assert_index_equal(result, expected)
# Test with np.inf in columns
- df = DataFrame()
+ df = empty_frame()
df.loc[0, 0] = 1
df.loc[1, 1] = 2
df.loc[0, np.inf] = 3
@@ -566,7 +567,7 @@ def test_string_slice(self):
with pytest.raises(KeyError, match="'2011'"):
df.loc["2011", 0]
- df = DataFrame()
+ df = empty_frame()
assert not df.index.is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 2ce07ec41758f..2386f6dcaccc2 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -10,6 +10,7 @@
import pandas as pd
from pandas import DataFrame, Index, Series, date_range
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestPartialSetting:
@@ -373,7 +374,7 @@ def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
- df = DataFrame()
+ df = empty_frame()
with pytest.raises(ValueError):
df.loc[1] = 1
@@ -397,14 +398,14 @@ def f():
tm.assert_frame_equal(f(), expected)
def f():
- df = DataFrame()
+ df = empty_frame()
df["foo"] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
- df = DataFrame()
+ df = empty_frame()
df["foo"] = df.index
return df
@@ -436,9 +437,9 @@ def f():
expected["foo"] = expected["foo"].astype("float64")
tm.assert_frame_equal(f(), expected)
- df = DataFrame()
+ df = empty_frame()
tm.assert_index_equal(df.columns, Index([], dtype=object))
- df2 = DataFrame()
+ df2 = empty_frame()
df2[1] = Series([1], index=["foo"])
df.loc[:, 1] = Series([1], index=["foo"])
tm.assert_frame_equal(df, DataFrame([[1]], index=["foo"], columns=[1]))
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index ec4614538004c..47e7079fdbbe6 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -12,6 +12,7 @@
import pandas._testing as tm
jinja2 = pytest.importorskip("jinja2")
+from pandas.conftest import empty_frame
from pandas.io.formats.style import Styler, _get_level_lengths # noqa # isort:skip
@@ -117,7 +118,7 @@ def test_render(self):
# it worked?
def test_render_empty_dfs(self):
- empty_df = DataFrame()
+ empty_df = empty_frame()
es = Styler(empty_df)
es.render()
# An index but no columns
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index 9a14022d6f776..551886281ca78 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -8,9 +8,10 @@
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, option_context
import pandas._testing as tm
-
+from pandas.conftest import empty_frame
import pandas.io.formats.format as fmt
+
lorem_ipsum = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod "
"tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim "
@@ -391,7 +392,7 @@ def test_to_html_justify(justify, datapath):
)
def test_to_html_invalid_justify(justify):
# GH 17527
- df = DataFrame()
+ df = empty_frame()
msg = "Invalid value for justify parameter"
with pytest.raises(ValueError, match=msg):
@@ -439,7 +440,7 @@ def test_to_html_index(datapath):
@pytest.mark.parametrize("classes", ["sortable draggable", ["sortable", "draggable"]])
def test_to_html_with_classes(classes, datapath):
- df = DataFrame()
+ df = empty_frame()
expected = expected_html(datapath, "with_classes")
result = df.to_html(classes=classes)
assert result == expected
@@ -718,7 +719,7 @@ def test_ignore_display_max_colwidth(method, expected, max_colwidth):
@pytest.mark.parametrize("classes", [True, 0])
def test_to_html_invalid_classes_type(classes):
# GH 25608
- df = DataFrame()
+ df = empty_frame()
msg = "classes must be a string, list, or tuple"
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 509e5bcb33304..d1aaf9c3aa48f 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -6,6 +6,7 @@
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestToLatex:
@@ -79,7 +80,7 @@ def test_to_latex_format(self, float_frame):
assert withindex_result == withindex_expected
def test_to_latex_empty(self):
- df = DataFrame()
+ df = empty_frame()
result = df.to_latex()
expected = r"""\begin{tabular}{l}
\toprule
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index b7a9918ff46da..ba37772540fb8 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -5,7 +5,7 @@
from pandas import DataFrame, Index, Series, json_normalize
import pandas._testing as tm
-
+from pandas.conftest import empty_frame
from pandas.io.json._normalize import nested_to_record
@@ -165,7 +165,7 @@ def test_simple_normalize(self, state_data):
def test_empty_array(self):
result = json_normalize([])
- expected = DataFrame()
+ expected = empty_frame()
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
diff --git a/pandas/tests/io/parser/test_mangle_dupes.py b/pandas/tests/io/parser/test_mangle_dupes.py
index 5c4e642115798..c850ea28f77c3 100644
--- a/pandas/tests/io/parser/test_mangle_dupes.py
+++ b/pandas/tests/io/parser/test_mangle_dupes.py
@@ -9,6 +9,7 @@
from pandas import DataFrame
import pandas._testing as tm
+from pandas.conftest import empty_frame
@pytest.mark.parametrize("kwargs", [dict(), dict(mangle_dupe_cols=True)])
@@ -121,7 +122,7 @@ def test_mangled_unnamed_placeholders(all_parsers):
# This test recursively updates `df`.
for i in range(3):
- expected = DataFrame()
+ expected = empty_frame()
for j in range(i + 1):
expected["Unnamed: 0" + ".1" * j] = [0, 1, 2]
diff --git a/pandas/tests/io/parser/test_usecols.py b/pandas/tests/io/parser/test_usecols.py
index 979eb4702cc84..f05541ce4e0fb 100644
--- a/pandas/tests/io/parser/test_usecols.py
+++ b/pandas/tests/io/parser/test_usecols.py
@@ -11,6 +11,7 @@
from pandas import DataFrame, Index
import pandas._testing as tm
+from pandas.conftest import empty_frame
_msg_validate_usecols_arg = (
"'usecols' must either be list-like "
@@ -408,7 +409,7 @@ def test_usecols_with_multi_byte_characters(all_parsers, usecols):
def test_empty_usecols(all_parsers):
data = "a,b,c\n1,2,3\n4,5,6"
- expected = DataFrame()
+ expected = empty_frame()
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=set())
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 9a0788ea068ad..61c241d0984be 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -11,6 +11,7 @@
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
+from pandas.conftest import empty_frame
import pandas.util._test_decorators as td
import pandas as pd
@@ -2391,7 +2392,7 @@ def test_frame(self, compression, setup_path):
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
- df0 = DataFrame()
+ df0 = empty_frame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 2f2ae8cd9d32b..1913cdcb81c18 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -42,7 +42,7 @@
to_timedelta,
)
import pandas._testing as tm
-
+from pandas.conftest import empty_frame
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
@@ -885,7 +885,7 @@ def test_chunksize_read(self):
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
- res2 = DataFrame()
+ res2 = empty_frame()
i = 0
sizes = [5, 5, 5, 5, 2]
@@ -900,7 +900,7 @@ def test_chunksize_read(self):
# reading the query in chunks with read_sql_query
if self.mode == "sqlalchemy":
- res3 = DataFrame()
+ res3 = empty_frame()
i = 0
sizes = [5, 5, 5, 5, 2]
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index 6384c5f19c898..124b9980875e1 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -6,6 +6,7 @@
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.core.groupby.groupby import DataError
from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
@@ -83,7 +84,7 @@ def test_resample_interpolate(frame):
def test_raises_on_non_datetimelike_index():
# this is a non datetimelike index
- xp = DataFrame()
+ xp = empty_frame()
msg = (
"Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex, "
"but got an instance of 'Index'"
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index dc1efa46403be..5c0222d0453c1 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -7,6 +7,7 @@
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, concat, merge
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.tests.reshape.merge.test_merge import NGROUPS, N, get_test_data
a_ = np.array
@@ -377,7 +378,7 @@ def test_join_index_mixed_overlap(self):
def test_join_empty_bug(self):
# generated an exception in 0.4.3
- x = DataFrame()
+ x = empty_frame()
x.join(DataFrame([3], index=[0], columns=["A"]), how="outer")
def test_join_unconsolidated(self):
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index a12395b32ab4e..59eb1261840ae 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -28,6 +28,7 @@
read_csv,
)
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@@ -796,7 +797,7 @@ def test_append(self, sort, float_frame):
)
def test_append_empty(self, float_frame):
- empty = DataFrame()
+ empty = empty_frame()
appended = float_frame.append(empty)
tm.assert_frame_equal(float_frame, appended)
@@ -1528,7 +1529,7 @@ def test_handle_empty_objects(self, sort):
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
- empty = DataFrame()
+ empty = empty_frame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py
index 8795af2e11122..02d4af011af59 100644
--- a/pandas/tests/reshape/test_crosstab.py
+++ b/pandas/tests/reshape/test_crosstab.py
@@ -3,6 +3,7 @@
from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, Series, crosstab
import pandas._testing as tm
+from pandas.conftest import empty_frame
class TestCrosstab:
@@ -231,7 +232,7 @@ def test_crosstab_no_overlap(self):
s2 = Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
- expected = DataFrame()
+ expected = empty_frame()
tm.assert_frame_equal(actual, expected)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 6289c2efea7f1..4d2a5eb3f3792 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -10,6 +10,7 @@
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, concat, isna, notna
import pandas._testing as tm
+from pandas.conftest import empty_frame
import pandas.core.strings as strings
@@ -1928,7 +1929,7 @@ def test_empty_str_methods(self):
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
- empty_df = DataFrame()
+ empty_df = empty_frame()
tm.assert_frame_equal(empty_df, empty.str.partition("a"))
tm.assert_frame_equal(empty_df, empty.str.rpartition("a"))
diff --git a/pandas/tests/window/moments/test_moments_expanding.py b/pandas/tests/window/moments/test_moments_expanding.py
index 9dfaecee9caeb..eb4a8fae64360 100644
--- a/pandas/tests/window/moments/test_moments_expanding.py
+++ b/pandas/tests/window/moments/test_moments_expanding.py
@@ -6,6 +6,7 @@
from pandas import DataFrame, Index, MultiIndex, Series, isna, notna
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.tests.window.common import ConsistencyBase
@@ -287,7 +288,7 @@ def test_moment_functions_zero_length(self, f):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
- df1 = DataFrame()
+ df1 = empty_frame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
@@ -311,7 +312,7 @@ def test_moment_functions_zero_length(self, f):
)
def test_moment_functions_zero_length_pairwise(self, f):
- df1 = DataFrame()
+ df1 = empty_frame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index f3a14971ef2e7..89f9dc52fca98 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -11,6 +11,7 @@
import pandas as pd
from pandas import DataFrame, Index, Series, isna, notna
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.core.window.common import _flex_binary_moment
from pandas.tests.window.common import Base, ConsistencyBase
@@ -1454,7 +1455,7 @@ def test_moment_functions_zero_length(self):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
- df1 = DataFrame()
+ df1 = empty_frame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
@@ -1495,7 +1496,7 @@ def test_moment_functions_zero_length(self):
def test_moment_functions_zero_length_pairwise(self):
- df1 = DataFrame()
+ df1 = empty_frame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index 6b6367fd80b26..60c614f75b4dd 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -6,6 +6,7 @@
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.core.window import Expanding
from pandas.tests.window.common import Base
@@ -67,8 +68,8 @@ def test_empty_df_expanding(self, expander):
# GH 15819 Verifies that datetime and integer expanding windows can be
# applied to empty DataFrames
- expected = DataFrame()
- result = DataFrame().expanding(expander).sum()
+ expected = empty_frame()
+ result = empty_frame().expanding(expander).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer expanding windows can be applied
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index ab2c7fcb7a0dc..3e8c8c632c9cf 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -9,6 +9,7 @@
import pandas as pd
from pandas import DataFrame, Index, Series
import pandas._testing as tm
+from pandas.conftest import empty_frame
from pandas.core.window import Rolling
from pandas.tests.window.common import Base
@@ -250,8 +251,8 @@ def test_closed_median_quantile(self, closed, expected):
def tests_empty_df_rolling(self, roller):
# GH 15819 Verifies that datetime and integer rolling windows can be
# applied to empty DataFrames
- expected = DataFrame()
- result = DataFrame().rolling(roller).sum()
+ expected = empty_frame()
+ result = empty_frame().rolling(roller).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer rolling windows can be applied to
| Replaced Empty data frames with fixture empty_frame()
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33177 | 2020-03-31T11:46:30Z | 2020-03-31T15:21:56Z | null | 2020-04-01T12:50:18Z |
DOC: Fix examples in pandas/core/ops/ | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index e987e147fa343..3e9138814fbdf 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -310,7 +310,11 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
pytest -q --doctest-modules pandas/core/indexes/
RET=$(($RET + $?)) ; echo $MSG "DONE"
- MSG='Doctests reshaping functions' ; echo $MSG
+ MSG='Doctests ops' ; echo $MSG
+ pytest -q --doctest-modules pandas/core/ops/
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+
+ MSG='Doctests reshape' ; echo $MSG
pytest -q --doctest-modules pandas/core/reshape/
RET=$(($RET + $?)) ; echo $MSG "DONE"
diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py
index 854d6072eea36..c33cb32dcec19 100644
--- a/pandas/core/ops/missing.py
+++ b/pandas/core/ops/missing.py
@@ -72,7 +72,7 @@ def fill_zeros(result, x, y):
def mask_zero_div_zero(x, y, result):
"""
- Set results of 0 / 0 or 0 // 0 to np.nan, regardless of the dtypes
+ Set results of 0 // 0 to np.nan, regardless of the dtypes
of the numerator or the denominator.
Parameters
@@ -83,13 +83,16 @@ def mask_zero_div_zero(x, y, result):
Returns
-------
- filled_result : ndarray
+ ndarray
+ The filled result.
Examples
--------
>>> x = np.array([1, 0, -1], dtype=np.int64)
+ >>> x
+ array([ 1, 0, -1])
>>> y = 0 # int 0; numpy behavior is different with float
- >>> result = x / y
+ >>> result = x // y
>>> result # raw numpy result does not fill division by zero
array([0, 0, 0])
>>> mask_zero_div_zero(x, y, result)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33176 | 2020-03-31T10:50:36Z | 2020-04-03T15:11:06Z | 2020-04-03T15:11:06Z | 2020-04-03T16:17:15Z |
CI: add doctest check for done modules | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 5401cc81785ab..be6c076952ca1 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -279,8 +279,8 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
pytest -q --doctest-modules pandas/core/groupby/groupby.py -k"-cumcount -describe -pipe"
RET=$(($RET + $?)) ; echo $MSG "DONE"
- MSG='Doctests datetimes.py' ; echo $MSG
- pytest -q --doctest-modules pandas/core/tools/datetimes.py
+ MSG='Doctests tools' ; echo $MSG
+ pytest -q --doctest-modules pandas/core/tools/
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Doctests reshaping functions' ; echo $MSG
@@ -323,6 +323,10 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then
MSG='Doctests tseries' ; echo $MSG
pytest -q --doctest-modules pandas/tseries/
RET=$(($RET + $?)) ; echo $MSG "DONE"
+
+ MSG='Doctests computation' ; echo $MSG
+ pytest -q --doctest-modules pandas/core/computation/
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
### DOCSTRINGS ###
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33175 | 2020-03-31T10:26:47Z | 2020-03-31T13:12:11Z | 2020-03-31T13:12:11Z | 2020-03-31T13:14:37Z |
CLN: using C-API of datetime | diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index a318bea14b52b..6fa9159c469c2 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -8,8 +8,7 @@ cnp.import_array()
import pytz
# stdlib datetime imports
-from datetime import time as datetime_time
-from cpython.datetime cimport (datetime, tzinfo,
+from cpython.datetime cimport (datetime, time, tzinfo,
PyDateTime_Check, PyDate_Check,
PyDateTime_IMPORT)
PyDateTime_IMPORT
@@ -284,7 +283,7 @@ cdef convert_to_tsobject(object ts, object tz, object unit,
return convert_datetime_to_tsobject(ts, tz, nanos)
elif PyDate_Check(ts):
# Keep the converter same as PyDateTime's
- ts = datetime.combine(ts, datetime_time())
+ ts = datetime.combine(ts, time())
return convert_datetime_to_tsobject(ts, tz)
elif getattr(ts, '_typ', None) == 'period':
raise ValueError("Cannot convert Period to Timestamp "
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index c3a47902cff0f..dd745f840d0ab 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1,5 +1,3 @@
-from datetime import datetime
-
from cpython.object cimport PyObject_RichCompareBool, Py_EQ, Py_NE
from numpy cimport int64_t, import_array, ndarray
@@ -13,6 +11,7 @@ from libc.string cimport strlen, memset
import cython
from cpython.datetime cimport (
+ datetime,
PyDate_Check,
PyDateTime_Check,
PyDateTime_IMPORT,
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 64b79200028b6..7858072407a35 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -5,8 +5,7 @@ cimport numpy as cnp
from numpy cimport int64_t
cnp.import_array()
-from datetime import time as datetime_time, timedelta
-from cpython.datetime cimport (datetime, PyDateTime_Check,
+from cpython.datetime cimport (datetime, time, PyDateTime_Check, PyDelta_Check,
PyTZInfo_Check, PyDateTime_IMPORT)
PyDateTime_IMPORT
@@ -33,7 +32,7 @@ from pandas._libs.tslibs.tzconversion import (
# ----------------------------------------------------------------------
# Constants
-_zero_time = datetime_time(0, 0)
+_zero_time = time(0, 0)
_no_input = object()
# ----------------------------------------------------------------------
@@ -879,8 +878,7 @@ default 'raise'
raise ValueError('Cannot infer offset with only one time.')
nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward')
- if nonexistent not in nonexistent_options and not isinstance(
- nonexistent, timedelta):
+ if nonexistent not in nonexistent_options and not PyDelta_Check(nonexistent):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or a timedelta object"
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33174 | 2020-03-31T10:16:14Z | 2020-03-31T14:49:43Z | 2020-03-31T14:49:43Z | 2020-04-01T09:37:46Z |
CLN: Added static types | diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 5272a0a042d0e..1b980aea372e2 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -189,8 +189,13 @@ cdef inline bint does_string_look_like_time(str parse_string):
return 0 <= hour <= 23 and 0 <= minute <= 59
-def parse_datetime_string(date_string: str, freq=None, dayfirst=False,
- yearfirst=False, **kwargs):
+def parse_datetime_string(
+ str date_string,
+ object freq=None,
+ bint dayfirst=False,
+ bint yearfirst=False,
+ **kwargs,
+):
"""
Parse datetime string, only returns datetime.
Also cares special handling matching time patterns.
@@ -272,8 +277,9 @@ def parse_time_string(arg: str, freq=None, dayfirst=None, yearfirst=None):
return res
-cdef parse_datetime_string_with_reso(str date_string, freq=None, dayfirst=False,
- yearfirst=False):
+cdef parse_datetime_string_with_reso(
+ str date_string, object freq=None, bint dayfirst=False, bint yearfirst=False,
+):
"""
Parse datetime string and try to identify its resolution.
@@ -467,8 +473,14 @@ cdef inline object _parse_dateabbr_string(object date_string, object default,
raise ValueError(f'Unable to parse {date_string}')
-cdef dateutil_parse(str timestr, object default, ignoretz=False,
- tzinfos=None, dayfirst=None, yearfirst=None):
+cdef dateutil_parse(
+ str timestr,
+ object default,
+ bint ignoretz=False,
+ object tzinfos=None,
+ bint dayfirst=False,
+ bint yearfirst=False,
+):
""" lifted from dateutil to get resolution"""
cdef:
@@ -531,8 +543,9 @@ cdef dateutil_parse(str timestr, object default, ignoretz=False,
# Parsing for type-inference
-def try_parse_dates(object[:] values, parser=None,
- dayfirst=False, default=None):
+def try_parse_dates(
+ object[:] values, parser=None, bint dayfirst=False, default=None,
+):
cdef:
Py_ssize_t i, n
object[:] result
@@ -569,9 +582,14 @@ def try_parse_dates(object[:] values, parser=None,
return result.base # .base to access underlying ndarray
-def try_parse_date_and_time(object[:] dates, object[:] times,
- date_parser=None, time_parser=None,
- dayfirst=False, default=None):
+def try_parse_date_and_time(
+ object[:] dates,
+ object[:] times,
+ date_parser=None,
+ time_parser=None,
+ bint dayfirst=False,
+ default=None,
+):
cdef:
Py_ssize_t i, n
object[:] result
@@ -607,8 +625,7 @@ def try_parse_date_and_time(object[:] dates, object[:] times,
return result.base # .base to access underlying ndarray
-def try_parse_year_month_day(object[:] years, object[:] months,
- object[:] days):
+def try_parse_year_month_day(object[:] years, object[:] months, object[:] days):
cdef:
Py_ssize_t i, n
object[:] result
@@ -705,6 +722,9 @@ class _timelex:
function maintains a "token stack", for when the ambiguous context
demands that multiple tokens be parsed at once.
"""
+ cdef:
+ Py_ssize_t n
+
stream = self.stream.replace('\x00', '')
# TODO: Change \s --> \s+ (this doesn't match existing behavior)
@@ -760,15 +780,20 @@ def _format_is_iso(f) -> bint:
return False
-def _guess_datetime_format(dt_str, dayfirst=False, dt_str_parse=du_parse,
- dt_str_split=_DATEUTIL_LEXER_SPLIT):
+def _guess_datetime_format(
+ dt_str,
+ bint dayfirst=False,
+ dt_str_parse=du_parse,
+ dt_str_split=_DATEUTIL_LEXER_SPLIT,
+):
"""
Guess the datetime format of a given datetime string.
Parameters
----------
- dt_str : string, datetime string to guess the format of
- dayfirst : boolean, default False
+ dt_str : str
+ Datetime string to guess the format of.
+ dayfirst : bool, default False
If True parses dates with the day first, eg 20/01/2005
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug).
@@ -878,8 +903,7 @@ def _guess_datetime_format(dt_str, dayfirst=False, dt_str_parse=du_parse,
@cython.wraparound(False)
@cython.boundscheck(False)
-cdef inline object convert_to_unicode(object item,
- bint keep_trivial_numbers):
+cdef inline object convert_to_unicode(object item, bint keep_trivial_numbers):
"""
Convert `item` to str.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
---
I'll revert any change related to styling, I applied "black" formatting in some places, just so I could think straight. | https://api.github.com/repos/pandas-dev/pandas/pulls/33169 | 2020-03-31T08:46:57Z | 2020-04-03T19:42:16Z | 2020-04-03T19:42:16Z | 2020-04-06T08:43:09Z |
CLN: Replace DataFrame() with empty_frame in tests | diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 9e6d41a8886b3..9d7a0ace00f8b 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -817,9 +817,9 @@ def test_setitem_with_empty_listlike(self):
expected = pd.DataFrame(columns=["A"], index=index)
tm.assert_index_equal(result.index, expected.index)
- def test_setitem_scalars_no_index(self):
+ def test_setitem_scalars_no_index(self, empty_frame):
# GH16823 / 17894
- df = DataFrame()
+ df = empty_frame
df["foo"] = 1
expected = DataFrame(columns=["foo"]).astype(np.int64)
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/frame/indexing/test_insert.py b/pandas/tests/frame/indexing/test_insert.py
index 622c93d1c2fdc..5fa7ff50d3379 100644
--- a/pandas/tests/frame/indexing/test_insert.py
+++ b/pandas/tests/frame/indexing/test_insert.py
@@ -56,9 +56,9 @@ def test_insert_column_bug_4032(self):
expected = DataFrame([[1.3, 1, 1.1], [2.3, 2, 2.2]], columns=["c", "a", "b"])
tm.assert_frame_equal(result, expected)
- def test_insert_with_columns_dups(self):
+ def test_insert_with_columns_dups(self, empty_frame):
# GH#14291
- df = DataFrame()
+ df = empty_frame
df.insert(0, "A", ["g", "h", "i"], allow_duplicates=True)
df.insert(0, "A", ["d", "e", "f"], allow_duplicates=True)
df.insert(0, "A", ["a", "b", "c"], allow_duplicates=True)
diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py
index 9fc3629e794e2..a2caf6d0b3384 100644
--- a/pandas/tests/frame/methods/test_append.py
+++ b/pandas/tests/frame/methods/test_append.py
@@ -7,9 +7,9 @@
class TestDataFrameAppend:
- def test_append_empty_list(self):
+ def test_append_empty_list(self, empty_frame):
# GH 28769
- df = DataFrame()
+ df = empty_frame
result = df.append([])
expected = df
tm.assert_frame_equal(result, expected)
@@ -93,44 +93,44 @@ def test_append_missing_cols(self):
expected = df.append(DataFrame(dicts), ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
- def test_append_empty_dataframe(self):
+ def test_append_empty_dataframe(self, empty_frame):
# Empty df append empty df
- df1 = DataFrame()
- df2 = DataFrame()
+ df1 = empty_frame
+ df2 = empty_frame
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Non-empty df append empty df
df1 = DataFrame(np.random.randn(5, 2))
- df2 = DataFrame()
+ df2 = empty_frame
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Empty df with columns append empty df
df1 = DataFrame(columns=["bar", "foo"])
- df2 = DataFrame()
+ df2 = empty_frame
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Non-Empty df with columns append empty df
df1 = DataFrame(np.random.randn(5, 2), columns=["bar", "foo"])
- df2 = DataFrame()
+ df2 = empty_frame
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
- def test_append_dtypes(self):
+ def test_append_dtypes(self, empty_frame):
# GH 5754
# row appends of different dtypes (so need to do by-item)
# can sometimes infer the correct type
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(5))
- df2 = DataFrame()
+ df2 = empty_frame
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
index 7715cb1cb6eec..25b8c2cc21314 100644
--- a/pandas/tests/frame/methods/test_combine_first.py
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -24,7 +24,7 @@ def test_combine_first_mixed(self):
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
- def test_combine_first(self, float_frame):
+ def test_combine_first(self, empty_frame, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
@@ -73,7 +73,7 @@ def test_combine_first(self, float_frame):
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
- comb = DataFrame().combine_first(float_frame)
+ comb = empty_frame.combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
diff --git a/pandas/tests/frame/methods/test_count.py b/pandas/tests/frame/methods/test_count.py
index 13a93e3efc48c..9588ed9e0d33c 100644
--- a/pandas/tests/frame/methods/test_count.py
+++ b/pandas/tests/frame/methods/test_count.py
@@ -3,9 +3,9 @@
class TestDataFrameCount:
- def test_count(self):
+ def test_count(self, empty_frame):
# corner case
- frame = DataFrame()
+ frame = empty_frame
ct1 = frame.count(1)
assert isinstance(ct1, Series)
@@ -23,7 +23,7 @@ def test_count(self):
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
- df = DataFrame()
+ df = empty_frame
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_filter.py b/pandas/tests/frame/methods/test_filter.py
index 569b2fe21d1c2..650fbddf8b9e0 100644
--- a/pandas/tests/frame/methods/test_filter.py
+++ b/pandas/tests/frame/methods/test_filter.py
@@ -121,8 +121,8 @@ def test_filter_bytestring(self, name):
tm.assert_frame_equal(df.filter(like=name), expected)
tm.assert_frame_equal(df.filter(regex=name), expected)
- def test_filter_corner(self):
- empty = DataFrame()
+ def test_filter_corner(self, empty_frame):
+ empty = empty_frame
result = empty.filter([])
tm.assert_frame_equal(result, empty)
diff --git a/pandas/tests/frame/methods/test_head_tail.py b/pandas/tests/frame/methods/test_head_tail.py
index 93763bc12ce0d..dddf2edfaacf6 100644
--- a/pandas/tests/frame/methods/test_head_tail.py
+++ b/pandas/tests/frame/methods/test_head_tail.py
@@ -1,10 +1,9 @@
import numpy as np
-from pandas import DataFrame
import pandas._testing as tm
-def test_head_tail(float_frame):
+def test_head_tail(empty_frame, float_frame):
tm.assert_frame_equal(float_frame.head(), float_frame[:5])
tm.assert_frame_equal(float_frame.tail(), float_frame[-5:])
@@ -25,6 +24,6 @@ def test_head_tail(float_frame):
tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
# test empty dataframe
- empty_df = DataFrame()
+ empty_df = empty_frame
tm.assert_frame_equal(empty_df.tail(), empty_df)
tm.assert_frame_equal(empty_df.head(), empty_df)
diff --git a/pandas/tests/frame/methods/test_isin.py b/pandas/tests/frame/methods/test_isin.py
index 6307738021f68..71b9938842ef4 100644
--- a/pandas/tests/frame/methods/test_isin.py
+++ b/pandas/tests/frame/methods/test_isin.py
@@ -171,12 +171,12 @@ def test_isin_multiIndex(self):
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
- def test_isin_empty_datetimelike(self):
+ def test_isin_empty_datetimelike(self, empty_frame):
# GH#15473
df1_ts = DataFrame({"date": pd.to_datetime(["2014-01-01", "2014-01-02"])})
df1_td = DataFrame({"date": [pd.Timedelta(1, "s"), pd.Timedelta(2, "s")]})
df2 = DataFrame({"date": []})
- df3 = DataFrame()
+ df3 = empty_frame
expected = DataFrame({"date": [False, False]})
diff --git a/pandas/tests/frame/methods/test_round.py b/pandas/tests/frame/methods/test_round.py
index 6dcdf49e93729..8034763b9f423 100644
--- a/pandas/tests/frame/methods/test_round.py
+++ b/pandas/tests/frame/methods/test_round.py
@@ -7,11 +7,11 @@
class TestDataFrameRound:
- def test_round(self):
+ def test_round(self, empty_frame):
# GH#2665
# Test that rounding an empty DataFrame does nothing
- df = DataFrame()
+ df = empty_frame
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 6525e93d89fce..26ef0f81ab75f 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -753,8 +753,8 @@ def test_operators_timedelta64(self):
assert df["off1"].dtype == "timedelta64[ns]"
assert df["off2"].dtype == "timedelta64[ns]"
- def test_sum_corner(self):
- empty_frame = DataFrame()
+ def test_sum_corner(self, empty_frame):
+ empty_frame = empty_frame
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 91627b46c2fee..d97baa5c3a465 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -117,8 +117,8 @@ def test_tab_completion(self):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), pd.DataFrame)
- def test_not_hashable(self):
- empty_frame = DataFrame()
+ def test_not_hashable(self, empty_frame):
+ empty_frame = empty_frame
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
@@ -161,8 +161,8 @@ def test_get_agg_axis(self, float_frame):
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
- def test_nonzero(self, float_frame, float_string_frame):
- empty_frame = DataFrame()
+ def test_nonzero(self, float_frame, float_string_frame, empty_frame):
+ empty_frame = empty_frame
assert empty_frame.empty
assert not float_frame.empty
@@ -414,7 +414,7 @@ def test_series_put_names(self, float_string_frame):
for k, v in series.items():
assert v.name == k
- def test_empty_nonzero(self):
+ def test_empty_nonzero(self, empty_frame):
df = DataFrame([1, 2, 3])
assert not df.empty
df = DataFrame(index=[1], columns=[1])
@@ -423,7 +423,7 @@ def test_empty_nonzero(self):
assert df.empty
assert df.T.empty
empty_frames = [
- DataFrame(),
+ empty_frame,
DataFrame(index=[1]),
DataFrame(columns=[1]),
DataFrame({1: []}),
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index e328523253144..7d2cf09d96122 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -72,10 +72,8 @@ def test_apply_mixed_datetimelike(self):
result = df.apply(lambda x: x, axis=1)
tm.assert_frame_equal(result, df)
- def test_apply_empty(self, float_frame):
+ def test_apply_empty(self, float_frame, empty_frame):
# empty
- empty_frame = DataFrame()
-
applied = empty_frame.apply(np.sqrt)
assert applied.empty
@@ -97,10 +95,8 @@ def test_apply_empty(self, float_frame):
result = expected.apply(lambda x: x["a"], axis=1)
tm.assert_frame_equal(expected, result)
- def test_apply_with_reduce_empty(self):
+ def test_apply_with_reduce_empty(self, empty_frame):
# reduce with an empty DataFrame
- empty_frame = DataFrame()
-
x = []
result = empty_frame.apply(x.append, axis=1, result_type="expand")
tm.assert_frame_equal(result, empty_frame)
@@ -740,7 +736,7 @@ def test_infer_row_shape(self):
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
- def test_with_dictlike_columns(self):
+ def test_with_dictlike_columns(self, empty_frame):
# GH 17602
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1)
@@ -760,7 +756,7 @@ def test_with_dictlike_columns(self):
tm.assert_series_equal(result, expected)
# GH 18775
- df = DataFrame()
+ df = empty_frame
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(
@@ -1354,7 +1350,7 @@ def func(group_col):
),
),
)
- def test_agg_cython_table(self, df, func, expected, axis):
+ def test_agg_cython_table(self, df, func, expected, axis, empty_frame):
# GH 21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 89f8bc433419b..b22482dab5616 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -958,7 +958,9 @@ def test_add_with_dti_mismatched_tzs(self):
exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
tm.assert_frame_equal(df1 + df2, exp)
- def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
+ def test_combineFrame(
+ self, empty_frame, float_frame, mixed_float_frame, mixed_int_frame
+ ):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
@@ -990,13 +992,13 @@ def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
# corner cases
# empty
- plus_empty = float_frame + DataFrame()
+ plus_empty = float_frame + empty_frame
assert np.isnan(plus_empty.values).all()
- empty_plus = DataFrame() + float_frame
+ empty_plus = empty_frame + float_frame
assert np.isnan(empty_plus.values).all()
- empty_empty = DataFrame() + DataFrame()
+ empty_empty = empty_frame + empty_frame
assert empty_empty.empty
# out of order
@@ -1106,7 +1108,7 @@ def test_combine_series(
result = frame.mul(ts, axis="index")
assert len(result) == len(ts)
- def test_combineFunc(self, float_frame, mixed_float_frame):
+ def test_combineFunc(self, empty_frame, float_frame, mixed_float_frame):
result = float_frame * 2
tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
@@ -1116,7 +1118,7 @@ def test_combineFunc(self, float_frame, mixed_float_frame):
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
- result = DataFrame() * 2
+ result = empty_frame * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index e67fef9efef6d..51decd2fb5cb4 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -344,8 +344,7 @@ def test_copy(self, float_frame, float_string_frame):
copy = float_string_frame.copy()
assert copy._data is not float_string_frame._data
- def test_pickle(self, float_string_frame, timezone_frame):
- empty_frame = DataFrame()
+ def test_pickle(self, empty_frame, float_string_frame, timezone_frame):
unpickled = tm.round_trip_pickle(float_string_frame)
tm.assert_frame_equal(float_string_frame, unpickled)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 9f40e8c6931c8..fb82c6af66363 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -77,8 +77,8 @@ def test_series_with_name_not_matching_column(self):
lambda: DataFrame(data=range(0)),
],
)
- def test_empty_constructor(self, constructor):
- expected = DataFrame()
+ def test_empty_constructor(self, constructor, empty_frame):
+ expected = empty_frame
result = constructor()
assert len(result.index) == 0
assert len(result.columns) == 0
@@ -1648,7 +1648,7 @@ def test_constructor_single_value(self):
with pytest.raises(TypeError, match=msg):
DataFrame("a", [1, 2], ["a", "c"], float)
- def test_constructor_with_datetimes(self):
+ def test_constructor_with_datetimes(self, empty_frame):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype("M8[ns]").name
@@ -1774,7 +1774,7 @@ def test_constructor_with_datetimes(self):
i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern")
expected = DataFrame({"a": i.to_series().reset_index(drop=True)})
- df = DataFrame()
+ df = empty_frame
df["a"] = i
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 7cb7115276f71..2161381edc48e 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -165,9 +165,9 @@ def test_dropna_multiple_axes(self):
with pytest.raises(TypeError, match="supplying multiple axes"):
inp.dropna(how="all", axis=(0, 1), inplace=True)
- def test_dropna_tz_aware_datetime(self):
+ def test_dropna_tz_aware_datetime(self, empty_frame):
# GH13407
- df = DataFrame()
+ df = empty_frame
dt1 = datetime.datetime(2015, 1, 1, tzinfo=dateutil.tz.tzutc())
dt2 = datetime.datetime(2015, 2, 2, tzinfo=dateutil.tz.tzutc())
df["Time"] = [dt1]
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 6d786d9580542..a4d458ac4e0ce 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -45,7 +45,7 @@ def test_repr_mixed_big(self):
repr(biggie)
- def test_repr(self, float_frame):
+ def test_repr(self, empty_frame, float_frame):
buf = StringIO()
# small one
@@ -63,7 +63,7 @@ def test_repr(self, float_frame):
repr(no_index)
# no columns or index
- DataFrame().info(buf=buf)
+ empty_frame.info(buf=buf)
df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])
assert "\t" not in repr(df)
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index 9d3c40ce926d7..a461c36389a95 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -50,10 +50,10 @@ def test_pivot_duplicates(self):
with pytest.raises(ValueError, match="duplicate entries"):
data.pivot("a", "b", "c")
- def test_pivot_empty(self):
+ def test_pivot_empty(self, empty_frame):
df = DataFrame(columns=["a", "b", "c"])
result = df.pivot("a", "b", "c")
- expected = DataFrame()
+ expected = empty_frame
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index e860ea1a3d052..9402387248049 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -199,7 +199,7 @@ def test_aggregate_str_func(tsframe, groupbyfunc):
tm.assert_frame_equal(result, expected)
-def test_aggregate_item_by_item(df):
+def test_aggregate_item_by_item(df, empty_frame):
grouped = df.groupby("A")
aggfun = lambda ser: ser.size
@@ -223,7 +223,7 @@ def test_aggregate_item_by_item(df):
def aggfun(ser):
return ser.size
- result = DataFrame().groupby(df.A).agg(aggfun)
+ result = empty_frame.groupby(df.A).agg(aggfun)
assert isinstance(result, DataFrame)
assert len(result) == 0
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 9fbcced75c327..19542d2efdc19 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -649,7 +649,7 @@ def noddy(value, weight):
df_grouped.apply(lambda x: noddy(x.value, x.weight))
-def test_groupby_apply_all_none():
+def test_groupby_apply_all_none(empty_frame):
# Tests to make sure no errors if apply function returns all None
# values. Issue 9684.
test_df = DataFrame({"groups": [0, 0, 1, 1], "random_vars": [8, 7, 4, 5]})
@@ -658,7 +658,7 @@ def test_func(x):
pass
result = test_df.groupby("groups").apply(test_func)
- expected = DataFrame()
+ expected = empty_frame
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py
index b4239d7d34a90..89b91050ea4e1 100644
--- a/pandas/tests/groupby/test_counting.py
+++ b/pandas/tests/groupby/test_counting.py
@@ -18,8 +18,8 @@ def test_cumcount(self):
tm.assert_series_equal(expected, g.cumcount())
tm.assert_series_equal(expected, sg.cumcount())
- def test_cumcount_empty(self):
- ge = DataFrame().groupby(level=0)
+ def test_cumcount_empty(self, empty_frame):
+ ge = empty_frame.groupby(level=0)
se = Series(dtype=object).groupby(level=0)
# edge case, as this is usually considered float
@@ -93,8 +93,8 @@ def test_ngroup_one_group(self):
tm.assert_series_equal(expected, g.ngroup())
tm.assert_series_equal(expected, sg.ngroup())
- def test_ngroup_empty(self):
- ge = DataFrame().groupby(level=0)
+ def test_ngroup_empty(self, empty_frame):
+ ge = empty_frame.groupby(level=0)
se = Series(dtype=object).groupby(level=0)
# edge case, as this is usually considered float
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 1529a259c49af..666fedeac0235 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -233,10 +233,10 @@ def test_groupby_function_tuple_1677(self):
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
- def test_append_numpy_bug_1681(self):
+ def test_append_numpy_bug_1681(self, empty_frame):
# another datetime64 bug
dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
- a = DataFrame()
+ a = empty_frame
c = DataFrame({"A": "foo", "B": dr}, index=dr)
result = a.append(c)
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 129bdef870a14..007fedb5df8ef 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -153,10 +153,10 @@ def test_pass_TimedeltaIndex_to_index(self):
tm.assert_numpy_array_equal(idx.values, expected.values)
- def test_append_numpy_bug_1681(self):
+ def test_append_numpy_bug_1681(self, empty_frame):
td = timedelta_range("1 days", "10 days", freq="2D")
- a = DataFrame()
+ a = empty_frame
c = DataFrame({"A": "foo", "B": td}, index=td)
str(c)
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index c8c2d1ed587cf..a0e4db7a3ed15 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -198,7 +198,7 @@ def test_partial_setting_with_datetimelike_dtype(self):
df.loc[mask, "C"] = df.loc[mask].index
tm.assert_frame_equal(df, expected)
- def test_loc_setitem_datetime(self):
+ def test_loc_setitem_datetime(self, empty_frame):
# GH 9516
dt1 = Timestamp("20130101 09:00:00")
@@ -211,7 +211,7 @@ def test_loc_setitem_datetime(self):
lambda x: np.datetime64(x),
]:
- df = DataFrame()
+ df = empty_frame
df.loc[conv(dt1), "one"] = 100
df.loc[conv(dt2), "one"] = 200
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index a8a21b0610c14..0fe9996c9161f 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -150,7 +150,7 @@ def test_setitem_ndarray_3d_does_not_fail_for_iloc_empty_dataframe(self):
with pytest.raises(ValueError, match=msg):
obj.iloc[nd3] = 0
- def test_inf_upcast(self):
+ def test_inf_upcast(self, empty_frame):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
@@ -169,7 +169,7 @@ def test_inf_upcast(self):
tm.assert_index_equal(result, expected)
# Test with np.inf in columns
- df = DataFrame()
+ df = empty_frame
df.loc[0, 0] = 1
df.loc[1, 1] = 2
df.loc[0, np.inf] = 3
@@ -554,7 +554,7 @@ def view(self):
tm.assert_frame_equal(result, df)
- def test_string_slice(self):
+ def test_string_slice(self, empty_frame):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
@@ -566,7 +566,7 @@ def test_string_slice(self):
with pytest.raises(KeyError, match="'2011'"):
df.loc["2011", 0]
- df = DataFrame()
+ df = empty_frame
assert not df.index.is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 2ce07ec41758f..deb7b8a9197de 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -369,11 +369,11 @@ def test_partial_set_empty_series(self):
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=["foo", "bar", 3]))
- def test_partial_set_empty_frame(self):
+ def test_partial_set_empty_frame(self, empty_frame):
# partially set with an empty object
# frame
- df = DataFrame()
+ df = empty_frame.copy()
with pytest.raises(ValueError):
df.loc[1] = 1
@@ -397,14 +397,14 @@ def f():
tm.assert_frame_equal(f(), expected)
def f():
- df = DataFrame()
+ df = empty_frame.copy()
df["foo"] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
- df = DataFrame()
+ df = empty_frame.copy()
df["foo"] = df.index
return df
@@ -436,9 +436,9 @@ def f():
expected["foo"] = expected["foo"].astype("float64")
tm.assert_frame_equal(f(), expected)
- df = DataFrame()
+ df = empty_frame.copy()
tm.assert_index_equal(df.columns, Index([], dtype=object))
- df2 = DataFrame()
+ df2 = empty_frame.copy()
df2[1] = Series([1], index=["foo"])
df.loc[:, 1] = Series([1], index=["foo"])
tm.assert_frame_equal(df, DataFrame([[1]], index=["foo"], columns=[1]))
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index b1502ed3f3c09..f1c9c3f078b2a 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -501,9 +501,9 @@ def test_reading_all_sheets_with_blank(self, read_ext):
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
- def test_read_excel_blank(self, read_ext):
+ def test_read_excel_blank(self, read_ext, empty_frame):
actual = pd.read_excel("blank" + read_ext, "Sheet1")
- tm.assert_frame_equal(actual, DataFrame())
+ tm.assert_frame_equal(actual, empty_frame)
def test_read_excel_blank_with_header(self, read_ext):
expected = DataFrame(columns=["col_1", "col_2"])
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index ec4614538004c..b70dcfe7c3a97 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -116,8 +116,8 @@ def test_render(self):
s.render()
# it worked?
- def test_render_empty_dfs(self):
- empty_df = DataFrame()
+ def test_render_empty_dfs(self, empty_frame):
+ empty_df = empty_frame
es = Styler(empty_df)
es.render()
# An index but no columns
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index 9a14022d6f776..b28e9a8caa3fb 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -389,9 +389,9 @@ def test_to_html_justify(justify, datapath):
@pytest.mark.parametrize(
"justify", ["super-right", "small-left", "noinherit", "tiny", "pandas"]
)
-def test_to_html_invalid_justify(justify):
+def test_to_html_invalid_justify(justify, empty_frame):
# GH 17527
- df = DataFrame()
+ df = empty_frame
msg = "Invalid value for justify parameter"
with pytest.raises(ValueError, match=msg):
@@ -438,8 +438,8 @@ def test_to_html_index(datapath):
@pytest.mark.parametrize("classes", ["sortable draggable", ["sortable", "draggable"]])
-def test_to_html_with_classes(classes, datapath):
- df = DataFrame()
+def test_to_html_with_classes(classes, datapath, empty_frame):
+ df = empty_frame
expected = expected_html(datapath, "with_classes")
result = df.to_html(classes=classes)
assert result == expected
@@ -716,9 +716,9 @@ def test_ignore_display_max_colwidth(method, expected, max_colwidth):
@pytest.mark.parametrize("classes", [True, 0])
-def test_to_html_invalid_classes_type(classes):
+def test_to_html_invalid_classes_type(classes, empty_frame):
# GH 25608
- df = DataFrame()
+ df = empty_frame
msg = "classes must be a string, list, or tuple"
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 509e5bcb33304..c6b6e18590720 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -78,8 +78,8 @@ def test_to_latex_format(self, float_frame):
assert withindex_result == withindex_expected
- def test_to_latex_empty(self):
- df = DataFrame()
+ def test_to_latex_empty(self, empty_frame):
+ df = empty_frame
result = df.to_latex()
expected = r"""\begin{tabular}{l}
\toprule
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index b7a9918ff46da..73492367dc8cf 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -163,9 +163,9 @@ def test_simple_normalize(self, state_data):
tm.assert_frame_equal(result, expected)
- def test_empty_array(self):
+ def test_empty_array(self, empty_frame):
result = json_normalize([])
- expected = DataFrame()
+ expected = empty_frame
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
diff --git a/pandas/tests/io/parser/test_mangle_dupes.py b/pandas/tests/io/parser/test_mangle_dupes.py
index 5c4e642115798..43d2cdc3c20de 100644
--- a/pandas/tests/io/parser/test_mangle_dupes.py
+++ b/pandas/tests/io/parser/test_mangle_dupes.py
@@ -111,7 +111,7 @@ def test_thorough_mangle_names(all_parsers, data, names, expected):
parser.read_csv(StringIO(data), names=names)
-def test_mangled_unnamed_placeholders(all_parsers):
+def test_mangled_unnamed_placeholders(all_parsers, empty_frame):
# xref gh-13017
orig_key = "0"
parser = all_parsers
@@ -121,7 +121,7 @@ def test_mangled_unnamed_placeholders(all_parsers):
# This test recursively updates `df`.
for i in range(3):
- expected = DataFrame()
+ expected = empty_frame.copy()
for j in range(i + 1):
expected["Unnamed: 0" + ".1" * j] = [0, 1, 2]
diff --git a/pandas/tests/io/parser/test_usecols.py b/pandas/tests/io/parser/test_usecols.py
index 979eb4702cc84..0c73d91c13490 100644
--- a/pandas/tests/io/parser/test_usecols.py
+++ b/pandas/tests/io/parser/test_usecols.py
@@ -406,9 +406,9 @@ def test_usecols_with_multi_byte_characters(all_parsers, usecols):
tm.assert_frame_equal(result, expected)
-def test_empty_usecols(all_parsers):
+def test_empty_usecols(all_parsers, empty_frame):
data = "a,b,c\n1,2,3\n4,5,6"
- expected = DataFrame()
+ expected = empty_frame
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=set())
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 9a0788ea068ad..273f7dba692d3 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -2388,10 +2388,10 @@ def test_frame(self, compression, setup_path):
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
- def test_empty_series_frame(self, setup_path):
+ def test_empty_series_frame(self, setup_path, empty_frame):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
- df0 = DataFrame()
+ df0 = empty_frame
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 2f2ae8cd9d32b..89a69ba98336e 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -877,7 +877,7 @@ def test_get_schema_keys(self):
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
- def test_chunksize_read(self):
+ def test_chunksize_read(self, empty_frame):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
@@ -885,7 +885,7 @@ def test_chunksize_read(self):
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
- res2 = DataFrame()
+ res2 = empty_frame
i = 0
sizes = [5, 5, 5, 5, 2]
@@ -900,7 +900,7 @@ def test_chunksize_read(self):
# reading the query in chunks with read_sql_query
if self.mode == "sqlalchemy":
- res3 = DataFrame()
+ res3 = empty_frame
i = 0
sizes = [5, 5, 5, 5, 2]
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index 6384c5f19c898..1ca83ee3e159f 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -81,9 +81,9 @@ def test_resample_interpolate(frame):
)
-def test_raises_on_non_datetimelike_index():
+def test_raises_on_non_datetimelike_index(empty_frame):
# this is a non datetimelike index
- xp = DataFrame()
+ xp = empty_frame
msg = (
"Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex, "
"but got an instance of 'Index'"
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index dc1efa46403be..e09b2e3829667 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -375,9 +375,9 @@ def test_join_index_mixed_overlap(self):
expected = _join_by_hand(df1, df2)
tm.assert_frame_equal(joined, expected)
- def test_join_empty_bug(self):
+ def test_join_empty_bug(self, empty_frame):
# generated an exception in 0.4.3
- x = DataFrame()
+ x = empty_frame
x.join(DataFrame([3], index=[0], columns=["A"]), how="outer")
def test_join_unconsolidated(self):
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index a12395b32ab4e..d0c585f9763e4 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -795,8 +795,8 @@ def test_append(self, sort, float_frame):
mixed_appended2.reindex(columns=["A", "B", "C", "D"]),
)
- def test_append_empty(self, float_frame):
- empty = DataFrame()
+ def test_append_empty(self, empty_frame, float_frame):
+ empty = empty_frame
appended = float_frame.append(empty)
tm.assert_frame_equal(float_frame, appended)
@@ -1507,7 +1507,7 @@ def test_with_mixed_tuples(self, sort):
# it works
concat([df1, df2], sort=sort)
- def test_handle_empty_objects(self, sort):
+ def test_handle_empty_objects(self, sort, empty_frame):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
@@ -1528,7 +1528,7 @@ def test_handle_empty_objects(self, sort):
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
- empty = DataFrame()
+ empty = empty_frame
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py
index 8795af2e11122..44057e0dfb49d 100644
--- a/pandas/tests/reshape/test_crosstab.py
+++ b/pandas/tests/reshape/test_crosstab.py
@@ -224,14 +224,14 @@ def test_crosstab_dropna(self):
)
tm.assert_index_equal(res.columns, m)
- def test_crosstab_no_overlap(self):
+ def test_crosstab_no_overlap(self, empty_frame):
# GS 10291
s1 = Series([1, 2, 3], index=[1, 2, 3])
s2 = Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
- expected = DataFrame()
+ expected = empty_frame
tm.assert_frame_equal(actual, expected)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 6289c2efea7f1..b74c8e493e72e 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -1926,9 +1926,9 @@ def test_empty_str_methods(self):
table = str.maketrans("a", "b")
tm.assert_series_equal(empty_str, empty.str.translate(table))
- def test_empty_str_methods_to_frame(self):
+ def test_empty_str_methods_to_frame(self, empty_frame):
empty = Series(dtype=str)
- empty_df = DataFrame()
+ empty_df = empty_frame
tm.assert_frame_equal(empty_df, empty.str.partition("a"))
tm.assert_frame_equal(empty_df, empty.str.rpartition("a"))
diff --git a/pandas/tests/window/moments/test_moments_expanding.py b/pandas/tests/window/moments/test_moments_expanding.py
index 9dfaecee9caeb..778d2831f6989 100644
--- a/pandas/tests/window/moments/test_moments_expanding.py
+++ b/pandas/tests/window/moments/test_moments_expanding.py
@@ -283,11 +283,11 @@ def _check_expanding_has_min_periods(self, func, static_comp, has_min_periods):
lambda x: x.expanding(min_periods=5).apply(sum, raw=True),
],
)
- def test_moment_functions_zero_length(self, f):
+ def test_moment_functions_zero_length(self, f, empty_frame):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
- df1 = DataFrame()
+ df1 = empty_frame
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
@@ -309,9 +309,9 @@ def test_moment_functions_zero_length(self, f):
lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)),
],
)
- def test_moment_functions_zero_length_pairwise(self, f):
+ def test_moment_functions_zero_length_pairwise(self, f, empty_frame):
- df1 = DataFrame()
+ df1 = empty_frame
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index f3a14971ef2e7..8d5cb36181758 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -1450,11 +1450,11 @@ def test_rolling_min_max_numeric_types(self):
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
assert result.dtypes[0] == np.dtype("f8")
- def test_moment_functions_zero_length(self):
+ def test_moment_functions_zero_length(self, empty_frame):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
- df1 = DataFrame()
+ df1 = empty_frame
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
@@ -1493,9 +1493,9 @@ def test_moment_functions_zero_length(self):
# scipy needed for rolling_window
continue
- def test_moment_functions_zero_length_pairwise(self):
+ def test_moment_functions_zero_length_pairwise(self, empty_frame):
- df1 = DataFrame()
+ df1 = empty_frame
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index 6b6367fd80b26..49bc7ea67715a 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -63,12 +63,12 @@ def test_numpy_compat(self, method):
),
],
)
- def test_empty_df_expanding(self, expander):
+ def test_empty_df_expanding(self, expander, empty_frame):
# GH 15819 Verifies that datetime and integer expanding windows can be
# applied to empty DataFrames
- expected = DataFrame()
- result = DataFrame().expanding(expander).sum()
+ expected = empty_frame
+ result = empty_frame.expanding(expander).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer expanding windows can be applied
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index ab2c7fcb7a0dc..3b1e97276e073 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -247,11 +247,11 @@ def test_closed_median_quantile(self, closed, expected):
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("roller", ["1s", 1])
- def tests_empty_df_rolling(self, roller):
+ def tests_empty_df_rolling(self, roller, empty_frame):
# GH 15819 Verifies that datetime and integer rolling windows can be
# applied to empty DataFrames
- expected = DataFrame()
- result = DataFrame().rolling(roller).sum()
+ expected = empty_frame
+ result = empty_frame.rolling(roller).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer rolling windows can be applied to
| - [ ] closes #33161
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33167 | 2020-03-31T06:09:22Z | 2020-05-08T15:58:32Z | null | 2020-05-08T15:58:33Z |
CLN: Replace DataFrame() with empty_frame() in tests | diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 4fa5e4196ae5b..274057da76b2b 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -44,7 +44,7 @@ def test_get(self, float_frame):
@pytest.mark.parametrize(
"df",
[
- DataFrame(),
+ empty_frame(),
DataFrame(columns=list("AB")),
DataFrame(columns=list("AB"), index=range(3)),
],
@@ -842,7 +842,7 @@ def test_setitem_with_empty_listlike(self):
def test_setitem_scalars_no_index(self):
# GH16823 / 17894
- df = DataFrame()
+ df = empty_frame()
df["foo"] = 1
expected = DataFrame(columns=["foo"]).astype(np.int64)
tm.assert_frame_equal(df, expected)
@@ -1669,7 +1669,7 @@ def test_reindex_subclass(self):
class MyDataFrame(DataFrame):
pass
- expected = DataFrame()
+ expected = empty_frame()
df = MyDataFrame()
result = df.reindex_like(expected)
diff --git a/pandas/tests/frame/indexing/test_insert.py b/pandas/tests/frame/indexing/test_insert.py
index 622c93d1c2fdc..dc3ae63dc229e 100644
--- a/pandas/tests/frame/indexing/test_insert.py
+++ b/pandas/tests/frame/indexing/test_insert.py
@@ -58,7 +58,7 @@ def test_insert_column_bug_4032(self):
def test_insert_with_columns_dups(self):
# GH#14291
- df = DataFrame()
+ df = empty_frame()
df.insert(0, "A", ["g", "h", "i"], allow_duplicates=True)
df.insert(0, "A", ["d", "e", "f"], allow_duplicates=True)
df.insert(0, "A", ["a", "b", "c"], allow_duplicates=True)
diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py
index 9fc3629e794e2..455101982daa2 100644
--- a/pandas/tests/frame/methods/test_append.py
+++ b/pandas/tests/frame/methods/test_append.py
@@ -9,7 +9,7 @@
class TestDataFrameAppend:
def test_append_empty_list(self):
# GH 28769
- df = DataFrame()
+ df = empty_frame()
result = df.append([])
expected = df
tm.assert_frame_equal(result, expected)
@@ -96,29 +96,29 @@ def test_append_missing_cols(self):
def test_append_empty_dataframe(self):
# Empty df append empty df
- df1 = DataFrame()
- df2 = DataFrame()
+ df1 = empty_frame()
+ df2 = empty_frame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Non-empty df append empty df
df1 = DataFrame(np.random.randn(5, 2))
- df2 = DataFrame()
+ df2 = empty_frame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Empty df with columns append empty df
df1 = DataFrame(columns=["bar", "foo"])
- df2 = DataFrame()
+ df2 = empty_frame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Non-Empty df with columns append empty df
df1 = DataFrame(np.random.randn(5, 2), columns=["bar", "foo"])
- df2 = DataFrame()
+ df2 = empty_frame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
@@ -130,7 +130,7 @@ def test_append_dtypes(self):
# can sometimes infer the correct type
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(5))
- df2 = DataFrame()
+ df2 = empty_frame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py
index 7715cb1cb6eec..2ef76aa9f2c5a 100644
--- a/pandas/tests/frame/methods/test_combine_first.py
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -73,7 +73,7 @@ def test_combine_first(self, float_frame):
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
- comb = DataFrame().combine_first(float_frame)
+ comb = empty_frame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
diff --git a/pandas/tests/frame/methods/test_count.py b/pandas/tests/frame/methods/test_count.py
index 13a93e3efc48c..3c09f0aa1916f 100644
--- a/pandas/tests/frame/methods/test_count.py
+++ b/pandas/tests/frame/methods/test_count.py
@@ -5,7 +5,7 @@
class TestDataFrameCount:
def test_count(self):
# corner case
- frame = DataFrame()
+ frame = empty_frame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
@@ -23,7 +23,7 @@ def test_count(self):
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
- df = DataFrame()
+ df = empty_frame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_drop_duplicates.py b/pandas/tests/frame/methods/test_drop_duplicates.py
index fd4bae26ade57..1b7a86bfdf09a 100644
--- a/pandas/tests/frame/methods/test_drop_duplicates.py
+++ b/pandas/tests/frame/methods/test_drop_duplicates.py
@@ -193,7 +193,7 @@ def test_drop_duplicates_tuple():
@pytest.mark.parametrize(
"df",
[
- DataFrame(),
+ empty_frame(),
DataFrame(columns=[]),
DataFrame(columns=["A", "B", "C"]),
DataFrame(index=[]),
diff --git a/pandas/tests/frame/methods/test_filter.py b/pandas/tests/frame/methods/test_filter.py
index 569b2fe21d1c2..51b232ad09467 100644
--- a/pandas/tests/frame/methods/test_filter.py
+++ b/pandas/tests/frame/methods/test_filter.py
@@ -122,7 +122,7 @@ def test_filter_bytestring(self, name):
tm.assert_frame_equal(df.filter(regex=name), expected)
def test_filter_corner(self):
- empty = DataFrame()
+ empty = empty_frame()
result = empty.filter([])
tm.assert_frame_equal(result, empty)
diff --git a/pandas/tests/frame/methods/test_head_tail.py b/pandas/tests/frame/methods/test_head_tail.py
index 93763bc12ce0d..1f1b53c03f389 100644
--- a/pandas/tests/frame/methods/test_head_tail.py
+++ b/pandas/tests/frame/methods/test_head_tail.py
@@ -25,6 +25,6 @@ def test_head_tail(float_frame):
tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
# test empty dataframe
- empty_df = DataFrame()
+ empty_df = empty_frame()
tm.assert_frame_equal(empty_df.tail(), empty_df)
tm.assert_frame_equal(empty_df.head(), empty_df)
diff --git a/pandas/tests/frame/methods/test_isin.py b/pandas/tests/frame/methods/test_isin.py
index 6307738021f68..a0c7ab4a72502 100644
--- a/pandas/tests/frame/methods/test_isin.py
+++ b/pandas/tests/frame/methods/test_isin.py
@@ -176,7 +176,7 @@ def test_isin_empty_datetimelike(self):
df1_ts = DataFrame({"date": pd.to_datetime(["2014-01-01", "2014-01-02"])})
df1_td = DataFrame({"date": [pd.Timedelta(1, "s"), pd.Timedelta(2, "s")]})
df2 = DataFrame({"date": []})
- df3 = DataFrame()
+ df3 = empty_frame()
expected = DataFrame({"date": [False, False]})
diff --git a/pandas/tests/frame/methods/test_round.py b/pandas/tests/frame/methods/test_round.py
index 6dcdf49e93729..5e0cf3229ebd0 100644
--- a/pandas/tests/frame/methods/test_round.py
+++ b/pandas/tests/frame/methods/test_round.py
@@ -11,7 +11,7 @@ def test_round(self):
# GH#2665
# Test that rounding an empty DataFrame does nothing
- df = DataFrame()
+ df = empty_frame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 6525e93d89fce..e11045d7e1740 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -754,7 +754,7 @@ def test_operators_timedelta64(self):
assert df["off2"].dtype == "timedelta64[ns]"
def test_sum_corner(self):
- empty_frame = DataFrame()
+ empty_frame = empty_frame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 91627b46c2fee..82fe90e0c7f96 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -118,7 +118,7 @@ def test_tab_completion(self):
assert isinstance(df.__getitem__("A"), pd.DataFrame)
def test_not_hashable(self):
- empty_frame = DataFrame()
+ empty_frame = empty_frame()
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
@@ -162,7 +162,7 @@ def test_get_agg_axis(self, float_frame):
float_frame._get_agg_axis(2)
def test_nonzero(self, float_frame, float_string_frame):
- empty_frame = DataFrame()
+ empty_frame = empty_frame()
assert empty_frame.empty
assert not float_frame.empty
@@ -423,7 +423,7 @@ def test_empty_nonzero(self):
assert df.empty
assert df.T.empty
empty_frames = [
- DataFrame(),
+ empty_frame(),
DataFrame(index=[1]),
DataFrame(columns=[1]),
DataFrame({1: []}),
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index e328523253144..87fcebde8553c 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -74,7 +74,7 @@ def test_apply_mixed_datetimelike(self):
def test_apply_empty(self, float_frame):
# empty
- empty_frame = DataFrame()
+ empty_frame = empty_frame()
applied = empty_frame.apply(np.sqrt)
assert applied.empty
@@ -99,7 +99,7 @@ def test_apply_empty(self, float_frame):
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
- empty_frame = DataFrame()
+ empty_frame = empty_frame()
x = []
result = empty_frame.apply(x.append, axis=1, result_type="expand")
@@ -760,7 +760,7 @@ def test_with_dictlike_columns(self):
tm.assert_series_equal(result, expected)
# GH 18775
- df = DataFrame()
+ df = empty_frame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(
@@ -1323,7 +1323,7 @@ def func(group_col):
"df, func, expected",
chain(
tm.get_cython_table_params(
- DataFrame(),
+ empty_frame(),
[
("sum", Series(dtype="float64")),
("max", Series(dtype="float64")),
@@ -1365,7 +1365,7 @@ def test_agg_cython_table(self, df, func, expected, axis):
"df, func, expected",
chain(
tm.get_cython_table_params(
- DataFrame(), [("cumprod", DataFrame()), ("cumsum", DataFrame())]
+ empty_frame(), [("cumprod", empty_frame()), ("cumsum", empty_frame())]
),
tm.get_cython_table_params(
DataFrame([[np.nan, 1], [1, 2]]),
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 89f8bc433419b..bc9e8feae40a1 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -990,13 +990,13 @@ def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
# corner cases
# empty
- plus_empty = float_frame + DataFrame()
+ plus_empty = float_frame + empty_frame()
assert np.isnan(plus_empty.values).all()
- empty_plus = DataFrame() + float_frame
+ empty_plus = empty_frame() + float_frame
assert np.isnan(empty_plus.values).all()
- empty_empty = DataFrame() + DataFrame()
+ empty_empty = empty_frame() + empty_frame()
assert empty_empty.empty
# out of order
@@ -1116,7 +1116,7 @@ def test_combineFunc(self, float_frame, mixed_float_frame):
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
- result = DataFrame() * 2
+ result = empty_frame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index e67fef9efef6d..bd008f08a6aa1 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -345,7 +345,7 @@ def test_copy(self, float_frame, float_string_frame):
assert copy._data is not float_string_frame._data
def test_pickle(self, float_string_frame, timezone_frame):
- empty_frame = DataFrame()
+ empty_frame = empty_frame()
unpickled = tm.round_trip_pickle(float_string_frame)
tm.assert_frame_equal(float_string_frame, unpickled)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 9f40e8c6931c8..73fa67e6d8287 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -62,7 +62,7 @@ def test_series_with_name_not_matching_column(self):
@pytest.mark.parametrize(
"constructor",
[
- lambda: DataFrame(),
+ lambda: empty_frame(),
lambda: DataFrame(None),
lambda: DataFrame({}),
lambda: DataFrame(()),
@@ -78,7 +78,7 @@ def test_series_with_name_not_matching_column(self):
],
)
def test_empty_constructor(self, constructor):
- expected = DataFrame()
+ expected = empty_frame()
result = constructor()
assert len(result.index) == 0
assert len(result.columns) == 0
@@ -1774,7 +1774,7 @@ def test_constructor_with_datetimes(self):
i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern")
expected = DataFrame({"a": i.to_series().reset_index(drop=True)})
- df = DataFrame()
+ df = empty_frame()
df["a"] = i
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 7cb7115276f71..6dc77b835adf3 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -167,7 +167,7 @@ def test_dropna_multiple_axes(self):
def test_dropna_tz_aware_datetime(self):
# GH13407
- df = DataFrame()
+ df = empty_frame()
dt1 = datetime.datetime(2015, 1, 1, tzinfo=dateutil.tz.tzutc())
dt2 = datetime.datetime(2015, 2, 2, tzinfo=dateutil.tz.tzutc())
df["Time"] = [dt1]
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 6d786d9580542..e1edeb426a54b 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -63,7 +63,7 @@ def test_repr(self, float_frame):
repr(no_index)
# no columns or index
- DataFrame().info(buf=buf)
+ empty_frame().info(buf=buf)
df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])
assert "\t" not in repr(df)
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index 9d3c40ce926d7..d2e72d376dbbc 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -53,7 +53,7 @@ def test_pivot_duplicates(self):
def test_pivot_empty(self):
df = DataFrame(columns=["a", "b", "c"])
result = df.pivot("a", "b", "c")
- expected = DataFrame()
+ expected = empty_frame()
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index e860ea1a3d052..fe8e09256c334 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -223,7 +223,7 @@ def test_aggregate_item_by_item(df):
def aggfun(ser):
return ser.size
- result = DataFrame().groupby(df.A).agg(aggfun)
+ result = empty_frame().groupby(df.A).agg(aggfun)
assert isinstance(result, DataFrame)
assert len(result) == 0
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 9fbcced75c327..3b9567dffe56c 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -658,7 +658,7 @@ def test_func(x):
pass
result = test_df.groupby("groups").apply(test_func)
- expected = DataFrame()
+ expected = empty_frame()
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py
index b4239d7d34a90..36a910f6dfd0f 100644
--- a/pandas/tests/groupby/test_counting.py
+++ b/pandas/tests/groupby/test_counting.py
@@ -19,7 +19,7 @@ def test_cumcount(self):
tm.assert_series_equal(expected, sg.cumcount())
def test_cumcount_empty(self):
- ge = DataFrame().groupby(level=0)
+ ge = empty_frame().groupby(level=0)
se = Series(dtype=object).groupby(level=0)
# edge case, as this is usually considered float
@@ -94,7 +94,7 @@ def test_ngroup_one_group(self):
tm.assert_series_equal(expected, sg.ngroup())
def test_ngroup_empty(self):
- ge = DataFrame().groupby(level=0)
+ ge = empty_frame().groupby(level=0)
se = Series(dtype=object).groupby(level=0)
# edge case, as this is usually considered float
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index b8d8f56512a69..6b19b47f1fe4f 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -310,7 +310,7 @@ def f1(x):
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
- return DataFrame()
+ return empty_frame()
else:
y = y.set_index(["b", "c"])
return y
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 1529a259c49af..b28906d326667 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -236,7 +236,7 @@ def test_groupby_function_tuple_1677(self):
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
- a = DataFrame()
+ a = empty_frame()
c = DataFrame({"A": "foo", "B": dr}, index=dr)
result = a.append(c)
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index fa00b870ca757..649164f33e77d 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -171,7 +171,7 @@ def test_pass_TimedeltaIndex_to_index(self):
def test_append_numpy_bug_1681(self):
td = timedelta_range("1 days", "10 days", freq="2D")
- a = DataFrame()
+ a = empty_frame()
c = DataFrame({"A": "foo", "B": td}, index=td)
str(c)
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 9cc031001f81c..3aa520a5e42ea 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -89,7 +89,7 @@ def setup_method(self, method):
self.series_ts_rev = Series(np.random.randn(4), index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev)
- self.frame_empty = DataFrame()
+ self.frame_empty = empty_frame()
self.series_empty = Series(dtype=object)
# form agglomerates
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index c8c2d1ed587cf..b73dfe7d0b0b6 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -211,7 +211,7 @@ def test_loc_setitem_datetime(self):
lambda x: np.datetime64(x),
]:
- df = DataFrame()
+ df = empty_frame()
df.loc[conv(dt1), "one"] = 100
df.loc[conv(dt2), "one"] = 200
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index a8a21b0610c14..6938a78003e6e 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -169,7 +169,7 @@ def test_inf_upcast(self):
tm.assert_index_equal(result, expected)
# Test with np.inf in columns
- df = DataFrame()
+ df = empty_frame()
df.loc[0, 0] = 1
df.loc[1, 1] = 2
df.loc[0, np.inf] = 3
@@ -566,7 +566,7 @@ def test_string_slice(self):
with pytest.raises(KeyError, match="'2011'"):
df.loc["2011", 0]
- df = DataFrame()
+ df = empty_frame()
assert not df.index.is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 2ce07ec41758f..3625dab843984 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -373,7 +373,7 @@ def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
- df = DataFrame()
+ df = empty_frame()
with pytest.raises(ValueError):
df.loc[1] = 1
@@ -397,14 +397,14 @@ def f():
tm.assert_frame_equal(f(), expected)
def f():
- df = DataFrame()
+ df = empty_frame()
df["foo"] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
- df = DataFrame()
+ df = empty_frame()
df["foo"] = df.index
return df
@@ -436,9 +436,9 @@ def f():
expected["foo"] = expected["foo"].astype("float64")
tm.assert_frame_equal(f(), expected)
- df = DataFrame()
+ df = empty_frame()
tm.assert_index_equal(df.columns, Index([], dtype=object))
- df2 = DataFrame()
+ df2 = empty_frame()
df2[1] = Series([1], index=["foo"])
df.loc[:, 1] = Series([1], index=["foo"])
tm.assert_frame_equal(df, DataFrame([[1]], index=["foo"], columns=[1]))
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index b1502ed3f3c09..53cb9e9ba0a32 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -503,7 +503,7 @@ def test_reading_all_sheets_with_blank(self, read_ext):
# GH6403
def test_read_excel_blank(self, read_ext):
actual = pd.read_excel("blank" + read_ext, "Sheet1")
- tm.assert_frame_equal(actual, DataFrame())
+ tm.assert_frame_equal(actual, empty_frame())
def test_read_excel_blank_with_header(self, read_ext):
expected = DataFrame(columns=["col_1", "col_2"])
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index ec4614538004c..a7ebb5c28c595 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -117,7 +117,7 @@ def test_render(self):
# it worked?
def test_render_empty_dfs(self):
- empty_df = DataFrame()
+ empty_df = empty_frame()
es = Styler(empty_df)
es.render()
# An index but no columns
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index 9a14022d6f776..7b2ba62bfee4a 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -391,7 +391,7 @@ def test_to_html_justify(justify, datapath):
)
def test_to_html_invalid_justify(justify):
# GH 17527
- df = DataFrame()
+ df = empty_frame()
msg = "Invalid value for justify parameter"
with pytest.raises(ValueError, match=msg):
@@ -439,7 +439,7 @@ def test_to_html_index(datapath):
@pytest.mark.parametrize("classes", ["sortable draggable", ["sortable", "draggable"]])
def test_to_html_with_classes(classes, datapath):
- df = DataFrame()
+ df = empty_frame()
expected = expected_html(datapath, "with_classes")
result = df.to_html(classes=classes)
assert result == expected
@@ -718,7 +718,7 @@ def test_ignore_display_max_colwidth(method, expected, max_colwidth):
@pytest.mark.parametrize("classes", [True, 0])
def test_to_html_invalid_classes_type(classes):
# GH 25608
- df = DataFrame()
+ df = empty_frame()
msg = "classes must be a string, list, or tuple"
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index 509e5bcb33304..7cdc4d5e2ab61 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -79,7 +79,7 @@ def test_to_latex_format(self, float_frame):
assert withindex_result == withindex_expected
def test_to_latex_empty(self):
- df = DataFrame()
+ df = empty_frame()
result = df.to_latex()
expected = r"""\begin{tabular}{l}
\toprule
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index b7a9918ff46da..a0cf8f6ad1151 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -165,7 +165,7 @@ def test_simple_normalize(self, state_data):
def test_empty_array(self):
result = json_normalize([])
- expected = DataFrame()
+ expected = empty_frame()
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
diff --git a/pandas/tests/io/parser/test_mangle_dupes.py b/pandas/tests/io/parser/test_mangle_dupes.py
index 5c4e642115798..ab837f51e9bfa 100644
--- a/pandas/tests/io/parser/test_mangle_dupes.py
+++ b/pandas/tests/io/parser/test_mangle_dupes.py
@@ -121,7 +121,7 @@ def test_mangled_unnamed_placeholders(all_parsers):
# This test recursively updates `df`.
for i in range(3):
- expected = DataFrame()
+ expected = empty_frame()
for j in range(i + 1):
expected["Unnamed: 0" + ".1" * j] = [0, 1, 2]
diff --git a/pandas/tests/io/parser/test_usecols.py b/pandas/tests/io/parser/test_usecols.py
index 979eb4702cc84..0ad9c54145d45 100644
--- a/pandas/tests/io/parser/test_usecols.py
+++ b/pandas/tests/io/parser/test_usecols.py
@@ -408,7 +408,7 @@ def test_usecols_with_multi_byte_characters(all_parsers, usecols):
def test_empty_usecols(all_parsers):
data = "a,b,c\n1,2,3\n4,5,6"
- expected = DataFrame()
+ expected = empty_frame()
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=set())
@@ -443,7 +443,7 @@ def test_np_array_usecols(all_parsers):
}
),
),
- (lambda x: False, DataFrame()),
+ (lambda x: False, empty_frame()),
],
)
def test_callable_usecols(all_parsers, usecols, expected):
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 9a0788ea068ad..b9f49f3dfda54 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -2391,7 +2391,7 @@ def test_frame(self, compression, setup_path):
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
- df0 = DataFrame()
+ df0 = empty_frame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 2f2ae8cd9d32b..f11147abf658a 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -885,7 +885,7 @@ def test_chunksize_read(self):
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
- res2 = DataFrame()
+ res2 = empty_frame()
i = 0
sizes = [5, 5, 5, 5, 2]
@@ -900,7 +900,7 @@ def test_chunksize_read(self):
# reading the query in chunks with read_sql_query
if self.mode == "sqlalchemy":
- res3 = DataFrame()
+ res3 = empty_frame()
i = 0
sizes = [5, 5, 5, 5, 2]
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index 6384c5f19c898..7d065a5029d64 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -83,7 +83,7 @@ def test_resample_interpolate(frame):
def test_raises_on_non_datetimelike_index():
# this is a non datetimelike index
- xp = DataFrame()
+ xp = empty_frame()
msg = (
"Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex, "
"but got an instance of 'Index'"
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index dc1efa46403be..73980f7d5c24f 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -377,7 +377,7 @@ def test_join_index_mixed_overlap(self):
def test_join_empty_bug(self):
# generated an exception in 0.4.3
- x = DataFrame()
+ x = empty_frame()
x.join(DataFrame([3], index=[0], columns=["A"]), how="outer")
def test_join_unconsolidated(self):
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index a12395b32ab4e..a8dd58e827556 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -796,7 +796,7 @@ def test_append(self, sort, float_frame):
)
def test_append_empty(self, float_frame):
- empty = DataFrame()
+ empty = empty_frame()
appended = float_frame.append(empty)
tm.assert_frame_equal(float_frame, appended)
@@ -1528,7 +1528,7 @@ def test_handle_empty_objects(self, sort):
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
- empty = DataFrame()
+ empty = empty_frame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py
index 8795af2e11122..4c00b5660577b 100644
--- a/pandas/tests/reshape/test_crosstab.py
+++ b/pandas/tests/reshape/test_crosstab.py
@@ -231,7 +231,7 @@ def test_crosstab_no_overlap(self):
s2 = Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
- expected = DataFrame()
+ expected = empty_frame()
tm.assert_frame_equal(actual, expected)
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 6289c2efea7f1..410cbd64f968d 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -1928,7 +1928,7 @@ def test_empty_str_methods(self):
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
- empty_df = DataFrame()
+ empty_df = empty_frame()
tm.assert_frame_equal(empty_df, empty.str.partition("a"))
tm.assert_frame_equal(empty_df, empty.str.rpartition("a"))
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index 6411b9ab654f1..add40cb36036f 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -194,7 +194,7 @@ def test_multiindex_objects():
Index([1, 2, 3]),
Index([True, False, True]),
DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),
- DataFrame(),
+ empty_frame(),
tm.makeMissingDataframe(),
tm.makeMixedDataFrame(),
tm.makeTimeDataFrame(),
diff --git a/pandas/tests/window/common.py b/pandas/tests/window/common.py
index 6aeada3152dbb..4850091c822ab 100644
--- a/pandas/tests/window/common.py
+++ b/pandas/tests/window/common.py
@@ -148,7 +148,7 @@ def create_series():
def create_dataframes():
return [
- DataFrame(),
+ empty_frame(),
DataFrame(columns=["a"]),
DataFrame(columns=["a", "a"]),
DataFrame(columns=["a", "b"]),
diff --git a/pandas/tests/window/moments/test_moments_expanding.py b/pandas/tests/window/moments/test_moments_expanding.py
index 9dfaecee9caeb..8ab05ed5a8098 100644
--- a/pandas/tests/window/moments/test_moments_expanding.py
+++ b/pandas/tests/window/moments/test_moments_expanding.py
@@ -287,7 +287,7 @@ def test_moment_functions_zero_length(self, f):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
- df1 = DataFrame()
+ df1 = empty_frame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
@@ -311,7 +311,7 @@ def test_moment_functions_zero_length(self, f):
)
def test_moment_functions_zero_length_pairwise(self, f):
- df1 = DataFrame()
+ df1 = empty_frame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index f3a14971ef2e7..2a189be7a04cc 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -1454,7 +1454,7 @@ def test_moment_functions_zero_length(self):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
- df1 = DataFrame()
+ df1 = empty_frame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
@@ -1495,7 +1495,7 @@ def test_moment_functions_zero_length(self):
def test_moment_functions_zero_length_pairwise(self):
- df1 = DataFrame()
+ df1 = empty_frame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index 6b6367fd80b26..5d0555c23fd07 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -67,8 +67,8 @@ def test_empty_df_expanding(self, expander):
# GH 15819 Verifies that datetime and integer expanding windows can be
# applied to empty DataFrames
- expected = DataFrame()
- result = DataFrame().expanding(expander).sum()
+ expected = empty_frame()
+ result = empty_frame().expanding(expander).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer expanding windows can be applied
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index ab2c7fcb7a0dc..2fb4a583ccd43 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -250,8 +250,8 @@ def test_closed_median_quantile(self, closed, expected):
def tests_empty_df_rolling(self, roller):
# GH 15819 Verifies that datetime and integer rolling windows can be
# applied to empty DataFrames
- expected = DataFrame()
- result = DataFrame().rolling(roller).sum()
+ expected = empty_frame()
+ result = empty_frame().rolling(roller).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer rolling windows can be applied to
| - [ ] closes #33161
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33165 | 2020-03-31T04:48:29Z | 2020-03-31T04:55:20Z | null | 2020-03-31T04:56:20Z |
REF: collect .get tests | diff --git a/pandas/tests/frame/indexing/test_get.py b/pandas/tests/frame/indexing/test_get.py
new file mode 100644
index 0000000000000..5f2651eec683c
--- /dev/null
+++ b/pandas/tests/frame/indexing/test_get.py
@@ -0,0 +1,27 @@
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+class TestGet:
+ def test_get(self, float_frame):
+ b = float_frame.get("B")
+ tm.assert_series_equal(b, float_frame["B"])
+
+ assert float_frame.get("foo") is None
+ tm.assert_series_equal(
+ float_frame.get("foo", float_frame["B"]), float_frame["B"]
+ )
+
+ @pytest.mark.parametrize(
+ "df",
+ [
+ DataFrame(),
+ DataFrame(columns=list("AB")),
+ DataFrame(columns=list("AB"), index=range(3)),
+ ],
+ )
+ def test_get_none(self, df):
+ # see gh-5652
+ assert df.get(None) is None
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 4fa5e4196ae5b..2498bfbaa5d97 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -31,29 +31,6 @@
_slice_msg = "slice indices must be integers or None or have an __index__ method"
-class TestGet:
- def test_get(self, float_frame):
- b = float_frame.get("B")
- tm.assert_series_equal(b, float_frame["B"])
-
- assert float_frame.get("foo") is None
- tm.assert_series_equal(
- float_frame.get("foo", float_frame["B"]), float_frame["B"]
- )
-
- @pytest.mark.parametrize(
- "df",
- [
- DataFrame(),
- DataFrame(columns=list("AB")),
- DataFrame(columns=list("AB"), index=range(3)),
- ],
- )
- def test_get_none(self, df):
- # see gh-5652
- assert df.get(None) is None
-
-
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
diff --git a/pandas/tests/series/indexing/test_get.py b/pandas/tests/series/indexing/test_get.py
index 5847141a44ef5..3371c47fa1b0a 100644
--- a/pandas/tests/series/indexing/test_get.py
+++ b/pandas/tests/series/indexing/test_get.py
@@ -1,7 +1,9 @@
import numpy as np
+import pytest
import pandas as pd
from pandas import Series
+import pandas._testing as tm
def test_get():
@@ -149,3 +151,44 @@ def test_get_with_default():
for other in others:
assert s.get(other, "z") == "z"
assert s.get(other, other) == other
+
+
+@pytest.mark.parametrize(
+ "arr",
+ [np.random.randn(10), tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")],
+)
+def test_get2(arr):
+ # TODO: better name, possibly split
+ # GH#21260
+ ser = Series(arr, index=[2 * i for i in range(len(arr))])
+ assert ser.get(4) == ser.iloc[2]
+
+ result = ser.get([4, 6])
+ expected = ser.iloc[[2, 3]]
+ tm.assert_series_equal(result, expected)
+
+ result = ser.get(slice(2))
+ expected = ser.iloc[[0, 1]]
+ tm.assert_series_equal(result, expected)
+
+ assert ser.get(-1) is None
+ assert ser.get(ser.index.max() + 1) is None
+
+ ser = Series(arr[:6], index=list("abcdef"))
+ assert ser.get("c") == ser.iloc[2]
+
+ result = ser.get(slice("b", "d"))
+ expected = ser.iloc[[1, 2, 3]]
+ tm.assert_series_equal(result, expected)
+
+ result = ser.get("Z")
+ assert result is None
+
+ assert ser.get(4) == ser.iloc[4]
+ assert ser.get(-1) == ser.iloc[-1]
+ assert ser.get(len(ser)) is None
+
+ # GH#21257
+ ser = Series(arr)
+ ser2 = ser[::2]
+ assert ser2.get(1) is None
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 5b3786e1a0d3c..232b2a61f6268 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -188,46 +188,6 @@ def test_getitem_box_float64(datetime_series):
assert isinstance(value, np.float64)
-@pytest.mark.parametrize(
- "arr",
- [np.random.randn(10), tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")],
-)
-def test_get(arr):
- # GH 21260
- s = Series(arr, index=[2 * i for i in range(len(arr))])
- assert s.get(4) == s.iloc[2]
-
- result = s.get([4, 6])
- expected = s.iloc[[2, 3]]
- tm.assert_series_equal(result, expected)
-
- result = s.get(slice(2))
- expected = s.iloc[[0, 1]]
- tm.assert_series_equal(result, expected)
-
- assert s.get(-1) is None
- assert s.get(s.index.max() + 1) is None
-
- s = Series(arr[:6], index=list("abcdef"))
- assert s.get("c") == s.iloc[2]
-
- result = s.get(slice("b", "d"))
- expected = s.iloc[[1, 2, 3]]
- tm.assert_series_equal(result, expected)
-
- result = s.get("Z")
- assert result is None
-
- assert s.get(4) == s.iloc[4]
- assert s.get(-1) == s.iloc[-1]
- assert s.get(len(s)) is None
-
- # GH 21257
- s = pd.Series(arr)
- s2 = s[::2]
- assert s2.get(1) is None
-
-
def test_series_box_timestamp():
rng = pd.date_range("20090415", "20090519", freq="B")
ser = Series(rng)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33164 | 2020-03-31T04:02:59Z | 2020-03-31T13:39:48Z | 2020-03-31T13:39:48Z | 2020-03-31T14:00:16Z | |
Json parametrize more2 | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 2ee64403c7cf4..0b14c12f2356f 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -526,6 +526,64 @@ def empty_frame():
return DataFrame()
+@pytest.fixture
+def int_frame():
+ """
+ Fixture for DataFrame of ints with index of unique strings
+
+ Columns are ['A', 'B', 'C', 'D']
+
+ A B C D
+ vpBeWjM651 1 0 1 0
+ 5JyxmrP1En -1 0 0 0
+ qEDaoD49U2 -1 1 0 0
+ m66TkTfsFe 0 0 0 0
+ EHPaNzEUFm -1 0 -1 0
+ fpRJCevQhi 2 0 0 0
+ OlQvnmfi3Q 0 0 -2 0
+ ... .. .. .. ..
+ uB1FPlz4uP 0 0 0 1
+ EcSe6yNzCU 0 0 -1 0
+ L50VudaiI8 -1 1 -2 0
+ y3bpw4nwIp 0 -1 0 0
+ H0RdLLwrCT 1 1 0 0
+ rY82K0vMwm 0 0 0 0
+ 1OPIUjnkjk 2 0 0 0
+
+ [30 rows x 4 columns]
+ """
+ return DataFrame(tm.getSeriesData()).astype("int64")
+
+
+@pytest.fixture
+def datetime_frame():
+ """
+ Fixture for DataFrame of floats with DatetimeIndex
+
+ Columns are ['A', 'B', 'C', 'D']
+
+ A B C D
+ 2000-01-03 -1.122153 0.468535 0.122226 1.693711
+ 2000-01-04 0.189378 0.486100 0.007864 -1.216052
+ 2000-01-05 0.041401 -0.835752 -0.035279 -0.414357
+ 2000-01-06 0.430050 0.894352 0.090719 0.036939
+ 2000-01-07 -0.620982 -0.668211 -0.706153 1.466335
+ 2000-01-10 -0.752633 0.328434 -0.815325 0.699674
+ 2000-01-11 -2.236969 0.615737 -0.829076 -1.196106
+ ... ... ... ... ...
+ 2000-02-03 1.642618 -0.579288 0.046005 1.385249
+ 2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351
+ 2000-02-07 -2.656149 -0.601387 1.410148 0.444150
+ 2000-02-08 -1.201881 -1.289040 0.772992 -1.445300
+ 2000-02-09 1.377373 0.398619 1.008453 -0.928207
+ 2000-02-10 0.473194 -0.636677 0.984058 0.511519
+ 2000-02-11 -0.965556 0.408313 -1.312844 -0.381948
+
+ [30 rows x 4 columns]
+ """
+ return DataFrame(tm.getTimeSeriesData())
+
+
@pytest.fixture
def float_frame():
"""
diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py
index e89b2c6f1fec0..486d140849159 100644
--- a/pandas/tests/frame/conftest.py
+++ b/pandas/tests/frame/conftest.py
@@ -79,66 +79,6 @@ def bool_frame_with_na():
return df
-@pytest.fixture
-def int_frame():
- """
- Fixture for DataFrame of ints with index of unique strings
-
- Columns are ['A', 'B', 'C', 'D']
-
- A B C D
- vpBeWjM651 1 0 1 0
- 5JyxmrP1En -1 0 0 0
- qEDaoD49U2 -1 1 0 0
- m66TkTfsFe 0 0 0 0
- EHPaNzEUFm -1 0 -1 0
- fpRJCevQhi 2 0 0 0
- OlQvnmfi3Q 0 0 -2 0
- ... .. .. .. ..
- uB1FPlz4uP 0 0 0 1
- EcSe6yNzCU 0 0 -1 0
- L50VudaiI8 -1 1 -2 0
- y3bpw4nwIp 0 -1 0 0
- H0RdLLwrCT 1 1 0 0
- rY82K0vMwm 0 0 0 0
- 1OPIUjnkjk 2 0 0 0
-
- [30 rows x 4 columns]
- """
- df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()})
- # force these all to int64 to avoid platform testing issues
- return DataFrame({c: s for c, s in df.items()}, dtype=np.int64)
-
-
-@pytest.fixture
-def datetime_frame():
- """
- Fixture for DataFrame of floats with DatetimeIndex
-
- Columns are ['A', 'B', 'C', 'D']
-
- A B C D
- 2000-01-03 -1.122153 0.468535 0.122226 1.693711
- 2000-01-04 0.189378 0.486100 0.007864 -1.216052
- 2000-01-05 0.041401 -0.835752 -0.035279 -0.414357
- 2000-01-06 0.430050 0.894352 0.090719 0.036939
- 2000-01-07 -0.620982 -0.668211 -0.706153 1.466335
- 2000-01-10 -0.752633 0.328434 -0.815325 0.699674
- 2000-01-11 -2.236969 0.615737 -0.829076 -1.196106
- ... ... ... ... ...
- 2000-02-03 1.642618 -0.579288 0.046005 1.385249
- 2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351
- 2000-02-07 -2.656149 -0.601387 1.410148 0.444150
- 2000-02-08 -1.201881 -1.289040 0.772992 -1.445300
- 2000-02-09 1.377373 0.398619 1.008453 -0.928207
- 2000-02-10 0.473194 -0.636677 0.984058 0.511519
- 2000-02-11 -0.965556 0.408313 -1.312844 -0.381948
-
- [30 rows x 4 columns]
- """
- return DataFrame(tm.getTimeSeriesData())
-
-
@pytest.fixture
def float_string_frame():
"""
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 6a7a81e88d318..b74abc965f7fa 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -16,20 +16,15 @@
import pandas._testing as tm
_seriesd = tm.getSeriesData()
-_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
-_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
-_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
-_mixed_frame = _frame.copy()
-
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
@@ -43,17 +38,10 @@ def assert_json_roundtrip_equal(result, expected, orient):
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
- self.intframe = _intframe.copy()
- self.tsframe = _tsframe.copy()
- self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
- del self.intframe
- del self.tsframe
- del self.mixed_frame
-
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
@@ -137,12 +125,12 @@ def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
- def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
- data = self.intframe.to_json(orient=orient)
+ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype, int_frame):
+ data = int_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
- expected = self.intframe.copy()
+ expected = int_frame
if (
numpy
and (is_platform_32bit() or is_platform_windows())
@@ -236,13 +224,13 @@ def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
- def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
+ def test_roundtrip_timestamp(self, orient, convert_axes, numpy, datetime_frame):
# TODO: improve coverage with date_format parameter
- data = self.tsframe.to_json(orient=orient)
+ data = datetime_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
- expected = self.tsframe.copy()
+ expected = datetime_frame.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
@@ -730,23 +718,22 @@ def test_reconstruction_index(self):
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
- def test_path(self, float_frame):
+ def test_path(self, float_frame, int_frame, datetime_frame):
with tm.ensure_clean("test.json") as path:
for df in [
float_frame,
- self.intframe,
- self.tsframe,
- self.mixed_frame,
+ int_frame,
+ datetime_frame,
]:
df.to_json(path)
read_json(path)
- def test_axis_dates(self, datetime_series):
+ def test_axis_dates(self, datetime_series, datetime_frame):
# frame
- json = self.tsframe.to_json()
+ json = datetime_frame.to_json()
result = read_json(json)
- tm.assert_frame_equal(result, self.tsframe)
+ tm.assert_frame_equal(result, datetime_frame)
# series
json = datetime_series.to_json()
@@ -754,10 +741,10 @@ def test_axis_dates(self, datetime_series):
tm.assert_series_equal(result, datetime_series, check_names=False)
assert result.name is None
- def test_convert_dates(self, datetime_series):
+ def test_convert_dates(self, datetime_series, datetime_frame):
# frame
- df = self.tsframe.copy()
+ df = datetime_frame
df["date"] = Timestamp("20130101")
json = df.to_json()
@@ -837,8 +824,8 @@ def test_convert_dates_infer(self, infer_word):
("20130101 20:43:42.123456789", "ns"),
],
)
- def test_date_format_frame(self, date, date_unit):
- df = self.tsframe.copy()
+ def test_date_format_frame(self, date, date_unit, datetime_frame):
+ df = datetime_frame
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
@@ -853,8 +840,8 @@ def test_date_format_frame(self, date, date_unit):
expected["date"] = expected["date"].dt.tz_localize("UTC")
tm.assert_frame_equal(result, expected)
- def test_date_format_frame_raises(self):
- df = self.tsframe.copy()
+ def test_date_format_frame_raises(self, datetime_frame):
+ df = datetime_frame
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
@@ -890,8 +877,8 @@ def test_date_format_series_raises(self, datetime_series):
ts.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
- def test_date_unit(self, unit):
- df = self.tsframe.copy()
+ def test_date_unit(self, unit, datetime_frame):
+ df = datetime_frame
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
| https://api.github.com/repos/pandas-dev/pandas/pulls/33163 | 2020-03-31T02:22:57Z | 2020-03-31T16:39:21Z | 2020-03-31T16:39:21Z | 2023-04-12T20:17:14Z | |
DOC: Make doc decorator a class and replace Appender by doc | diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 9121e00b3e8d6..3c1602344c314 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -757,7 +757,7 @@ def _validate_shift_value(self, fill_value):
"will raise in a future version, pass "
f"{self._scalar_type.__name__} instead.",
FutureWarning,
- stacklevel=10,
+ stacklevel=8,
)
fill_value = new_fill
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 8aced50b78ae2..10fd7e5680ff9 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -35,7 +35,7 @@
to_dt64D,
)
from pandas.errors import AbstractMethodError
-from pandas.util._decorators import Appender, Substitution, cache_readonly
+from pandas.util._decorators import cache_readonly, doc
__all__ = [
"Day",
@@ -877,11 +877,12 @@ class BusinessMonthBegin(MonthOffset):
_day_opt = "business_start"
+@doc(bound="bound")
class _CustomBusinessMonth(CustomMixin, BusinessMixin, MonthOffset):
"""
DateOffset subclass representing custom business month(s).
- Increments between %(bound)s of month dates.
+ Increments between {bound} of month dates.
Parameters
----------
@@ -971,14 +972,12 @@ def apply(self, other):
return result
-@Substitution(bound="end")
-@Appender(_CustomBusinessMonth.__doc__)
+@doc(_CustomBusinessMonth, bound="end")
class CustomBusinessMonthEnd(_CustomBusinessMonth):
_prefix = "CBM"
-@Substitution(bound="beginning")
-@Appender(_CustomBusinessMonth.__doc__)
+@doc(_CustomBusinessMonth, bound="beginning")
class CustomBusinessMonthBegin(_CustomBusinessMonth):
_prefix = "CBMS"
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 80286d5f138ad..6135ccba1573d 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -329,55 +329,52 @@ def wrapper(*args, **kwargs) -> Callable[..., Any]:
return decorate
-def doc(*args: Union[str, Callable], **kwargs) -> Callable[[F], F]:
+def doc(*docstrings: Union[str, Callable], **params) -> Callable[[F], F]:
"""
A decorator take docstring templates, concatenate them and perform string
substitution on it.
This decorator will add a variable "_docstring_components" to the wrapped
- function to keep track the original docstring template for potential usage.
+ callable to keep track the original docstring template for potential usage.
If it should be consider as a template, it will be saved as a string.
Otherwise, it will be saved as callable, and later user __doc__ and dedent
to get docstring.
Parameters
----------
- *args : str or callable
+ *docstrings : str or callable
The string / docstring / docstring template to be appended in order
- after default docstring under function.
- **kwargs
- The objects which would be used to format docstring template.
+ after default docstring under callable.
+ **params
+ The string which would be used to format docstring template.
"""
- def decorator(func: F) -> F:
- @wraps(func)
- def wrapper(*args, **kwargs) -> Callable:
- return func(*args, **kwargs)
-
+ def decorator(decorated: F) -> F:
# collecting docstring and docstring templates
docstring_components: List[Union[str, Callable]] = []
- if func.__doc__:
- docstring_components.append(dedent(func.__doc__))
+ if decorated.__doc__:
+ docstring_components.append(dedent(decorated.__doc__))
- for arg in args:
- if hasattr(arg, "_docstring_components"):
- docstring_components.extend(arg._docstring_components) # type: ignore
- elif isinstance(arg, str) or arg.__doc__:
- docstring_components.append(arg)
+ for docstring in docstrings:
+ if hasattr(docstring, "_docstring_components"):
+ docstring_components.extend(
+ docstring._docstring_components # type: ignore
+ )
+ elif isinstance(docstring, str) or docstring.__doc__:
+ docstring_components.append(docstring)
# formatting templates and concatenating docstring
- wrapper.__doc__ = "".join(
+ decorated.__doc__ = "".join(
[
- arg.format(**kwargs)
- if isinstance(arg, str)
- else dedent(arg.__doc__ or "")
- for arg in docstring_components
+ component.format(**params)
+ if isinstance(component, str)
+ else dedent(component.__doc__ or "")
+ for component in docstring_components
]
)
- wrapper._docstring_components = docstring_components # type: ignore
-
- return cast(F, wrapper)
+ decorated._docstring_components = docstring_components # type: ignore
+ return decorated
return decorator
| - [x] work for #31942
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33160 | 2020-03-31T01:58:25Z | 2020-05-22T10:04:56Z | 2020-05-22T10:04:56Z | 2020-05-22T10:05:05Z |
CLN: use ._data less in reshape | diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index b4497ce1780e6..10f3e29cc5024 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -395,7 +395,7 @@ def __init__(
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, ABCDataFrame)
if self._is_frame:
- axis = 1 if axis == 0 else 0
+ axis = DataFrame._get_block_manager_axis(axis)
self._is_series = isinstance(sample, ABCSeries)
if not 0 <= axis <= sample.ndim:
@@ -436,7 +436,8 @@ def __init__(
self.objs.append(obj)
# note: this is the BlockManager axis (since DataFrame is transposed)
- self.axis = axis
+ self.bm_axis = axis
+ self.axis = 1 - self.bm_axis if self._is_frame else 0
self.keys = keys
self.names = names or getattr(keys, "names", None)
self.levels = levels
@@ -454,7 +455,7 @@ def get_result(self):
if self._is_series:
# stack blocks
- if self.axis == 0:
+ if self.bm_axis == 0:
name = com.consensus_name_attr(self.objs)
mgr = self.objs[0]._data.concat(
@@ -477,21 +478,22 @@ def get_result(self):
else:
mgrs_indexers = []
for obj in self.objs:
- mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
- if ax == self.axis:
+ # ::-1 to convert BlockManager ax to DataFrame ax
+ if ax == self.bm_axis:
# Suppress reindexing on concat axis
continue
- obj_labels = mgr.axes[ax]
+ # 1-ax to convert BlockManager axis to DataFrame axis
+ obj_labels = obj.axes[1 - ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
- mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=self.copy
+ mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy
)
if not self.copy:
new_data._consolidate_inplace()
@@ -500,7 +502,7 @@ def get_result(self):
return cons(new_data).__finalize__(self, method="concat")
def _get_result_dim(self) -> int:
- if self._is_series and self.axis == 1:
+ if self._is_series and self.bm_axis == 1:
return 2
else:
return self.objs[0].ndim
@@ -508,7 +510,7 @@ def _get_result_dim(self) -> int:
def _get_new_axes(self) -> List[Index]:
ndim = self._get_result_dim()
return [
- self._get_concat_axis() if i == self.axis else self._get_comb_axis(i)
+ self._get_concat_axis() if i == self.bm_axis else self._get_comb_axis(i)
for i in range(ndim)
]
@@ -527,7 +529,7 @@ def _get_concat_axis(self) -> Index:
Return index to be used along concatenation axis.
"""
if self._is_series:
- if self.axis == 0:
+ if self.bm_axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = ibase.default_index(len(self.objs))
@@ -555,7 +557,7 @@ def _get_concat_axis(self) -> Index:
else:
return ensure_index(self.keys).set_names(self.names)
else:
- indexes = [x._data.axes[self.axis] for x in self.objs]
+ indexes = [x.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = ibase.default_index(sum(len(i) for i in indexes))
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 4b1fd73d9950e..e78d5ccaa30c7 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -596,7 +596,11 @@ def __init__(
self.left = self.orig_left = _left
self.right = self.orig_right = _right
self.how = how
- self.axis = axis
+
+ # bm_axis -> the axis on the BlockManager
+ self.bm_axis = axis
+ # axis --> the axis on the Series/DataFrame
+ self.axis = 1 - axis if self.left.ndim == 2 else 0
self.on = com.maybe_make_list(on)
self.left_on = com.maybe_make_list(left_on)
@@ -664,18 +668,17 @@ def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
- ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = _items_overlap_with_suffix(
- ldata.items, lsuf, rdata.items, rsuf
+ self.left._info_axis, lsuf, self.right._info_axis, rsuf
)
lindexers = {1: left_indexer} if left_indexer is not None else {}
rindexers = {1: right_indexer} if right_indexer is not None else {}
result_data = concatenate_block_managers(
- [(ldata, lindexers), (rdata, rindexers)],
+ [(self.left._data, lindexers), (self.right._data, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0,
copy=self.copy,
@@ -864,8 +867,8 @@ def _get_join_indexers(self):
)
def _get_join_info(self):
- left_ax = self.left._data.axes[self.axis]
- right_ax = self.right._data.axes[self.axis]
+ left_ax = self.left.axes[self.axis]
+ right_ax = self.right.axes[self.axis]
if self.left_index and self.right_index and self.how != "asof":
join_index, left_indexer, right_indexer = left_ax.join(
@@ -1478,12 +1481,10 @@ def __init__(
def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
- # this is a bit kludgy
- ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = _items_overlap_with_suffix(
- ldata.items, lsuf, rdata.items, rsuf
+ self.left._info_axis, lsuf, self.right._info_axis, rsuf
)
if self.fill_method == "ffill":
@@ -1497,7 +1498,7 @@ def get_result(self):
rindexers = {1: right_join_indexer} if right_join_indexer is not None else {}
result_data = concatenate_block_managers(
- [(ldata, lindexers), (rdata, rindexers)],
+ [(self.left._data, lindexers), (self.right._data, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0,
copy=self.copy,
| https://api.github.com/repos/pandas-dev/pandas/pulls/33159 | 2020-03-31T01:09:34Z | 2020-03-31T16:42:08Z | 2020-03-31T16:42:08Z | 2020-03-31T17:25:25Z | |
BUG: isna_old with td64, dt64tz, period | diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index d461db2d05f9d..f7b0615366ba0 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -16,30 +16,24 @@
ensure_object,
is_bool_dtype,
is_complex_dtype,
- is_datetime64_dtype,
- is_datetime64tz_dtype,
is_datetimelike_v_numeric,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
- is_period_dtype,
is_scalar,
is_string_dtype,
is_string_like_dtype,
- is_timedelta64_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
- ABCDatetimeArray,
ABCExtensionArray,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
- ABCTimedeltaArray,
)
from pandas.core.dtypes.inference import is_list_like
@@ -139,17 +133,7 @@ def _isna_new(obj):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, type):
return False
- elif isinstance(
- obj,
- (
- ABCSeries,
- np.ndarray,
- ABCIndexClass,
- ABCExtensionArray,
- ABCDatetimeArray,
- ABCTimedeltaArray,
- ),
- ):
+ elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass, ABCExtensionArray)):
return _isna_ndarraylike(obj)
elif isinstance(obj, ABCDataFrame):
return obj.isna()
@@ -158,7 +142,7 @@ def _isna_new(obj):
elif hasattr(obj, "__array__"):
return _isna_ndarraylike(np.asarray(obj))
else:
- return obj is None
+ return False
def _isna_old(obj):
@@ -189,7 +173,7 @@ def _isna_old(obj):
elif hasattr(obj, "__array__"):
return _isna_ndarraylike_old(np.asarray(obj))
else:
- return obj is None
+ return False
_isna = _isna_new
@@ -224,37 +208,14 @@ def _use_inf_as_na(key):
def _isna_ndarraylike(obj):
- is_extension = is_extension_array_dtype(obj)
-
- if not is_extension:
- # Avoid accessing `.values` on things like
- # PeriodIndex, which may be expensive.
- values = getattr(obj, "_values", obj)
- else:
- values = obj
-
+ is_extension = is_extension_array_dtype(obj.dtype)
+ values = getattr(obj, "_values", obj)
dtype = values.dtype
if is_extension:
- if isinstance(obj, (ABCIndexClass, ABCSeries)):
- values = obj._values
- else:
- values = obj
result = values.isna()
- elif isinstance(obj, ABCDatetimeArray):
- return obj.isna()
elif is_string_dtype(dtype):
- # Working around NumPy ticket 1542
- shape = values.shape
-
- if is_string_like_dtype(dtype):
- # object array of strings
- result = np.zeros(values.shape, dtype=bool)
- else:
- # object array of non-strings
- result = np.empty(shape, dtype=bool)
- vec = libmissing.isnaobj(values.ravel())
- result[...] = vec.reshape(shape)
+ result = _isna_string_dtype(values, dtype, old=False)
elif needs_i8_conversion(dtype):
# this is the NaT pattern
@@ -274,17 +235,9 @@ def _isna_ndarraylike_old(obj):
dtype = values.dtype
if is_string_dtype(dtype):
- # Working around NumPy ticket 1542
- shape = values.shape
-
- if is_string_like_dtype(dtype):
- result = np.zeros(values.shape, dtype=bool)
- else:
- result = np.empty(shape, dtype=bool)
- vec = libmissing.isnaobj_old(values.ravel())
- result[:] = vec.reshape(shape)
+ result = _isna_string_dtype(values, dtype, old=True)
- elif is_datetime64_dtype(dtype):
+ elif needs_i8_conversion(dtype):
# this is the NaT pattern
result = values.view("i8") == iNaT
else:
@@ -297,6 +250,24 @@ def _isna_ndarraylike_old(obj):
return result
+def _isna_string_dtype(values: np.ndarray, dtype: np.dtype, old: bool) -> np.ndarray:
+ # Working around NumPy ticket 1542
+ shape = values.shape
+
+ if is_string_like_dtype(dtype):
+ result = np.zeros(values.shape, dtype=bool)
+ else:
+ result = np.empty(shape, dtype=bool)
+ if old:
+ vec = libmissing.isnaobj_old(values.ravel())
+ else:
+ vec = libmissing.isnaobj(values.ravel())
+
+ result[...] = vec.reshape(shape)
+
+ return result
+
+
def notna(obj):
"""
Detect non-missing values for an array-like object.
@@ -556,12 +527,7 @@ def na_value_for_dtype(dtype, compat: bool = True):
if is_extension_array_dtype(dtype):
return dtype.na_value
- if (
- is_datetime64_dtype(dtype)
- or is_datetime64tz_dtype(dtype)
- or is_timedelta64_dtype(dtype)
- or is_period_dtype(dtype)
- ):
+ if needs_i8_conversion(dtype):
return NaT
elif is_float_dtype(dtype):
return np.nan
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index 7ba59786bb0fa..cad46d0a23967 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -185,6 +185,21 @@ def test_isna_datetime(self):
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
+ def test_isna_old_datetimelike(self):
+ # isna_old should work for dt64tz, td64, and period, not just tznaive
+ dti = pd.date_range("2016-01-01", periods=3)
+ dta = dti._data
+ dta[-1] = pd.NaT
+ expected = np.array([False, False, True], dtype=bool)
+
+ objs = [dta, dta.tz_localize("US/Eastern"), dta - dta, dta.to_period("D")]
+
+ for obj in objs:
+ with cf.option_context("mode.use_inf_as_na", True):
+ result = pd.isna(obj)
+
+ tm.assert_numpy_array_equal(result, expected)
+
@pytest.mark.parametrize(
"value, expected",
[
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Started as a CLN branch and i left that in for now, can separate if desired. the bugfix is on L287. | https://api.github.com/repos/pandas-dev/pandas/pulls/33158 | 2020-03-31T00:57:28Z | 2020-03-31T16:48:51Z | 2020-03-31T16:48:51Z | 2020-03-31T17:22:32Z |
Backport PR #33102 on branch 1.0.x (PERF: fix performance regression in memory_usage(deep=True) for object dtype) | diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 2187668c96ca4..a3aff45afa116 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -619,4 +619,17 @@ def time_select_dtypes(self, n):
self.df.select_dtypes(include="int")
+class MemoryUsage:
+ def setup(self):
+ self.df = DataFrame(np.random.randn(100000, 2), columns=list("AB"))
+ self.df2 = self.df.copy()
+ self.df2["A"] = self.df2["A"].astype("object")
+
+ def time_memory_usage(self):
+ self.df.memory_usage(deep=True)
+
+ def time_memory_usage_object_dtype(self):
+ self.df2.memory_usage(deep=True)
+
+
from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/pandas/core/base.py b/pandas/core/base.py
index a46b3256a9d48..2709601eb2d20 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1356,7 +1356,7 @@ def memory_usage(self, deep=False):
v = self.array.nbytes
if deep and is_object_dtype(self) and not PYPY:
- v += lib.memory_usage_of_objects(self.array)
+ v += lib.memory_usage_of_objects(self._values)
return v
@Substitution(
| Backport PR #33102: PERF: fix performance regression in memory_usage(deep=True) for object dtype | https://api.github.com/repos/pandas-dev/pandas/pulls/33157 | 2020-03-31T00:12:22Z | 2020-05-05T15:55:25Z | 2020-05-05T15:55:25Z | 2020-05-05T15:55:25Z |
REF: collect .drop tests | diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py
index 0bc234dcb39aa..177d10cdbf615 100644
--- a/pandas/tests/frame/methods/test_drop.py
+++ b/pandas/tests/frame/methods/test_drop.py
@@ -6,7 +6,7 @@
from pandas.errors import PerformanceWarning
import pandas as pd
-from pandas import DataFrame, Index, MultiIndex
+from pandas import DataFrame, Index, MultiIndex, Series, Timestamp
import pandas._testing as tm
@@ -258,3 +258,162 @@ def test_drop_non_empty_list(self, index, drop_labels):
# GH# 21494
with pytest.raises(KeyError, match="not found in axis"):
pd.DataFrame(index=index).drop(drop_labels)
+
+ def test_mixed_depth_drop(self):
+ arrays = [
+ ["a", "top", "top", "routine1", "routine1", "routine2"],
+ ["", "OD", "OD", "result1", "result2", "result1"],
+ ["", "wx", "wy", "", "", ""],
+ ]
+
+ tuples = sorted(zip(*arrays))
+ index = MultiIndex.from_tuples(tuples)
+ df = DataFrame(np.random.randn(4, 6), columns=index)
+
+ result = df.drop("a", axis=1)
+ expected = df.drop([("a", "", "")], axis=1)
+ tm.assert_frame_equal(expected, result)
+
+ result = df.drop(["top"], axis=1)
+ expected = df.drop([("top", "OD", "wx")], axis=1)
+ expected = expected.drop([("top", "OD", "wy")], axis=1)
+ tm.assert_frame_equal(expected, result)
+
+ result = df.drop(("top", "OD", "wx"), axis=1)
+ expected = df.drop([("top", "OD", "wx")], axis=1)
+ tm.assert_frame_equal(expected, result)
+
+ expected = df.drop([("top", "OD", "wy")], axis=1)
+ expected = df.drop("top", axis=1)
+
+ result = df.drop("result1", level=1, axis=1)
+ expected = df.drop(
+ [("routine1", "result1", ""), ("routine2", "result1", "")], axis=1
+ )
+ tm.assert_frame_equal(expected, result)
+
+ def test_drop_multiindex_other_level_nan(self):
+ # GH#12754
+ df = (
+ DataFrame(
+ {
+ "A": ["one", "one", "two", "two"],
+ "B": [np.nan, 0.0, 1.0, 2.0],
+ "C": ["a", "b", "c", "c"],
+ "D": [1, 2, 3, 4],
+ }
+ )
+ .set_index(["A", "B", "C"])
+ .sort_index()
+ )
+ result = df.drop("c", level="C")
+ expected = DataFrame(
+ [2, 1],
+ columns=["D"],
+ index=pd.MultiIndex.from_tuples(
+ [("one", 0.0, "b"), ("one", np.nan, "a")], names=["A", "B", "C"]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_drop_nonunique(self):
+ df = DataFrame(
+ [
+ ["x-a", "x", "a", 1.5],
+ ["x-a", "x", "a", 1.2],
+ ["z-c", "z", "c", 3.1],
+ ["x-a", "x", "a", 4.1],
+ ["x-b", "x", "b", 5.1],
+ ["x-b", "x", "b", 4.1],
+ ["x-b", "x", "b", 2.2],
+ ["y-a", "y", "a", 1.2],
+ ["z-b", "z", "b", 2.1],
+ ],
+ columns=["var1", "var2", "var3", "var4"],
+ )
+
+ grp_size = df.groupby("var1").size()
+ drop_idx = grp_size.loc[grp_size == 1]
+
+ idf = df.set_index(["var1", "var2", "var3"])
+
+ # it works! GH#2101
+ result = idf.drop(drop_idx.index, level=0).reset_index()
+ expected = df[-df.var1.isin(drop_idx.index)]
+
+ result.index = expected.index
+
+ tm.assert_frame_equal(result, expected)
+
+ def test_drop_level(self):
+ index = MultiIndex(
+ levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
+ codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
+ names=["first", "second"],
+ )
+ frame = DataFrame(
+ np.random.randn(10, 3),
+ index=index,
+ columns=Index(["A", "B", "C"], name="exp"),
+ )
+
+ result = frame.drop(["bar", "qux"], level="first")
+ expected = frame.iloc[[0, 1, 2, 5, 6]]
+ tm.assert_frame_equal(result, expected)
+
+ result = frame.drop(["two"], level="second")
+ expected = frame.iloc[[0, 2, 3, 6, 7, 9]]
+ tm.assert_frame_equal(result, expected)
+
+ result = frame.T.drop(["bar", "qux"], axis=1, level="first")
+ expected = frame.iloc[[0, 1, 2, 5, 6]].T
+ tm.assert_frame_equal(result, expected)
+
+ result = frame.T.drop(["two"], axis=1, level="second")
+ expected = frame.iloc[[0, 2, 3, 6, 7, 9]].T
+ tm.assert_frame_equal(result, expected)
+
+ def test_drop_level_nonunique_datetime(self):
+ # GH#12701
+ idx = Index([2, 3, 4, 4, 5], name="id")
+ idxdt = pd.to_datetime(
+ [
+ "201603231400",
+ "201603231500",
+ "201603231600",
+ "201603231600",
+ "201603231700",
+ ]
+ )
+ df = DataFrame(np.arange(10).reshape(5, 2), columns=list("ab"), index=idx)
+ df["tstamp"] = idxdt
+ df = df.set_index("tstamp", append=True)
+ ts = Timestamp("201603231600")
+ assert df.index.is_unique is False
+
+ result = df.drop(ts, level="tstamp")
+ expected = df.loc[idx != 4]
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("box", [Series, DataFrame])
+ def test_drop_tz_aware_timestamp_across_dst(self, box):
+ # GH#21761
+ start = Timestamp("2017-10-29", tz="Europe/Berlin")
+ end = Timestamp("2017-10-29 04:00:00", tz="Europe/Berlin")
+ index = pd.date_range(start, end, freq="15min")
+ data = box(data=[1] * len(index), index=index)
+ result = data.drop(start)
+ expected_start = Timestamp("2017-10-29 00:15:00", tz="Europe/Berlin")
+ expected_idx = pd.date_range(expected_start, end, freq="15min")
+ expected = box(data=[1] * len(expected_idx), index=expected_idx)
+ tm.assert_equal(result, expected)
+
+ def test_drop_preserve_names(self):
+ index = MultiIndex.from_arrays(
+ [[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]], names=["one", "two"]
+ )
+
+ df = DataFrame(np.random.randn(6, 3), index=index)
+
+ result = df.drop([(0, 2)])
+ assert result.index.names == ("one", "two")
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index 3b3ae074c774a..d9da059eb9e9c 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -500,6 +500,34 @@ def test_contains_with_missing_value(self):
assert np.nan not in idx
assert (1, np.nan) in idx
+ def test_multiindex_contains_dropped(self):
+ # GH#19027
+ # test that dropped MultiIndex levels are not in the MultiIndex
+ # despite continuing to be in the MultiIndex's levels
+ idx = MultiIndex.from_product([[1, 2], [3, 4]])
+ assert 2 in idx
+ idx = idx.drop(2)
+
+ # drop implementation keeps 2 in the levels
+ assert 2 in idx.levels[0]
+ # but it should no longer be in the index itself
+ assert 2 not in idx
+
+ # also applies to strings
+ idx = MultiIndex.from_product([["a", "b"], ["c", "d"]])
+ assert "a" in idx
+ idx = idx.drop("a")
+ assert "a" in idx.levels[0]
+ assert "a" not in idx
+
+ def test_contains_td64_level(self):
+ # GH#24570
+ tx = pd.timedelta_range("09:30:00", "16:00:00", freq="30 min")
+ idx = MultiIndex.from_arrays([tx, np.arange(len(tx))])
+ assert tx[0] in idx
+ assert "element_not_exit" not in idx
+ assert "0 day 09:30:00" in idx
+
def test_timestamp_multiindex_indexer():
# https://github.com/pandas-dev/pandas/issues/26944
diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py
index 0064187a94265..5e5fcd3db88d8 100644
--- a/pandas/tests/indexing/multiindex/test_multiindex.py
+++ b/pandas/tests/indexing/multiindex/test_multiindex.py
@@ -26,26 +26,6 @@ def test_multiindex_perf_warn(self):
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(0,)]
- def test_multiindex_contains_dropped(self):
- # GH 19027
- # test that dropped MultiIndex levels are not in the MultiIndex
- # despite continuing to be in the MultiIndex's levels
- idx = MultiIndex.from_product([[1, 2], [3, 4]])
- assert 2 in idx
- idx = idx.drop(2)
-
- # drop implementation keeps 2 in the levels
- assert 2 in idx.levels[0]
- # but it should no longer be in the index itself
- assert 2 not in idx
-
- # also applies to strings
- idx = MultiIndex.from_product([["a", "b"], ["c", "d"]])
- assert "a" in idx
- idx = idx.drop("a")
- assert "a" in idx.levels[0]
- assert "a" not in idx
-
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
@@ -85,14 +65,6 @@ def test_multi_nan_indexing(self):
)
tm.assert_frame_equal(result, expected)
- def test_contains(self):
- # GH 24570
- tx = pd.timedelta_range("09:30:00", "16:00:00", freq="30 min")
- idx = MultiIndex.from_arrays([tx, np.arange(len(tx))])
- assert tx[0] in idx
- assert "element_not_exit" not in idx
- assert "0 day 09:30:00" in idx
-
def test_nested_tuples_duplicates(self):
# GH#30892
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 84279d874bae1..0fdcc513ee126 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1252,92 +1252,6 @@ def test_level_with_tuples(self):
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
- def test_mixed_depth_drop(self):
- arrays = [
- ["a", "top", "top", "routine1", "routine1", "routine2"],
- ["", "OD", "OD", "result1", "result2", "result1"],
- ["", "wx", "wy", "", "", ""],
- ]
-
- tuples = sorted(zip(*arrays))
- index = MultiIndex.from_tuples(tuples)
- df = DataFrame(randn(4, 6), columns=index)
-
- result = df.drop("a", axis=1)
- expected = df.drop([("a", "", "")], axis=1)
- tm.assert_frame_equal(expected, result)
-
- result = df.drop(["top"], axis=1)
- expected = df.drop([("top", "OD", "wx")], axis=1)
- expected = expected.drop([("top", "OD", "wy")], axis=1)
- tm.assert_frame_equal(expected, result)
-
- result = df.drop(("top", "OD", "wx"), axis=1)
- expected = df.drop([("top", "OD", "wx")], axis=1)
- tm.assert_frame_equal(expected, result)
-
- expected = df.drop([("top", "OD", "wy")], axis=1)
- expected = df.drop("top", axis=1)
-
- result = df.drop("result1", level=1, axis=1)
- expected = df.drop(
- [("routine1", "result1", ""), ("routine2", "result1", "")], axis=1
- )
- tm.assert_frame_equal(expected, result)
-
- def test_drop_multiindex_other_level_nan(self):
- # GH 12754
- df = (
- DataFrame(
- {
- "A": ["one", "one", "two", "two"],
- "B": [np.nan, 0.0, 1.0, 2.0],
- "C": ["a", "b", "c", "c"],
- "D": [1, 2, 3, 4],
- }
- )
- .set_index(["A", "B", "C"])
- .sort_index()
- )
- result = df.drop("c", level="C")
- expected = DataFrame(
- [2, 1],
- columns=["D"],
- index=pd.MultiIndex.from_tuples(
- [("one", 0.0, "b"), ("one", np.nan, "a")], names=["A", "B", "C"]
- ),
- )
- tm.assert_frame_equal(result, expected)
-
- def test_drop_nonunique(self):
- df = DataFrame(
- [
- ["x-a", "x", "a", 1.5],
- ["x-a", "x", "a", 1.2],
- ["z-c", "z", "c", 3.1],
- ["x-a", "x", "a", 4.1],
- ["x-b", "x", "b", 5.1],
- ["x-b", "x", "b", 4.1],
- ["x-b", "x", "b", 2.2],
- ["y-a", "y", "a", 1.2],
- ["z-b", "z", "b", 2.1],
- ],
- columns=["var1", "var2", "var3", "var4"],
- )
-
- grp_size = df.groupby("var1").size()
- drop_idx = grp_size.loc[grp_size == 1]
-
- idf = df.set_index(["var1", "var2", "var3"])
-
- # it works! #2101
- result = idf.drop(drop_idx.index, level=0).reset_index()
- expected = df[-df.var1.isin(drop_idx.index)]
-
- result.index = expected.index
-
- tm.assert_frame_equal(result, expected)
-
def test_mixed_depth_pop(self):
arrays = [
["a", "top", "top", "routine1", "routine1", "routine2"],
@@ -1380,68 +1294,6 @@ def test_reindex_level_partial_selection(self):
result = self.frame.T.loc[:, ["foo", "qux"]]
tm.assert_frame_equal(result, expected.T)
- def test_drop_level(self):
- result = self.frame.drop(["bar", "qux"], level="first")
- expected = self.frame.iloc[[0, 1, 2, 5, 6]]
- tm.assert_frame_equal(result, expected)
-
- result = self.frame.drop(["two"], level="second")
- expected = self.frame.iloc[[0, 2, 3, 6, 7, 9]]
- tm.assert_frame_equal(result, expected)
-
- result = self.frame.T.drop(["bar", "qux"], axis=1, level="first")
- expected = self.frame.iloc[[0, 1, 2, 5, 6]].T
- tm.assert_frame_equal(result, expected)
-
- result = self.frame.T.drop(["two"], axis=1, level="second")
- expected = self.frame.iloc[[0, 2, 3, 6, 7, 9]].T
- tm.assert_frame_equal(result, expected)
-
- def test_drop_level_nonunique_datetime(self):
- # GH 12701
- idx = Index([2, 3, 4, 4, 5], name="id")
- idxdt = pd.to_datetime(
- [
- "201603231400",
- "201603231500",
- "201603231600",
- "201603231600",
- "201603231700",
- ]
- )
- df = DataFrame(np.arange(10).reshape(5, 2), columns=list("ab"), index=idx)
- df["tstamp"] = idxdt
- df = df.set_index("tstamp", append=True)
- ts = Timestamp("201603231600")
- assert df.index.is_unique is False
-
- result = df.drop(ts, level="tstamp")
- expected = df.loc[idx != 4]
- tm.assert_frame_equal(result, expected)
-
- @pytest.mark.parametrize("box", [Series, DataFrame])
- def test_drop_tz_aware_timestamp_across_dst(self, box):
- # GH 21761
- start = Timestamp("2017-10-29", tz="Europe/Berlin")
- end = Timestamp("2017-10-29 04:00:00", tz="Europe/Berlin")
- index = pd.date_range(start, end, freq="15min")
- data = box(data=[1] * len(index), index=index)
- result = data.drop(start)
- expected_start = Timestamp("2017-10-29 00:15:00", tz="Europe/Berlin")
- expected_idx = pd.date_range(expected_start, end, freq="15min")
- expected = box(data=[1] * len(expected_idx), index=expected_idx)
- tm.assert_equal(result, expected)
-
- def test_drop_preserve_names(self):
- index = MultiIndex.from_arrays(
- [[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]], names=["one", "two"]
- )
-
- df = DataFrame(np.random.randn(6, 3), index=index)
-
- result = df.drop([(0, 2)])
- assert result.index.names == ("one", "two")
-
def test_unicode_repr_level_names(self):
index = MultiIndex.from_tuples([(0, 0), (1, 1)], names=["\u0394", "i1"])
| https://api.github.com/repos/pandas-dev/pandas/pulls/33156 | 2020-03-30T23:40:47Z | 2020-03-31T17:07:55Z | 2020-03-31T17:07:55Z | 2020-03-31T17:24:38Z | |
REF: PeriodIndex test_indexing tests | diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index a4c6764d065c9..39688e5b92380 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -216,165 +216,7 @@ def test_getitem_day(self):
s[v]
-class TestWhere:
- @pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
- def test_where(self, klass):
- i = period_range("20130101", periods=5, freq="D")
- cond = [True] * len(i)
- expected = i
- result = i.where(klass(cond))
- tm.assert_index_equal(result, expected)
-
- cond = [False] + [True] * (len(i) - 1)
- expected = PeriodIndex([NaT] + i[1:].tolist(), freq="D")
- result = i.where(klass(cond))
- tm.assert_index_equal(result, expected)
-
- def test_where_other(self):
- i = period_range("20130101", periods=5, freq="D")
- for arr in [np.nan, NaT]:
- result = i.where(notna(i), other=np.nan)
- expected = i
- tm.assert_index_equal(result, expected)
-
- i2 = i.copy()
- i2 = PeriodIndex([NaT, NaT] + i[2:].tolist(), freq="D")
- result = i.where(notna(i2), i2)
- tm.assert_index_equal(result, i2)
-
- i2 = i.copy()
- i2 = PeriodIndex([NaT, NaT] + i[2:].tolist(), freq="D")
- result = i.where(notna(i2), i2.values)
- tm.assert_index_equal(result, i2)
-
- def test_where_invalid_dtypes(self):
- pi = period_range("20130101", periods=5, freq="D")
-
- i2 = pi.copy()
- i2 = PeriodIndex([NaT, NaT] + pi[2:].tolist(), freq="D")
-
- with pytest.raises(TypeError, match="Where requires matching dtype"):
- pi.where(notna(i2), i2.asi8)
-
- with pytest.raises(TypeError, match="Where requires matching dtype"):
- pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
-
- with pytest.raises(TypeError, match="Where requires matching dtype"):
- pi.where(notna(i2), i2.to_timestamp("S"))
-
-
-class TestTake:
- def test_take(self):
- # GH#10295
- idx1 = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
-
- for idx in [idx1]:
- result = idx.take([0])
- assert result == Period("2011-01-01", freq="D")
-
- result = idx.take([5])
- assert result == Period("2011-01-06", freq="D")
-
- result = idx.take([0, 1, 2])
- expected = period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
- tm.assert_index_equal(result, expected)
- assert result.freq == "D"
- assert result.freq == expected.freq
-
- result = idx.take([0, 2, 4])
- expected = PeriodIndex(
- ["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
- )
- tm.assert_index_equal(result, expected)
- assert result.freq == expected.freq
- assert result.freq == "D"
-
- result = idx.take([7, 4, 1])
- expected = PeriodIndex(
- ["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
- )
- tm.assert_index_equal(result, expected)
- assert result.freq == expected.freq
- assert result.freq == "D"
-
- result = idx.take([3, 2, 5])
- expected = PeriodIndex(
- ["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
- )
- tm.assert_index_equal(result, expected)
- assert result.freq == expected.freq
- assert result.freq == "D"
-
- result = idx.take([-3, 2, 5])
- expected = PeriodIndex(
- ["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
- )
- tm.assert_index_equal(result, expected)
- assert result.freq == expected.freq
- assert result.freq == "D"
-
- def test_take_misc(self):
- index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
- expected = PeriodIndex(
- [
- datetime(2010, 1, 6),
- datetime(2010, 1, 7),
- datetime(2010, 1, 9),
- datetime(2010, 1, 13),
- ],
- freq="D",
- name="idx",
- )
-
- taken1 = index.take([5, 6, 8, 12])
- taken2 = index[[5, 6, 8, 12]]
-
- for taken in [taken1, taken2]:
- tm.assert_index_equal(taken, expected)
- assert isinstance(taken, PeriodIndex)
- assert taken.freq == index.freq
- assert taken.name == expected.name
-
- def test_take_fill_value(self):
- # GH#12631
- idx = PeriodIndex(
- ["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
- )
- result = idx.take(np.array([1, 0, -1]))
- expected = PeriodIndex(
- ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
- )
- tm.assert_index_equal(result, expected)
-
- # fill_value
- result = idx.take(np.array([1, 0, -1]), fill_value=True)
- expected = PeriodIndex(
- ["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
- )
- tm.assert_index_equal(result, expected)
-
- # allow_fill=False
- result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
- expected = PeriodIndex(
- ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
- )
- tm.assert_index_equal(result, expected)
-
- msg = (
- "When allow_fill=True and fill_value is not None, "
- "all indices must be >= -1"
- )
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -2]), fill_value=True)
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -5]), fill_value=True)
-
- msg = "index -5 is out of bounds for( axis 0 with)? size 3"
- with pytest.raises(IndexError, match=msg):
- idx.take(np.array([1, -5]))
-
-
-class TestIndexing:
+class TestGetLoc:
def test_get_loc_msg(self):
idx = period_range("2000-1-1", freq="A", periods=10)
bad_period = Period("2012", "A")
@@ -465,153 +307,68 @@ def test_get_loc_integer(self):
with pytest.raises(KeyError, match="46"):
pi2.get_loc(46)
- @pytest.mark.parametrize("freq", ["H", "D"])
- def test_get_value_datetime_hourly(self, freq):
- # get_loc and get_value should treat datetime objects symmetrically
- dti = date_range("2016-01-01", periods=3, freq="MS")
- pi = dti.to_period(freq)
- ser = pd.Series(range(7, 10), index=pi)
+ # TODO: This method came from test_period; de-dup with version above
+ def test_get_loc2(self):
+ idx = period_range("2000-01-01", periods=3)
- ts = dti[0]
+ for method in [None, "pad", "backfill", "nearest"]:
+ assert idx.get_loc(idx[1], method) == 1
+ assert idx.get_loc(idx[1].asfreq("H", how="start"), method) == 1
+ assert idx.get_loc(idx[1].to_timestamp(), method) == 1
+ assert idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method) == 1
+ assert idx.get_loc(str(idx[1]), method) == 1
- assert pi.get_loc(ts) == 0
- assert pi.get_value(ser, ts) == 7
- assert ser[ts] == 7
- assert ser.loc[ts] == 7
+ idx = period_range("2000-01-01", periods=5)[::2]
+ assert idx.get_loc("2000-01-02T12", method="nearest", tolerance="1 day") == 1
+ assert (
+ idx.get_loc("2000-01-02T12", method="nearest", tolerance=Timedelta("1D"))
+ == 1
+ )
+ assert (
+ idx.get_loc(
+ "2000-01-02T12", method="nearest", tolerance=np.timedelta64(1, "D")
+ )
+ == 1
+ )
+ assert (
+ idx.get_loc("2000-01-02T12", method="nearest", tolerance=timedelta(1)) == 1
+ )
- ts2 = ts + Timedelta(hours=3)
- if freq == "H":
- with pytest.raises(KeyError, match="2016-01-01 03:00"):
- pi.get_loc(ts2)
- with pytest.raises(KeyError, match="2016-01-01 03:00"):
- pi.get_value(ser, ts2)
- with pytest.raises(KeyError, match="2016-01-01 03:00"):
- ser[ts2]
- with pytest.raises(KeyError, match="2016-01-01 03:00"):
- ser.loc[ts2]
- else:
- assert pi.get_loc(ts2) == 0
- assert pi.get_value(ser, ts2) == 7
- assert ser[ts2] == 7
- assert ser.loc[ts2] == 7
+ msg = "unit abbreviation w/o a number"
+ with pytest.raises(ValueError, match=msg):
+ idx.get_loc("2000-01-10", method="nearest", tolerance="foo")
- def test_get_value_integer(self):
- msg = "index 16801 is out of bounds for axis 0 with size 3"
- dti = date_range("2016-01-01", periods=3)
- pi = dti.to_period("D")
- ser = pd.Series(range(3), index=pi)
- with pytest.raises(IndexError, match=msg):
- pi.get_value(ser, 16801)
+ msg = "Input has different freq=None from PeriodArray\\(freq=D\\)"
+ with pytest.raises(ValueError, match=msg):
+ idx.get_loc("2000-01-10", method="nearest", tolerance="1 hour")
+ with pytest.raises(KeyError, match=r"^Period\('2000-01-10', 'D'\)$"):
+ idx.get_loc("2000-01-10", method="nearest", tolerance="1 day")
+ with pytest.raises(
+ ValueError, match="list-like tolerance size must match target index size"
+ ):
+ idx.get_loc(
+ "2000-01-10",
+ method="nearest",
+ tolerance=[
+ Timedelta("1 day").to_timedelta64(),
+ Timedelta("1 day").to_timedelta64(),
+ ],
+ )
- msg = "index 46 is out of bounds for axis 0 with size 3"
- pi2 = dti.to_period("Y") # duplicates, ordinals are all 46
- ser2 = pd.Series(range(3), index=pi2)
- with pytest.raises(IndexError, match=msg):
- pi2.get_value(ser2, 46)
- def test_is_monotonic_increasing(self):
+class TestGetIndexer:
+ def test_get_indexer(self):
# GH 17717
- p0 = Period("2017-09-01")
- p1 = Period("2017-09-02")
- p2 = Period("2017-09-03")
-
- idx_inc0 = PeriodIndex([p0, p1, p2])
- idx_inc1 = PeriodIndex([p0, p1, p1])
- idx_dec0 = PeriodIndex([p2, p1, p0])
- idx_dec1 = PeriodIndex([p2, p1, p1])
- idx = PeriodIndex([p1, p2, p0])
+ p1 = Period("2017-09-01")
+ p2 = Period("2017-09-04")
+ p3 = Period("2017-09-07")
- assert idx_inc0.is_monotonic_increasing is True
- assert idx_inc1.is_monotonic_increasing is True
- assert idx_dec0.is_monotonic_increasing is False
- assert idx_dec1.is_monotonic_increasing is False
- assert idx.is_monotonic_increasing is False
+ tp0 = Period("2017-08-31")
+ tp1 = Period("2017-09-02")
+ tp2 = Period("2017-09-05")
+ tp3 = Period("2017-09-09")
- def test_is_monotonic_decreasing(self):
- # GH 17717
- p0 = Period("2017-09-01")
- p1 = Period("2017-09-02")
- p2 = Period("2017-09-03")
-
- idx_inc0 = PeriodIndex([p0, p1, p2])
- idx_inc1 = PeriodIndex([p0, p1, p1])
- idx_dec0 = PeriodIndex([p2, p1, p0])
- idx_dec1 = PeriodIndex([p2, p1, p1])
- idx = PeriodIndex([p1, p2, p0])
-
- assert idx_inc0.is_monotonic_decreasing is False
- assert idx_inc1.is_monotonic_decreasing is False
- assert idx_dec0.is_monotonic_decreasing is True
- assert idx_dec1.is_monotonic_decreasing is True
- assert idx.is_monotonic_decreasing is False
-
- def test_contains(self):
- # GH 17717
- p0 = Period("2017-09-01")
- p1 = Period("2017-09-02")
- p2 = Period("2017-09-03")
- p3 = Period("2017-09-04")
-
- ps0 = [p0, p1, p2]
- idx0 = PeriodIndex(ps0)
- ser = pd.Series(range(6, 9), index=idx0)
-
- for p in ps0:
- assert p in idx0
- assert str(p) in idx0
-
- # GH#31172
- # Higher-resolution period-like are _not_ considered as contained
- key = "2017-09-01 00:00:01"
- assert key not in idx0
- with pytest.raises(KeyError, match=key):
- idx0.get_loc(key)
- with pytest.raises(KeyError, match=key):
- idx0.get_value(ser, key)
-
- assert "2017-09" in idx0
-
- assert p3 not in idx0
-
- def test_get_value(self):
- # GH 17717
- p0 = Period("2017-09-01")
- p1 = Period("2017-09-02")
- p2 = Period("2017-09-03")
-
- idx0 = PeriodIndex([p0, p1, p2])
- input0 = pd.Series(np.array([1, 2, 3]), index=idx0)
- expected0 = 2
-
- result0 = idx0.get_value(input0, p1)
- assert result0 == expected0
-
- idx1 = PeriodIndex([p1, p1, p2])
- input1 = pd.Series(np.array([1, 2, 3]), index=idx1)
- expected1 = input1.iloc[[0, 1]]
-
- result1 = idx1.get_value(input1, p1)
- tm.assert_series_equal(result1, expected1)
-
- idx2 = PeriodIndex([p1, p2, p1])
- input2 = pd.Series(np.array([1, 2, 3]), index=idx2)
- expected2 = input2.iloc[[0, 2]]
-
- result2 = idx2.get_value(input2, p1)
- tm.assert_series_equal(result2, expected2)
-
- def test_get_indexer(self):
- # GH 17717
- p1 = Period("2017-09-01")
- p2 = Period("2017-09-04")
- p3 = Period("2017-09-07")
-
- tp0 = Period("2017-08-31")
- tp1 = Period("2017-09-02")
- tp2 = Period("2017-09-05")
- tp3 = Period("2017-09-09")
-
- idx = PeriodIndex([p1, p2, p3])
+ idx = PeriodIndex([p1, p2, p3])
tm.assert_numpy_array_equal(
idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
@@ -677,54 +434,6 @@ def test_get_indexer_non_unique(self):
tm.assert_numpy_array_equal(result[0], expected_indexer)
tm.assert_numpy_array_equal(result[1], expected_missing)
- # TODO: This method came from test_period; de-dup with version above
- def test_get_loc2(self):
- idx = period_range("2000-01-01", periods=3)
-
- for method in [None, "pad", "backfill", "nearest"]:
- assert idx.get_loc(idx[1], method) == 1
- assert idx.get_loc(idx[1].asfreq("H", how="start"), method) == 1
- assert idx.get_loc(idx[1].to_timestamp(), method) == 1
- assert idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method) == 1
- assert idx.get_loc(str(idx[1]), method) == 1
-
- idx = period_range("2000-01-01", periods=5)[::2]
- assert idx.get_loc("2000-01-02T12", method="nearest", tolerance="1 day") == 1
- assert (
- idx.get_loc("2000-01-02T12", method="nearest", tolerance=Timedelta("1D"))
- == 1
- )
- assert (
- idx.get_loc(
- "2000-01-02T12", method="nearest", tolerance=np.timedelta64(1, "D")
- )
- == 1
- )
- assert (
- idx.get_loc("2000-01-02T12", method="nearest", tolerance=timedelta(1)) == 1
- )
-
- msg = "unit abbreviation w/o a number"
- with pytest.raises(ValueError, match=msg):
- idx.get_loc("2000-01-10", method="nearest", tolerance="foo")
-
- msg = "Input has different freq=None from PeriodArray\\(freq=D\\)"
- with pytest.raises(ValueError, match=msg):
- idx.get_loc("2000-01-10", method="nearest", tolerance="1 hour")
- with pytest.raises(KeyError, match=r"^Period\('2000-01-10', 'D'\)$"):
- idx.get_loc("2000-01-10", method="nearest", tolerance="1 day")
- with pytest.raises(
- ValueError, match="list-like tolerance size must match target index size"
- ):
- idx.get_loc(
- "2000-01-10",
- method="nearest",
- tolerance=[
- Timedelta("1 day").to_timedelta64(),
- Timedelta("1 day").to_timedelta64(),
- ],
- )
-
# TODO: This method came from test_period; de-dup with version above
def test_get_indexer2(self):
idx = period_range("2000-01-01", periods=3).asfreq("H", how="start")
@@ -778,23 +487,266 @@ def test_get_indexer2(self):
):
idx.get_indexer(target, "nearest", tolerance=tol_bad)
- def test_indexing(self):
- # GH 4390, iat incorrectly indexing
- index = period_range("1/1/2001", periods=10)
- s = Series(np.random.randn(10), index=index)
- expected = s[index[0]]
- result = s.iat[0]
- assert expected == result
-
- def test_period_index_indexer(self):
- # GH4125
- idx = period_range("2002-01", "2003-12", freq="M")
- df = pd.DataFrame(np.random.randn(24, 10), index=idx)
- tm.assert_frame_equal(df, df.loc[idx])
- tm.assert_frame_equal(df, df.loc[list(idx)])
- tm.assert_frame_equal(df, df.loc[list(idx)])
- tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
- tm.assert_frame_equal(df, df.loc[list(idx)])
+
+class TestWhere:
+ @pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
+ def test_where(self, klass):
+ i = period_range("20130101", periods=5, freq="D")
+ cond = [True] * len(i)
+ expected = i
+ result = i.where(klass(cond))
+ tm.assert_index_equal(result, expected)
+
+ cond = [False] + [True] * (len(i) - 1)
+ expected = PeriodIndex([NaT] + i[1:].tolist(), freq="D")
+ result = i.where(klass(cond))
+ tm.assert_index_equal(result, expected)
+
+ def test_where_other(self):
+ i = period_range("20130101", periods=5, freq="D")
+ for arr in [np.nan, NaT]:
+ result = i.where(notna(i), other=np.nan)
+ expected = i
+ tm.assert_index_equal(result, expected)
+
+ i2 = i.copy()
+ i2 = PeriodIndex([NaT, NaT] + i[2:].tolist(), freq="D")
+ result = i.where(notna(i2), i2)
+ tm.assert_index_equal(result, i2)
+
+ i2 = i.copy()
+ i2 = PeriodIndex([NaT, NaT] + i[2:].tolist(), freq="D")
+ result = i.where(notna(i2), i2.values)
+ tm.assert_index_equal(result, i2)
+
+ def test_where_invalid_dtypes(self):
+ pi = period_range("20130101", periods=5, freq="D")
+
+ i2 = pi.copy()
+ i2 = PeriodIndex([NaT, NaT] + pi[2:].tolist(), freq="D")
+
+ with pytest.raises(TypeError, match="Where requires matching dtype"):
+ pi.where(notna(i2), i2.asi8)
+
+ with pytest.raises(TypeError, match="Where requires matching dtype"):
+ pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
+
+ with pytest.raises(TypeError, match="Where requires matching dtype"):
+ pi.where(notna(i2), i2.to_timestamp("S"))
+
+
+class TestTake:
+ def test_take(self):
+ # GH#10295
+ idx1 = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
+
+ for idx in [idx1]:
+ result = idx.take([0])
+ assert result == Period("2011-01-01", freq="D")
+
+ result = idx.take([5])
+ assert result == Period("2011-01-06", freq="D")
+
+ result = idx.take([0, 1, 2])
+ expected = period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
+ tm.assert_index_equal(result, expected)
+ assert result.freq == "D"
+ assert result.freq == expected.freq
+
+ result = idx.take([0, 2, 4])
+ expected = PeriodIndex(
+ ["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
+ assert result.freq == "D"
+
+ result = idx.take([7, 4, 1])
+ expected = PeriodIndex(
+ ["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
+ assert result.freq == "D"
+
+ result = idx.take([3, 2, 5])
+ expected = PeriodIndex(
+ ["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
+ assert result.freq == "D"
+
+ result = idx.take([-3, 2, 5])
+ expected = PeriodIndex(
+ ["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
+ )
+ tm.assert_index_equal(result, expected)
+ assert result.freq == expected.freq
+ assert result.freq == "D"
+
+ def test_take_misc(self):
+ index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
+ expected = PeriodIndex(
+ [
+ datetime(2010, 1, 6),
+ datetime(2010, 1, 7),
+ datetime(2010, 1, 9),
+ datetime(2010, 1, 13),
+ ],
+ freq="D",
+ name="idx",
+ )
+
+ taken1 = index.take([5, 6, 8, 12])
+ taken2 = index[[5, 6, 8, 12]]
+
+ for taken in [taken1, taken2]:
+ tm.assert_index_equal(taken, expected)
+ assert isinstance(taken, PeriodIndex)
+ assert taken.freq == index.freq
+ assert taken.name == expected.name
+
+ def test_take_fill_value(self):
+ # GH#12631
+ idx = PeriodIndex(
+ ["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
+ )
+ result = idx.take(np.array([1, 0, -1]))
+ expected = PeriodIndex(
+ ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
+ )
+ tm.assert_index_equal(result, expected)
+
+ # fill_value
+ result = idx.take(np.array([1, 0, -1]), fill_value=True)
+ expected = PeriodIndex(
+ ["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
+ )
+ tm.assert_index_equal(result, expected)
+
+ # allow_fill=False
+ result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
+ expected = PeriodIndex(
+ ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
+ )
+ tm.assert_index_equal(result, expected)
+
+ msg = (
+ "When allow_fill=True and fill_value is not None, "
+ "all indices must be >= -1"
+ )
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -2]), fill_value=True)
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -5]), fill_value=True)
+
+ msg = "index -5 is out of bounds for( axis 0 with)? size 3"
+ with pytest.raises(IndexError, match=msg):
+ idx.take(np.array([1, -5]))
+
+
+class TestGetValue:
+ def test_get_value(self):
+ # GH 17717
+ p0 = Period("2017-09-01")
+ p1 = Period("2017-09-02")
+ p2 = Period("2017-09-03")
+
+ idx0 = PeriodIndex([p0, p1, p2])
+ input0 = pd.Series(np.array([1, 2, 3]), index=idx0)
+ expected0 = 2
+
+ result0 = idx0.get_value(input0, p1)
+ assert result0 == expected0
+
+ idx1 = PeriodIndex([p1, p1, p2])
+ input1 = pd.Series(np.array([1, 2, 3]), index=idx1)
+ expected1 = input1.iloc[[0, 1]]
+
+ result1 = idx1.get_value(input1, p1)
+ tm.assert_series_equal(result1, expected1)
+
+ idx2 = PeriodIndex([p1, p2, p1])
+ input2 = pd.Series(np.array([1, 2, 3]), index=idx2)
+ expected2 = input2.iloc[[0, 2]]
+
+ result2 = idx2.get_value(input2, p1)
+ tm.assert_series_equal(result2, expected2)
+
+ @pytest.mark.parametrize("freq", ["H", "D"])
+ def test_get_value_datetime_hourly(self, freq):
+ # get_loc and get_value should treat datetime objects symmetrically
+ dti = date_range("2016-01-01", periods=3, freq="MS")
+ pi = dti.to_period(freq)
+ ser = pd.Series(range(7, 10), index=pi)
+
+ ts = dti[0]
+
+ assert pi.get_loc(ts) == 0
+ assert pi.get_value(ser, ts) == 7
+ assert ser[ts] == 7
+ assert ser.loc[ts] == 7
+
+ ts2 = ts + Timedelta(hours=3)
+ if freq == "H":
+ with pytest.raises(KeyError, match="2016-01-01 03:00"):
+ pi.get_loc(ts2)
+ with pytest.raises(KeyError, match="2016-01-01 03:00"):
+ pi.get_value(ser, ts2)
+ with pytest.raises(KeyError, match="2016-01-01 03:00"):
+ ser[ts2]
+ with pytest.raises(KeyError, match="2016-01-01 03:00"):
+ ser.loc[ts2]
+ else:
+ assert pi.get_loc(ts2) == 0
+ assert pi.get_value(ser, ts2) == 7
+ assert ser[ts2] == 7
+ assert ser.loc[ts2] == 7
+
+ def test_get_value_integer(self):
+ msg = "index 16801 is out of bounds for axis 0 with size 3"
+ dti = date_range("2016-01-01", periods=3)
+ pi = dti.to_period("D")
+ ser = pd.Series(range(3), index=pi)
+ with pytest.raises(IndexError, match=msg):
+ pi.get_value(ser, 16801)
+
+ msg = "index 46 is out of bounds for axis 0 with size 3"
+ pi2 = dti.to_period("Y") # duplicates, ordinals are all 46
+ ser2 = pd.Series(range(3), index=pi2)
+ with pytest.raises(IndexError, match=msg):
+ pi2.get_value(ser2, 46)
+
+
+class TestContains:
+ def test_contains(self):
+ # GH 17717
+ p0 = Period("2017-09-01")
+ p1 = Period("2017-09-02")
+ p2 = Period("2017-09-03")
+ p3 = Period("2017-09-04")
+
+ ps0 = [p0, p1, p2]
+ idx0 = PeriodIndex(ps0)
+ ser = pd.Series(range(6, 9), index=idx0)
+
+ for p in ps0:
+ assert p in idx0
+ assert str(p) in idx0
+
+ # GH#31172
+ # Higher-resolution period-like are _not_ considered as contained
+ key = "2017-09-01 00:00:01"
+ assert key not in idx0
+ with pytest.raises(KeyError, match=key):
+ idx0.get_loc(key)
+ with pytest.raises(KeyError, match=key):
+ idx0.get_value(ser, key)
+
+ assert "2017-09" in idx0
+
+ assert p3 not in idx0
class TestAsOfLocs:
diff --git a/pandas/tests/indexes/period/test_monotonic.py b/pandas/tests/indexes/period/test_monotonic.py
new file mode 100644
index 0000000000000..e06e7da1773f5
--- /dev/null
+++ b/pandas/tests/indexes/period/test_monotonic.py
@@ -0,0 +1,39 @@
+from pandas import Period, PeriodIndex
+
+
+def test_is_monotonic_increasing():
+ # GH#17717
+ p0 = Period("2017-09-01")
+ p1 = Period("2017-09-02")
+ p2 = Period("2017-09-03")
+
+ idx_inc0 = PeriodIndex([p0, p1, p2])
+ idx_inc1 = PeriodIndex([p0, p1, p1])
+ idx_dec0 = PeriodIndex([p2, p1, p0])
+ idx_dec1 = PeriodIndex([p2, p1, p1])
+ idx = PeriodIndex([p1, p2, p0])
+
+ assert idx_inc0.is_monotonic_increasing is True
+ assert idx_inc1.is_monotonic_increasing is True
+ assert idx_dec0.is_monotonic_increasing is False
+ assert idx_dec1.is_monotonic_increasing is False
+ assert idx.is_monotonic_increasing is False
+
+
+def test_is_monotonic_decreasing():
+ # GH#17717
+ p0 = Period("2017-09-01")
+ p1 = Period("2017-09-02")
+ p2 = Period("2017-09-03")
+
+ idx_inc0 = PeriodIndex([p0, p1, p2])
+ idx_inc1 = PeriodIndex([p0, p1, p1])
+ idx_dec0 = PeriodIndex([p2, p1, p0])
+ idx_dec1 = PeriodIndex([p2, p1, p1])
+ idx = PeriodIndex([p1, p2, p0])
+
+ assert idx_inc0.is_monotonic_decreasing is False
+ assert idx_inc1.is_monotonic_decreasing is False
+ assert idx_dec0.is_monotonic_decreasing is True
+ assert idx_dec1.is_monotonic_decreasing is True
+ assert idx.is_monotonic_decreasing is False
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index ee92e5a69204d..d1f67981b1ec5 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1095,3 +1095,14 @@ def test_loc_datetimelike_mismatched_dtypes():
with pytest.raises(KeyError, match=msg):
df["a"].loc[tdi]
+
+
+def test_loc_with_period_index_indexer():
+ # GH#4125
+ idx = pd.period_range("2002-01", "2003-12", freq="M")
+ df = pd.DataFrame(np.random.randn(24, 10), index=idx)
+ tm.assert_frame_equal(df, df.loc[idx])
+ tm.assert_frame_equal(df, df.loc[list(idx)])
+ tm.assert_frame_equal(df, df.loc[list(idx)])
+ tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
+ tm.assert_frame_equal(df, df.loc[list(idx)])
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index 25939e63c256b..61d109344568c 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -4,7 +4,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Series, Timedelta, Timestamp, date_range
+from pandas import DataFrame, Series, Timedelta, Timestamp, date_range, period_range
import pandas._testing as tm
from pandas.tests.indexing.common import Base
@@ -302,3 +302,12 @@ def test_iat_dont_wrap_object_datetimelike():
assert result is ser2[1]
assert isinstance(result, timedelta)
assert not isinstance(result, Timedelta)
+
+
+def test_iat_series_with_period_index():
+ # GH 4390, iat incorrectly indexing
+ index = period_range("1/1/2001", periods=10)
+ ser = Series(np.random.randn(10), index=index)
+ expected = ser[index[0]]
+ result = ser.iat[0]
+ assert expected == result
| This should be it for the PeriodIndex tests for now. | https://api.github.com/repos/pandas-dev/pandas/pulls/33154 | 2020-03-30T19:55:53Z | 2020-03-30T20:58:01Z | 2020-03-30T20:58:01Z | 2020-03-30T21:57:59Z |
REF: test_rename_axis | diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py
index e69a562f8214d..ffad526d3f4d1 100644
--- a/pandas/tests/frame/methods/test_rename.py
+++ b/pandas/tests/frame/methods/test_rename.py
@@ -67,30 +67,6 @@ def test_rename_chainmap(self, args, kwargs):
expected = DataFrame({"a": colAData, "b": colBdata})
tm.assert_frame_equal(result, expected)
- @pytest.mark.parametrize(
- "kwargs, rename_index, rename_columns",
- [
- ({"mapper": None, "axis": 0}, True, False),
- ({"mapper": None, "axis": 1}, False, True),
- ({"index": None}, True, False),
- ({"columns": None}, False, True),
- ({"index": None, "columns": None}, True, True),
- ({}, False, False),
- ],
- )
- def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
- # GH 25034
- index = Index(list("abc"), name="foo")
- columns = Index(["col1", "col2"], name="bar")
- data = np.arange(6).reshape(3, 2)
- df = DataFrame(data, index, columns)
-
- result = df.rename_axis(**kwargs)
- expected_index = index.rename(None) if rename_index else index
- expected_columns = columns.rename(None) if rename_columns else columns
- expected = DataFrame(data, expected_index, expected_columns)
- tm.assert_frame_equal(result, expected)
-
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
diff --git a/pandas/tests/frame/methods/test_rename_axis.py b/pandas/tests/frame/methods/test_rename_axis.py
new file mode 100644
index 0000000000000..9b964d842526c
--- /dev/null
+++ b/pandas/tests/frame/methods/test_rename_axis.py
@@ -0,0 +1,105 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame, Index, MultiIndex
+import pandas._testing as tm
+
+
+class TestDataFrameRenameAxis:
+ def test_rename_axis_inplace(self, float_frame):
+ # GH#15704
+ expected = float_frame.rename_axis("foo")
+ result = float_frame.copy()
+ no_return = result.rename_axis("foo", inplace=True)
+
+ assert no_return is None
+ tm.assert_frame_equal(result, expected)
+
+ expected = float_frame.rename_axis("bar", axis=1)
+ result = float_frame.copy()
+ no_return = result.rename_axis("bar", axis=1, inplace=True)
+
+ assert no_return is None
+ tm.assert_frame_equal(result, expected)
+
+ def test_rename_axis_raises(self):
+ # GH#17833
+ df = DataFrame({"A": [1, 2], "B": [1, 2]})
+ with pytest.raises(ValueError, match="Use `.rename`"):
+ df.rename_axis(id, axis=0)
+
+ with pytest.raises(ValueError, match="Use `.rename`"):
+ df.rename_axis({0: 10, 1: 20}, axis=0)
+
+ with pytest.raises(ValueError, match="Use `.rename`"):
+ df.rename_axis(id, axis=1)
+
+ with pytest.raises(ValueError, match="Use `.rename`"):
+ df["A"].rename_axis(id)
+
+ def test_rename_axis_mapper(self):
+ # GH#19978
+ mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
+ df = DataFrame(
+ {"x": list(range(len(mi))), "y": [i * 10 for i in range(len(mi))]}, index=mi
+ )
+
+ # Test for rename of the Index object of columns
+ result = df.rename_axis("cols", axis=1)
+ tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols"))
+
+ # Test for rename of the Index object of columns using dict
+ result = result.rename_axis(columns={"cols": "new"}, axis=1)
+ tm.assert_index_equal(result.columns, Index(["x", "y"], name="new"))
+
+ # Test for renaming index using dict
+ result = df.rename_axis(index={"ll": "foo"})
+ assert result.index.names == ["foo", "nn"]
+
+ # Test for renaming index using a function
+ result = df.rename_axis(index=str.upper, axis=0)
+ assert result.index.names == ["LL", "NN"]
+
+ # Test for renaming index providing complete list
+ result = df.rename_axis(index=["foo", "goo"])
+ assert result.index.names == ["foo", "goo"]
+
+ # Test for changing index and columns at same time
+ sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
+ result = sdf.rename_axis(index="foo", columns="meh")
+ assert result.index.name == "foo"
+ assert result.columns.name == "meh"
+
+ # Test different error cases
+ with pytest.raises(TypeError, match="Must pass"):
+ df.rename_axis(index="wrong")
+
+ with pytest.raises(ValueError, match="Length of names"):
+ df.rename_axis(index=["wrong"])
+
+ with pytest.raises(TypeError, match="bogus"):
+ df.rename_axis(bogus=None)
+
+ @pytest.mark.parametrize(
+ "kwargs, rename_index, rename_columns",
+ [
+ ({"mapper": None, "axis": 0}, True, False),
+ ({"mapper": None, "axis": 1}, False, True),
+ ({"index": None}, True, False),
+ ({"columns": None}, False, True),
+ ({"index": None, "columns": None}, True, True),
+ ({}, False, False),
+ ],
+ )
+ def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
+ # GH 25034
+ index = Index(list("abc"), name="foo")
+ columns = Index(["col1", "col2"], name="bar")
+ data = np.arange(6).reshape(3, 2)
+ df = DataFrame(data, index, columns)
+
+ result = df.rename_axis(**kwargs)
+ expected_index = index.rename(None) if rename_index else index
+ expected_columns = columns.rename(None) if rename_columns else columns
+ expected = DataFrame(data, expected_index, expected_columns)
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index e37170d4155f8..74fe3bfd41b8f 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -615,80 +615,6 @@ def test_dti_set_index_reindex(self):
# Renaming
- def test_rename_axis_inplace(self, float_frame):
- # GH 15704
- expected = float_frame.rename_axis("foo")
- result = float_frame.copy()
- no_return = result.rename_axis("foo", inplace=True)
-
- assert no_return is None
- tm.assert_frame_equal(result, expected)
-
- expected = float_frame.rename_axis("bar", axis=1)
- result = float_frame.copy()
- no_return = result.rename_axis("bar", axis=1, inplace=True)
-
- assert no_return is None
- tm.assert_frame_equal(result, expected)
-
- def test_rename_axis_raises(self):
- # https://github.com/pandas-dev/pandas/issues/17833
- df = DataFrame({"A": [1, 2], "B": [1, 2]})
- with pytest.raises(ValueError, match="Use `.rename`"):
- df.rename_axis(id, axis=0)
-
- with pytest.raises(ValueError, match="Use `.rename`"):
- df.rename_axis({0: 10, 1: 20}, axis=0)
-
- with pytest.raises(ValueError, match="Use `.rename`"):
- df.rename_axis(id, axis=1)
-
- with pytest.raises(ValueError, match="Use `.rename`"):
- df["A"].rename_axis(id)
-
- def test_rename_axis_mapper(self):
- # GH 19978
- mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
- df = DataFrame(
- {"x": list(range(len(mi))), "y": [i * 10 for i in range(len(mi))]}, index=mi
- )
-
- # Test for rename of the Index object of columns
- result = df.rename_axis("cols", axis=1)
- tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols"))
-
- # Test for rename of the Index object of columns using dict
- result = result.rename_axis(columns={"cols": "new"}, axis=1)
- tm.assert_index_equal(result.columns, Index(["x", "y"], name="new"))
-
- # Test for renaming index using dict
- result = df.rename_axis(index={"ll": "foo"})
- assert result.index.names == ["foo", "nn"]
-
- # Test for renaming index using a function
- result = df.rename_axis(index=str.upper, axis=0)
- assert result.index.names == ["LL", "NN"]
-
- # Test for renaming index providing complete list
- result = df.rename_axis(index=["foo", "goo"])
- assert result.index.names == ["foo", "goo"]
-
- # Test for changing index and columns at same time
- sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
- result = sdf.rename_axis(index="foo", columns="meh")
- assert result.index.name == "foo"
- assert result.columns.name == "meh"
-
- # Test different error cases
- with pytest.raises(TypeError, match="Must pass"):
- df.rename_axis(index="wrong")
-
- with pytest.raises(ValueError, match="Length of names"):
- df.rename_axis(index=["wrong"])
-
- with pytest.raises(TypeError, match="bogus"):
- df.rename_axis(bogus=None)
-
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = "name"
diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py
index 20f6cda7cad60..07c02330d85ce 100644
--- a/pandas/tests/generic/test_series.py
+++ b/pandas/tests/generic/test_series.py
@@ -21,15 +21,6 @@ def test_rename_mi(self):
)
s.rename(str.lower)
- @pytest.mark.parametrize("func", ["rename_axis", "_set_axis_name"])
- def test_set_axis_name(self, func):
- s = Series([1, 2, 3], index=["a", "b", "c"])
- name = "foo"
-
- result = methodcaller(func, name)(s)
- assert s.index.name is None
- assert result.index.name == name
-
@pytest.mark.parametrize("func", ["rename_axis", "_set_axis_name"])
def test_set_axis_name_mi(self, func):
s = Series(
diff --git a/pandas/tests/series/methods/test_rename_axis.py b/pandas/tests/series/methods/test_rename_axis.py
new file mode 100644
index 0000000000000..b519dd1144493
--- /dev/null
+++ b/pandas/tests/series/methods/test_rename_axis.py
@@ -0,0 +1,43 @@
+import pytest
+
+from pandas import Index, MultiIndex, Series
+import pandas._testing as tm
+
+
+class TestSeriesRenameAxis:
+ def test_rename_axis_mapper(self):
+ # GH 19978
+ mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
+ ser = Series(list(range(len(mi))), index=mi)
+
+ result = ser.rename_axis(index={"ll": "foo"})
+ assert result.index.names == ["foo", "nn"]
+
+ result = ser.rename_axis(index=str.upper, axis=0)
+ assert result.index.names == ["LL", "NN"]
+
+ result = ser.rename_axis(index=["foo", "goo"])
+ assert result.index.names == ["foo", "goo"]
+
+ with pytest.raises(TypeError, match="unexpected"):
+ ser.rename_axis(columns="wrong")
+
+ def test_rename_axis_inplace(self, datetime_series):
+ # GH 15704
+ expected = datetime_series.rename_axis("foo")
+ result = datetime_series
+ no_return = result.rename_axis("foo", inplace=True)
+
+ assert no_return is None
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}])
+ def test_rename_axis_none(self, kwargs):
+ # GH 25034
+ index = Index(list("abc"), name="foo")
+ ser = Series([1, 2, 3], index=index)
+
+ result = ser.rename_axis(**kwargs)
+ expected_index = index.rename(None) if kwargs else index
+ expected = Series([1, 2, 3], index=expected_index)
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index 769d1ed877a69..c2bb498df2be2 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -3,7 +3,7 @@
import numpy as np
import pytest
-from pandas import Index, MultiIndex, Series
+from pandas import Index, Series
import pandas._testing as tm
@@ -54,43 +54,6 @@ def test_set_index_makes_timeseries(self):
s.index = idx
assert s.index.is_all_dates
- def test_rename_axis_mapper(self):
- # GH 19978
- mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
- s = Series(list(range(len(mi))), index=mi)
-
- result = s.rename_axis(index={"ll": "foo"})
- assert result.index.names == ["foo", "nn"]
-
- result = s.rename_axis(index=str.upper, axis=0)
- assert result.index.names == ["LL", "NN"]
-
- result = s.rename_axis(index=["foo", "goo"])
- assert result.index.names == ["foo", "goo"]
-
- with pytest.raises(TypeError, match="unexpected"):
- s.rename_axis(columns="wrong")
-
- def test_rename_axis_inplace(self, datetime_series):
- # GH 15704
- expected = datetime_series.rename_axis("foo")
- result = datetime_series
- no_return = result.rename_axis("foo", inplace=True)
-
- assert no_return is None
- tm.assert_series_equal(result, expected)
-
- @pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}])
- def test_rename_axis_none(self, kwargs):
- # GH 25034
- index = Index(list("abc"), name="foo")
- df = Series([1, 2, 3], index=index)
-
- result = df.rename_axis(**kwargs)
- expected_index = index.rename(None) if kwargs else index
- expected = Series([1, 2, 3], index=expected_index)
- tm.assert_series_equal(result, expected)
-
def test_set_axis_inplace_axes(self, axis_series):
# GH14636
ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64")
| https://api.github.com/repos/pandas-dev/pandas/pulls/33152 | 2020-03-30T19:01:00Z | 2020-03-30T20:59:41Z | 2020-03-30T20:59:41Z | 2020-03-30T21:58:22Z | |
REF: misplaced DataFrame arithmetic tests | diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 9e0b51767df2c..89f8bc433419b 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -8,7 +8,9 @@
import pytz
import pandas as pd
+from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
+import pandas.core.common as com
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
@@ -841,6 +843,629 @@ def test_align_frame(self):
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize(
+ "op", [operator.add, operator.sub, operator.mul, operator.truediv]
+ )
+ def test_operators_none_as_na(self, op):
+ df = DataFrame(
+ {"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
+ )
+
+ # since filling converts dtypes from object, changed expected to be
+ # object
+ filled = df.fillna(np.nan)
+ result = op(df, 3)
+ expected = op(filled, 3).astype(object)
+ expected[com.isna(expected)] = None
+ tm.assert_frame_equal(result, expected)
+
+ result = op(df, df)
+ expected = op(filled, filled).astype(object)
+ expected[com.isna(expected)] = None
+ tm.assert_frame_equal(result, expected)
+
+ result = op(df, df.fillna(7))
+ tm.assert_frame_equal(result, expected)
+
+ result = op(df.fillna(7), df)
+ tm.assert_frame_equal(result, expected, check_dtype=False)
+
+ @pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
+ # TODO: not sure what's correct here.
+ @pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
+ def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
+ # we are comparing floats vs a string
+ result = getattr(float_frame, op)("foo")
+ assert bool(result.all().all()) is res
+
+ def test_binary_ops_align(self):
+
+ # test aligning binary ops
+
+ # GH 6681
+ index = MultiIndex.from_product(
+ [list("abc"), ["one", "two", "three"], [1, 2, 3]],
+ names=["first", "second", "third"],
+ )
+
+ df = DataFrame(
+ np.arange(27 * 3).reshape(27, 3),
+ index=index,
+ columns=["value1", "value2", "value3"],
+ ).sort_index()
+
+ idx = pd.IndexSlice
+ for op in ["add", "sub", "mul", "div", "truediv"]:
+ opa = getattr(operator, op, None)
+ if opa is None:
+ continue
+
+ x = Series([1.0, 10.0, 100.0], [1, 2, 3])
+ result = getattr(df, op)(x, level="third", axis=0)
+
+ expected = pd.concat(
+ [opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
+ ).sort_index()
+ tm.assert_frame_equal(result, expected)
+
+ x = Series([1.0, 10.0], ["two", "three"])
+ result = getattr(df, op)(x, level="second", axis=0)
+
+ expected = (
+ pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
+ .reindex_like(df)
+ .sort_index()
+ )
+ tm.assert_frame_equal(result, expected)
+
+ # GH9463 (alignment level of dataframe with series)
+
+ midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
+ df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
+ s = pd.Series({"a": 1, "b": 2})
+
+ df2 = df.copy()
+ df2.columns.names = ["lvl0", "lvl1"]
+ s2 = s.copy()
+ s2.index.name = "lvl1"
+
+ # different cases of integer/string level names:
+ res1 = df.mul(s, axis=1, level=1)
+ res2 = df.mul(s2, axis=1, level=1)
+ res3 = df2.mul(s, axis=1, level=1)
+ res4 = df2.mul(s2, axis=1, level=1)
+ res5 = df2.mul(s, axis=1, level="lvl1")
+ res6 = df2.mul(s2, axis=1, level="lvl1")
+
+ exp = DataFrame(
+ np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
+ )
+
+ for res in [res1, res2]:
+ tm.assert_frame_equal(res, exp)
+
+ exp.columns.names = ["lvl0", "lvl1"]
+ for res in [res3, res4, res5, res6]:
+ tm.assert_frame_equal(res, exp)
+
+ def test_add_with_dti_mismatched_tzs(self):
+ base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
+ idx1 = base.tz_convert("Asia/Tokyo")[:2]
+ idx2 = base.tz_convert("US/Eastern")[1:]
+
+ df1 = DataFrame({"A": [1, 2]}, index=idx1)
+ df2 = DataFrame({"A": [1, 1]}, index=idx2)
+ exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
+ tm.assert_frame_equal(df1 + df2, exp)
+
+ def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
+ frame_copy = float_frame.reindex(float_frame.index[::2])
+
+ del frame_copy["D"]
+ frame_copy["C"][:5] = np.nan
+
+ added = float_frame + frame_copy
+
+ indexer = added["A"].dropna().index
+ exp = (float_frame["A"] * 2).copy()
+
+ tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
+
+ exp.loc[~exp.index.isin(indexer)] = np.nan
+ tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
+
+ assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
+
+ # assert(False)
+
+ assert np.isnan(added["D"]).all()
+
+ self_added = float_frame + float_frame
+ tm.assert_index_equal(self_added.index, float_frame.index)
+
+ added_rev = frame_copy + float_frame
+ assert np.isnan(added["D"]).all()
+ assert np.isnan(added_rev["D"]).all()
+
+ # corner cases
+
+ # empty
+ plus_empty = float_frame + DataFrame()
+ assert np.isnan(plus_empty.values).all()
+
+ empty_plus = DataFrame() + float_frame
+ assert np.isnan(empty_plus.values).all()
+
+ empty_empty = DataFrame() + DataFrame()
+ assert empty_empty.empty
+
+ # out of order
+ reverse = float_frame.reindex(columns=float_frame.columns[::-1])
+
+ tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
+
+ # mix vs float64, upcast
+ added = float_frame + mixed_float_frame
+ _check_mixed_float(added, dtype="float64")
+ added = mixed_float_frame + float_frame
+ _check_mixed_float(added, dtype="float64")
+
+ # mix vs mix
+ added = mixed_float_frame + mixed_float_frame
+ _check_mixed_float(added, dtype=dict(C=None))
+
+ # with int
+ added = float_frame + mixed_int_frame
+ _check_mixed_float(added, dtype="float64")
+
+ def test_combine_series(
+ self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
+ ):
+
+ # Series
+ series = float_frame.xs(float_frame.index[0])
+
+ added = float_frame + series
+
+ for key, s in added.items():
+ tm.assert_series_equal(s, float_frame[key] + series[key])
+
+ larger_series = series.to_dict()
+ larger_series["E"] = 1
+ larger_series = Series(larger_series)
+ larger_added = float_frame + larger_series
+
+ for key, s in float_frame.items():
+ tm.assert_series_equal(larger_added[key], s + series[key])
+ assert "E" in larger_added
+ assert np.isnan(larger_added["E"]).all()
+
+ # no upcast needed
+ added = mixed_float_frame + series
+ _check_mixed_float(added)
+
+ # vs mix (upcast) as needed
+ added = mixed_float_frame + series.astype("float32")
+ _check_mixed_float(added, dtype=dict(C=None))
+ added = mixed_float_frame + series.astype("float16")
+ _check_mixed_float(added, dtype=dict(C=None))
+
+ # FIXME: don't leave commented-out
+ # these raise with numexpr.....as we are adding an int64 to an
+ # uint64....weird vs int
+
+ # added = mixed_int_frame + (100*series).astype('int64')
+ # _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
+ # 'int64', D = 'int64'))
+ # added = mixed_int_frame + (100*series).astype('int32')
+ # _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
+ # 'int32', D = 'int64'))
+
+ # TimeSeries
+ ts = datetime_frame["A"]
+
+ # 10890
+ # we no longer allow auto timeseries broadcasting
+ # and require explicit broadcasting
+ added = datetime_frame.add(ts, axis="index")
+
+ for key, col in datetime_frame.items():
+ result = col + ts
+ tm.assert_series_equal(added[key], result, check_names=False)
+ assert added[key].name == key
+ if col.name == ts.name:
+ assert result.name == "A"
+ else:
+ assert result.name is None
+
+ smaller_frame = datetime_frame[:-5]
+ smaller_added = smaller_frame.add(ts, axis="index")
+
+ tm.assert_index_equal(smaller_added.index, datetime_frame.index)
+
+ smaller_ts = ts[:-5]
+ smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
+ tm.assert_frame_equal(smaller_added, smaller_added2)
+
+ # length 0, result is all-nan
+ result = datetime_frame.add(ts[:0], axis="index")
+ expected = DataFrame(
+ np.nan, index=datetime_frame.index, columns=datetime_frame.columns
+ )
+ tm.assert_frame_equal(result, expected)
+
+ # Frame is all-nan
+ result = datetime_frame[:0].add(ts, axis="index")
+ expected = DataFrame(
+ np.nan, index=datetime_frame.index, columns=datetime_frame.columns
+ )
+ tm.assert_frame_equal(result, expected)
+
+ # empty but with non-empty index
+ frame = datetime_frame[:1].reindex(columns=[])
+ result = frame.mul(ts, axis="index")
+ assert len(result) == len(ts)
+
+ def test_combineFunc(self, float_frame, mixed_float_frame):
+ result = float_frame * 2
+ tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
+
+ # vs mix
+ result = mixed_float_frame * 2
+ for c, s in result.items():
+ tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
+ _check_mixed_float(result, dtype=dict(C=None))
+
+ result = DataFrame() * 2
+ assert result.index.equals(DataFrame().index)
+ assert len(result.columns) == 0
+
+ def test_comparisons(self, simple_frame, float_frame):
+ df1 = tm.makeTimeDataFrame()
+ df2 = tm.makeTimeDataFrame()
+
+ row = simple_frame.xs("a")
+ ndim_5 = np.ones(df1.shape + (1, 1, 1))
+
+ def test_comp(func):
+ result = func(df1, df2)
+ tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
+
+ msg = (
+ "Unable to coerce to Series/DataFrame, "
+ "dimension must be <= 2: (30, 4, 1, 1, 1)"
+ )
+ with pytest.raises(ValueError, match=re.escape(msg)):
+ func(df1, ndim_5)
+
+ result2 = func(simple_frame, row)
+ tm.assert_numpy_array_equal(
+ result2.values, func(simple_frame.values, row.values)
+ )
+
+ result3 = func(float_frame, 0)
+ tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
+
+ msg = "Can only compare identically-labeled DataFrame"
+ with pytest.raises(ValueError, match=msg):
+ func(simple_frame, simple_frame[:2])
+
+ test_comp(operator.eq)
+ test_comp(operator.ne)
+ test_comp(operator.lt)
+ test_comp(operator.gt)
+ test_comp(operator.ge)
+ test_comp(operator.le)
+
+ def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
+ # GH 11565
+ df = DataFrame(
+ {x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
+ )
+
+ f = getattr(operator, compare_operators_no_eq_ne)
+ msg = "'[<>]=?' not supported between instances of 'str' and 'int'"
+ with pytest.raises(TypeError, match=msg):
+ f(df, 0)
+
+ def test_comparison_protected_from_errstate(self):
+ missing_df = tm.makeDataFrame()
+ missing_df.iloc[0]["A"] = np.nan
+ with np.errstate(invalid="ignore"):
+ expected = missing_df.values < 0
+ with np.errstate(invalid="raise"):
+ result = (missing_df < 0).values
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_boolean_comparison(self):
+
+ # GH 4576
+ # boolean comparisons with a tuple/list give unexpected results
+ df = DataFrame(np.arange(6).reshape((3, 2)))
+ b = np.array([2, 2])
+ b_r = np.atleast_2d([2, 2])
+ b_c = b_r.T
+ lst = [2, 2, 2]
+ tup = tuple(lst)
+
+ # gt
+ expected = DataFrame([[False, False], [False, True], [True, True]])
+ result = df > b
+ tm.assert_frame_equal(result, expected)
+
+ result = df.values > b
+ tm.assert_numpy_array_equal(result, expected.values)
+
+ msg1d = "Unable to coerce to Series, length must be 2: given 3"
+ msg2d = "Unable to coerce to DataFrame, shape must be"
+ msg2db = "operands could not be broadcast together with shapes"
+ with pytest.raises(ValueError, match=msg1d):
+ # wrong shape
+ df > lst
+
+ with pytest.raises(ValueError, match=msg1d):
+ # wrong shape
+ result = df > tup
+
+ # broadcasts like ndarray (GH#23000)
+ result = df > b_r
+ tm.assert_frame_equal(result, expected)
+
+ result = df.values > b_r
+ tm.assert_numpy_array_equal(result, expected.values)
+
+ with pytest.raises(ValueError, match=msg2d):
+ df > b_c
+
+ with pytest.raises(ValueError, match=msg2db):
+ df.values > b_c
+
+ # ==
+ expected = DataFrame([[False, False], [True, False], [False, False]])
+ result = df == b
+ tm.assert_frame_equal(result, expected)
+
+ with pytest.raises(ValueError, match=msg1d):
+ result = df == lst
+
+ with pytest.raises(ValueError, match=msg1d):
+ result = df == tup
+
+ # broadcasts like ndarray (GH#23000)
+ result = df == b_r
+ tm.assert_frame_equal(result, expected)
+
+ result = df.values == b_r
+ tm.assert_numpy_array_equal(result, expected.values)
+
+ with pytest.raises(ValueError, match=msg2d):
+ df == b_c
+
+ assert df.values.shape != b_c.shape
+
+ # with alignment
+ df = DataFrame(
+ np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
+ )
+ expected.index = df.index
+ expected.columns = df.columns
+
+ with pytest.raises(ValueError, match=msg1d):
+ result = df == lst
+
+ with pytest.raises(ValueError, match=msg1d):
+ result = df == tup
+
+ def test_inplace_ops_alignment(self):
+
+ # inplace ops / ops alignment
+ # GH 8511
+
+ columns = list("abcdefg")
+ X_orig = DataFrame(
+ np.arange(10 * len(columns)).reshape(-1, len(columns)),
+ columns=columns,
+ index=range(10),
+ )
+ Z = 100 * X_orig.iloc[:, 1:-1].copy()
+ block1 = list("bedcf")
+ subs = list("bcdef")
+
+ # add
+ X = X_orig.copy()
+ result1 = (X[block1] + Z).reindex(columns=subs)
+
+ X[block1] += Z
+ result2 = X.reindex(columns=subs)
+
+ X = X_orig.copy()
+ result3 = (X[block1] + Z[block1]).reindex(columns=subs)
+
+ X[block1] += Z[block1]
+ result4 = X.reindex(columns=subs)
+
+ tm.assert_frame_equal(result1, result2)
+ tm.assert_frame_equal(result1, result3)
+ tm.assert_frame_equal(result1, result4)
+
+ # sub
+ X = X_orig.copy()
+ result1 = (X[block1] - Z).reindex(columns=subs)
+
+ X[block1] -= Z
+ result2 = X.reindex(columns=subs)
+
+ X = X_orig.copy()
+ result3 = (X[block1] - Z[block1]).reindex(columns=subs)
+
+ X[block1] -= Z[block1]
+ result4 = X.reindex(columns=subs)
+
+ tm.assert_frame_equal(result1, result2)
+ tm.assert_frame_equal(result1, result3)
+ tm.assert_frame_equal(result1, result4)
+
+ def test_inplace_ops_identity(self):
+
+ # GH 5104
+ # make sure that we are actually changing the object
+ s_orig = Series([1, 2, 3])
+ df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
+
+ # no dtype change
+ s = s_orig.copy()
+ s2 = s
+ s += 1
+ tm.assert_series_equal(s, s2)
+ tm.assert_series_equal(s_orig + 1, s)
+ assert s is s2
+ assert s._data is s2._data
+
+ df = df_orig.copy()
+ df2 = df
+ df += 1
+ tm.assert_frame_equal(df, df2)
+ tm.assert_frame_equal(df_orig + 1, df)
+ assert df is df2
+ assert df._data is df2._data
+
+ # dtype change
+ s = s_orig.copy()
+ s2 = s
+ s += 1.5
+ tm.assert_series_equal(s, s2)
+ tm.assert_series_equal(s_orig + 1.5, s)
+
+ df = df_orig.copy()
+ df2 = df
+ df += 1.5
+ tm.assert_frame_equal(df, df2)
+ tm.assert_frame_equal(df_orig + 1.5, df)
+ assert df is df2
+ assert df._data is df2._data
+
+ # mixed dtype
+ arr = np.random.randint(0, 10, size=5)
+ df_orig = DataFrame({"A": arr.copy(), "B": "foo"})
+ df = df_orig.copy()
+ df2 = df
+ df["A"] += 1
+ expected = DataFrame({"A": arr.copy() + 1, "B": "foo"})
+ tm.assert_frame_equal(df, expected)
+ tm.assert_frame_equal(df2, expected)
+ assert df._data is df2._data
+
+ df = df_orig.copy()
+ df2 = df
+ df["A"] += 1.5
+ expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"})
+ tm.assert_frame_equal(df, expected)
+ tm.assert_frame_equal(df2, expected)
+ assert df._data is df2._data
+
+ @pytest.mark.parametrize(
+ "op",
+ [
+ "add",
+ "and",
+ "div",
+ "floordiv",
+ "mod",
+ "mul",
+ "or",
+ "pow",
+ "sub",
+ "truediv",
+ "xor",
+ ],
+ )
+ def test_inplace_ops_identity2(self, op):
+
+ if op == "div":
+ return
+
+ df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]})
+
+ operand = 2
+ if op in ("and", "or", "xor"):
+ # cannot use floats for boolean ops
+ df["a"] = [True, False, True]
+
+ df_copy = df.copy()
+ iop = f"__i{op}__"
+ op = f"__{op}__"
+
+ # no id change and value is correct
+ getattr(df, iop)(operand)
+ expected = getattr(df_copy, op)(operand)
+ tm.assert_frame_equal(df, expected)
+ expected = id(df)
+ assert id(df) == expected
+
+ def test_alignment_non_pandas(self):
+ index = ["A", "B", "C"]
+ columns = ["X", "Y", "Z"]
+ df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
+
+ align = pd.core.ops._align_method_FRAME
+ for val in [
+ [1, 2, 3],
+ (1, 2, 3),
+ np.array([1, 2, 3], dtype=np.int64),
+ range(1, 4),
+ ]:
+
+ tm.assert_series_equal(
+ align(df, val, "index")[1], Series([1, 2, 3], index=df.index)
+ )
+ tm.assert_series_equal(
+ align(df, val, "columns")[1], Series([1, 2, 3], index=df.columns)
+ )
+
+ # length mismatch
+ msg = "Unable to coerce to Series, length must be 3: given 2"
+ for val in [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]:
+
+ with pytest.raises(ValueError, match=msg):
+ align(df, val, "index")
+
+ with pytest.raises(ValueError, match=msg):
+ align(df, val, "columns")
+
+ val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ tm.assert_frame_equal(
+ align(df, val, "index")[1],
+ DataFrame(val, index=df.index, columns=df.columns),
+ )
+ tm.assert_frame_equal(
+ align(df, val, "columns")[1],
+ DataFrame(val, index=df.index, columns=df.columns),
+ )
+
+ # shape mismatch
+ msg = "Unable to coerce to DataFrame, shape must be"
+ val = np.array([[1, 2, 3], [4, 5, 6]])
+ with pytest.raises(ValueError, match=msg):
+ align(df, val, "index")
+
+ with pytest.raises(ValueError, match=msg):
+ align(df, val, "columns")
+
+ val = np.zeros((3, 3, 3))
+ msg = re.escape(
+ "Unable to coerce to Series/DataFrame, dimension must be <= 2: (3, 3, 3)"
+ )
+ with pytest.raises(ValueError, match=msg):
+ align(df, val, "index")
+ with pytest.raises(ValueError, match=msg):
+ align(df, val, "columns")
+
+ def test_no_warning(self, all_arithmetic_operators):
+ df = pd.DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
+ b = df["B"]
+ with tm.assert_produces_warning(None):
+ getattr(df, all_arithmetic_operators)(b, 0)
+
def test_pow_with_realignment():
# GH#32685 pow has special semantics for operating with null values
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 4e37954726b93..fede1ca23a8ce 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -6,10 +6,8 @@
import pytest
import pandas as pd
-from pandas import DataFrame, MultiIndex, Series
+from pandas import DataFrame, Series
import pandas._testing as tm
-import pandas.core.common as com
-from pandas.tests.frame.common import _check_mixed_float
class TestDataFrameUnaryOperators:
@@ -282,628 +280,3 @@ def test_logical_operators_nans(self, left, right, op, expected):
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
-
-
-class TestDataFrameOperators:
- @pytest.mark.parametrize(
- "op", [operator.add, operator.sub, operator.mul, operator.truediv]
- )
- def test_operators_none_as_na(self, op):
- df = DataFrame(
- {"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
- )
-
- # since filling converts dtypes from object, changed expected to be
- # object
- filled = df.fillna(np.nan)
- result = op(df, 3)
- expected = op(filled, 3).astype(object)
- expected[com.isna(expected)] = None
- tm.assert_frame_equal(result, expected)
-
- result = op(df, df)
- expected = op(filled, filled).astype(object)
- expected[com.isna(expected)] = None
- tm.assert_frame_equal(result, expected)
-
- result = op(df, df.fillna(7))
- tm.assert_frame_equal(result, expected)
-
- result = op(df.fillna(7), df)
- tm.assert_frame_equal(result, expected, check_dtype=False)
-
- @pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
- # TODO: not sure what's correct here.
- @pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
- def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
- # we are comparing floats vs a string
- result = getattr(float_frame, op)("foo")
- assert bool(result.all().all()) is res
-
- def test_binary_ops_align(self):
-
- # test aligning binary ops
-
- # GH 6681
- index = MultiIndex.from_product(
- [list("abc"), ["one", "two", "three"], [1, 2, 3]],
- names=["first", "second", "third"],
- )
-
- df = DataFrame(
- np.arange(27 * 3).reshape(27, 3),
- index=index,
- columns=["value1", "value2", "value3"],
- ).sort_index()
-
- idx = pd.IndexSlice
- for op in ["add", "sub", "mul", "div", "truediv"]:
- opa = getattr(operator, op, None)
- if opa is None:
- continue
-
- x = Series([1.0, 10.0, 100.0], [1, 2, 3])
- result = getattr(df, op)(x, level="third", axis=0)
-
- expected = pd.concat(
- [opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
- ).sort_index()
- tm.assert_frame_equal(result, expected)
-
- x = Series([1.0, 10.0], ["two", "three"])
- result = getattr(df, op)(x, level="second", axis=0)
-
- expected = (
- pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
- .reindex_like(df)
- .sort_index()
- )
- tm.assert_frame_equal(result, expected)
-
- # GH9463 (alignment level of dataframe with series)
-
- midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
- df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
- s = pd.Series({"a": 1, "b": 2})
-
- df2 = df.copy()
- df2.columns.names = ["lvl0", "lvl1"]
- s2 = s.copy()
- s2.index.name = "lvl1"
-
- # different cases of integer/string level names:
- res1 = df.mul(s, axis=1, level=1)
- res2 = df.mul(s2, axis=1, level=1)
- res3 = df2.mul(s, axis=1, level=1)
- res4 = df2.mul(s2, axis=1, level=1)
- res5 = df2.mul(s, axis=1, level="lvl1")
- res6 = df2.mul(s2, axis=1, level="lvl1")
-
- exp = DataFrame(
- np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
- )
-
- for res in [res1, res2]:
- tm.assert_frame_equal(res, exp)
-
- exp.columns.names = ["lvl0", "lvl1"]
- for res in [res3, res4, res5, res6]:
- tm.assert_frame_equal(res, exp)
-
- def test_dti_tz_convert_to_utc(self):
- base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
- idx1 = base.tz_convert("Asia/Tokyo")[:2]
- idx2 = base.tz_convert("US/Eastern")[1:]
-
- df1 = DataFrame({"A": [1, 2]}, index=idx1)
- df2 = DataFrame({"A": [1, 1]}, index=idx2)
- exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
- tm.assert_frame_equal(df1 + df2, exp)
-
- def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
- frame_copy = float_frame.reindex(float_frame.index[::2])
-
- del frame_copy["D"]
- frame_copy["C"][:5] = np.nan
-
- added = float_frame + frame_copy
-
- indexer = added["A"].dropna().index
- exp = (float_frame["A"] * 2).copy()
-
- tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
-
- exp.loc[~exp.index.isin(indexer)] = np.nan
- tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
-
- assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
-
- # assert(False)
-
- assert np.isnan(added["D"]).all()
-
- self_added = float_frame + float_frame
- tm.assert_index_equal(self_added.index, float_frame.index)
-
- added_rev = frame_copy + float_frame
- assert np.isnan(added["D"]).all()
- assert np.isnan(added_rev["D"]).all()
-
- # corner cases
-
- # empty
- plus_empty = float_frame + DataFrame()
- assert np.isnan(plus_empty.values).all()
-
- empty_plus = DataFrame() + float_frame
- assert np.isnan(empty_plus.values).all()
-
- empty_empty = DataFrame() + DataFrame()
- assert empty_empty.empty
-
- # out of order
- reverse = float_frame.reindex(columns=float_frame.columns[::-1])
-
- tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
-
- # mix vs float64, upcast
- added = float_frame + mixed_float_frame
- _check_mixed_float(added, dtype="float64")
- added = mixed_float_frame + float_frame
- _check_mixed_float(added, dtype="float64")
-
- # mix vs mix
- added = mixed_float_frame + mixed_float_frame
- _check_mixed_float(added, dtype=dict(C=None))
-
- # with int
- added = float_frame + mixed_int_frame
- _check_mixed_float(added, dtype="float64")
-
- def test_combine_series(
- self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
- ):
-
- # Series
- series = float_frame.xs(float_frame.index[0])
-
- added = float_frame + series
-
- for key, s in added.items():
- tm.assert_series_equal(s, float_frame[key] + series[key])
-
- larger_series = series.to_dict()
- larger_series["E"] = 1
- larger_series = Series(larger_series)
- larger_added = float_frame + larger_series
-
- for key, s in float_frame.items():
- tm.assert_series_equal(larger_added[key], s + series[key])
- assert "E" in larger_added
- assert np.isnan(larger_added["E"]).all()
-
- # no upcast needed
- added = mixed_float_frame + series
- _check_mixed_float(added)
-
- # vs mix (upcast) as needed
- added = mixed_float_frame + series.astype("float32")
- _check_mixed_float(added, dtype=dict(C=None))
- added = mixed_float_frame + series.astype("float16")
- _check_mixed_float(added, dtype=dict(C=None))
-
- # FIXME: don't leave commented-out
- # these raise with numexpr.....as we are adding an int64 to an
- # uint64....weird vs int
-
- # added = mixed_int_frame + (100*series).astype('int64')
- # _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
- # 'int64', D = 'int64'))
- # added = mixed_int_frame + (100*series).astype('int32')
- # _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
- # 'int32', D = 'int64'))
-
- # TimeSeries
- ts = datetime_frame["A"]
-
- # 10890
- # we no longer allow auto timeseries broadcasting
- # and require explicit broadcasting
- added = datetime_frame.add(ts, axis="index")
-
- for key, col in datetime_frame.items():
- result = col + ts
- tm.assert_series_equal(added[key], result, check_names=False)
- assert added[key].name == key
- if col.name == ts.name:
- assert result.name == "A"
- else:
- assert result.name is None
-
- smaller_frame = datetime_frame[:-5]
- smaller_added = smaller_frame.add(ts, axis="index")
-
- tm.assert_index_equal(smaller_added.index, datetime_frame.index)
-
- smaller_ts = ts[:-5]
- smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
- tm.assert_frame_equal(smaller_added, smaller_added2)
-
- # length 0, result is all-nan
- result = datetime_frame.add(ts[:0], axis="index")
- expected = DataFrame(
- np.nan, index=datetime_frame.index, columns=datetime_frame.columns
- )
- tm.assert_frame_equal(result, expected)
-
- # Frame is all-nan
- result = datetime_frame[:0].add(ts, axis="index")
- expected = DataFrame(
- np.nan, index=datetime_frame.index, columns=datetime_frame.columns
- )
- tm.assert_frame_equal(result, expected)
-
- # empty but with non-empty index
- frame = datetime_frame[:1].reindex(columns=[])
- result = frame.mul(ts, axis="index")
- assert len(result) == len(ts)
-
- def test_combineFunc(self, float_frame, mixed_float_frame):
- result = float_frame * 2
- tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
-
- # vs mix
- result = mixed_float_frame * 2
- for c, s in result.items():
- tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
- _check_mixed_float(result, dtype=dict(C=None))
-
- result = DataFrame() * 2
- assert result.index.equals(DataFrame().index)
- assert len(result.columns) == 0
-
- def test_comparisons(self, simple_frame, float_frame):
- df1 = tm.makeTimeDataFrame()
- df2 = tm.makeTimeDataFrame()
-
- row = simple_frame.xs("a")
- ndim_5 = np.ones(df1.shape + (1, 1, 1))
-
- def test_comp(func):
- result = func(df1, df2)
- tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
-
- msg = (
- "Unable to coerce to Series/DataFrame, "
- "dimension must be <= 2: (30, 4, 1, 1, 1)"
- )
- with pytest.raises(ValueError, match=re.escape(msg)):
- func(df1, ndim_5)
-
- result2 = func(simple_frame, row)
- tm.assert_numpy_array_equal(
- result2.values, func(simple_frame.values, row.values)
- )
-
- result3 = func(float_frame, 0)
- tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
-
- msg = "Can only compare identically-labeled DataFrame"
- with pytest.raises(ValueError, match=msg):
- func(simple_frame, simple_frame[:2])
-
- test_comp(operator.eq)
- test_comp(operator.ne)
- test_comp(operator.lt)
- test_comp(operator.gt)
- test_comp(operator.ge)
- test_comp(operator.le)
-
- def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
- # GH 11565
- df = DataFrame(
- {x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
- )
-
- f = getattr(operator, compare_operators_no_eq_ne)
- msg = "'[<>]=?' not supported between instances of 'str' and 'int'"
- with pytest.raises(TypeError, match=msg):
- f(df, 0)
-
- def test_comparison_protected_from_errstate(self):
- missing_df = tm.makeDataFrame()
- missing_df.iloc[0]["A"] = np.nan
- with np.errstate(invalid="ignore"):
- expected = missing_df.values < 0
- with np.errstate(invalid="raise"):
- result = (missing_df < 0).values
- tm.assert_numpy_array_equal(result, expected)
-
- def test_boolean_comparison(self):
-
- # GH 4576
- # boolean comparisons with a tuple/list give unexpected results
- df = DataFrame(np.arange(6).reshape((3, 2)))
- b = np.array([2, 2])
- b_r = np.atleast_2d([2, 2])
- b_c = b_r.T
- lst = [2, 2, 2]
- tup = tuple(lst)
-
- # gt
- expected = DataFrame([[False, False], [False, True], [True, True]])
- result = df > b
- tm.assert_frame_equal(result, expected)
-
- result = df.values > b
- tm.assert_numpy_array_equal(result, expected.values)
-
- msg1d = "Unable to coerce to Series, length must be 2: given 3"
- msg2d = "Unable to coerce to DataFrame, shape must be"
- msg2db = "operands could not be broadcast together with shapes"
- with pytest.raises(ValueError, match=msg1d):
- # wrong shape
- df > lst
-
- with pytest.raises(ValueError, match=msg1d):
- # wrong shape
- result = df > tup
-
- # broadcasts like ndarray (GH#23000)
- result = df > b_r
- tm.assert_frame_equal(result, expected)
-
- result = df.values > b_r
- tm.assert_numpy_array_equal(result, expected.values)
-
- with pytest.raises(ValueError, match=msg2d):
- df > b_c
-
- with pytest.raises(ValueError, match=msg2db):
- df.values > b_c
-
- # ==
- expected = DataFrame([[False, False], [True, False], [False, False]])
- result = df == b
- tm.assert_frame_equal(result, expected)
-
- with pytest.raises(ValueError, match=msg1d):
- result = df == lst
-
- with pytest.raises(ValueError, match=msg1d):
- result = df == tup
-
- # broadcasts like ndarray (GH#23000)
- result = df == b_r
- tm.assert_frame_equal(result, expected)
-
- result = df.values == b_r
- tm.assert_numpy_array_equal(result, expected.values)
-
- with pytest.raises(ValueError, match=msg2d):
- df == b_c
-
- assert df.values.shape != b_c.shape
-
- # with alignment
- df = DataFrame(
- np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
- )
- expected.index = df.index
- expected.columns = df.columns
-
- with pytest.raises(ValueError, match=msg1d):
- result = df == lst
-
- with pytest.raises(ValueError, match=msg1d):
- result = df == tup
-
- def test_inplace_ops_alignment(self):
-
- # inplace ops / ops alignment
- # GH 8511
-
- columns = list("abcdefg")
- X_orig = DataFrame(
- np.arange(10 * len(columns)).reshape(-1, len(columns)),
- columns=columns,
- index=range(10),
- )
- Z = 100 * X_orig.iloc[:, 1:-1].copy()
- block1 = list("bedcf")
- subs = list("bcdef")
-
- # add
- X = X_orig.copy()
- result1 = (X[block1] + Z).reindex(columns=subs)
-
- X[block1] += Z
- result2 = X.reindex(columns=subs)
-
- X = X_orig.copy()
- result3 = (X[block1] + Z[block1]).reindex(columns=subs)
-
- X[block1] += Z[block1]
- result4 = X.reindex(columns=subs)
-
- tm.assert_frame_equal(result1, result2)
- tm.assert_frame_equal(result1, result3)
- tm.assert_frame_equal(result1, result4)
-
- # sub
- X = X_orig.copy()
- result1 = (X[block1] - Z).reindex(columns=subs)
-
- X[block1] -= Z
- result2 = X.reindex(columns=subs)
-
- X = X_orig.copy()
- result3 = (X[block1] - Z[block1]).reindex(columns=subs)
-
- X[block1] -= Z[block1]
- result4 = X.reindex(columns=subs)
-
- tm.assert_frame_equal(result1, result2)
- tm.assert_frame_equal(result1, result3)
- tm.assert_frame_equal(result1, result4)
-
- def test_inplace_ops_identity(self):
-
- # GH 5104
- # make sure that we are actually changing the object
- s_orig = Series([1, 2, 3])
- df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
-
- # no dtype change
- s = s_orig.copy()
- s2 = s
- s += 1
- tm.assert_series_equal(s, s2)
- tm.assert_series_equal(s_orig + 1, s)
- assert s is s2
- assert s._data is s2._data
-
- df = df_orig.copy()
- df2 = df
- df += 1
- tm.assert_frame_equal(df, df2)
- tm.assert_frame_equal(df_orig + 1, df)
- assert df is df2
- assert df._data is df2._data
-
- # dtype change
- s = s_orig.copy()
- s2 = s
- s += 1.5
- tm.assert_series_equal(s, s2)
- tm.assert_series_equal(s_orig + 1.5, s)
-
- df = df_orig.copy()
- df2 = df
- df += 1.5
- tm.assert_frame_equal(df, df2)
- tm.assert_frame_equal(df_orig + 1.5, df)
- assert df is df2
- assert df._data is df2._data
-
- # mixed dtype
- arr = np.random.randint(0, 10, size=5)
- df_orig = DataFrame({"A": arr.copy(), "B": "foo"})
- df = df_orig.copy()
- df2 = df
- df["A"] += 1
- expected = DataFrame({"A": arr.copy() + 1, "B": "foo"})
- tm.assert_frame_equal(df, expected)
- tm.assert_frame_equal(df2, expected)
- assert df._data is df2._data
-
- df = df_orig.copy()
- df2 = df
- df["A"] += 1.5
- expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"})
- tm.assert_frame_equal(df, expected)
- tm.assert_frame_equal(df2, expected)
- assert df._data is df2._data
-
- @pytest.mark.parametrize(
- "op",
- [
- "add",
- "and",
- "div",
- "floordiv",
- "mod",
- "mul",
- "or",
- "pow",
- "sub",
- "truediv",
- "xor",
- ],
- )
- def test_inplace_ops_identity2(self, op):
-
- if op == "div":
- return
-
- df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]})
-
- operand = 2
- if op in ("and", "or", "xor"):
- # cannot use floats for boolean ops
- df["a"] = [True, False, True]
-
- df_copy = df.copy()
- iop = f"__i{op}__"
- op = f"__{op}__"
-
- # no id change and value is correct
- getattr(df, iop)(operand)
- expected = getattr(df_copy, op)(operand)
- tm.assert_frame_equal(df, expected)
- expected = id(df)
- assert id(df) == expected
-
- def test_alignment_non_pandas(self):
- index = ["A", "B", "C"]
- columns = ["X", "Y", "Z"]
- df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
-
- align = pd.core.ops._align_method_FRAME
- for val in [
- [1, 2, 3],
- (1, 2, 3),
- np.array([1, 2, 3], dtype=np.int64),
- range(1, 4),
- ]:
-
- tm.assert_series_equal(
- align(df, val, "index")[1], Series([1, 2, 3], index=df.index)
- )
- tm.assert_series_equal(
- align(df, val, "columns")[1], Series([1, 2, 3], index=df.columns)
- )
-
- # length mismatch
- msg = "Unable to coerce to Series, length must be 3: given 2"
- for val in [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]:
-
- with pytest.raises(ValueError, match=msg):
- align(df, val, "index")
-
- with pytest.raises(ValueError, match=msg):
- align(df, val, "columns")
-
- val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- tm.assert_frame_equal(
- align(df, val, "index")[1],
- DataFrame(val, index=df.index, columns=df.columns),
- )
- tm.assert_frame_equal(
- align(df, val, "columns")[1],
- DataFrame(val, index=df.index, columns=df.columns),
- )
-
- # shape mismatch
- msg = "Unable to coerce to DataFrame, shape must be"
- val = np.array([[1, 2, 3], [4, 5, 6]])
- with pytest.raises(ValueError, match=msg):
- align(df, val, "index")
-
- with pytest.raises(ValueError, match=msg):
- align(df, val, "columns")
-
- val = np.zeros((3, 3, 3))
- msg = re.escape(
- "Unable to coerce to Series/DataFrame, dimension must be <= 2: (3, 3, 3)"
- )
- with pytest.raises(ValueError, match=msg):
- align(df, val, "index")
- with pytest.raises(ValueError, match=msg):
- align(df, val, "columns")
-
- def test_no_warning(self, all_arithmetic_operators):
- df = pd.DataFrame({"A": [0.0, 0.0], "B": [0.0, None]})
- b = df["B"]
- with tm.assert_produces_warning(None):
- getattr(df, all_arithmetic_operators)(b, 0)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33151 | 2020-03-30T18:48:42Z | 2020-03-30T21:01:18Z | 2020-03-30T21:01:18Z | 2020-03-30T21:56:50Z | |
REF: test_reindex_like | diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 4fa5e4196ae5b..c7bb058cbf151 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -1639,11 +1639,6 @@ def test_reindex_methods(self, method, expected_values):
actual = df.reindex(target, method=method)
tm.assert_frame_equal(expected, actual)
- actual = df.reindex_like(df, method=method, tolerance=0)
- tm.assert_frame_equal(df, actual)
- actual = df.reindex_like(df, method=method, tolerance=[0, 0, 0, 0])
- tm.assert_frame_equal(df, actual)
-
actual = df.reindex(target, method=method, tolerance=1)
tm.assert_frame_equal(expected, actual)
actual = df.reindex(target, method=method, tolerance=[1, 1, 1, 1])
@@ -1664,17 +1659,6 @@ def test_reindex_methods(self, method, expected_values):
actual = df[::-1].reindex(target, method=switched_method)
tm.assert_frame_equal(expected, actual)
- def test_reindex_subclass(self):
- # https://github.com/pandas-dev/pandas/issues/31925
- class MyDataFrame(DataFrame):
- pass
-
- expected = DataFrame()
- df = MyDataFrame()
- result = df.reindex_like(expected)
-
- tm.assert_frame_equal(result, expected)
-
def test_reindex_methods_nearest_special(self):
df = pd.DataFrame({"x": list(range(5))})
target = np.array([-0.1, 0.9, 1.1, 1.5])
diff --git a/pandas/tests/frame/methods/test_reindex_like.py b/pandas/tests/frame/methods/test_reindex_like.py
new file mode 100644
index 0000000000000..ce68ec28eec3d
--- /dev/null
+++ b/pandas/tests/frame/methods/test_reindex_like.py
@@ -0,0 +1,39 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+class TestDataFrameReindexLike:
+ def test_reindex_like(self, float_frame):
+ other = float_frame.reindex(index=float_frame.index[:10], columns=["C", "B"])
+
+ tm.assert_frame_equal(other, float_frame.reindex_like(other))
+
+ @pytest.mark.parametrize(
+ "method,expected_values",
+ [
+ ("nearest", [0, 1, 1, 2]),
+ ("pad", [np.nan, 0, 1, 1]),
+ ("backfill", [0, 1, 2, 2]),
+ ],
+ )
+ def test_reindex_like_methods(self, method, expected_values):
+ df = DataFrame({"x": list(range(5))})
+
+ result = df.reindex_like(df, method=method, tolerance=0)
+ tm.assert_frame_equal(df, result)
+ result = df.reindex_like(df, method=method, tolerance=[0, 0, 0, 0])
+ tm.assert_frame_equal(df, result)
+
+ def test_reindex_like_subclass(self):
+ # https://github.com/pandas-dev/pandas/issues/31925
+ class MyDataFrame(DataFrame):
+ pass
+
+ expected = DataFrame()
+ df = MyDataFrame()
+ result = df.reindex_like(expected)
+
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index 42fb722c92b26..f61512b1a62d9 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -155,11 +155,6 @@ def test_reindex_int(self, int_frame):
smaller = int_frame.reindex(columns=["A", "B"])
assert smaller["A"].dtype == np.int64
- def test_reindex_like(self, float_frame):
- other = float_frame.reindex(index=float_frame.index[:10], columns=["C", "B"])
-
- tm.assert_frame_equal(other, float_frame.reindex_like(other))
-
def test_reindex_columns(self, float_frame):
new_frame = float_frame.reindex(columns=["A", "B", "E"])
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py
index b45f831ff00aa..f2969e15fad8a 100644
--- a/pandas/tests/series/indexing/test_alter_index.py
+++ b/pandas/tests/series/indexing/test_alter_index.py
@@ -1,5 +1,3 @@
-from datetime import datetime
-
import numpy as np
import pytest
@@ -149,25 +147,17 @@ def test_reindex_pad():
def test_reindex_nearest():
s = Series(np.arange(10, dtype="int64"))
target = [0.1, 0.9, 1.5, 2.0]
- actual = s.reindex(target, method="nearest")
+ result = s.reindex(target, method="nearest")
expected = Series(np.around(target).astype("int64"), target)
- tm.assert_series_equal(expected, actual)
-
- actual = s.reindex_like(actual, method="nearest")
- tm.assert_series_equal(expected, actual)
-
- actual = s.reindex_like(actual, method="nearest", tolerance=1)
- tm.assert_series_equal(expected, actual)
- actual = s.reindex_like(actual, method="nearest", tolerance=[1, 2, 3, 4])
- tm.assert_series_equal(expected, actual)
+ tm.assert_series_equal(expected, result)
- actual = s.reindex(target, method="nearest", tolerance=0.2)
+ result = s.reindex(target, method="nearest", tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
- tm.assert_series_equal(expected, actual)
+ tm.assert_series_equal(expected, result)
- actual = s.reindex(target, method="nearest", tolerance=[0.3, 0.01, 0.4, 3])
+ result = s.reindex(target, method="nearest", tolerance=[0.3, 0.01, 0.4, 3])
expected = Series([0, np.nan, np.nan, 2], target)
- tm.assert_series_equal(expected, actual)
+ tm.assert_series_equal(expected, result)
def test_reindex_backfill():
@@ -237,25 +227,6 @@ def test_reindex_categorical():
tm.assert_series_equal(result, expected)
-def test_reindex_like(datetime_series):
- other = datetime_series[::2]
- tm.assert_series_equal(
- datetime_series.reindex(other.index), datetime_series.reindex_like(other)
- )
-
- # GH 7179
- day1 = datetime(2013, 3, 5)
- day2 = datetime(2013, 5, 5)
- day3 = datetime(2014, 3, 5)
-
- series1 = Series([5, None, None], [day1, day2, day3])
- series2 = Series([None, None], [day1, day3])
-
- result = series1.reindex_like(series2, method="pad")
- expected = Series([5, np.nan], index=[day1, day3])
- tm.assert_series_equal(result, expected)
-
-
def test_reindex_fill_value():
# -----------------------------------------------------------
# floats
diff --git a/pandas/tests/series/methods/test_reindex_like.py b/pandas/tests/series/methods/test_reindex_like.py
new file mode 100644
index 0000000000000..7f24c778feb1b
--- /dev/null
+++ b/pandas/tests/series/methods/test_reindex_like.py
@@ -0,0 +1,41 @@
+from datetime import datetime
+
+import numpy as np
+
+from pandas import Series
+import pandas._testing as tm
+
+
+def test_reindex_like(datetime_series):
+ other = datetime_series[::2]
+ tm.assert_series_equal(
+ datetime_series.reindex(other.index), datetime_series.reindex_like(other)
+ )
+
+ # GH#7179
+ day1 = datetime(2013, 3, 5)
+ day2 = datetime(2013, 5, 5)
+ day3 = datetime(2014, 3, 5)
+
+ series1 = Series([5, None, None], [day1, day2, day3])
+ series2 = Series([None, None], [day1, day3])
+
+ result = series1.reindex_like(series2, method="pad")
+ expected = Series([5, np.nan], index=[day1, day3])
+ tm.assert_series_equal(result, expected)
+
+
+def test_reindex_like_nearest():
+ ser = Series(np.arange(10, dtype="int64"))
+
+ target = [0.1, 0.9, 1.5, 2.0]
+ other = ser.reindex(target, method="nearest")
+ expected = Series(np.around(target).astype("int64"), target)
+
+ result = ser.reindex_like(other, method="nearest")
+ tm.assert_series_equal(expected, result)
+
+ result = ser.reindex_like(other, method="nearest", tolerance=1)
+ tm.assert_series_equal(expected, result)
+ result = ser.reindex_like(other, method="nearest", tolerance=[1, 2, 3, 4])
+ tm.assert_series_equal(expected, result)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33150 | 2020-03-30T18:45:37Z | 2020-03-31T17:08:51Z | 2020-03-31T17:08:51Z | 2020-03-31T17:28:08Z | |
DOC: Fix capitalization among headings in documentation files (#32550) | diff --git a/doc/source/user_guide/options.rst b/doc/source/user_guide/options.rst
index 5817efb31814e..398336960e769 100644
--- a/doc/source/user_guide/options.rst
+++ b/doc/source/user_guide/options.rst
@@ -140,7 +140,7 @@ More information can be found in the `ipython documentation
.. _options.frequently_used:
-Frequently Used Options
+Frequently used options
-----------------------
The following is a walk-through of the more frequently used display options.
diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst
index 58733b852e3a1..7e890962d8da1 100644
--- a/doc/source/user_guide/reshaping.rst
+++ b/doc/source/user_guide/reshaping.rst
@@ -272,7 +272,7 @@ the right thing:
.. _reshaping.melt:
-Reshaping by Melt
+Reshaping by melt
-----------------
.. image:: ../_static/reshaping_melt.png
diff --git a/doc/source/user_guide/text.rst b/doc/source/user_guide/text.rst
index 234c12ce79822..bea0f42f6849c 100644
--- a/doc/source/user_guide/text.rst
+++ b/doc/source/user_guide/text.rst
@@ -8,7 +8,7 @@ Working with text data
.. _text.types:
-Text Data Types
+Text data types
---------------
.. versionadded:: 1.0.0
@@ -113,7 +113,7 @@ Everything else that follows in the rest of this document applies equally to
.. _text.string_methods:
-String Methods
+String methods
--------------
Series and Index are equipped with a set of string processing methods
@@ -633,7 +633,7 @@ same result as a ``Series.str.extractall`` with a default index (starts from 0).
pd.Series(["a1a2", "b1", "c1"], dtype="string").str.extractall(two_groups)
-Testing for Strings that match or contain a pattern
+Testing for strings that match or contain a pattern
---------------------------------------------------
You can check whether elements contain a pattern:
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index f208c8d576131..0d49a2d8db77c 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -122,7 +122,7 @@ as ``np.nan`` does for float data.
.. _timeseries.representation:
-Timestamps vs. Time Spans
+Timestamps vs. time spans
-------------------------
Timestamped data is the most basic type of time series data that associates
@@ -1434,7 +1434,7 @@ or calendars with additional rules.
.. _timeseries.advanced_datetime:
-Time Series-Related Instance Methods
+Time series-related instance methods
------------------------------------
Shifting / lagging
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index 756dd06aced7f..451ddf046416e 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -796,7 +796,7 @@ before plotting.
.. _visualization.tools:
-Plotting Tools
+Plotting tools
--------------
These functions can be imported from ``pandas.plotting``
@@ -1045,7 +1045,7 @@ for more information.
.. _visualization.formatting:
-Plot Formatting
+Plot formatting
---------------
Setting the plot style
| Headers updated for the following files:
```
- [x] doc/source/user_guide/options.rst
- [x] doc/source/user_guide/reshaping.rst
- [x] doc/source/user_guide/text.rst
- [x] doc/source/user_guide/timeseries.rst
- [x] doc/source/user_guide/visualization.rst
```
Other files updated in #32944, #32978, #32991 and #33147
| https://api.github.com/repos/pandas-dev/pandas/pulls/33149 | 2020-03-30T18:27:38Z | 2020-03-31T17:24:31Z | 2020-03-31T17:24:31Z | 2020-04-07T18:38:10Z |
More Json parametrize | diff --git a/pandas/conftest.py b/pandas/conftest.py
index ad21d46e601e8..2ee64403c7cf4 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -521,6 +521,11 @@ def index_or_series_obj(request):
# ----------------------------------------------------------------
# DataFrames
# ----------------------------------------------------------------
+@pytest.fixture
+def empty_frame():
+ return DataFrame()
+
+
@pytest.fixture
def float_frame():
"""
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index e13b2b34d611b..6a7a81e88d318 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -19,7 +19,6 @@
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
-_frame2 = DataFrame(_seriesd, columns=["D", "C", "B", "A"])
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
@@ -44,9 +43,6 @@ def assert_json_roundtrip_equal(result, expected, orient):
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
- self.empty_frame = DataFrame()
- self.frame = _frame.copy()
- self.frame2 = _frame2.copy()
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
@@ -54,10 +50,6 @@ def setup(self):
yield
- del self.empty_frame
-
- del self.frame
- del self.frame2
del self.intframe
del self.tsframe
del self.mixed_frame
@@ -126,19 +118,19 @@ def test_frame_non_unique_columns_raises(self, orient):
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
- def test_frame_default_orient(self):
- assert self.frame.to_json() == self.frame.to_json(orient="columns")
+ def test_frame_default_orient(self, float_frame):
+ assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
- def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype):
- data = self.frame.to_json(orient=orient)
+ def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
+ data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
- expected = self.frame.copy()
+ expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@@ -226,12 +218,12 @@ def test_roundtrip_categorical(self, orient, convert_axes, numpy):
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
- def test_roundtrip_empty(self, orient, convert_axes, numpy):
- data = self.empty_frame.to_json(orient=orient)
+ def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
+ data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
- expected = self.empty_frame.copy()
+ expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
@@ -738,11 +730,10 @@ def test_reconstruction_index(self):
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
- def test_path(self):
+ def test_path(self, float_frame):
with tm.ensure_clean("test.json") as path:
for df in [
- self.frame,
- self.frame2,
+ float_frame,
self.intframe,
self.tsframe,
self.mixed_frame,
diff --git a/pandas/tests/resample/conftest.py b/pandas/tests/resample/conftest.py
index fb2111a60a261..fa53e49269f8b 100644
--- a/pandas/tests/resample/conftest.py
+++ b/pandas/tests/resample/conftest.py
@@ -153,7 +153,7 @@ def frame(index, _series_name, _static_values):
@pytest.fixture
-def empty_frame(series):
+def empty_frame_dti(series):
"""
Fixture for parametrization of empty DataFrame with date_range,
period_range and timedelta_range indexes
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index 3384c2a94487b..6384c5f19c898 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -127,9 +127,9 @@ def test_resample_count_empty_series(freq, empty_series_dti, resample_method):
@all_ts
@pytest.mark.parametrize("freq", ["M", "D", "H"])
-def test_resample_empty_dataframe(empty_frame, freq, resample_method):
+def test_resample_empty_dataframe(empty_frame_dti, freq, resample_method):
# GH13212
- df = empty_frame
+ df = empty_frame_dti
# count retains dimensions too
result = getattr(df.resample(freq), resample_method)()
if resample_method != "size":
@@ -149,15 +149,14 @@ def test_resample_empty_dataframe(empty_frame, freq, resample_method):
@all_ts
@pytest.mark.parametrize("freq", ["M", "D", "H"])
-def test_resample_count_empty_dataframe(freq, empty_frame):
+def test_resample_count_empty_dataframe(freq, empty_frame_dti):
# GH28427
- empty_frame = empty_frame.copy()
- empty_frame["a"] = []
+ empty_frame_dti["a"] = []
- result = empty_frame.resample(freq).count()
+ result = empty_frame_dti.resample(freq).count()
- index = _asfreq_compat(empty_frame.index, freq)
+ index = _asfreq_compat(empty_frame_dti.index, freq)
expected = pd.DataFrame({"a": []}, dtype="int64", index=index)
@@ -166,15 +165,14 @@ def test_resample_count_empty_dataframe(freq, empty_frame):
@all_ts
@pytest.mark.parametrize("freq", ["M", "D", "H"])
-def test_resample_size_empty_dataframe(freq, empty_frame):
+def test_resample_size_empty_dataframe(freq, empty_frame_dti):
# GH28427
- empty_frame = empty_frame.copy()
- empty_frame["a"] = []
+ empty_frame_dti["a"] = []
- result = empty_frame.resample(freq).size()
+ result = empty_frame_dti.resample(freq).size()
- index = _asfreq_compat(empty_frame.index, freq)
+ index = _asfreq_compat(empty_frame_dti.index, freq)
expected = pd.Series([], dtype="int64", index=index)
| follow on to #31191 - still a few more passes to go
| https://api.github.com/repos/pandas-dev/pandas/pulls/33148 | 2020-03-30T17:56:19Z | 2020-03-30T21:05:13Z | 2020-03-30T21:05:13Z | 2020-03-30T21:55:42Z |
DOC: Fix capitalization among headings in documentation files (#32550) | diff --git a/doc/source/user_guide/categorical.rst b/doc/source/user_guide/categorical.rst
index a55326db748fd..25885e246bb2a 100644
--- a/doc/source/user_guide/categorical.rst
+++ b/doc/source/user_guide/categorical.rst
@@ -799,7 +799,7 @@ Assigning a ``Categorical`` to parts of a column of other types will use the val
.. _categorical.merge:
.. _categorical.concat:
-Merging / Concatenation
+Merging / concatenation
~~~~~~~~~~~~~~~~~~~~~~~
By default, combining ``Series`` or ``DataFrames`` which contain the same
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index 9951642ca98a4..0b7106aa127e5 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -210,7 +210,7 @@ parameter:
.. _stats.moments:
-Window Functions
+Window functions
----------------
.. currentmodule:: pandas.core.window
@@ -323,7 +323,7 @@ We provide a number of common statistical functions:
.. _stats.rolling_apply:
-Rolling Apply
+Rolling apply
~~~~~~~~~~~~~
The :meth:`~Rolling.apply` function takes an extra ``func`` argument and performs
diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst
index e51b5c9097951..992cdfa5d7332 100644
--- a/doc/source/user_guide/cookbook.rst
+++ b/doc/source/user_guide/cookbook.rst
@@ -179,7 +179,7 @@ One could hard code:
Selection
---------
-DataFrames
+Dataframes
**********
The :ref:`indexing <indexing>` docs.
@@ -290,7 +290,7 @@ Notice the same results, with the exception of the index.
.. _cookbook.multi_index:
-MultiIndexing
+Multiindexing
-------------
The :ref:`multindexing <advanced.hierarchical>` docs.
@@ -913,7 +913,7 @@ The :ref:`Plotting <visualization>` docs.
@savefig quartile_boxplot.png
df.boxplot(column='price', by='quartiles')
-Data In/Out
+Data in/out
-----------
`Performance comparison of SQL vs HDF5
diff --git a/doc/source/user_guide/gotchas.rst b/doc/source/user_guide/gotchas.rst
index f9a72b87e58d8..4c691ebb252e7 100644
--- a/doc/source/user_guide/gotchas.rst
+++ b/doc/source/user_guide/gotchas.rst
@@ -317,7 +317,7 @@ See `this link <https://stackoverflow.com/questions/13592618/python-pandas-dataf
for more information.
-Byte-Ordering issues
+Byte-ordering issues
--------------------
Occasionally you may have to deal with data that were created on a machine with
a different byte order than the one on which you are running Python. A common
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 8cd229070e365..08e657181cba4 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -3,7 +3,7 @@
{{ header }}
*****************************
-Group By: split-apply-combine
+Group by: split-apply-combine
*****************************
By "group by" we are referring to a process involving one or more of the following
| Headers updated for the following files:
```
- [x] doc/source/user_guide/categorical.rst
- [x] doc/source/user_guide/computation.rst
- [x] doc/source/user_guide/cookbook.rst
- [x] doc/source/user_guide/gotchas.rst
- [x] doc/source/user_guide/groupby.rst
```
Other files updated in #32944, #32978 , #32991 and #33149 | https://api.github.com/repos/pandas-dev/pandas/pulls/33147 | 2020-03-30T17:48:56Z | 2020-04-07T01:28:26Z | 2020-04-07T01:28:26Z | 2020-04-07T18:29:58Z |
CLN: Use doc decorator for case using __doc__ | - [ ] works for #31942
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33145 | 2020-03-30T16:02:19Z | 2020-03-30T19:09:54Z | null | 2020-03-31T01:50:19Z | |
add match message for pytest.raises() | diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index da8327f64e26f..a9d219504e809 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -609,7 +609,8 @@ def test_bins_unequal_len():
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
- with pytest.raises(ValueError):
+ msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
+ with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 964cf320a422b..fd23e95106ab0 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -349,7 +349,8 @@ def test_take(self, indices):
if not isinstance(indices, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# GH 10791
- with pytest.raises(AttributeError):
+ msg = r"'(.*Index)' object has no attribute 'freq'"
+ with pytest.raises(AttributeError, match=msg):
indices.freq
def test_take_invalid_kwargs(self):
@@ -537,9 +538,10 @@ def test_delete_base(self, indices):
assert result.equals(expected)
assert result.name == expected.name
- with pytest.raises((IndexError, ValueError)):
- # either depending on numpy version
- indices.delete(len(indices))
+ length = len(indices)
+ msg = f"index {length} is out of bounds for axis 0 with size {length}"
+ with pytest.raises(IndexError, match=msg):
+ indices.delete(length)
def test_equals(self, indices):
if isinstance(indices, IntervalIndex):
@@ -787,13 +789,14 @@ def test_putmask_with_wrong_mask(self):
# GH18368
index = self.create_index()
- with pytest.raises(ValueError):
+ msg = "putmask: mask and data must be the same size"
+ with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) + 1, np.bool), 1)
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) - 1, np.bool), 1)
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
index.putmask("foo", 1)
@pytest.mark.parametrize("copy", [True, False])
@@ -861,10 +864,21 @@ def test_getitem_2d_deprecated(self):
def test_contains_requires_hashable_raises(self):
idx = self.create_index()
- with pytest.raises(TypeError, match="unhashable type"):
+
+ msg = "unhashable type: 'list'"
+ with pytest.raises(TypeError, match=msg):
[] in idx
- with pytest.raises(TypeError):
+ msg = "|".join(
+ [
+ r"unhashable type: 'dict'",
+ r"must be real number, not dict",
+ r"an integer is required",
+ r"\{\}",
+ r"pandas\._libs\.interval\.IntervalTree' is not iterable",
+ ]
+ )
+ with pytest.raises(TypeError, match=msg):
{} in idx._engine
def test_copy_copies_cache(self):
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index ba10976a67e9a..85d670e9dbffa 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -11,14 +11,15 @@
class DatetimeLike(Base):
def test_argmax_axis_invalid(self):
# GH#23081
+ msg = r"`axis` must be fewer than the number of dimensions \(1\)"
rng = self.create_index()
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
rng.argmax(axis=1)
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
rng.argmin(axis=2)
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
rng.min(axis=-2)
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
rng.max(axis=-3)
def test_can_hold_identifiers(self):
diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py
index 9273de9c20412..d1f66af4a8e83 100644
--- a/pandas/tests/indexes/multi/test_compat.py
+++ b/pandas/tests/indexes/multi/test_compat.py
@@ -53,7 +53,11 @@ def test_boolean_context_compat2():
i2 = MultiIndex.from_tuples([("A", 1), ("A", 3)])
common = i1.intersection(i2)
- with pytest.raises(ValueError):
+ msg = (
+ r"The truth value of a MultiIndex is ambiguous\. "
+ r"Use a\.empty, a\.bool\(\), a\.item\(\), a\.any\(\) or a\.all\(\)\."
+ )
+ with pytest.raises(ValueError, match=msg):
bool(common)
diff --git a/pandas/tests/indexes/multi/test_reshape.py b/pandas/tests/indexes/multi/test_reshape.py
index de32bd94be491..6d8a396119ef3 100644
--- a/pandas/tests/indexes/multi/test_reshape.py
+++ b/pandas/tests/indexes/multi/test_reshape.py
@@ -175,6 +175,6 @@ def test_delete_base(idx):
assert result.equals(expected)
assert result.name == expected.name
- with pytest.raises((IndexError, ValueError)):
- # Exception raised depends on NumPy version.
+ msg = "index 6 is out of bounds for axis 0 with size 6"
+ with pytest.raises(IndexError, match=msg):
idx.delete(len(idx))
diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py
index bb40612b9a55a..423bbed831b87 100644
--- a/pandas/tests/indexes/multi/test_sorting.py
+++ b/pandas/tests/indexes/multi/test_sorting.py
@@ -105,7 +105,11 @@ def test_unsortedindex():
expected = df.iloc[0]
tm.assert_series_equal(result, expected)
- with pytest.raises(UnsortedIndexError):
+ msg = (
+ "MultiIndex slicing requires the index to be lexsorted: "
+ r"slicing on levels \[1\], lexsort depth 0"
+ )
+ with pytest.raises(UnsortedIndexError, match=msg):
df.loc(axis=0)["z", slice("a")]
df.sort_index(inplace=True)
assert len(df.loc(axis=0)["z", :]) == 2
@@ -124,7 +128,8 @@ def test_unsortedindex_doc_examples():
with tm.assert_produces_warning(PerformanceWarning):
dfm.loc[(1, "z")]
- with pytest.raises(UnsortedIndexError):
+ msg = r"Key length \(2\) was greater than MultiIndex lexsort depth \(1\)"
+ with pytest.raises(UnsortedIndexError, match=msg):
dfm.loc[(0, "y"):(1, "z")]
assert not dfm.index.is_lexsorted()
| - [x] ref #30999
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33144 | 2020-03-30T15:51:23Z | 2020-04-19T22:22:23Z | 2020-04-19T22:22:23Z | 2020-04-19T22:22:28Z |
STY: Using __all__; removed "noqa" comment | diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
index 8d3b00e4a44b9..4a4e53eaa45fa 100644
--- a/pandas/_libs/tslibs/__init__.py
+++ b/pandas/_libs/tslibs/__init__.py
@@ -1,4 +1,21 @@
-# flake8: noqa
+__all__ = [
+ "localize_pydatetime",
+ "normalize_date",
+ "NaT",
+ "NaTType",
+ "iNaT",
+ "is_null_datetimelike",
+ "OutOfBoundsDatetime",
+ "IncompatibleFrequency",
+ "Period",
+ "Timedelta",
+ "delta_to_nanoseconds",
+ "ints_to_pytimedelta",
+ "Timestamp",
+ "tz_convert_single",
+ "NullFrequencyError",
+]
+
from .conversion import localize_pydatetime, normalize_date
from .nattype import NaT, NaTType, iNaT, is_null_datetimelike
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33143 | 2020-03-30T15:14:02Z | 2020-04-10T15:44:35Z | 2020-04-10T15:44:35Z | 2020-04-10T15:45:40Z |
CLN: unused import _libs/reduction + remove "noqa" comment in _libs/__init__.py | diff --git a/pandas/_libs/__init__.py b/pandas/_libs/__init__.py
index af67cb3be7102..141ca0645b906 100644
--- a/pandas/_libs/__init__.py
+++ b/pandas/_libs/__init__.py
@@ -1,6 +1,15 @@
-# flake8: noqa
+__all__ = [
+ "NaT",
+ "NaTType",
+ "OutOfBoundsDatetime",
+ "Period",
+ "Timedelta",
+ "Timestamp",
+ "iNaT",
+]
-from .tslibs import (
+
+from pandas._libs.tslibs import (
NaT,
NaTType,
OutOfBoundsDatetime,
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 29a5a73ef08d0..9f8579606014a 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -1,5 +1,4 @@
from copy import copy
-from distutils.version import LooseVersion
from cython import Py_ssize_t
from cpython.ref cimport Py_INCREF
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33142 | 2020-03-30T14:56:56Z | 2020-03-30T18:13:35Z | 2020-03-30T18:13:35Z | 2020-03-30T18:21:01Z |
BUG: to_sql no longer raises an AttributeError when saving an OBB date | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 58ac2b4cba3b7..408b7d6235f3d 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -405,7 +405,7 @@ I/O
- Bug in :meth:`read_csv` was causing a segfault when there were blank lines between the header and data rows (:issue:`28071`)
- Bug in :meth:`read_csv` was raising a misleading exception on a permissions issue (:issue:`23784`)
- Bug in :meth:`read_csv` was raising an ``IndexError`` when header=None and 2 extra data columns
-
+- Bug in :meth:`DataFrame.to_sql` where an ``AttributeError`` was raised when saving an out of bounds date (:issue:`26761`)
Plotting
^^^^^^^^
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 560e7e4781cbb..ff647040ebbfb 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -978,7 +978,8 @@ def _sqlalchemy_type(self, col):
return TIMESTAMP(timezone=True)
except AttributeError:
# The column is actually a DatetimeIndex
- if col.tz is not None:
+ # GH 26761 or an Index with date-like data e.g. 9999-01-01
+ if getattr(col, "tz", None) is not None:
return TIMESTAMP(timezone=True)
return DateTime
if col_type == "timedelta64":
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index bf0ed4fe25346..2f2ae8cd9d32b 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -1480,6 +1480,14 @@ def test_datetime_with_timezone_roundtrip(self):
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
+ def test_out_of_bounds_datetime(self):
+ # GH 26761
+ data = pd.DataFrame({"date": datetime(9999, 1, 1)}, index=[0])
+ data.to_sql("test_datetime_obb", self.conn, index=False)
+ result = sql.read_sql_table("test_datetime_obb", self.conn)
+ expected = pd.DataFrame([pd.NaT], columns=["date"])
+ tm.assert_frame_equal(result, expected)
+
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
| - [x] closes #26761
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33140 | 2020-03-30T02:24:35Z | 2020-03-30T16:24:41Z | 2020-03-30T16:24:41Z | 2020-03-30T16:24:45Z |
REF: de-curry unstack functions | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index b2a8c7a0864b8..ef4d7b98deac7 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1,5 +1,4 @@
from datetime import datetime, timedelta
-import functools
import inspect
import re
from typing import Any, List
@@ -1420,18 +1419,15 @@ def equals(self, other) -> bool:
return False
return array_equivalent(self.values, other.values)
- def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
+ def _unstack(self, unstacker, new_columns, fill_value, value_columns):
"""
Return a list of unstacked blocks of self
Parameters
----------
- unstacker_func : callable
- Partially applied unstacker.
+ unstacker : reshape._Unstacker
new_columns : Index
All columns of the unstacked BlockManager.
- n_rows : int
- Only used in ExtensionBlock._unstack
fill_value : int
Only used in ExtensionBlock._unstack
@@ -1442,10 +1438,11 @@ def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
mask : array_like of bool
The mask of columns of `blocks` we should keep.
"""
- unstacker = unstacker_func(self.values.T)
- new_items = unstacker.get_new_columns()
+ new_items = unstacker.get_new_columns(value_columns)
new_placement = new_columns.get_indexer(new_items)
- new_values, mask = unstacker.get_new_values()
+ new_values, mask = unstacker.get_new_values(
+ self.values.T, fill_value=fill_value
+ )
mask = mask.any(0)
new_values = new_values.T[mask]
@@ -1655,38 +1652,6 @@ def putmask(
new_values[mask] = new
return [self.make_block(values=new_values)]
- def _get_unstack_items(self, unstacker, new_columns):
- """
- Get the placement, values, and mask for a Block unstack.
-
- This is shared between ObjectBlock and ExtensionBlock. They
- differ in that ObjectBlock passes the values, while ExtensionBlock
- passes the dummy ndarray of positions to be used by a take
- later.
-
- Parameters
- ----------
- unstacker : pandas.core.reshape.reshape._Unstacker
- new_columns : Index
- All columns of the unstacked BlockManager.
-
- Returns
- -------
- new_placement : ndarray[int]
- The placement of the new columns in `new_columns`.
- new_values : Union[ndarray, ExtensionArray]
- The first return value from _Unstacker.get_new_values.
- mask : ndarray[bool]
- The second return value from _Unstacker.get_new_values.
- """
- # shared with ExtensionBlock
- new_items = unstacker.get_new_columns()
- new_placement = new_columns.get_indexer(new_items)
- new_values, mask = unstacker.get_new_values()
-
- mask = mask.any(0)
- return new_placement, new_values, mask
-
def _maybe_coerce_values(self, values):
"""
Unbox to an extension array.
@@ -1917,20 +1882,20 @@ def where(
return [self.make_block_same_class(result, placement=self.mgr_locs)]
- def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
+ def _unstack(self, unstacker, new_columns, fill_value, value_columns):
# ExtensionArray-safe unstack.
# We override ObjectBlock._unstack, which unstacks directly on the
# values of the array. For EA-backed blocks, this would require
# converting to a 2-D ndarray of objects.
# Instead, we unstack an ndarray of integer positions, followed by
# a `take` on the actual values.
+ n_rows = self.shape[-1]
dummy_arr = np.arange(n_rows)
- dummy_unstacker = functools.partial(unstacker_func, fill_value=-1)
- unstacker = dummy_unstacker(dummy_arr)
- new_placement, new_values, mask = self._get_unstack_items(
- unstacker, new_columns
- )
+ new_items = unstacker.get_new_columns(value_columns)
+ new_placement = new_columns.get_indexer(new_items)
+ new_values, mask = unstacker.get_new_values(dummy_arr, fill_value=-1)
+ mask = mask.any(0)
blocks = [
self.make_block_same_class(
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index e626a5ee8e111..2b35adccbe948 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1,5 +1,4 @@
from collections import defaultdict
-from functools import partial
import itertools
import operator
import re
@@ -1459,14 +1458,13 @@ def canonicalize(block):
block.equals(oblock) for block, oblock in zip(self_blocks, other_blocks)
)
- def unstack(self, unstacker_func, fill_value) -> "BlockManager":
+ def unstack(self, unstacker, fill_value) -> "BlockManager":
"""
Return a BlockManager with all blocks unstacked..
Parameters
----------
- unstacker_func : callable
- A (partially-applied) ``pd.core.reshape._Unstacker`` class.
+ unstacker : reshape._Unstacker
fill_value : Any
fill_value for newly introduced missing values.
@@ -1474,19 +1472,16 @@ def unstack(self, unstacker_func, fill_value) -> "BlockManager":
-------
unstacked : BlockManager
"""
- n_rows = self.shape[-1]
- dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items)
- new_columns = dummy.get_new_columns()
- new_index = dummy.get_new_index()
+ new_columns = unstacker.get_new_columns(self.items)
+ new_index = unstacker.new_index
+
new_blocks: List[Block] = []
columns_mask: List[np.ndarray] = []
for blk in self.blocks:
+ blk_cols = self.items[blk.mgr_locs.indexer]
blocks, mask = blk._unstack(
- partial(unstacker_func, value_columns=self.items[blk.mgr_locs.indexer]),
- new_columns,
- n_rows,
- fill_value,
+ unstacker, new_columns, fill_value, value_columns=blk_cols,
)
new_blocks.extend(blocks)
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 88e61d2392773..8e56cb263b4ac 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -1,4 +1,3 @@
-from functools import partial
import itertools
from typing import List, Optional, Union
@@ -7,6 +6,7 @@
import pandas._libs.algos as libalgos
import pandas._libs.reshape as libreshape
from pandas._libs.sparse import IntIndex
+from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
@@ -42,14 +42,10 @@ class _Unstacker:
Parameters
----------
- values : ndarray
- Values of DataFrame to "Unstack"
index : object
Pandas ``Index``
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
- value_columns : Index, optional
- Pandas ``Index`` or ``MultiIndex`` object if unstacking a DataFrame
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
@@ -88,28 +84,13 @@ class _Unstacker:
"""
def __init__(
- self,
- values: np.ndarray,
- index,
- level=-1,
- value_columns=None,
- fill_value=None,
- constructor=None,
+ self, index, level=-1, constructor=None,
):
- if values.ndim == 1:
- values = values[:, np.newaxis]
- self.values = values
- self.value_columns = value_columns
- self.fill_value = fill_value
-
if constructor is None:
constructor = DataFrame
self.constructor = constructor
- if value_columns is None and values.shape[1] != 1: # pragma: no cover
- raise ValueError("must pass column labels for multi-column data")
-
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
@@ -117,6 +98,7 @@ def __init__(
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.codes[self.level] else 0
+ # Note: the "pop" below alters these in-place.
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
@@ -137,10 +119,10 @@ def __init__(
if num_rows > 0 and num_columns > 0 and num_cells <= 0:
raise ValueError("Unstacked DataFrame is too big, causing int32 overflow")
- self._make_sorted_values_labels()
self._make_selectors()
- def _make_sorted_values_labels(self):
+ @cache_readonly
+ def _indexer_and_to_sort(self):
v = self.level
codes = list(self.index.codes)
@@ -154,8 +136,18 @@ def _make_sorted_values_labels(self):
indexer = libalgos.groupsort_indexer(comp_index, ngroups)[0]
indexer = ensure_platform_int(indexer)
- self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
- self.sorted_labels = [l.take(indexer) for l in to_sort]
+ return indexer, to_sort
+
+ @cache_readonly
+ def sorted_labels(self):
+ indexer, to_sort = self._indexer_and_to_sort
+ return [l.take(indexer) for l in to_sort]
+
+ def _make_sorted_values(self, values):
+ indexer, _ = self._indexer_and_to_sort
+
+ sorted_values = algos.take_nd(values, indexer, axis=0)
+ return sorted_values
def _make_selectors(self):
new_levels = self.new_index_levels
@@ -183,15 +175,26 @@ def _make_selectors(self):
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
- def get_result(self):
- values, _ = self.get_new_values()
- columns = self.get_new_columns()
- index = self.get_new_index()
+ def get_result(self, values, value_columns, fill_value):
+
+ if values.ndim == 1:
+ values = values[:, np.newaxis]
+
+ if value_columns is None and values.shape[1] != 1: # pragma: no cover
+ raise ValueError("must pass column labels for multi-column data")
+
+ values, _ = self.get_new_values(values, fill_value)
+ columns = self.get_new_columns(value_columns)
+ index = self.new_index
return self.constructor(values, index=index, columns=columns)
- def get_new_values(self):
- values = self.values
+ def get_new_values(self, values, fill_value=None):
+
+ if values.ndim == 1:
+ values = values[:, np.newaxis]
+
+ sorted_values = self._make_sorted_values(values)
# place the values
length, width = self.full_shape
@@ -204,7 +207,7 @@ def get_new_values(self):
# we can simply reshape if we don't have a mask
if mask_all and len(values):
new_values = (
- self.sorted_values.reshape(length, width, stride)
+ sorted_values.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
@@ -216,14 +219,13 @@ def get_new_values(self):
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
- dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
+ dtype, fill_value = maybe_promote(values.dtype, fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
- sorted_values = self.sorted_values
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
@@ -254,8 +256,8 @@ def get_new_values(self):
return new_values, new_mask
- def get_new_columns(self):
- if self.value_columns is None:
+ def get_new_columns(self, value_columns):
+ if value_columns is None:
if self.lift == 0:
return self.removed_level._shallow_copy(name=self.removed_name)
@@ -263,16 +265,16 @@ def get_new_columns(self):
return lev.rename(self.removed_name)
stride = len(self.removed_level) + self.lift
- width = len(self.value_columns)
+ width = len(value_columns)
propagator = np.repeat(np.arange(width), stride)
- if isinstance(self.value_columns, MultiIndex):
- new_levels = self.value_columns.levels + (self.removed_level_full,)
- new_names = self.value_columns.names + (self.removed_name,)
+ if isinstance(value_columns, MultiIndex):
+ new_levels = value_columns.levels + (self.removed_level_full,)
+ new_names = value_columns.names + (self.removed_name,)
- new_codes = [lab.take(propagator) for lab in self.value_columns.codes]
+ new_codes = [lab.take(propagator) for lab in value_columns.codes]
else:
- new_levels = [self.value_columns, self.removed_level_full]
- new_names = [self.value_columns.name, self.removed_name]
+ new_levels = [value_columns, self.removed_level_full]
+ new_names = [value_columns.name, self.removed_name]
new_codes = [propagator]
# The two indices differ only if the unstacked level had unused items:
@@ -291,7 +293,9 @@ def get_new_columns(self):
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
- def get_new_index(self):
+ @cache_readonly
+ def new_index(self):
+ # Does not depend on values or value_columns
result_codes = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]]
# construct the new index
@@ -417,31 +421,22 @@ def unstack(obj, level, fill_value=None):
if is_extension_array_dtype(obj.dtype):
return _unstack_extension_series(obj, level, fill_value)
unstacker = _Unstacker(
- obj.values,
- obj.index,
- level=level,
- fill_value=fill_value,
- constructor=obj._constructor_expanddim,
+ obj.index, level=level, constructor=obj._constructor_expanddim,
+ )
+ return unstacker.get_result(
+ obj.values, value_columns=None, fill_value=fill_value
)
- return unstacker.get_result()
def _unstack_frame(obj, level, fill_value=None):
if obj._is_mixed_type:
- unstacker = partial(
- _Unstacker, index=obj.index, level=level, fill_value=fill_value
- )
+ unstacker = _Unstacker(obj.index, level=level)
blocks = obj._data.unstack(unstacker, fill_value=fill_value)
return obj._constructor(blocks)
else:
return _Unstacker(
- obj.values,
- obj.index,
- level=level,
- value_columns=obj.columns,
- fill_value=fill_value,
- constructor=obj._constructor,
- ).get_result()
+ obj.index, level=level, constructor=obj._constructor,
+ ).get_result(obj.values, value_columns=obj.columns, fill_value=fill_value)
def _unstack_extension_series(series, level, fill_value):
@@ -476,9 +471,9 @@ def _unstack_extension_series(series, level, fill_value):
dummy_arr = np.arange(len(series))
# fill_value=-1, since we will do a series.values.take later
- result = _Unstacker(
- dummy_arr, series.index, level=level, fill_value=-1
- ).get_result()
+ result = _Unstacker(series.index, level=level).get_result(
+ dummy_arr, value_columns=None, fill_value=-1
+ )
out = []
values = extract_array(series, extract_numpy=False)
| I find this much easier to follow without the double-partial. We also avoid re-doing some work, though AFAICT its pretty tiny perf-wise. | https://api.github.com/repos/pandas-dev/pandas/pulls/33139 | 2020-03-30T01:39:12Z | 2020-03-30T21:09:52Z | 2020-03-30T21:09:52Z | 2020-03-30T21:56:02Z |
BUG: Fix SeriesGroupBy.quantile for nullable integers | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 6f2b9b4f946c7..3dd8fd4a38d7e 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -445,6 +445,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrameGroupby.transform` produces incorrect result with transformation functions (:issue:`30918`)
- Bug in :meth:`GroupBy.count` causes segmentation fault when grouped-by column contains NaNs (:issue:`32841`)
- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` produces inconsistent type when aggregating Boolean series (:issue:`32894`)
+- Bug in :meth:`SeriesGroupBy.quantile` raising on nullable integers (:issue:`33136`)
- Bug in :meth:`SeriesGroupBy.first`, :meth:`SeriesGroupBy.last`, :meth:`SeriesGroupBy.min`, and :meth:`SeriesGroupBy.max` returning floats when applied to nullable Booleans (:issue:`33071`)
- Bug in :meth:`DataFrameGroupBy.agg` with dictionary input losing ``ExtensionArray`` dtypes (:issue:`32194`)
- Bug in :meth:`DataFrame.resample` where an ``AmbiguousTimeError`` would be raised when the resulting timezone aware :class:`DatetimeIndex` had a DST transition at midnight (:issue:`25758`)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 1474e173b4f8c..7a7ac58b9d11b 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -44,7 +44,9 @@ class providing the base-class of operations.
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
ensure_float,
+ is_bool_dtype,
is_datetime64_dtype,
+ is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
@@ -1867,9 +1869,13 @@ def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]:
)
inference = None
- if is_integer_dtype(vals):
+ if is_integer_dtype(vals.dtype):
+ if is_extension_array_dtype(vals.dtype):
+ vals = vals.to_numpy(dtype=float, na_value=np.nan)
inference = np.int64
- elif is_datetime64_dtype(vals):
+ elif is_bool_dtype(vals.dtype) and is_extension_array_dtype(vals.dtype):
+ vals = vals.to_numpy(dtype=float, na_value=np.nan)
+ elif is_datetime64_dtype(vals.dtype):
inference = "datetime64[ns]"
vals = np.asarray(vals).astype(np.float)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 9c33843cdcecc..346de55f551df 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1519,6 +1519,30 @@ def test_quantile_missing_group_values_correct_results():
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize(
+ "values",
+ [
+ pd.array([1, 0, None] * 2, dtype="Int64"),
+ pd.array([True, False, None] * 2, dtype="boolean"),
+ ],
+)
+@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]])
+def test_groupby_quantile_nullable_array(values, q):
+ # https://github.com/pandas-dev/pandas/issues/33136
+ df = pd.DataFrame({"a": ["x"] * 3 + ["y"] * 3, "b": values})
+ result = df.groupby("a")["b"].quantile(q)
+
+ if isinstance(q, list):
+ idx = pd.MultiIndex.from_product((["x", "y"], q), names=["a", None])
+ true_quantiles = [0.0, 0.5, 1.0]
+ else:
+ idx = pd.Index(["x", "y"], name="a")
+ true_quantiles = [0.5]
+
+ expected = pd.Series(true_quantiles * 2, index=idx, name="b")
+ tm.assert_series_equal(result, expected)
+
+
# pipe
# --------------------------------
| - [x] closes #33136
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33138 | 2020-03-30T00:46:21Z | 2020-04-07T23:10:11Z | 2020-04-07T23:10:11Z | 2023-02-28T17:47:32Z |
BUG: DataFrame.resample raised AmbiguousTimeError at a midnight DST transition | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 8db837a38170b..ef6569f229566 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -422,7 +422,7 @@ Groupby/resample/rolling
- Bug in :meth:`GroupBy.apply` raises ``ValueError`` when the ``by`` axis is not sorted and has duplicates and the applied ``func`` does not mutate passed in objects (:issue:`30667`)
- Bug in :meth:`DataFrameGroupby.transform` produces incorrect result with transformation functions (:issue:`30918`)
- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` produces inconsistent type when aggregating Boolean series (:issue:`32894`)
-
+- Bug in :meth:`DataFrame.resample` where an ``AmbiguousTimeError`` would be raised when the resulting timezone aware :class:`DatetimeIndex` had a DST transition at midnight (:issue:`25758`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 9e3318db3cfb9..2e1dcf8da5bd4 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1422,13 +1422,15 @@ def _get_time_bins(self, ax):
# because replace() will swallow the nanosecond part
# thus last bin maybe slightly before the end if the end contains
# nanosecond part and lead to `Values falls after last bin` error
+ # GH 25758: If DST lands at midnight (e.g. 'America/Havana'), user feedback
+ # has noted that ambiguous=True provides the most sensible result
binner = labels = date_range(
freq=self.freq,
start=first,
end=last,
tz=ax.tz,
name=ax.name,
- ambiguous="infer",
+ ambiguous=True,
nonexistent="shift_forward",
)
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 3ad82b9e075a8..bbb294bc109c1 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -1440,6 +1440,24 @@ def test_downsample_across_dst_weekly():
tm.assert_series_equal(result, expected)
+def test_downsample_dst_at_midnight():
+ # GH 25758
+ start = datetime(2018, 11, 3, 12)
+ end = datetime(2018, 11, 5, 12)
+ index = pd.date_range(start, end, freq="1H")
+ index = index.tz_localize("UTC").tz_convert("America/Havana")
+ data = list(range(len(index)))
+ dataframe = pd.DataFrame(data, index=index)
+ result = dataframe.groupby(pd.Grouper(freq="1D")).mean()
+ expected = DataFrame(
+ [7.5, 28.0, 44.5],
+ index=date_range("2018-11-03", periods=3).tz_localize(
+ "America/Havana", ambiguous=True
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
def test_resample_with_nat():
# GH 13020
index = DatetimeIndex(
| - [x] closes #25758
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33137 | 2020-03-30T00:27:12Z | 2020-04-03T16:18:29Z | 2020-04-03T16:18:28Z | 2020-04-03T16:18:34Z |
BUG: fix ngroups and len(groups) inconsistency when using [Grouper(freq=)] (GH33132) | diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 742de397956c0..47cce84836ed1 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -39,6 +39,7 @@
from pandas.core.dtypes.missing import _maybe_fill, isna
import pandas.core.algorithms as algorithms
+from pandas.core.arrays import Categorical
from pandas.core.base import SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
@@ -837,11 +838,22 @@ def names(self):
@property
def groupings(self) -> "List[grouper.Grouping]":
+ codes, _, _ = self.group_info
+
+ if self.indexer is not None and len(self.indexer) != len(codes):
+ groupers = self.levels
+ else:
+ groupers = [self.result_index._constructor(
+ Categorical.from_codes(self.codes_info, self.result_index))]
+
return [
grouper.Grouping(lvl, lvl, in_axis=False, level=None, name=name)
- for lvl, name in zip(self.levels, self.names)
+ for lvl, name in zip(groupers, self.names)
]
+ def __iter__(self):
+ return iter(self.groupings[0].grouper)
+
def agg_series(self, obj: Series, func):
# Caller is responsible for checking ngroups != 0
assert self.ngroups != 0
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index b8d8f56512a69..b3e8499c5daa5 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1863,6 +1863,17 @@ def test_groupby_groups_in_BaseGrouper():
expected = df.groupby(["beta", "alpha"])
assert result.groups == expected.groups
+ # GH 33132
+ # Test if DataFrame grouped with a pandas.Grouper and freq param has correct groups
+ mi = pd.MultiIndex.from_product([date_range(datetime.today(), periods=2),
+ ["C", "D"]], names=["alpha", "beta"])
+ df = pd.DataFrame({"foo": [1, 2, 1, 2], "bar": [1, 2, 3, 4]}, index=mi)
+ result = df.groupby(["beta", pd.Grouper(level="alpha", freq='D')])
+ assert result.ngroups == len(result)
+
+ result = df.groupby([pd.Grouper(level="alpha", freq='D'), "beta"])
+ assert result.ngroups == len(result)
+
@pytest.mark.parametrize("group_name", ["x", ["x"]])
def test_groupby_axis_1(group_name):
| - [ ] closes #33132
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33135 | 2020-03-29T22:52:13Z | 2020-04-11T15:29:31Z | null | 2020-04-11T15:29:31Z |
BUG: create new MI from MultiIndex._get_level_values | diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 68d7e8dd384f0..c638c59909488 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -323,8 +323,8 @@ def _formatter_func(self):
@cache_readonly
def _engine(self):
- # To avoid a reference cycle, pass a weakref of self to _engine_type.
- period = weakref.ref(self)
+ # To avoid a reference cycle, pass a weakref of self._values to _engine_type.
+ period = weakref.ref(self._values)
return self._engine_type(period, len(self))
@doc(Index.__contains__)
diff --git a/pandas/tests/indexes/multi/test_get_level_values.py b/pandas/tests/indexes/multi/test_get_level_values.py
index 1215e72be3c59..985fe5773ceed 100644
--- a/pandas/tests/indexes/multi/test_get_level_values.py
+++ b/pandas/tests/indexes/multi/test_get_level_values.py
@@ -89,3 +89,17 @@ def test_get_level_values_na():
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
+
+
+def test_get_level_values_when_periods():
+ # GH33131. See also discussion in GH32669.
+ # This test can probably be removed when PeriodIndex._engine is removed.
+ from pandas import Period, PeriodIndex
+
+ idx = MultiIndex.from_arrays(
+ [PeriodIndex([Period("2019Q1"), Period("2019Q2")], name="b")]
+ )
+ idx2 = MultiIndex.from_arrays(
+ [idx._get_level_values(level) for level in range(idx.nlevels)]
+ )
+ assert all(x.is_monotonic for x in idx2.levels)
| - [x] closes #33131
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Closes #33131, a weakref was released too early. | https://api.github.com/repos/pandas-dev/pandas/pulls/33134 | 2020-03-29T22:38:56Z | 2020-03-31T17:10:07Z | 2020-03-31T17:10:07Z | 2020-03-31T17:16:50Z |
BUG: to_datetime with infer_datetime_format dropped timezone names | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 8db837a38170b..be48552fb04e9 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -313,7 +313,7 @@ Timedelta
Timezones
^^^^^^^^^
--
+- Bug in :func:`to_datetime` with ``infer_datetime_format=True`` where timezone names (e.g. ``UTC``) would not be parsed correctly (:issue:`33133`)
-
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 74b95a2f3076f..5272a0a042d0e 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -805,6 +805,7 @@ def _guess_datetime_format(dt_str, dayfirst=False, dt_str_parse=du_parse,
(('second',), '%S', 2),
(('microsecond',), '%f', 6),
(('second', 'microsecond'), '%S.%f', 0),
+ (('tzinfo',), '%Z', 0),
]
if dayfirst:
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index a2042de7f337e..207c5cc98449a 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -606,9 +606,9 @@ def to_datetime(
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : bool, default False
If True and no `format` is given, attempt to infer the format of the
- datetime strings, and if it can be inferred, switch to a faster
- method of parsing them. In some cases this can increase the parsing
- speed by ~5-10x.
+ datetime strings based on the first non-NaN element,
+ and if it can be inferred, switch to a faster method of parsing them.
+ In some cases this can increase the parsing speed by ~5-10x.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index a751182dbf7af..d2049892705ea 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -1862,6 +1862,18 @@ def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
+ @pytest.mark.parametrize(
+ "tz_name, offset", [("UTC", 0), ("UTC-3", 180), ("UTC+3", -180)]
+ )
+ def test_infer_datetime_format_tz_name(self, tz_name, offset):
+ # GH 33133
+ s = pd.Series([f"2019-02-02 08:07:13 {tz_name}"])
+ result = to_datetime(s, infer_datetime_format=True)
+ expected = pd.Series(
+ [pd.Timestamp("2019-02-02 08:07:13").tz_localize(pytz.FixedOffset(offset))]
+ )
+ tm.assert_series_equal(result, expected)
+
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601_noleading_0s(self, cache):
# GH 11871
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33133 | 2020-03-29T22:33:25Z | 2020-03-31T17:11:07Z | 2020-03-31T17:11:07Z | 2020-03-31T17:11:52Z |
CLN: Added static types | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 8187fb508291a..e4aeb7ad69792 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -34,6 +34,7 @@ cimport numpy as cnp
from numpy cimport ndarray, uint8_t, uint64_t, int64_t, float64_t
cnp.import_array()
+cimport pandas._libs.util as util
from pandas._libs.util cimport UINT64_MAX, INT64_MAX, INT64_MIN
import pandas._libs.lib as lib
@@ -279,18 +280,16 @@ cdef class TextReader:
cdef public:
int64_t leading_cols, table_width, skipfooter, buffer_lines
- object allow_leading_cols
- object delimiter, converters, delim_whitespace
+ bint allow_leading_cols, mangle_dupe_cols, memory_map, low_memory
+ bint delim_whitespace
+ object delimiter, converters
object na_values
- object memory_map
object header, orig_header, names, header_start, header_end
object index_col
- object low_memory
object skiprows
object dtype
object encoding
object compression
- object mangle_dupe_cols
object usecols
list dtype_cast_order
set unnamed_cols
@@ -298,54 +297,44 @@ cdef class TextReader:
def __cinit__(self, source,
delimiter=b',',
-
header=0,
header_start=0,
header_end=0,
index_col=None,
names=None,
-
- memory_map=False,
+ bint memory_map=False,
tokenize_chunksize=DEFAULT_CHUNKSIZE,
- delim_whitespace=False,
-
+ bint delim_whitespace=False,
compression=None,
-
converters=None,
-
- skipinitialspace=False,
+ bint skipinitialspace=False,
escapechar=None,
- doublequote=True,
+ bint doublequote=True,
quotechar=b'"',
quoting=0,
lineterminator=None,
-
encoding=None,
-
comment=None,
decimal=b'.',
thousands=None,
-
dtype=None,
usecols=None,
- error_bad_lines=True,
- warn_bad_lines=True,
-
- na_filter=True,
+ bint error_bad_lines=True,
+ bint warn_bad_lines=True,
+ bint na_filter=True,
na_values=None,
na_fvalues=None,
- keep_default_na=True,
-
+ bint keep_default_na=True,
true_values=None,
false_values=None,
- allow_leading_cols=True,
- low_memory=False,
+ bint allow_leading_cols=True,
+ bint low_memory=False,
skiprows=None,
skipfooter=0,
- verbose=False,
- mangle_dupe_cols=True,
+ bint verbose=False,
+ bint mangle_dupe_cols=True,
float_precision=None,
- skip_blank_lines=True):
+ bint skip_blank_lines=True):
# set encoding for native Python and C library
if encoding is not None:
@@ -591,7 +580,7 @@ cdef class TextReader:
self.parser.quotechar = ord(quote_char)
cdef _make_skiprow_set(self):
- if isinstance(self.skiprows, (int, np.integer)):
+ if util.is_integer_object(self.skiprows):
parser_set_skipfirstnrows(self.parser, self.skiprows)
elif not callable(self.skiprows):
for i in self.skiprows:
@@ -683,15 +672,14 @@ cdef class TextReader:
# header is now a list of lists, so field_count should use header[0]
cdef:
- Py_ssize_t i, start, field_count, passed_count, unnamed_count
+ Py_ssize_t i, start, field_count, passed_count, unnamed_count, level
char *word
object name, old_name
uint64_t hr, data_line = 0
char *errors = "strict"
StringPath path = _string_path(self.c_encoding)
-
- header = []
- unnamed_cols = set()
+ list header = []
+ set unnamed_cols = set()
if self.parser.header_start >= 0:
@@ -847,7 +835,7 @@ cdef class TextReader:
cdef _read_low_memory(self, rows):
cdef:
size_t rows_read = 0
- chunks = []
+ list chunks = []
if rows is None:
while True:
@@ -2038,12 +2026,11 @@ def _concatenate_chunks(list chunks):
cdef:
list names = list(chunks[0].keys())
object name
- list warning_columns
+ list warning_columns = []
object warning_names
object common_type
result = {}
- warning_columns = list()
for name in names:
arrs = [chunk.pop(name) for chunk in chunks]
# Check each arr for consistent types.
@@ -2147,7 +2134,7 @@ def _maybe_encode(values):
def sanitize_objects(ndarray[object] values, set na_values,
- convert_empty=True):
+ bint convert_empty=True):
"""
Convert specified values, including the given set na_values and empty
strings if convert_empty is True, to np.nan.
@@ -2156,7 +2143,7 @@ def sanitize_objects(ndarray[object] values, set na_values,
----------
values : ndarray[object]
na_values : set
- convert_empty : bool (default True)
+ convert_empty : bool, default True
"""
cdef:
Py_ssize_t i, n
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33126 | 2020-03-29T19:33:37Z | 2020-04-03T19:41:19Z | 2020-04-03T19:41:19Z | 2020-04-06T08:41:06Z |
REF: reshape.concat operate on arrays, not SingleBlockManagers | diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index c6efd6a2ac6a7..c6ce4aea9fa40 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1623,33 +1623,6 @@ def fast_xs(self, loc):
"""
raise NotImplementedError("Use series._values[loc] instead")
- def concat(
- self, to_concat: List["SingleBlockManager"], new_axis: Index
- ) -> "SingleBlockManager":
- """
- Concatenate a list of SingleBlockManagers into a single
- SingleBlockManager.
-
- Used for pd.concat of Series objects with axis=0.
-
- Parameters
- ----------
- to_concat : list of SingleBlockManagers
- new_axis : Index of the result
-
- Returns
- -------
- SingleBlockManager
- """
-
- blocks = [obj.blocks[0] for obj in to_concat]
- values = concat_compat([x.values for x in blocks])
-
- new_block = make_block(values, placement=slice(0, len(values), 1))
-
- mgr = SingleBlockManager(new_block, new_axis)
- return mgr
-
# --------------------------------------------------------------------
# Constructor Helpers
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index bd90592e57485..a868e663b06a5 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -9,6 +9,7 @@
from pandas._typing import FrameOrSeriesUnion, Label
+from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas import DataFrame, Index, MultiIndex, Series
@@ -457,12 +458,13 @@ def get_result(self):
# stack blocks
if self.bm_axis == 0:
name = com.consensus_name_attr(self.objs)
-
- mgr = self.objs[0]._mgr.concat(
- [x._mgr for x in self.objs], self.new_axes[0]
- )
cons = self.objs[0]._constructor
- return cons(mgr, name=name).__finalize__(self, method="concat")
+
+ arrs = [ser._values for ser in self.objs]
+
+ res = concat_compat(arrs, axis=0)
+ result = cons(res, index=self.new_axes[0], name=name, dtype=res.dtype)
+ return result.__finalize__(self, method="concat")
# combine as columns in a frame
else:
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 61c5925383f88..aa5a99282131a 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -170,8 +170,11 @@ def test_take_series(self, data):
# ValueError: PandasArray must be 1-dimensional.
super().test_take_series(data)
- @pytest.mark.xfail(reason="astype doesn't recognize data.dtype")
def test_loc_iloc_frame_single_dtype(self, data):
+ npdtype = data.dtype.numpy_dtype
+ if npdtype == object or npdtype == np.float64:
+ # GH#33125
+ pytest.xfail(reason="GH#33125 astype doesn't recognize data.dtype")
super().test_loc_iloc_frame_single_dtype(data)
@@ -179,6 +182,8 @@ class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests):
@skip_nested
def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
# ValueError: Names should be list-like for a MultiIndex
+ if data_for_grouping.dtype.numpy_dtype == np.float64:
+ pytest.xfail(reason="GH#33125 astype doesn't recognize data.dtype")
super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op)
@@ -276,7 +281,11 @@ def test_arith_series_with_array(self, data, all_arithmetic_operators):
class TestPrinting(BaseNumPyTests, base.BasePrintingTests):
- pass
+ @pytest.mark.xfail(
+ reason="GH#33125 PandasArray.astype does not recognize PandasDtype"
+ )
+ def test_series_repr(self, data):
+ super().test_series_repr(data)
@skip_nested
@@ -321,6 +330,18 @@ class TestReshaping(BaseNumPyTests, base.BaseReshapingTests):
def test_concat_mixed_dtypes(self, data):
super().test_concat_mixed_dtypes(data)
+ @pytest.mark.xfail(
+ reason="GH#33125 PandasArray.astype does not recognize PandasDtype"
+ )
+ def test_concat(self, data, in_frame):
+ super().test_concat(data, in_frame)
+
+ @pytest.mark.xfail(
+ reason="GH#33125 PandasArray.astype does not recognize PandasDtype"
+ )
+ def test_concat_all_na_block(self, data_missing, in_frame):
+ super().test_concat_all_na_block(data_missing, in_frame)
+
@skip_nested
def test_merge(self, data, na_value):
# Fails creating expected
| cc @TomAugspurger is there a better way to handle the assert_series_equal patch for the PandasArray tests? | https://api.github.com/repos/pandas-dev/pandas/pulls/33125 | 2020-03-29T18:57:11Z | 2020-04-10T16:10:01Z | 2020-04-10T16:10:01Z | 2020-04-10T17:49:18Z |
REF: DataFrame.isna internals access | diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 309c9f03ba6ed..d461db2d05f9d 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -152,7 +152,7 @@ def _isna_new(obj):
):
return _isna_ndarraylike(obj)
elif isinstance(obj, ABCDataFrame):
- return obj._constructor(obj._data.isna(func=isna))
+ return obj.isna()
elif isinstance(obj, list):
return _isna_ndarraylike(np.asarray(obj, dtype=object))
elif hasattr(obj, "__array__"):
@@ -183,7 +183,7 @@ def _isna_old(obj):
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass, ABCExtensionArray)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCDataFrame):
- return obj._constructor(obj._data.isna(func=_isna_old))
+ return obj.isna()
elif isinstance(obj, list):
return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
elif hasattr(obj, "__array__"):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5c04678cd5fa6..f25f17d8447fe 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4478,19 +4478,20 @@ def _maybe_casted_values(index, labels=None):
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self) -> "DataFrame":
- return super().isna()
+ result = self._constructor(self._data.isna(func=isna))
+ return result.__finalize__(self)
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self) -> "DataFrame":
- return super().isnull()
+ return self.isna()
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self) -> "DataFrame":
- return super().notna()
+ return ~self.isna()
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self) -> "DataFrame":
- return super().notnull()
+ return ~self.isna()
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""
| https://api.github.com/repos/pandas-dev/pandas/pulls/33124 | 2020-03-29T18:32:25Z | 2020-03-30T21:46:40Z | 2020-03-30T21:46:40Z | 2020-03-30T21:54:00Z | |
REF: remove placement kwarg from Block.concat_same_type | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index b2a8c7a0864b8..596e99440d57e 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -326,16 +326,15 @@ def dtype(self):
def merge(self, other):
return _merge_blocks([self, other])
- def concat_same_type(self, to_concat, placement=None):
+ def concat_same_type(self, to_concat):
"""
Concatenate list of single blocks of the same type.
"""
values = self._concatenator(
[blk.values for blk in to_concat], axis=self.ndim - 1
)
- return self.make_block_same_class(
- values, placement=placement or slice(0, len(values), 1)
- )
+ placement = self.mgr_locs if self.ndim == 2 else slice(len(values))
+ return self.make_block_same_class(values, placement=placement)
def iget(self, i):
return self.values[i]
@@ -1818,13 +1817,13 @@ def _slice(self, slicer):
return self.values[slicer]
- def concat_same_type(self, to_concat, placement=None):
+ def concat_same_type(self, to_concat):
"""
Concatenate list of single blocks of the same type.
"""
values = self._holder._concat_same_type([blk.values for blk in to_concat])
- placement = placement or slice(0, len(values), 1)
- return self.make_block_same_class(values, ndim=self.ndim, placement=placement)
+ placement = self.mgr_locs if self.ndim == 2 else slice(len(values))
+ return self.make_block_same_class(values, placement=placement)
def fillna(self, value, limit=None, inplace=False, downcast=None):
values = self.values if inplace else self.values.copy()
@@ -2336,19 +2335,19 @@ def diff(self, n: int, axis: int = 0) -> List["Block"]:
new_values = new_values.astype("timedelta64[ns]")
return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]
- def concat_same_type(self, to_concat, placement=None):
+ def concat_same_type(self, to_concat):
# need to handle concat([tz1, tz2]) here, since DatetimeArray
# only handles cases where all the tzs are the same.
# Instead of placing the condition here, it could also go into the
# is_uniform_join_units check, but I'm not sure what is better.
if len({x.dtype for x in to_concat}) > 1:
values = concat_datetime([x.values for x in to_concat])
- placement = placement or slice(0, len(values), 1)
- if self.ndim > 1:
- values = np.atleast_2d(values)
- return ObjectBlock(values, ndim=self.ndim, placement=placement)
- return super().concat_same_type(to_concat, placement)
+ values = values.astype(object, copy=False)
+ placement = self.mgr_locs if self.ndim == 2 else slice(len(values))
+
+ return self.make_block(_block_shape(values, self.ndim), placement=placement)
+ return super().concat_same_type(to_concat)
def fillna(self, value, limit=None, inplace=False, downcast=None):
# We support filling a DatetimeTZ with a `value` whose timezone
@@ -2802,7 +2801,7 @@ def __init__(self, values, placement, ndim=None):
def _holder(self):
return Categorical
- def concat_same_type(self, to_concat, placement=None):
+ def concat_same_type(self, to_concat):
"""
Concatenate list of single blocks of the same type.
@@ -2818,9 +2817,10 @@ def concat_same_type(self, to_concat, placement=None):
values = self._concatenator(
[blk.values for blk in to_concat], axis=self.ndim - 1
)
+ placement = self.mgr_locs if self.ndim == 2 else slice(len(values))
# not using self.make_block_same_class as values can be object dtype
- return make_block(
- values, placement=placement or slice(0, len(values), 1), ndim=self.ndim
+ return self.make_block(
+ _block_shape(values, ndim=self.ndim), placement=placement
)
def replace(
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index e626a5ee8e111..f52447a77e8fb 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -2007,9 +2007,8 @@ def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
values = values.view()
b = b.make_block_same_class(values, placement=placement)
elif is_uniform_join_units(join_units):
- b = join_units[0].block.concat_same_type(
- [ju.block for ju in join_units], placement=placement
- )
+ b = join_units[0].block.concat_same_type([ju.block for ju in join_units])
+ b.mgr_locs = placement
else:
b = make_block(
concatenate_join_units(join_units, concat_axis, copy=copy),
diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py
index 1960e3d09245c..e9046be15b71e 100644
--- a/pandas/tests/extension/test_external_block.py
+++ b/pandas/tests/extension/test_external_block.py
@@ -17,9 +17,8 @@ def concat_same_type(self, to_concat, placement=None):
always 1D in this custom Block
"""
values = np.concatenate([blk.values for blk in to_concat])
- return self.make_block_same_class(
- values, placement=placement or slice(0, len(values), 1)
- )
+ placement = self.mgr_locs if self.ndim == 2 else slice(len(values))
+ return self.make_block_same_class(values, placement=placement)
@pytest.fixture
| On the path to getting concat logic out of internals | https://api.github.com/repos/pandas-dev/pandas/pulls/33123 | 2020-03-29T17:59:33Z | 2020-03-30T21:49:20Z | 2020-03-30T21:49:20Z | 2020-03-30T21:53:20Z |
CLN: Remove unused cdef variables | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index c6b68d9a0ab5c..8187fb508291a 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -267,7 +267,7 @@ cdef class TextReader:
cdef:
parser_t *parser
- object file_handle, na_fvalues
+ object na_fvalues
object true_values, false_values
object handle
bint na_filter, keep_default_na, verbose, has_usecols, has_mi_columns
@@ -601,7 +601,6 @@ cdef class TextReader:
cdef _setup_parser_source(self, source):
cdef:
- int status
void *ptr
self.parser.cb_io = NULL
@@ -687,7 +686,6 @@ cdef class TextReader:
Py_ssize_t i, start, field_count, passed_count, unnamed_count
char *word
object name, old_name
- int status
uint64_t hr, data_line = 0
char *errors = "strict"
StringPath path = _string_path(self.c_encoding)
@@ -837,9 +835,6 @@ cdef class TextReader:
"""
rows=None --> read all rows
"""
- cdef:
- int status
-
if self.low_memory:
# Conserve intermediate space
columns = self._read_low_memory(rows)
@@ -888,7 +883,9 @@ cdef class TextReader:
return _concatenate_chunks(chunks)
cdef _tokenize_rows(self, size_t nrows):
- cdef int status
+ cdef:
+ int status
+
with nogil:
status = tokenize_nrows(self.parser, nrows)
@@ -1331,7 +1328,8 @@ cdef:
def _ensure_encoded(list lst):
- cdef list result = []
+ cdef:
+ list result = []
for x in lst:
if isinstance(x, str):
x = PyUnicode_AsUTF8String(x)
@@ -1458,7 +1456,7 @@ cdef _string_box_decode(parser_t *parser, int64_t col,
bint na_filter, kh_str_starts_t *na_hashset,
char *encoding):
cdef:
- int error, na_count = 0
+ int na_count = 0
Py_ssize_t i, size, lines
coliter_t it
const char *word = NULL
@@ -1517,7 +1515,7 @@ cdef _categorical_convert(parser_t *parser, int64_t col,
char *encoding):
"Convert column data into codes, categories"
cdef:
- int error, na_count = 0
+ int na_count = 0
Py_ssize_t i, size, lines
coliter_t it
const char *word = NULL
@@ -1581,7 +1579,6 @@ cdef _categorical_convert(parser_t *parser, int64_t col,
cdef _to_fw_string(parser_t *parser, int64_t col, int64_t line_start,
int64_t line_end, int64_t width):
cdef:
- const char *word = NULL
char *data
ndarray result
@@ -1767,7 +1764,6 @@ cdef inline int _try_uint64_nogil(parser_t *parser, int64_t col,
Py_ssize_t i, lines = line_end - line_start
coliter_t it
const char *word = NULL
- khiter_t k
coliter_setup(&it, parser, col, line_start)
@@ -1870,9 +1866,7 @@ cdef _try_bool_flex(parser_t *parser, int64_t col,
Py_ssize_t lines
uint8_t *data
ndarray result
-
uint8_t NA = na_values[np.bool_]
- khiter_t k
lines = line_end - line_start
result = np.empty(lines, dtype=np.uint8)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33122 | 2020-03-29T16:01:01Z | 2020-03-29T18:02:36Z | 2020-03-29T18:02:36Z | 2020-03-29T18:06:32Z |
DOC: Updating capitalization of doc/source/development | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index c8d08277e9a26..732f9c5181b97 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -327,7 +327,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Validate correct capitalization among titles in documentation' ; echo $MSG
- $BASE_DIR/scripts/validate_rst_title_capitalization.py $BASE_DIR/doc/source/development/contributing.rst
+ $BASE_DIR/scripts/validate_rst_title_capitalization.py $BASE_DIR/doc/source/development/contributing.rst $BASE_DIR/doc/source/reference
RET=$(($RET + $?)) ; echo $MSG "DONE"
fi
diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index fa7532a68a06d..6d33537a40175 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -18,8 +18,8 @@ consistent code format throughout the project. For details see the
Patterns
========
-foo.__class__
--------------
+Using foo.__class__
+-------------------
pandas uses 'type(foo)' instead 'foo.__class__' as it is making the code more
@@ -47,8 +47,8 @@ String formatting
Concatenated strings
--------------------
-f-strings
-~~~~~~~~~
+Using f-strings
+~~~~~~~~~~~~~~~
pandas uses f-strings formatting instead of '%' and '.format()' string formatters.
diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst
index c0b20e2d2843b..d9fb2643e8a1a 100644
--- a/doc/source/development/extending.rst
+++ b/doc/source/development/extending.rst
@@ -139,7 +139,7 @@ and comments contain guidance for properly implementing the interface.
.. _extending.extension.operator:
-:class:`~pandas.api.extensions.ExtensionArray` Operator Support
+:class:`~pandas.api.extensions.ExtensionArray` operator support
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. versionadded:: 0.24.0
diff --git a/doc/source/development/policies.rst b/doc/source/development/policies.rst
index b7cc3db3ad260..1031bbfc46457 100644
--- a/doc/source/development/policies.rst
+++ b/doc/source/development/policies.rst
@@ -6,7 +6,7 @@ Policies
.. _policies.version:
-Version Policy
+Version policy
~~~~~~~~~~~~~~
.. versionchanged:: 1.0.0
@@ -48,7 +48,7 @@ deprecation removed in the next next major release (2.0.0).
These policies do not apply to features marked as **experimental** in the documentation.
pandas may change the behavior of experimental features at any time.
-Python Support
+Python support
~~~~~~~~~~~~~~
pandas will only drop support for specific Python versions (e.g. 3.6.x, 3.7.x) in
diff --git a/doc/source/development/roadmap.rst b/doc/source/development/roadmap.rst
index e57ff82add278..d331491d02883 100644
--- a/doc/source/development/roadmap.rst
+++ b/doc/source/development/roadmap.rst
@@ -152,7 +152,7 @@ We'd like to fund improvements and maintenance of these tools to
.. _roadmap.evolution:
-Roadmap Evolution
+Roadmap evolution
-----------------
pandas continues to evolve. The direction is primarily determined by community
diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst
index b326bbb5a465e..cf81540a77d11 100644
--- a/doc/source/reference/frame.rst
+++ b/doc/source/reference/frame.rst
@@ -251,7 +251,7 @@ Combining / joining / merging
DataFrame.merge
DataFrame.update
-Time series-related
+Time Series-related
~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: api/
diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst
index ab6ea5aea6c61..ba12c19763605 100644
--- a/doc/source/reference/indexing.rst
+++ b/doc/source/reference/indexing.rst
@@ -328,7 +328,7 @@ DatetimeIndex
DatetimeIndex
-Time/Date components
+Time/date components
~~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: api/
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index 1a69fa076dbf0..ab0540a930396 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -110,7 +110,7 @@ Binary operator functions
Series.product
Series.dot
-Function application, groupby & window
+Function application, GroupBy & window
--------------------------------------
.. autosummary::
:toctree: api/
@@ -249,7 +249,7 @@ Combining / joining / merging
Series.replace
Series.update
-Time series-related
+Time Series-related
-------------------
.. autosummary::
:toctree: api/
diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py
index 3e0ae90e26527..59d422a1605a0 100755
--- a/scripts/validate_rst_title_capitalization.py
+++ b/scripts/validate_rst_title_capitalization.py
@@ -45,6 +45,60 @@
"NumFOCUS",
"sklearn",
"Docker",
+ "PeriodIndex",
+ "NA",
+ "NaN",
+ "ValueError",
+ "BooleanArray",
+ "KeyError",
+ "API",
+ "FAQ",
+ "IO",
+ "TimedeltaIndex",
+ "DatetimeIndex",
+ "IntervalIndex",
+ "CategoricalIndex",
+ "GroupBy",
+ "SPSS",
+ "ORC",
+ "R",
+ "HDF5",
+ "HDFStore",
+ "CDay",
+ "CBMonthBegin",
+ "CBMonthEnd",
+ "BMonthBegin",
+ "BMonthEnd",
+ "BDay",
+ "FY5253Quarter",
+ "FY5253",
+ "YearBegin",
+ "YearEnd",
+ "BYearBegin",
+ "BYearEnd",
+ "YearOffset",
+ "QuarterBegin",
+ "QuarterEnd",
+ "BQuarterBegin",
+ "BQuarterEnd",
+ "QuarterOffset",
+ "LastWeekOfMonth",
+ "WeekOfMonth",
+ "SemiMonthBegin",
+ "SemiMonthEnd",
+ "SemiMonthOffset",
+ "CustomBusinessMonthBegin",
+ "CustomBusinessMonthEnd",
+ "BusinessMonthBegin",
+ "BusinessMonthEnd",
+ "MonthBegin",
+ "MonthEnd",
+ "MonthOffset",
+ "CustomBusinessHour",
+ "CustomBusinessDay",
+ "BusinessHour",
+ "BusinessDay",
+ "DateOffset",
}
CAP_EXCEPTIONS_DICT = {word.lower(): word for word in CAPITALIZATION_EXCEPTIONS}
@@ -69,6 +123,11 @@ def correct_title_capitalization(title: str) -> str:
Correctly capitalized heading.
"""
+ # Skip modification no matter what if title begins by ":" to exclude specific
+ # syntax that is needed to build links.
+ if title[0] == ":":
+ return title
+
# Strip all non-word characters from the beginning of the title to the
# first word character.
correct_title: str = re.sub(r"^\W*", "", title).capitalize()
| Regarding issue #32550. Changes to documentation folder doc/source/development to capitalise title strings and keep keyword exceptions as is | https://api.github.com/repos/pandas-dev/pandas/pulls/33121 | 2020-03-29T15:50:25Z | 2020-04-03T00:12:37Z | 2020-04-03T00:12:37Z | 2020-04-17T11:08:20Z |
Fix ambiguous reference to "previous" section | diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index c6d9a48fcf8ed..aa93f37a313f9 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -7,8 +7,8 @@
==============================
Here we discuss a lot of the essential functionality common to the pandas data
-structures. Here's how to create some of the objects used in the examples from
-the previous section:
+structures. To begin, let's create some example objects like we did in
+the :ref:`10 minutes to pandas <10min>` section:
.. ipython:: python
| This section originally came just after "10 minutes to pandas",
but now it's after the "Getting started tutorials".
Also, the examples have diverged a bit, so let's not pretend
that we are creating the same objects.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33119 | 2020-03-29T13:59:18Z | 2020-03-29T15:31:04Z | 2020-03-29T15:31:03Z | 2020-03-29T18:19:53Z |
BUG: conversion of empty DataFrame to SparseDtype (#33113) | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 567b6853bd633..3ff4e4e7da636 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1068,6 +1068,7 @@ Sparse
- Bug in :meth:`Series.sum` with ``SparseArray`` raises ``TypeError`` (:issue:`25777`)
- Bug where :class:`DataFrame` containing :class:`SparseArray` filled with ``NaN`` when indexed by a list-like (:issue:`27781`, :issue:`29563`)
- The repr of :class:`SparseDtype` now includes the repr of its ``fill_value`` attribute. Previously it used ``fill_value``'s string representation (:issue:`34352`)
+- Bug where empty :class:`DataFrame` could not be cast to :class:`SparseDtype` (:issue:`33113`)
ExtensionArray
^^^^^^^^^^^^^^
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1404d225eea97..0fd4c6c8c1fee 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5548,6 +5548,10 @@ def astype(
new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors,)
return self._constructor(new_data).__finalize__(self, method="astype")
+ # GH 33113: handle empty frame or series
+ if not results:
+ return self.copy()
+
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
diff --git a/pandas/tests/extension/base/casting.py b/pandas/tests/extension/base/casting.py
index 567a62a8b33a5..3aaf040a4279b 100644
--- a/pandas/tests/extension/base/casting.py
+++ b/pandas/tests/extension/base/casting.py
@@ -50,3 +50,9 @@ def test_to_numpy(self, data):
result = pd.Series(data).to_numpy()
self.assert_equal(result, expected)
+
+ def test_astype_empty_dataframe(self, dtype):
+ # https://github.com/pandas-dev/pandas/issues/33113
+ df = pd.DataFrame()
+ result = df.astype(dtype)
+ self.assert_frame_equal(result, df)
diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py
index b06c3d72a2c77..b0fd0496ea81e 100644
--- a/pandas/tests/frame/methods/test_astype.py
+++ b/pandas/tests/frame/methods/test_astype.py
@@ -557,3 +557,11 @@ def test_astype_dt64tz_to_str(self, timezone_frame):
assert (
"2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00"
) in result
+
+ def test_astype_empty_dtype_dict(self):
+ # issue mentioned further down in the following issue's thread
+ # https://github.com/pandas-dev/pandas/issues/33113
+ df = DataFrame()
+ result = df.astype(dict())
+ tm.assert_frame_equal(result, df)
+ assert result is not df
| - [x] closes #33113
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/33118 | 2020-03-29T13:54:40Z | 2020-06-25T23:13:00Z | 2020-06-25T23:13:00Z | 2020-06-25T23:13:05Z |
DOC: Partial fix SA04 errors in docstrings #28792 | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 8deeb415c17c9..1aa1db52daa58 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -839,7 +839,8 @@ def style(self) -> "Styler":
See Also
--------
- io.formats.style.Styler
+ io.formats.style.Styler : Helps style a DataFrame or Series according to the
+ data with HTML and CSS.
"""
from pandas.io.formats.style import Styler
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 83064fe22eaff..d1e2e580ad6b0 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -203,10 +203,14 @@ class Index(IndexOpsMixin, PandasObject):
--------
RangeIndex : Index implementing a monotonic integer range.
CategoricalIndex : Index of :class:`Categorical` s.
- MultiIndex : A multi-level, or hierarchical, Index.
+ MultiIndex : A multi-level, or hierarchical Index.
IntervalIndex : An Index of :class:`Interval` s.
- DatetimeIndex, TimedeltaIndex, PeriodIndex
- Int64Index, UInt64Index, Float64Index
+ DatetimeIndex : Index of datetime64 data.
+ TimedeltaIndex : Index of timedelta64 data.
+ PeriodIndex : Index of Period data.
+ Int64Index : A special case of :class:`Index` with purely integer labels.
+ UInt64Index : A special case of :class:`Index` with purely unsigned integer labels.
+ Float64Index : A special case of :class:`Index` with purely float labels.
Notes
-----
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index ca1995adc1ea9..b07616b5befe9 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -720,7 +720,7 @@ def inferred_type(self) -> str:
def indexer_at_time(self, time, asof=False):
"""
- Return index locations of index values at particular time of day
+ Return index locations of values at particular time of day
(e.g. 9:30AM).
Parameters
@@ -736,7 +736,9 @@ def indexer_at_time(self, time, asof=False):
See Also
--------
- indexer_between_time, DataFrame.at_time
+ indexer_between_time : Get index locations of values between particular
+ times of day.
+ DataFrame.at_time : Select values at particular time of day.
"""
if asof:
raise NotImplementedError("'asof' argument is not supported")
@@ -777,7 +779,8 @@ def indexer_between_time(
See Also
--------
- indexer_at_time, DataFrame.between_time
+ indexer_at_time : Get index locations of values at particular time of day.
+ DataFrame.between_time : Select values between particular times of day.
"""
start_time = tools.to_time(start_time)
end_time = tools.to_time(end_time)
| - [x] xref #28792
| https://api.github.com/repos/pandas-dev/pandas/pulls/33117 | 2020-03-29T12:30:19Z | 2020-04-02T11:47:57Z | 2020-04-02T11:47:57Z | 2020-04-02T12:38:10Z |
Fix grammar | diff --git a/doc/source/getting_started/intro_tutorials/10_text_data.rst b/doc/source/getting_started/intro_tutorials/10_text_data.rst
index 936d00f68e3f0..4c03a276090d7 100644
--- a/doc/source/getting_started/intro_tutorials/10_text_data.rst
+++ b/doc/source/getting_started/intro_tutorials/10_text_data.rst
@@ -238,7 +238,7 @@ a ``dictionary`` to define the mapping ``{from : to}``.
</ul>
.. warning::
- There is also a :meth:`~Series.str.replace` methods available to replace a
+ There is also a :meth:`~Series.str.replace` method available to replace a
specific set of characters. However, when having a mapping of multiple
values, this would become:
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33116 | 2020-03-29T12:03:13Z | 2020-03-29T15:29:31Z | 2020-03-29T15:29:31Z | 2020-03-29T18:20:18Z |
CLN: update Appender to doc decorator with case __doc__ | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index b7c071a8dfbbf..093c925acbc49 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -31,7 +31,7 @@
from pandas._libs import Timestamp, lib
from pandas._typing import FrameOrSeries
-from pandas.util._decorators import Appender, Substitution
+from pandas.util._decorators import Appender, Substitution, doc
from pandas.core.dtypes.cast import (
maybe_cast_result,
@@ -633,7 +633,7 @@ def nunique(self, dropna: bool = True) -> Series:
result = Series(res, index=ri, name=self._selection_name)
return self._reindex_output(result, fill_value=0)
- @Appender(Series.describe.__doc__)
+ @doc(Series.describe)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 86171944d0c78..ebdb0062491be 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -37,7 +37,7 @@ class providing the base-class of operations.
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
-from pandas.util._decorators import Appender, Substitution, cache_readonly
+from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
@@ -1420,7 +1420,7 @@ def ohlc(self) -> DataFrame:
"""
return self._apply_to_column_groupbys(lambda x: x._cython_agg_general("ohlc"))
- @Appender(DataFrame.describe.__doc__)
+ @doc(DataFrame.describe)
def describe(self, **kwargs):
with _group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
@@ -2509,7 +2509,7 @@ def _reindex_output(
GroupBy._add_numeric_operations()
-@Appender(GroupBy.__doc__)
+@doc(GroupBy)
def get_groupby(
obj: NDFrame,
by: Optional[_KeysArgType] = None,
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f6a422180b0df..2a5dfff35e4a5 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -15,7 +15,7 @@
from pandas._typing import Label
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
-from pandas.util._decorators import Appender, Substitution, cache_readonly
+from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes import concat as _concat
from pandas.core.dtypes.cast import (
@@ -3835,7 +3835,7 @@ def values(self) -> np.ndarray:
return self._data.view(np.ndarray)
@cache_readonly
- @Appender(IndexOpsMixin.array.__doc__) # type: ignore
+ @doc(IndexOpsMixin.array) # type: ignore
def array(self) -> ExtensionArray:
array = self._data
if isinstance(array, np.ndarray):
@@ -3876,7 +3876,7 @@ def _get_engine_target(self) -> np.ndarray:
"""
return self._values
- @Appender(IndexOpsMixin.memory_usage.__doc__)
+ @doc(IndexOpsMixin.memory_usage)
def memory_usage(self, deep: bool = False) -> int:
result = super().memory_usage(deep=deep)
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 2cae09ed08f36..77c4e9e7a3330 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -9,7 +9,7 @@
from pandas._libs.hashtable import duplicated_int64
from pandas._libs.lib import no_default
from pandas._typing import Label
-from pandas.util._decorators import Appender, cache_readonly
+from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
ensure_platform_int,
@@ -241,7 +241,7 @@ def _simple_new(cls, values: Categorical, name: Label = None):
# --------------------------------------------------------------------
- @Appender(Index._shallow_copy.__doc__)
+ @doc(Index._shallow_copy)
def _shallow_copy(self, values=None, name: Label = no_default):
name = self.name if name is no_default else name
@@ -354,7 +354,7 @@ def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
- @Appender(Index.__contains__.__doc__)
+ @doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
# if key is a NaN, check if any NaN is in self.
if is_scalar(key) and isna(key):
@@ -363,7 +363,7 @@ def __contains__(self, key: Any) -> bool:
hash(key)
return contains(self, key, container=self._engine)
- @Appender(Index.astype.__doc__)
+ @doc(Index.astype)
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
@@ -382,7 +382,7 @@ def _isnan(self):
""" return if each value is nan"""
return self._data.codes == -1
- @Appender(Index.fillna.__doc__)
+ @doc(Index.fillna)
def fillna(self, value, downcast=None):
self._assert_can_do_op(value)
return CategoricalIndex(self._data.fillna(value), name=self.name)
@@ -395,7 +395,7 @@ def _engine(self):
codes = self.codes
return self._engine_type(lambda: codes, len(self))
- @Appender(Index.unique.__doc__)
+ @doc(Index.unique)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
@@ -404,7 +404,7 @@ def unique(self, level=None):
# of result, not self.
return type(self)._simple_new(result, name=self.name)
- @Appender(Index.duplicated.__doc__)
+ @doc(Index.duplicated)
def duplicated(self, keep="first"):
codes = self.codes.astype("i8")
return duplicated_int64(codes, keep)
@@ -418,7 +418,7 @@ def _maybe_cast_indexer(self, key):
code = self.codes.dtype.type(code)
return code
- @Appender(Index.where.__doc__)
+ @doc(Index.where)
def where(self, cond, other=None):
# TODO: Investigate an alternative implementation with
# 1. copy the underlying Categorical
@@ -569,7 +569,7 @@ def get_indexer_non_unique(self, target):
indexer, missing = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer), missing
- @Appender(Index._convert_list_indexer.__doc__)
+ @doc(Index._convert_list_indexer)
def _convert_list_indexer(self, keyarr):
# Return our indexer or raise if all of the values are not included in
# the categories
@@ -586,7 +586,7 @@ def _convert_list_indexer(self, keyarr):
return self.get_indexer(keyarr)
- @Appender(Index._convert_arr_indexer.__doc__)
+ @doc(Index._convert_arr_indexer)
def _convert_arr_indexer(self, keyarr):
keyarr = com.asarray_tuplesafe(keyarr)
@@ -595,7 +595,7 @@ def _convert_arr_indexer(self, keyarr):
return self._shallow_copy(keyarr)
- @Appender(Index._convert_index_indexer.__doc__)
+ @doc(Index._convert_index_indexer)
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)
@@ -608,7 +608,7 @@ def take_nd(self, *args, **kwargs):
)
return self.take(*args, **kwargs)
- @Appender(Index._maybe_cast_slice_bound.__doc__)
+ @doc(Index._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side, kind):
if kind == "loc":
return label
| - [x] working for #31942
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33112 | 2020-03-29T04:29:38Z | 2020-03-30T03:39:57Z | 2020-03-30T03:39:57Z | 2020-03-30T15:39:07Z |
Bug 29764 groupby loses index name sometimes | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index b16ca0a80c5b4..010c8a22dcc9f 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -132,9 +132,8 @@ Plotting
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
+- Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`)
-
--
-
Reshaping
^^^^^^^^^
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index c50b753cf3293..bc46cc041cbf3 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1728,9 +1728,10 @@ def _wrap_aggregated_output(
-------
DataFrame
"""
+ idx_name = output.pop("idx_name", None)
indexed_output = {key.position: val for key, val in output.items()}
name = self._obj_with_exclusions._get_axis(1 - self.axis).name
- columns = Index([key.label for key in output], name=name)
+ columns = Index([key.label for key in output], name=idx_name)
result = self.obj._constructor(indexed_output)
result.columns = columns
@@ -1762,8 +1763,9 @@ def _wrap_transformed_output(
-------
DataFrame
"""
+ idx_name = output.pop("idx_name", None)
indexed_output = {key.position: val for key, val in output.items()}
- columns = Index(key.label for key in output)
+ columns = Index([key.label for key in output], name=idx_name)
result = self.obj._constructor(indexed_output)
result.columns = columns
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index ac45222625569..5a20af9607cd9 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2450,8 +2450,11 @@ def _get_cythonized_result(
grouper = self.grouper
labels, _, ngroups = grouper.group_info
- output: Dict[base.OutputKey, np.ndarray] = {}
+ output: Dict[Union[base.OutputKey, str], Union[np.ndarray, str]] = {}
base_func = getattr(libgroupby, how)
+ obj = self._selected_obj
+ if isinstance(obj, DataFrame):
+ output["idx_name"] = obj.columns.name
error_msg = ""
for idx, obj in enumerate(self._iterate_slices()):
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 8c51ebf89f5c0..19dd8a1cee355 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -2069,3 +2069,26 @@ def test_group_on_two_row_multiindex_returns_one_tuple_key():
assert len(result) == 1
key = (1, 2)
assert (result[key] == expected[key]).all()
+
+
+@pytest.mark.parametrize("func", ["sum", "any", "shift"])
+def test_groupby_column_index_name_lost(func):
+ # GH: 29764 groupby loses index sometimes
+ expected = pd.Index(["a"], name="idx")
+ df = pd.DataFrame([[1]], columns=expected)
+ df_grouped = df.groupby([1])
+ result = getattr(df_grouped, func)().columns
+ tm.assert_index_equal(result, expected)
+
+
+@pytest.mark.parametrize("func", ["ffill", "bfill"])
+def test_groupby_column_index_name_lost_fill_funcs(func):
+ # GH: 29764 groupby loses index sometimes
+ df = pd.DataFrame(
+ [[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]],
+ columns=pd.Index(["type", "a", "b"], name="idx"),
+ )
+ df_grouped = df.groupby(["type"])[["a", "b"]]
+ result = getattr(df_grouped, func)().columns
+ expected = pd.Index(["a", "b"], name="idx")
+ tm.assert_index_equal(result, expected)
| - [x] closes #29764
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry´
Some group by functions lost the column index name. For the functions running into ```_get_cythonized_result``` in groupby.py, the index name was just ignored when defining the column names. So the following functions ```_wrap_aggregated_output``` and ```_wrap_transformed_output``` in the class DataFrameGroupBy had not acces to this information, because it was already lost there. I collected the information beforehand and defined the Index Name accordingly.
We could refactor both methods a bit at a later stage, because at it is right now (and was before) the first few lines are duplicates. | https://api.github.com/repos/pandas-dev/pandas/pulls/33111 | 2020-03-29T01:19:03Z | 2020-09-04T16:35:05Z | null | 2020-09-05T19:04:06Z |
REF: push concat logic out of internals and into concat_compat | diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 49034616b374a..ecfaac2210807 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -97,6 +97,9 @@ def is_nonempty(x) -> bool:
# Creating an empty array directly is tempting, but the winnings would be
# marginal given that it would still require shape & dtype calculation and
# np.concatenate which has them both implemented is compiled.
+ non_empties = [x for x in to_concat if is_nonempty(x)]
+ if non_empties and axis == 0:
+ to_concat = non_empties
typs = get_dtype_kinds(to_concat)
_contains_datetime = any(typ.startswith("datetime") for typ in typs)
@@ -114,10 +117,17 @@ def is_nonempty(x) -> bool:
elif "sparse" in typs:
return _concat_sparse(to_concat, axis=axis, typs=typs)
- all_empty = all(not is_nonempty(x) for x in to_concat)
- if any(is_extension_array_dtype(x) for x in to_concat) and axis == 1:
+ all_empty = not len(non_empties)
+ single_dtype = len({x.dtype for x in to_concat}) == 1
+ any_ea = any(is_extension_array_dtype(x.dtype) for x in to_concat)
+
+ if any_ea and axis == 1:
to_concat = [np.atleast_2d(x.astype("object")) for x in to_concat]
+ elif any_ea and single_dtype and axis == 0:
+ cls = type(to_concat[0])
+ return cls._concat_same_type(to_concat)
+
if all_empty:
# we have all empties, but may need to coerce the result dtype to
# object if we have non-numeric type operands (numpy would otherwise
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index dda932cafe73b..9630abf61f692 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1649,21 +1649,11 @@ def concat(self, to_concat, new_axis: Index) -> "SingleBlockManager":
-------
SingleBlockManager
"""
- non_empties = [x for x in to_concat if len(x) > 0]
- # check if all series are of the same block type:
- if len(non_empties) > 0:
- blocks = [obj.blocks[0] for obj in non_empties]
- if len({b.dtype for b in blocks}) == 1:
- new_block = blocks[0].concat_same_type(blocks)
- else:
- values = [x.values for x in blocks]
- values = concat_compat(values)
- new_block = make_block(values, placement=slice(0, len(values), 1))
- else:
- values = [x._block.values for x in to_concat]
- values = concat_compat(values)
- new_block = make_block(values, placement=slice(0, len(values), 1))
+ blocks = [obj.blocks[0] for obj in to_concat]
+ values = concat_compat([x.values for x in blocks])
+
+ new_block = make_block(values, placement=slice(0, len(values), 1))
mgr = SingleBlockManager(new_block, new_axis)
return mgr
diff --git a/pandas/tests/dtypes/test_concat.py b/pandas/tests/dtypes/test_concat.py
index 02daa185b1cdb..1fbbd3356ae13 100644
--- a/pandas/tests/dtypes/test_concat.py
+++ b/pandas/tests/dtypes/test_concat.py
@@ -2,7 +2,9 @@
import pandas.core.dtypes.concat as _concat
+import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, TimedeltaIndex
+import pandas._testing as tm
@pytest.mark.parametrize(
@@ -76,3 +78,13 @@ def test_get_dtype_kinds(index_or_series, to_concat, expected):
def test_get_dtype_kinds_period(to_concat, expected):
result = _concat.get_dtype_kinds(to_concat)
assert result == set(expected)
+
+
+def test_concat_mismatched_categoricals_with_empty():
+ # concat_compat behavior on series._values should match pd.concat on series
+ ser1 = Series(["a", "b", "c"], dtype="category")
+ ser2 = Series([], dtype="category")
+
+ result = _concat.concat_compat([ser1._values, ser2._values])
+ expected = pd.concat([ser1, ser2])._values
+ tm.assert_categorical_equal(result, expected)
diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py
index 26606d7e799e8..1960e3d09245c 100644
--- a/pandas/tests/extension/test_external_block.py
+++ b/pandas/tests/extension/test_external_block.py
@@ -2,7 +2,7 @@
import pytest
import pandas as pd
-from pandas.core.internals import BlockManager, SingleBlockManager
+from pandas.core.internals import BlockManager
from pandas.core.internals.blocks import ExtensionBlock
@@ -33,17 +33,6 @@ def df():
return pd.DataFrame(block_manager)
-def test_concat_series():
- # GH17728
- values = np.arange(3, dtype="int64")
- block = CustomBlock(values, placement=slice(0, 3))
- mgr = SingleBlockManager(block, pd.RangeIndex(3))
- s = pd.Series(mgr, pd.RangeIndex(3), fastpath=True)
-
- res = pd.concat([s, s])
- assert isinstance(res._data.blocks[0], CustomBlock)
-
-
def test_concat_dataframe(df):
# GH17728
res = pd.concat([df, df])
| This changes some empty-array behavior for concat_compat to match the behavior of pd.concat(list_of_series), see new test.
The follow-up to this basically gets concat out of internals altogether. | https://api.github.com/repos/pandas-dev/pandas/pulls/33110 | 2020-03-29T00:38:07Z | 2020-03-29T14:54:03Z | 2020-03-29T14:54:03Z | 2020-03-29T15:12:22Z |
REF: DataFrame delitem, take, pop, filter tests | diff --git a/pandas/tests/frame/indexing/test_delitem.py b/pandas/tests/frame/indexing/test_delitem.py
new file mode 100644
index 0000000000000..f6c7b6ed5d14d
--- /dev/null
+++ b/pandas/tests/frame/indexing/test_delitem.py
@@ -0,0 +1,57 @@
+import re
+
+import numpy as np
+import pytest
+
+from pandas import DataFrame, MultiIndex
+
+
+class TestDataFrameDelItem:
+ def test_delitem(self, float_frame):
+ del float_frame["A"]
+ assert "A" not in float_frame
+
+ def test_delitem_multiindex(self):
+ midx = MultiIndex.from_product([["A", "B"], [1, 2]])
+ df = DataFrame(np.random.randn(4, 4), columns=midx)
+ assert len(df.columns) == 4
+ assert ("A",) in df.columns
+ assert "A" in df.columns
+
+ result = df["A"]
+ assert isinstance(result, DataFrame)
+ del df["A"]
+
+ assert len(df.columns) == 2
+
+ # A still in the levels, BUT get a KeyError if trying
+ # to delete
+ assert ("A",) not in df.columns
+ with pytest.raises(KeyError, match=re.escape("('A',)")):
+ del df[("A",)]
+
+ # behavior of dropped/deleted MultiIndex levels changed from
+ # GH 2770 to GH 19027: MultiIndex no longer '.__contains__'
+ # levels which are dropped/deleted
+ assert "A" not in df.columns
+ with pytest.raises(KeyError, match=re.escape("('A',)")):
+ del df["A"]
+
+ def test_delitem_corner(self, float_frame):
+ f = float_frame.copy()
+ del f["D"]
+ assert len(f.columns) == 3
+ with pytest.raises(KeyError, match=r"^'D'$"):
+ del f["D"]
+ del f["B"]
+ assert len(f.columns) == 2
+
+ def test_delitem_col_still_multiindex(self):
+ arrays = [["a", "b", "c", "top"], ["", "", "", "OD"], ["", "", "", "wx"]]
+
+ tuples = sorted(zip(*arrays))
+ index = MultiIndex.from_tuples(tuples)
+
+ df = DataFrame(np.random.randn(3, 4), columns=index)
+ del df[("a", "", "")]
+ assert isinstance(df.columns, MultiIndex)
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index a7aacc9e0968a..4fa5e4196ae5b 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -854,15 +854,6 @@ def test_getitem_empty_frame_with_boolean(self):
df2 = df[df > 0]
tm.assert_frame_equal(df, df2)
- def test_delitem_corner(self, float_frame):
- f = float_frame.copy()
- del f["D"]
- assert len(f.columns) == 3
- with pytest.raises(KeyError, match=r"^'D'$"):
- del f["D"]
- del f["B"]
- assert len(f.columns) == 2
-
def test_slice_floats(self):
index = [52195.504153, 52196.303147, 52198.369883]
df = DataFrame(np.random.rand(3, 2), index=index)
diff --git a/pandas/tests/frame/indexing/test_take.py b/pandas/tests/frame/indexing/test_take.py
new file mode 100644
index 0000000000000..3b59d3cf10658
--- /dev/null
+++ b/pandas/tests/frame/indexing/test_take.py
@@ -0,0 +1,88 @@
+import pytest
+
+import pandas._testing as tm
+
+
+class TestDataFrameTake:
+ def test_take(self, float_frame):
+ # homogeneous
+ order = [3, 1, 2, 0]
+ for df in [float_frame]:
+
+ result = df.take(order, axis=0)
+ expected = df.reindex(df.index.take(order))
+ tm.assert_frame_equal(result, expected)
+
+ # axis = 1
+ result = df.take(order, axis=1)
+ expected = df.loc[:, ["D", "B", "C", "A"]]
+ tm.assert_frame_equal(result, expected, check_names=False)
+
+ # negative indices
+ order = [2, 1, -1]
+ for df in [float_frame]:
+
+ result = df.take(order, axis=0)
+ expected = df.reindex(df.index.take(order))
+ tm.assert_frame_equal(result, expected)
+
+ result = df.take(order, axis=0)
+ tm.assert_frame_equal(result, expected)
+
+ # axis = 1
+ result = df.take(order, axis=1)
+ expected = df.loc[:, ["C", "B", "D"]]
+ tm.assert_frame_equal(result, expected, check_names=False)
+
+ # illegal indices
+ msg = "indices are out-of-bounds"
+ with pytest.raises(IndexError, match=msg):
+ df.take([3, 1, 2, 30], axis=0)
+ with pytest.raises(IndexError, match=msg):
+ df.take([3, 1, 2, -31], axis=0)
+ with pytest.raises(IndexError, match=msg):
+ df.take([3, 1, 2, 5], axis=1)
+ with pytest.raises(IndexError, match=msg):
+ df.take([3, 1, 2, -5], axis=1)
+
+ def test_take_mixed_type(self, float_string_frame):
+
+ # mixed-dtype
+ order = [4, 1, 2, 0, 3]
+ for df in [float_string_frame]:
+
+ result = df.take(order, axis=0)
+ expected = df.reindex(df.index.take(order))
+ tm.assert_frame_equal(result, expected)
+
+ # axis = 1
+ result = df.take(order, axis=1)
+ expected = df.loc[:, ["foo", "B", "C", "A", "D"]]
+ tm.assert_frame_equal(result, expected)
+
+ # negative indices
+ order = [4, 1, -2]
+ for df in [float_string_frame]:
+
+ result = df.take(order, axis=0)
+ expected = df.reindex(df.index.take(order))
+ tm.assert_frame_equal(result, expected)
+
+ # axis = 1
+ result = df.take(order, axis=1)
+ expected = df.loc[:, ["foo", "B", "D"]]
+ tm.assert_frame_equal(result, expected)
+
+ def test_take_mixed_numeric(self, mixed_float_frame, mixed_int_frame):
+ # by dtype
+ order = [1, 2, 0, 3]
+ for df in [mixed_float_frame, mixed_int_frame]:
+
+ result = df.take(order, axis=0)
+ expected = df.reindex(df.index.take(order))
+ tm.assert_frame_equal(result, expected)
+
+ # axis = 1
+ result = df.take(order, axis=1)
+ expected = df.loc[:, ["B", "C", "A", "D"]]
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_filter.py b/pandas/tests/frame/methods/test_filter.py
new file mode 100644
index 0000000000000..569b2fe21d1c2
--- /dev/null
+++ b/pandas/tests/frame/methods/test_filter.py
@@ -0,0 +1,139 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+class TestDataFrameFilter:
+ def test_filter(self, float_frame, float_string_frame):
+ # Items
+ filtered = float_frame.filter(["A", "B", "E"])
+ assert len(filtered.columns) == 2
+ assert "E" not in filtered
+
+ filtered = float_frame.filter(["A", "B", "E"], axis="columns")
+ assert len(filtered.columns) == 2
+ assert "E" not in filtered
+
+ # Other axis
+ idx = float_frame.index[0:4]
+ filtered = float_frame.filter(idx, axis="index")
+ expected = float_frame.reindex(index=idx)
+ tm.assert_frame_equal(filtered, expected)
+
+ # like
+ fcopy = float_frame.copy()
+ fcopy["AA"] = 1
+
+ filtered = fcopy.filter(like="A")
+ assert len(filtered.columns) == 2
+ assert "AA" in filtered
+
+ # like with ints in column names
+ df = DataFrame(0.0, index=[0, 1, 2], columns=[0, 1, "_A", "_B"])
+ filtered = df.filter(like="_")
+ assert len(filtered.columns) == 2
+
+ # regex with ints in column names
+ # from PR #10384
+ df = DataFrame(0.0, index=[0, 1, 2], columns=["A1", 1, "B", 2, "C"])
+ expected = DataFrame(
+ 0.0, index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object)
+ )
+ filtered = df.filter(regex="^[0-9]+$")
+ tm.assert_frame_equal(filtered, expected)
+
+ expected = DataFrame(0.0, index=[0, 1, 2], columns=[0, "0", 1, "1"])
+ # shouldn't remove anything
+ filtered = expected.filter(regex="^[0-9]+$")
+ tm.assert_frame_equal(filtered, expected)
+
+ # pass in None
+ with pytest.raises(TypeError, match="Must pass"):
+ float_frame.filter()
+ with pytest.raises(TypeError, match="Must pass"):
+ float_frame.filter(items=None)
+ with pytest.raises(TypeError, match="Must pass"):
+ float_frame.filter(axis=1)
+
+ # test mutually exclusive arguments
+ with pytest.raises(TypeError, match="mutually exclusive"):
+ float_frame.filter(items=["one", "three"], regex="e$", like="bbi")
+ with pytest.raises(TypeError, match="mutually exclusive"):
+ float_frame.filter(items=["one", "three"], regex="e$", axis=1)
+ with pytest.raises(TypeError, match="mutually exclusive"):
+ float_frame.filter(items=["one", "three"], regex="e$")
+ with pytest.raises(TypeError, match="mutually exclusive"):
+ float_frame.filter(items=["one", "three"], like="bbi", axis=0)
+ with pytest.raises(TypeError, match="mutually exclusive"):
+ float_frame.filter(items=["one", "three"], like="bbi")
+
+ # objects
+ filtered = float_string_frame.filter(like="foo")
+ assert "foo" in filtered
+
+ # unicode columns, won't ascii-encode
+ df = float_frame.rename(columns={"B": "\u2202"})
+ filtered = df.filter(like="C")
+ assert "C" in filtered
+
+ def test_filter_regex_search(self, float_frame):
+ fcopy = float_frame.copy()
+ fcopy["AA"] = 1
+
+ # regex
+ filtered = fcopy.filter(regex="[A]+")
+ assert len(filtered.columns) == 2
+ assert "AA" in filtered
+
+ # doesn't have to be at beginning
+ df = DataFrame(
+ {"aBBa": [1, 2], "BBaBB": [1, 2], "aCCa": [1, 2], "aCCaBB": [1, 2]}
+ )
+
+ result = df.filter(regex="BB")
+ exp = df[[x for x in df.columns if "BB" in x]]
+ tm.assert_frame_equal(result, exp)
+
+ @pytest.mark.parametrize(
+ "name,expected",
+ [
+ ("a", DataFrame({"a": [1, 2]})),
+ ("a", DataFrame({"a": [1, 2]})),
+ ("あ", DataFrame({"あ": [3, 4]})),
+ ],
+ )
+ def test_filter_unicode(self, name, expected):
+ # GH13101
+ df = DataFrame({"a": [1, 2], "あ": [3, 4]})
+
+ tm.assert_frame_equal(df.filter(like=name), expected)
+ tm.assert_frame_equal(df.filter(regex=name), expected)
+
+ @pytest.mark.parametrize("name", ["a", "a"])
+ def test_filter_bytestring(self, name):
+ # GH13101
+ df = DataFrame({b"a": [1, 2], b"b": [3, 4]})
+ expected = DataFrame({b"a": [1, 2]})
+
+ tm.assert_frame_equal(df.filter(like=name), expected)
+ tm.assert_frame_equal(df.filter(regex=name), expected)
+
+ def test_filter_corner(self):
+ empty = DataFrame()
+
+ result = empty.filter([])
+ tm.assert_frame_equal(result, empty)
+
+ result = empty.filter(like="foo")
+ tm.assert_frame_equal(result, empty)
+
+ def test_filter_regex_non_string(self):
+ # GH#5798 trying to filter on non-string columns should drop,
+ # not raise
+ df = pd.DataFrame(np.random.random((3, 2)), columns=["STRING", 123])
+ result = df.filter(regex="STRING")
+ expected = df[["STRING"]]
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/frame/methods/test_pop.py b/pandas/tests/frame/methods/test_pop.py
new file mode 100644
index 0000000000000..fccb3f10dde45
--- /dev/null
+++ b/pandas/tests/frame/methods/test_pop.py
@@ -0,0 +1,40 @@
+from pandas import DataFrame, Series
+import pandas._testing as tm
+
+
+class TestDataFramePop:
+ def test_pop(self, float_frame):
+ float_frame.columns.name = "baz"
+
+ float_frame.pop("A")
+ assert "A" not in float_frame
+
+ float_frame["foo"] = "bar"
+ float_frame.pop("foo")
+ assert "foo" not in float_frame
+ assert float_frame.columns.name == "baz"
+
+ # gh-10912: inplace ops cause caching issue
+ a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"], index=["X", "Y"])
+ b = a.pop("B")
+ b += 1
+
+ # original frame
+ expected = DataFrame([[1, 3], [4, 6]], columns=["A", "C"], index=["X", "Y"])
+ tm.assert_frame_equal(a, expected)
+
+ # result
+ expected = Series([2, 5], index=["X", "Y"], name="B") + 1
+ tm.assert_series_equal(b, expected)
+
+ def test_pop_non_unique_cols(self):
+ df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
+ df.columns = ["a", "b", "a"]
+
+ res = df.pop("a")
+ assert type(res) == DataFrame
+ assert len(res) == 2
+ assert len(df.columns) == 1
+ assert "b" in df.columns
+ assert "a" not in df.columns
+ assert len(df.index) == 2
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index d1d55d38f4a9a..42fb722c92b26 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -12,16 +12,6 @@ class TestDataFrameSelectReindex:
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
- def test_delitem_col_still_multiindex(self):
- arrays = [["a", "b", "c", "top"], ["", "", "", "OD"], ["", "", "", "wx"]]
-
- tuples = sorted(zip(*arrays))
- index = MultiIndex.from_tuples(tuples)
-
- df = DataFrame(np.random.randn(3, 4), columns=index)
- del df[("a", "", "")]
- assert isinstance(df.columns, MultiIndex)
-
def test_merge_join_different_levels(self):
# GH 9455
@@ -410,220 +400,6 @@ def test_align_int_fill_bug(self):
expected = df2 - df2.mean()
tm.assert_frame_equal(result, expected)
- def test_filter(self, float_frame, float_string_frame):
- # Items
- filtered = float_frame.filter(["A", "B", "E"])
- assert len(filtered.columns) == 2
- assert "E" not in filtered
-
- filtered = float_frame.filter(["A", "B", "E"], axis="columns")
- assert len(filtered.columns) == 2
- assert "E" not in filtered
-
- # Other axis
- idx = float_frame.index[0:4]
- filtered = float_frame.filter(idx, axis="index")
- expected = float_frame.reindex(index=idx)
- tm.assert_frame_equal(filtered, expected)
-
- # like
- fcopy = float_frame.copy()
- fcopy["AA"] = 1
-
- filtered = fcopy.filter(like="A")
- assert len(filtered.columns) == 2
- assert "AA" in filtered
-
- # like with ints in column names
- df = DataFrame(0.0, index=[0, 1, 2], columns=[0, 1, "_A", "_B"])
- filtered = df.filter(like="_")
- assert len(filtered.columns) == 2
-
- # regex with ints in column names
- # from PR #10384
- df = DataFrame(0.0, index=[0, 1, 2], columns=["A1", 1, "B", 2, "C"])
- expected = DataFrame(
- 0.0, index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object)
- )
- filtered = df.filter(regex="^[0-9]+$")
- tm.assert_frame_equal(filtered, expected)
-
- expected = DataFrame(0.0, index=[0, 1, 2], columns=[0, "0", 1, "1"])
- # shouldn't remove anything
- filtered = expected.filter(regex="^[0-9]+$")
- tm.assert_frame_equal(filtered, expected)
-
- # pass in None
- with pytest.raises(TypeError, match="Must pass"):
- float_frame.filter()
- with pytest.raises(TypeError, match="Must pass"):
- float_frame.filter(items=None)
- with pytest.raises(TypeError, match="Must pass"):
- float_frame.filter(axis=1)
-
- # test mutually exclusive arguments
- with pytest.raises(TypeError, match="mutually exclusive"):
- float_frame.filter(items=["one", "three"], regex="e$", like="bbi")
- with pytest.raises(TypeError, match="mutually exclusive"):
- float_frame.filter(items=["one", "three"], regex="e$", axis=1)
- with pytest.raises(TypeError, match="mutually exclusive"):
- float_frame.filter(items=["one", "three"], regex="e$")
- with pytest.raises(TypeError, match="mutually exclusive"):
- float_frame.filter(items=["one", "three"], like="bbi", axis=0)
- with pytest.raises(TypeError, match="mutually exclusive"):
- float_frame.filter(items=["one", "three"], like="bbi")
-
- # objects
- filtered = float_string_frame.filter(like="foo")
- assert "foo" in filtered
-
- # unicode columns, won't ascii-encode
- df = float_frame.rename(columns={"B": "\u2202"})
- filtered = df.filter(like="C")
- assert "C" in filtered
-
- def test_filter_regex_search(self, float_frame):
- fcopy = float_frame.copy()
- fcopy["AA"] = 1
-
- # regex
- filtered = fcopy.filter(regex="[A]+")
- assert len(filtered.columns) == 2
- assert "AA" in filtered
-
- # doesn't have to be at beginning
- df = DataFrame(
- {"aBBa": [1, 2], "BBaBB": [1, 2], "aCCa": [1, 2], "aCCaBB": [1, 2]}
- )
-
- result = df.filter(regex="BB")
- exp = df[[x for x in df.columns if "BB" in x]]
- tm.assert_frame_equal(result, exp)
-
- @pytest.mark.parametrize(
- "name,expected",
- [
- ("a", DataFrame({"a": [1, 2]})),
- ("a", DataFrame({"a": [1, 2]})),
- ("あ", DataFrame({"あ": [3, 4]})),
- ],
- )
- def test_filter_unicode(self, name, expected):
- # GH13101
- df = DataFrame({"a": [1, 2], "あ": [3, 4]})
-
- tm.assert_frame_equal(df.filter(like=name), expected)
- tm.assert_frame_equal(df.filter(regex=name), expected)
-
- @pytest.mark.parametrize("name", ["a", "a"])
- def test_filter_bytestring(self, name):
- # GH13101
- df = DataFrame({b"a": [1, 2], b"b": [3, 4]})
- expected = DataFrame({b"a": [1, 2]})
-
- tm.assert_frame_equal(df.filter(like=name), expected)
- tm.assert_frame_equal(df.filter(regex=name), expected)
-
- def test_filter_corner(self):
- empty = DataFrame()
-
- result = empty.filter([])
- tm.assert_frame_equal(result, empty)
-
- result = empty.filter(like="foo")
- tm.assert_frame_equal(result, empty)
-
- def test_filter_regex_non_string(self):
- # GH#5798 trying to filter on non-string columns should drop,
- # not raise
- df = pd.DataFrame(np.random.random((3, 2)), columns=["STRING", 123])
- result = df.filter(regex="STRING")
- expected = df[["STRING"]]
- tm.assert_frame_equal(result, expected)
-
- def test_take(self, float_frame):
- # homogeneous
- order = [3, 1, 2, 0]
- for df in [float_frame]:
-
- result = df.take(order, axis=0)
- expected = df.reindex(df.index.take(order))
- tm.assert_frame_equal(result, expected)
-
- # axis = 1
- result = df.take(order, axis=1)
- expected = df.loc[:, ["D", "B", "C", "A"]]
- tm.assert_frame_equal(result, expected, check_names=False)
-
- # negative indices
- order = [2, 1, -1]
- for df in [float_frame]:
-
- result = df.take(order, axis=0)
- expected = df.reindex(df.index.take(order))
- tm.assert_frame_equal(result, expected)
-
- result = df.take(order, axis=0)
- tm.assert_frame_equal(result, expected)
-
- # axis = 1
- result = df.take(order, axis=1)
- expected = df.loc[:, ["C", "B", "D"]]
- tm.assert_frame_equal(result, expected, check_names=False)
-
- # illegal indices
- msg = "indices are out-of-bounds"
- with pytest.raises(IndexError, match=msg):
- df.take([3, 1, 2, 30], axis=0)
- with pytest.raises(IndexError, match=msg):
- df.take([3, 1, 2, -31], axis=0)
- with pytest.raises(IndexError, match=msg):
- df.take([3, 1, 2, 5], axis=1)
- with pytest.raises(IndexError, match=msg):
- df.take([3, 1, 2, -5], axis=1)
-
- def test_take_mixed_type(self, float_string_frame):
-
- # mixed-dtype
- order = [4, 1, 2, 0, 3]
- for df in [float_string_frame]:
-
- result = df.take(order, axis=0)
- expected = df.reindex(df.index.take(order))
- tm.assert_frame_equal(result, expected)
-
- # axis = 1
- result = df.take(order, axis=1)
- expected = df.loc[:, ["foo", "B", "C", "A", "D"]]
- tm.assert_frame_equal(result, expected)
-
- # negative indices
- order = [4, 1, -2]
- for df in [float_string_frame]:
-
- result = df.take(order, axis=0)
- expected = df.reindex(df.index.take(order))
- tm.assert_frame_equal(result, expected)
-
- # axis = 1
- result = df.take(order, axis=1)
- expected = df.loc[:, ["foo", "B", "D"]]
- tm.assert_frame_equal(result, expected)
-
- def test_take_mixed_numeric(self, mixed_float_frame, mixed_int_frame):
- # by dtype
- order = [1, 2, 0, 3]
- for df in [mixed_float_frame, mixed_int_frame]:
-
- result = df.take(order, axis=0)
- expected = df.reindex(df.index.take(order))
- tm.assert_frame_equal(result, expected)
-
- # axis = 1
- result = df.take(order, axis=1)
- expected = df.loc[:, ["B", "C", "A", "D"]]
- tm.assert_frame_equal(result, expected)
-
def test_reindex_boolean(self):
frame = DataFrame(
np.ones((10, 2), dtype=bool), index=np.arange(0, 20, 2), columns=[0, 2]
diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py
index 9d1b6abff6241..e3f2a67c2f469 100644
--- a/pandas/tests/frame/test_mutate_columns.py
+++ b/pandas/tests/frame/test_mutate_columns.py
@@ -1,9 +1,7 @@
-import re
-
import numpy as np
import pytest
-from pandas import DataFrame, MultiIndex, Series
+from pandas import DataFrame, Series
import pandas._testing as tm
# Column add, remove, delete.
@@ -83,69 +81,3 @@ def test_setitem_empty_columns(self):
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
-
- def test_delitem(self, float_frame):
- del float_frame["A"]
- assert "A" not in float_frame
-
- def test_delitem_multiindex(self):
- midx = MultiIndex.from_product([["A", "B"], [1, 2]])
- df = DataFrame(np.random.randn(4, 4), columns=midx)
- assert len(df.columns) == 4
- assert ("A",) in df.columns
- assert "A" in df.columns
-
- result = df["A"]
- assert isinstance(result, DataFrame)
- del df["A"]
-
- assert len(df.columns) == 2
-
- # A still in the levels, BUT get a KeyError if trying
- # to delete
- assert ("A",) not in df.columns
- with pytest.raises(KeyError, match=re.escape("('A',)")):
- del df[("A",)]
-
- # behavior of dropped/deleted MultiIndex levels changed from
- # GH 2770 to GH 19027: MultiIndex no longer '.__contains__'
- # levels which are dropped/deleted
- assert "A" not in df.columns
- with pytest.raises(KeyError, match=re.escape("('A',)")):
- del df["A"]
-
- def test_pop(self, float_frame):
- float_frame.columns.name = "baz"
-
- float_frame.pop("A")
- assert "A" not in float_frame
-
- float_frame["foo"] = "bar"
- float_frame.pop("foo")
- assert "foo" not in float_frame
- assert float_frame.columns.name == "baz"
-
- # gh-10912: inplace ops cause caching issue
- a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"], index=["X", "Y"])
- b = a.pop("B")
- b += 1
-
- # original frame
- expected = DataFrame([[1, 3], [4, 6]], columns=["A", "C"], index=["X", "Y"])
- tm.assert_frame_equal(a, expected)
-
- # result
- expected = Series([2, 5], index=["X", "Y"], name="B") + 1
- tm.assert_series_equal(b, expected)
-
- def test_pop_non_unique_cols(self):
- df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
- df.columns = ["a", "b", "a"]
-
- res = df.pop("a")
- assert type(res) == DataFrame
- assert len(res) == 2
- assert len(df.columns) == 1
- assert "b" in df.columns
- assert "a" not in df.columns
- assert len(df.index) == 2
diff --git a/pandas/tests/series/indexing/test_delitem.py b/pandas/tests/series/indexing/test_delitem.py
new file mode 100644
index 0000000000000..6c7e3f2b06983
--- /dev/null
+++ b/pandas/tests/series/indexing/test_delitem.py
@@ -0,0 +1,49 @@
+import pytest
+
+from pandas import Index, Series
+import pandas._testing as tm
+
+
+class TestSeriesDelItem:
+ def test_delitem(self):
+ # GH#5542
+ # should delete the item inplace
+ s = Series(range(5))
+ del s[0]
+
+ expected = Series(range(1, 5), index=range(1, 5))
+ tm.assert_series_equal(s, expected)
+
+ del s[1]
+ expected = Series(range(2, 5), index=range(2, 5))
+ tm.assert_series_equal(s, expected)
+
+ # only 1 left, del, add, del
+ s = Series(1)
+ del s[0]
+ tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
+ s[0] = 1
+ tm.assert_series_equal(s, Series(1))
+ del s[0]
+ tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
+
+ def test_delitem_object_index(self):
+ # Index(dtype=object)
+ s = Series(1, index=["a"])
+ del s["a"]
+ tm.assert_series_equal(
+ s, Series(dtype="int64", index=Index([], dtype="object"))
+ )
+ s["a"] = 1
+ tm.assert_series_equal(s, Series(1, index=["a"]))
+ del s["a"]
+ tm.assert_series_equal(
+ s, Series(dtype="int64", index=Index([], dtype="object"))
+ )
+
+ def test_delitem_missing_key(self):
+ # empty
+ s = Series(dtype=object)
+
+ with pytest.raises(KeyError, match=r"^0$"):
+ del s[0]
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index 7e73e6366438b..5a648cc588957 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -5,44 +5,6 @@
import pandas._testing as tm
-def test_delitem():
- # GH 5542
- # should delete the item inplace
- s = Series(range(5))
- del s[0]
-
- expected = Series(range(1, 5), index=range(1, 5))
- tm.assert_series_equal(s, expected)
-
- del s[1]
- expected = Series(range(2, 5), index=range(2, 5))
- tm.assert_series_equal(s, expected)
-
- # empty
- s = Series(dtype=object)
-
- with pytest.raises(KeyError, match=r"^0$"):
- del s[0]
-
- # only 1 left, del, add, del
- s = Series(1)
- del s[0]
- tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
- s[0] = 1
- tm.assert_series_equal(s, Series(1))
- del s[0]
- tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
-
- # Index(dtype=object)
- s = Series(1, index=["a"])
- del s["a"]
- tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="object")))
- s["a"] = 1
- tm.assert_series_equal(s, Series(1, index=["a"]))
- del s["a"]
- tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="object")))
-
-
def test_slice_float64():
values = np.arange(10.0, 50.0, 2)
index = Index(values)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33109 | 2020-03-28T23:38:56Z | 2020-03-29T03:24:36Z | 2020-03-29T03:24:36Z | 2020-03-29T15:39:08Z | |
Docstring for show_versions in master | diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 7fc85a04e7d84..757620e4d561d 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -88,6 +88,20 @@ def _get_dependency_info() -> Dict[str, JSONSerializable]:
def show_versions(as_json: Union[str, bool] = False) -> None:
+ """
+ Provides useful information, important for bug reports.
+
+ It comprises info about hosting operation system, pandas version,
+ and versions of other installed relative packages.
+
+ Parameters
+ ----------
+ as_json : str or bool, default False
+ * If False, outputs info in a human readable form to the console.
+ * If str, it will be considered as a path to a file.
+ Info will be written to that file in JSON format.
+ * If True, outputs info in JSON format to the console.
+ """
sys_info = _get_sys_info()
deps = _get_dependency_info()
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33108 | 2020-03-28T22:12:56Z | 2020-03-29T18:12:57Z | null | 2020-03-29T18:12:58Z |
ENH/VIZ: Allowing `s` parameter of scatter plots to be a column name | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 20415bba99476..aeb0eeab0b3ba 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -452,6 +452,7 @@ Other
- Fixed bug in :func:`pandas.testing.assert_series_equal` where dtypes were checked for ``Interval`` and ``ExtensionArray`` operands when ``check_dtype`` was ``False`` (:issue:`32747`)
- Bug in :meth:`Series.map` not raising on invalid ``na_action`` (:issue:`32815`)
- Bug in :meth:`DataFrame.__dir__` caused a segfault when using unicode surrogates in a column name (:issue:`25509`)
+- Bug in :meth:`DataFrame.plot.scatter` caused an error when plotting variable marker sizes (:issue:`32904`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index d3db539084609..e466a215091ea 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1468,15 +1468,19 @@ def scatter(self, x, y, s=None, c=None, **kwargs):
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
- s : scalar or array_like, optional
+ s : str, scalar or array_like, optional
The size of each point. Possible values are:
+ - A string with the name of the column to be used for marker's size.
+
- A single scalar so all points have the same size.
- A sequence of scalars, which will be used for each point's size
recursively. For instance, when passing [2,14] all points size
will be either 2 or 14, alternatively.
+ .. versionchanged:: 1.1.0
+
c : str, int or array_like, optional
The color of each point. Possible values are:
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index 63d0b8abe59d9..bc8346fd48433 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -934,6 +934,8 @@ def __init__(self, data, x, y, s=None, c=None, **kwargs):
# hide the matplotlib default for size, in case we want to change
# the handling of this argument later
s = 20
+ elif is_hashable(s) and s in data.columns:
+ s = data[s]
super().__init__(data, x, y, s=s, **kwargs)
if is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index 45ac18b2661c3..08b33ee547a48 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -1306,6 +1306,13 @@ def test_plot_scatter_with_c(self):
float_array = np.array([0.0, 1.0])
df.plot.scatter(x="A", y="B", c=float_array, cmap="spring")
+ def test_plot_scatter_with_s(self):
+ # this refers to GH 32904
+ df = DataFrame(np.random.random((10, 3)) * 100, columns=["a", "b", "c"],)
+
+ ax = df.plot.scatter(x="a", y="b", s="c")
+ tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes())
+
def test_scatter_colors(self):
df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
with pytest.raises(TypeError):
| - [x] closes #32904
- [x] tests added
- [x] tests passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This PR is a continuation of #32937 (made an error when pulling changes from the master).
I had a more elaborate decision tree for what to do with a passed size variable `s`, here https://github.com/pandas-dev/pandas/pull/32937, but in the end decided to go with the simpler version since the other checks are redundant (they would contain `s=s` or `pass`). | https://api.github.com/repos/pandas-dev/pandas/pulls/33107 | 2020-03-28T22:10:52Z | 2020-03-31T21:26:48Z | 2020-03-31T21:26:47Z | 2020-03-31T21:28:17Z |
Fix mixup between mean and median | diff --git a/doc/source/getting_started/intro_tutorials/09_timeseries.rst b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
index d5b4b316130bb..d7c1709ced51a 100644
--- a/doc/source/getting_started/intro_tutorials/09_timeseries.rst
+++ b/doc/source/getting_started/intro_tutorials/09_timeseries.rst
@@ -335,7 +335,7 @@ When defined, the frequency of the time series is provided by the
<ul class="task-bullet">
<li>
-Make a plot of the daily median :math:`NO_2` value in each of the stations.
+Make a plot of the daily mean :math:`NO_2` value in each of the stations.
.. ipython:: python
:okwarning:
@@ -386,4 +386,4 @@ A full overview on time series is given in the pages on :ref:`time series and da
.. raw:: html
- </div>
\ No newline at end of file
+ </div>
| Text said "median", but code uses "mean". Make text match the code
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33106 | 2020-03-28T21:54:57Z | 2020-03-29T14:52:25Z | 2020-03-29T14:52:25Z | 2020-03-29T15:07:57Z |
Add test for #32108 (error with groupby on series with period index) | diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index 06a83f4c000cf..84fd7a1bdfb05 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -769,3 +769,17 @@ def test_scalar_call_versus_list_call(self):
expected = grouped.count()
tm.assert_frame_equal(result, expected)
+
+ def test_grouper_period_index(self):
+ # GH 32108
+ periods = 2
+ index = pd.period_range(
+ start="2018-01", periods=periods, freq="M", name="Month"
+ )
+ period_series = pd.Series(range(periods), index=index)
+ result = period_series.groupby(period_series.index.month).sum()
+
+ expected = pd.Series(
+ range(0, periods), index=Index(range(1, periods + 1), name=index.name),
+ )
+ tm.assert_series_equal(result, expected)
| - [ ] closes #32108
| https://api.github.com/repos/pandas-dev/pandas/pulls/33105 | 2020-03-28T20:22:26Z | 2020-06-24T22:36:00Z | 2020-06-24T22:35:59Z | 2020-06-24T22:36:04Z |
BUG: ensure consistent behavior of Index.is_all_dates (#19204) | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 6eedf9dee5266..ba2ebfff79ad9 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -636,6 +636,7 @@ Indexing
- Bug in :meth:`Series.__getitem__` allowing missing labels with ``np.ndarray``, :class:`Index`, :class:`Series` indexers but not ``list``, these now all raise ``KeyError`` (:issue:`33646`)
- Bug in :meth:`DataFrame.truncate` and :meth:`Series.truncate` where index was assumed to be monotone increasing (:issue:`33756`)
- Indexing with a list of strings representing datetimes failed on :class:`DatetimeIndex` or :class:`PeriodIndex`(:issue:`11278`)
+- Bug in :meth:`Index.is_all_dates` incorrectly returning ``False`` when inferred type of index was other dates than datetime (:issue:`19204`)
Missing
^^^^^^^
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 79af28dc5f2ce..f98aa55ac7708 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -9,7 +9,7 @@
from pandas._libs import algos as libalgos, index as libindex, lib
import pandas._libs.join as libjoin
-from pandas._libs.lib import is_datetime_array, no_default
+from pandas._libs.lib import no_default
from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas._libs.tslibs.timezones import tz_compare
@@ -25,7 +25,6 @@
)
from pandas.core.dtypes.common import (
ensure_int64,
- ensure_object,
ensure_platform_int,
is_bool,
is_bool_dtype,
@@ -1951,7 +1950,14 @@ def is_all_dates(self) -> bool:
"""
Whether or not the index values only consist of dates.
"""
- return is_datetime_array(ensure_object(self._values))
+ return self.inferred_type in [
+ "datetime64",
+ "datetime",
+ "date",
+ "timedelta64",
+ "timedelta",
+ "period",
+ ]
# --------------------------------------------------------------------
# Pickle Methods
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 9f235dcdbb295..ae1df002a6bd9 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1152,14 +1152,26 @@ def test_is_object(self, indices, expected):
("bool", False),
("categorical", False),
("int", False),
- ("datetime", True),
("float", False),
+ ("datetime", True),
+ ("datetime-tz", True),
+ ("period", True),
+ ("timedelta", True),
+ ("empty", False),
],
indirect=["indices"],
)
def test_is_all_dates(self, indices, expected):
assert indices.is_all_dates is expected
+ @pytest.mark.parametrize(
+ "index", ["datetime", "datetime-tz", "period", "timedelta"], indirect=["index"],
+ )
+ def test_is_all_dates_consistency(self, index):
+ # GH 19204
+ non_date = pd.Index(["not a date"])
+ assert index.is_all_dates == index.append(non_date)[:-1].is_all_dates
+
def test_summary(self, indices):
self._check_method_works(Index._summary, indices)
| - [x] closes #19204
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/33104 | 2020-03-28T18:22:21Z | 2020-07-17T11:20:49Z | null | 2023-05-11T01:19:34Z |
Check error message for raised exception | diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index c24b0b5fa64b8..be9cc53d33d6f 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -166,7 +166,8 @@ def __init__(self, values, freq=None, dtype=None, copy=False):
@classmethod
def _simple_new(cls, values: np.ndarray, freq=None, **kwargs) -> "PeriodArray":
# alias for PeriodArray.__init__
- assert isinstance(values, np.ndarray) and values.dtype == "i8"
+ assertion_msg = "Should be numpy array of type i8"
+ assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg
return cls(values, freq=freq, **kwargs)
@classmethod
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py
index 6e8e81230b2bb..954601ad423bf 100644
--- a/pandas/tests/indexes/categorical/test_category.py
+++ b/pandas/tests/indexes/categorical/test_category.py
@@ -192,7 +192,7 @@ def test_delete(self):
expected = CategoricalIndex(list("aabbc"), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
- with pytest.raises((IndexError, ValueError)):
+ with tm.external_error_raised((IndexError, ValueError)):
# Either depending on NumPy version
ci.delete(10)
diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py
index cb2140d0b4025..4ec7ef64e2272 100644
--- a/pandas/tests/indexes/period/test_constructors.py
+++ b/pandas/tests/indexes/period/test_constructors.py
@@ -327,7 +327,8 @@ def test_constructor_simple_new(self):
result = idx._simple_new(idx._data, name="p")
tm.assert_index_equal(result, idx)
- with pytest.raises(AssertionError):
+ msg = "Should be numpy array of type i8"
+ with pytest.raises(AssertionError, match=msg):
# Need ndarray, not Int64Index
type(idx._data)._simple_new(idx.astype("i8"), freq=idx.freq)
diff --git a/pandas/tests/indexes/period/test_join.py b/pandas/tests/indexes/period/test_join.py
index 9e3df0c32d6d5..8a68561dd5819 100644
--- a/pandas/tests/indexes/period/test_join.py
+++ b/pandas/tests/indexes/period/test_join.py
@@ -39,5 +39,6 @@ def test_join_does_not_recur(self):
def test_join_mismatched_freq_raises(self):
index = period_range("1/1/2000", "1/20/2000", freq="D")
index3 = period_range("1/1/2000", "1/20/2000", freq="2D")
- with pytest.raises(IncompatibleFrequency):
+ msg = r".*Input has different freq=2D from PeriodIndex\(freq=D\)"
+ with pytest.raises(IncompatibleFrequency, match=msg):
index.join(index3)
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index 833901ea7ba22..c0597180184a6 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -59,6 +59,7 @@ def test_range_slice_day(self):
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
+ msg = "slice indices must be integers or None or have an __index__ method"
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = [
@@ -69,7 +70,7 @@ def test_range_slice_day(self):
"2013/02/01 09:00",
]
for v in values:
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
@@ -81,13 +82,14 @@ def test_range_slice_day(self):
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
idx[v:]
def test_range_slice_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
+ msg = "slice indices must be integers or None or have an __index__ method"
for idx in [didx, pidx]:
# slices against index should raise IndexError
@@ -99,7 +101,7 @@ def test_range_slice_seconds(self):
"2013/02/01 09:00",
]
for v in values:
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py
index 88f1687b8bb10..71b827d83b836 100644
--- a/pandas/tests/indexes/period/test_setops.py
+++ b/pandas/tests/indexes/period/test_setops.py
@@ -148,7 +148,8 @@ def test_union_misc(self, sort):
# raise if different frequencies
index = period_range("1/1/2000", "1/20/2000", freq="D")
index2 = period_range("1/1/2000", "1/20/2000", freq="W-WED")
- with pytest.raises(IncompatibleFrequency):
+ msg = r"Input has different freq=W-WED from PeriodIndex\(freq=D\)"
+ with pytest.raises(IncompatibleFrequency, match=msg):
index.union(index2, sort=sort)
# TODO: belongs elsewhere
@@ -180,11 +181,13 @@ def test_intersection(self, sort):
# raise if different frequencies
index = period_range("1/1/2000", "1/20/2000", freq="D")
index2 = period_range("1/1/2000", "1/20/2000", freq="W-WED")
- with pytest.raises(IncompatibleFrequency):
+ msg = r"Input has different freq=W-WED from PeriodIndex\(freq=D\)"
+ with pytest.raises(IncompatibleFrequency, match=msg):
index.intersection(index2, sort=sort)
index3 = period_range("1/1/2000", "1/20/2000", freq="2D")
- with pytest.raises(IncompatibleFrequency):
+ msg = r"Input has different freq=2D from PeriodIndex\(freq=D\)"
+ with pytest.raises(IncompatibleFrequency, match=msg):
index.intersection(index3, sort=sort)
def test_intersection_cases(self, sort):
diff --git a/pandas/tests/indexes/timedeltas/test_delete.py b/pandas/tests/indexes/timedeltas/test_delete.py
index 593ed7bb0a1ac..63f2b450aa818 100644
--- a/pandas/tests/indexes/timedeltas/test_delete.py
+++ b/pandas/tests/indexes/timedeltas/test_delete.py
@@ -1,5 +1,3 @@
-import pytest
-
from pandas import TimedeltaIndex, timedelta_range
import pandas._testing as tm
@@ -30,7 +28,7 @@ def test_delete(self):
assert result.name == expected.name
assert result.freq == expected.freq
- with pytest.raises((IndexError, ValueError)):
+ with tm.external_error_raised((IndexError, ValueError)):
# either depending on numpy version
idx.delete(5)
| - [x] xref #30999
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33103 | 2020-03-28T18:01:58Z | 2020-03-29T15:25:46Z | 2020-03-29T15:25:46Z | 2020-03-29T15:25:50Z |
PERF: fix performance regression in memory_usage(deep=True) for object dtype | diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 2187668c96ca4..a3aff45afa116 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -619,4 +619,17 @@ def time_select_dtypes(self, n):
self.df.select_dtypes(include="int")
+class MemoryUsage:
+ def setup(self):
+ self.df = DataFrame(np.random.randn(100000, 2), columns=list("AB"))
+ self.df2 = self.df.copy()
+ self.df2["A"] = self.df2["A"].astype("object")
+
+ def time_memory_usage(self):
+ self.df.memory_usage(deep=True)
+
+ def time_memory_usage_object_dtype(self):
+ self.df2.memory_usage(deep=True)
+
+
from .pandas_vb_common import setup # noqa: F401 isort:skip
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 9ff0d60b9cd6a..34a3276d03c37 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1387,7 +1387,7 @@ def memory_usage(self, deep=False):
v = self.array.nbytes
if deep and is_object_dtype(self) and not PYPY:
- v += lib.memory_usage_of_objects(self.array)
+ v += lib.memory_usage_of_objects(self._values)
return v
@doc(
| - [x] closes #33012
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
The pull request is to update lib.memory_usage_of_objects from taking self.arrays to self._values. An ASV included to benchmark with and without object-dtype columns.
Before:

After:
 | https://api.github.com/repos/pandas-dev/pandas/pulls/33102 | 2020-03-28T16:02:20Z | 2020-03-31T00:11:46Z | 2020-03-31T00:11:46Z | 2020-03-31T00:25:23Z |
Add test for named period index, doing group by index | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 20415bba99476..b92a3929ace41 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -452,6 +452,7 @@ Other
- Fixed bug in :func:`pandas.testing.assert_series_equal` where dtypes were checked for ``Interval`` and ``ExtensionArray`` operands when ``check_dtype`` was ``False`` (:issue:`32747`)
- Bug in :meth:`Series.map` not raising on invalid ``na_action`` (:issue:`32815`)
- Bug in :meth:`DataFrame.__dir__` caused a segfault when using unicode surrogates in a column name (:issue:`25509`)
+- Added test for named period index, doing group by on index.(:issue:`32108`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index b8d8f56512a69..688092f85d4fc 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -8,7 +8,16 @@
from pandas.errors import PerformanceWarning
import pandas as pd
-from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ Series,
+ Timestamp,
+ date_range,
+ period_range,
+ read_csv,
+)
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
@@ -2057,3 +2066,15 @@ def test_groups_repr_truncates(max_seq_items, expected):
result = df.groupby(np.array(df.a)).groups.__repr__()
assert result == expected
+
+
+def test_groupby_period_index():
+ periods = 2
+ index = period_range(start="2018-01", periods=periods, freq="M")
+ period_series = Series(range(periods), index=index)
+ period_series.index.name = "Month"
+ result = period_series.groupby(period_series.index.month).sum()
+
+ expected = pd.Series(range(0, periods), index=range(1, periods + 1))
+ expected.index.name = period_series.index.name
+ tm.assert_series_equal(result, expected)
| - [x] closes #32108
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33101 | 2020-03-28T15:22:17Z | 2020-03-28T17:25:48Z | null | 2020-03-28T17:25:48Z |
TYP: require Index objects earlier in internals | diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py
index e70652b81c42f..bc45b7c74ecc1 100644
--- a/pandas/core/internals/__init__.py
+++ b/pandas/core/internals/__init__.py
@@ -18,8 +18,6 @@
BlockManager,
SingleBlockManager,
concatenate_block_managers,
- create_block_manager_from_arrays,
- create_block_manager_from_blocks,
)
__all__ = [
@@ -40,6 +38,4 @@
"BlockManager",
"SingleBlockManager",
"concatenate_block_managers",
- "create_block_manager_from_arrays",
- "create_block_manager_from_blocks",
]
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 3e0fb8455884a..fc7da4155db36 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -3,6 +3,7 @@
constructors before passing them to a BlockManager.
"""
from collections import abc
+from typing import Tuple
import numpy as np
import numpy.ma as ma
@@ -29,7 +30,6 @@
ABCDataFrame,
ABCDatetimeIndex,
ABCIndexClass,
- ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
@@ -44,7 +44,7 @@
get_objs_combined_axis,
union_indexes,
)
-from pandas.core.internals import (
+from pandas.core.internals.managers import (
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
@@ -53,12 +53,16 @@
# BlockManager Interface
-def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None, verify_integrity=True):
+def arrays_to_mgr(
+ arrays, arr_names, index, columns, dtype=None, verify_integrity: bool = True
+):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
+ arr_names = ensure_index(arr_names)
+
if verify_integrity:
# figure out the index, if necessary
if index is None:
@@ -70,6 +74,9 @@ def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None, verify_integrit
arrays = _homogenize(arrays, index, dtype)
columns = ensure_index(columns)
+ else:
+ columns = ensure_index(columns)
+ index = ensure_index(index)
# from BlockManager perspective
axes = [columns, index]
@@ -163,7 +170,8 @@ def init_ndarray(values, index, columns, dtype=None, copy=False):
values = [values]
if columns is None:
- columns = list(range(len(values)))
+ columns = Index(range(len(values)))
+
return arrays_to_mgr(values, columns, index, columns, dtype=dtype)
# by definition an array here
@@ -416,7 +424,7 @@ def get_names_from_index(data):
return index
-def _get_axes(N, K, index, columns):
+def _get_axes(N, K, index, columns) -> Tuple[Index, Index]:
# helper to create the axes as indexes
# return axes or defaults
@@ -635,12 +643,7 @@ def sanitize_index(data, index: Index):
if len(data) != len(index):
raise ValueError("Length of values does not match length of index")
- if isinstance(data, ABCIndexClass):
- pass
- elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)):
- data = data._values
-
- elif isinstance(data, np.ndarray):
+ if isinstance(data, np.ndarray):
# coerce datetimelike types
if data.dtype.kind in ["M", "m"]:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 182a5b14a1242..07e78cf48d33d 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -2,7 +2,7 @@
import itertools
import operator
import re
-from typing import Dict, List, Optional, Sequence, Tuple, TypeVar, Union
+from typing import DefaultDict, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
import warnings
import numpy as np
@@ -342,7 +342,7 @@ def _verify_integrity(self) -> None:
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
- construction_error(tot_items, block.shape[1:], self.axes)
+ raise construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError(
"Number of manager items must equal union of "
@@ -1649,7 +1649,7 @@ def concat(
# Constructor Helpers
-def create_block_manager_from_blocks(blocks, axes):
+def create_block_manager_from_blocks(blocks, axes: List[Index]) -> BlockManager:
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
@@ -1670,10 +1670,15 @@ def create_block_manager_from_blocks(blocks, axes):
except ValueError as e:
blocks = [getattr(b, "values", b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
- construction_error(tot_items, blocks[0].shape[1:], axes, e)
+ raise construction_error(tot_items, blocks[0].shape[1:], axes, e)
-def create_block_manager_from_arrays(arrays, names, axes):
+def create_block_manager_from_arrays(
+ arrays, names: Index, axes: List[Index]
+) -> BlockManager:
+ assert isinstance(names, Index)
+ assert isinstance(axes, list)
+ assert all(isinstance(x, Index) for x in axes)
try:
blocks = form_blocks(arrays, names, axes)
@@ -1681,7 +1686,7 @@ def create_block_manager_from_arrays(arrays, names, axes):
mgr._consolidate_inplace()
return mgr
except ValueError as e:
- construction_error(len(arrays), arrays[0].shape, axes, e)
+ raise construction_error(len(arrays), arrays[0].shape, axes, e)
def construction_error(tot_items, block_shape, axes, e=None):
@@ -1696,23 +1701,25 @@ def construction_error(tot_items, block_shape, axes, e=None):
if len(implied) <= 2:
implied = implied[::-1]
+ # We return the exception object instead of raising it so that we
+ # can raise it in the caller; mypy plays better with that
if passed == implied and e is not None:
- raise e
+ return e
if block_shape[0] == 0:
- raise ValueError("Empty data passed with indices specified.")
- raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
+ return ValueError("Empty data passed with indices specified.")
+ return ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
# -----------------------------------------------------------------------
-def form_blocks(arrays, names, axes):
+def form_blocks(arrays, names: Index, axes) -> List[Block]:
# put "leftover" items in float bucket, where else?
# generalize?
- items_dict = defaultdict(list)
+ items_dict: DefaultDict[str, List] = defaultdict(list)
extra_locs = []
- names_idx = ensure_index(names)
+ names_idx = names
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
@@ -1730,7 +1737,7 @@ def form_blocks(arrays, names, axes):
block_type = get_block_type(v)
items_dict[block_type.__name__].append((i, k, v))
- blocks = []
+ blocks: List[Block] = []
if len(items_dict["FloatBlock"]):
float_blocks = _multi_blockify(items_dict["FloatBlock"])
blocks.extend(float_blocks)
| https://api.github.com/repos/pandas-dev/pandas/pulls/33100 | 2020-03-28T14:54:30Z | 2020-04-04T00:21:32Z | 2020-04-04T00:21:32Z | 2020-04-04T00:43:59Z | |
DOC iris.csv file has moved | diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 47a4fd8ff0e95..0d24a785b0be2 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -359,7 +359,7 @@ def parallel_coordinates(
--------
>>> from matplotlib import pyplot as plt
>>> df = pd.read_csv('https://raw.github.com/pandas-dev/pandas/master'
- '/pandas/tests/data/csv/iris.csv')
+ '/pandas/tests/data/iris.csv')
>>> pd.plotting.parallel_coordinates(
df, 'Name',
color=('#556270', '#4ECDC4', '#C7F464'))
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/33099 | 2020-03-28T14:47:04Z | 2020-03-30T03:52:35Z | 2020-03-30T03:52:35Z | 2020-04-14T02:14:36Z |
TST: GroupBy(..., as_index=True).agg() drops index | diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 962288d5d59e1..1b726860eeb66 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -809,7 +809,17 @@ def test_aggregate_mixed_types():
tm.assert_frame_equal(result, expected)
-@pytest.mark.xfail(reason="Not implemented.")
+@pytest.mark.parametrize("func", ["min", "max"])
+def test_aggregate_categorical_lost_index(func: str):
+ # GH: 28641 groupby drops index, when grouping over categorical column with min/max
+ ds = pd.Series(["b"], dtype="category").cat.as_ordered()
+ df = pd.DataFrame({"A": [1997], "B": ds})
+ result = df.groupby("A").agg({"B": func})
+ expected = pd.DataFrame({"B": ["b"]}, index=pd.Index([1997], name="A"))
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.xfail(reason="Not implemented;see GH 31256")
def test_aggregate_udf_na_extension_type():
# https://github.com/pandas-dev/pandas/pull/31359
# This is currently failing to cast back to Int64Dtype.
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index f9e89d36084c6..ff35ec04952b1 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -1388,6 +1388,19 @@ def test_groupby_agg_non_numeric():
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize("func", ["first", "last"])
+def test_groupy_first_returned_categorical_instead_of_dataframe(func):
+ # GH 28641: groupby drops index, when grouping over categorical column with
+ # first/last. Renamed Categorical instead of DataFrame previously.
+ df = pd.DataFrame(
+ {"A": [1997], "B": pd.Series(["b"], dtype="category").cat.as_ordered()}
+ )
+ df_grouped = df.groupby("A")["B"]
+ result = getattr(df_grouped, func)()
+ expected = pd.Series(["b"], index=pd.Index([1997], name="A"), name="B")
+ tm.assert_series_equal(result, expected)
+
+
def test_read_only_category_no_sort():
# GH33410
cats = np.array([1, 2])
| - [x] closes #28641
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
While searching for related issue for my other pull request, I found that this issue must have been fixed with another commit in the past (don't know when this issue was fixed).
I added a test to ensure that this error won't know occur in the future. | https://api.github.com/repos/pandas-dev/pandas/pulls/33098 | 2020-03-28T13:19:45Z | 2020-06-15T22:53:27Z | 2020-06-15T22:53:27Z | 2020-06-16T07:29:21Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.