title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
Removed NDFrameSplitter class | diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index dcd854ada9d45..7b142eae9e272 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -884,33 +884,10 @@ def _chop(self, sdata, slice_obj):
return sdata._slice(slice_obj, axis=1) # .loc[:, slice_obj]
-class NDFrameSplitter(DataSplitter):
-
- def __init__(self, data, labels, ngroups, axis=0):
- super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
-
- self.factory = data._constructor
-
- def _get_sorted_data(self):
- # this is the BlockManager
- data = self.data._data
-
- # this is sort of wasteful but...
- sorted_axis = data.axes[self.axis].take(self.sort_idx)
- sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)
-
- return sorted_data
-
- def _chop(self, sdata, slice_obj):
- return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
-
-
def get_splitter(data, *args, **kwargs):
if isinstance(data, Series):
klass = SeriesSplitter
elif isinstance(data, DataFrame):
klass = FrameSplitter
- else:
- klass = NDFrameSplitter
return klass(data, *args, **kwargs)
| More GroupBy cleanup post Panel removal
| https://api.github.com/repos/pandas-dev/pandas/pulls/26153 | 2019-04-19T17:19:29Z | 2019-04-19T19:48:59Z | 2019-04-19T19:48:59Z | 2019-04-19T20:33:27Z |
Fix Bug with NA value in Grouping for Groupby.nth | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index b2a379d9fe6f5..5d611069ebd0a 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -393,6 +393,7 @@ Groupby/Resample/Rolling
- Bug in :meth:`pandas.core.window.Rolling.count` and `pandas.core.window.Expanding.count` was previously ignoring the axis keyword (:issue:`13503`)
- Bug in :meth:`pandas.core.groupby.GroupBy.idxmax` and :meth:`pandas.core.groupby.GroupBy.idxmin` with datetime column would return incorrect dtype (:issue:`25444`, :issue:`15306`)
- Bug in :meth:`pandas.core.groupby.GroupBy.cumsum`, :meth:`pandas.core.groupby.GroupBy.cumprod`, :meth:`pandas.core.groupby.GroupBy.cummin` and :meth:`pandas.core.groupby.GroupBy.cummax` with categorical column having absent categories, would return incorrect result or segfault (:issue:`16771`)
+- Bug in :meth:`pandas.core.groupby.GroupBy.nth` where NA values in the grouping would return incorrect results (:issue:`26011`)
Reshaping
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index bd8a8852964e3..945885e34fa1e 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -12,7 +12,7 @@ class providing the base-class of operations.
import datetime
from functools import partial, wraps
import types
-from typing import FrozenSet, Optional, Tuple, Type
+from typing import FrozenSet, List, Optional, Tuple, Type, Union
import warnings
import numpy as np
@@ -1546,15 +1546,16 @@ def backfill(self, limit=None):
@Substitution(name='groupby')
@Substitution(see_also=_common_see_also)
- def nth(self, n, dropna=None):
+ def nth(self,
+ n: Union[int, List[int]],
+ dropna: Optional[str] = None) -> DataFrame:
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row, dropna is either
- Truthy (if a Series) or 'all', 'any' (if a DataFrame);
- this is equivalent to calling dropna(how=dropna) before the
- groupby.
+ 'all' or 'any'; this is equivalent to calling dropna(how=dropna)
+ before the groupby.
Parameters
----------
@@ -1617,34 +1618,43 @@ def nth(self, n, dropna=None):
4 2 5.0
"""
- if isinstance(n, int):
- nth_values = [n]
- elif isinstance(n, (set, list, tuple)):
- nth_values = list(set(n))
- if dropna is not None:
- raise ValueError(
- "dropna option with a list of nth values is not supported")
- else:
+ valid_containers = (set, list, tuple)
+ if not isinstance(n, (valid_containers, int)):
raise TypeError("n needs to be an int or a list/set/tuple of ints")
- nth_values = np.array(nth_values, dtype=np.intp)
- self._set_group_selection()
-
if not dropna:
- mask_left = np.in1d(self._cumcount_array(), nth_values)
+
+ if isinstance(n, int):
+ nth_values = [n]
+ elif isinstance(n, valid_containers):
+ nth_values = list(set(n))
+
+ nth_array = np.array(nth_values, dtype=np.intp)
+ self._set_group_selection()
+
+ mask_left = np.in1d(self._cumcount_array(), nth_array)
mask_right = np.in1d(self._cumcount_array(ascending=False) + 1,
- -nth_values)
+ -nth_array)
mask = mask_left | mask_right
+ ids, _, _ = self.grouper.group_info
+
+ # Drop NA values in grouping
+ mask = mask & (ids != -1)
+
out = self._selected_obj[mask]
if not self.as_index:
return out
- ids, _, _ = self.grouper.group_info
out.index = self.grouper.result_index[ids[mask]]
return out.sort_index() if self.sort else out
+ # dropna is truthy
+ if isinstance(n, valid_containers):
+ raise ValueError(
+ "dropna option with a list of nth values is not supported")
+
if dropna not in ['any', 'all']:
if isinstance(self._selected_obj, Series) and dropna is True:
warnings.warn("the dropna={dropna} keyword is deprecated,"
@@ -1679,7 +1689,7 @@ def nth(self, n, dropna=None):
else:
- # create a grouper with the original parameters, but on the dropped
+ # create a grouper with the original parameters, but on dropped
# object
from pandas.core.groupby.grouper import _get_grouper
grouper, _, _ = _get_grouper(dropped, key=self.keys,
@@ -1687,7 +1697,8 @@ def nth(self, n, dropna=None):
sort=self.sort,
mutated=self.mutated)
- grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
+ grb = dropped.groupby(
+ grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py
index 7a3d189d3020e..6d07ab0008adb 100644
--- a/pandas/tests/groupby/test_nth.py
+++ b/pandas/tests/groupby/test_nth.py
@@ -434,3 +434,20 @@ def test_nth_column_order():
columns=['C', 'B'],
index=Index([1, 2], name='A'))
assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("dropna", [None, 'any', 'all'])
+def test_nth_nan_in_grouper(dropna):
+ # GH 26011
+ df = DataFrame([
+ [np.nan, 0, 1],
+ ['abc', 2, 3],
+ [np.nan, 4, 5],
+ ['def', 6, 7],
+ [np.nan, 8, 9],
+ ], columns=list('abc'))
+ result = df.groupby('a').nth(0, dropna=dropna)
+ expected = pd.DataFrame([[2, 3], [6, 7]], columns=list('bc'),
+ index=Index(['abc', 'def'], name='a'))
+
+ assert_frame_equal(result, expected)
| - [X] closes #26011
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
Would ideally like to combine first, nth and last implementations. Consider this a precursor
| https://api.github.com/repos/pandas-dev/pandas/pulls/26152 | 2019-04-19T16:54:38Z | 2019-05-05T22:20:01Z | 2019-05-05T22:20:00Z | 2020-01-16T00:34:07Z |
Fix Type Annotation in pandas.core.accessor | diff --git a/mypy.ini b/mypy.ini
index 80c34260acdd1..9d7262213413e 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -11,15 +11,9 @@ ignore_errors=True
[mypy-pandas.compat.numpy.function]
ignore_errors=True
-[mypy-pandas.core.accessor]
-ignore_errors=True
-
[mypy-pandas.core.api]
ignore_errors=True
-[mypy-pandas.core.apply]
-ignore_errors=True
-
[mypy-pandas.core.arrays.array_]
ignore_errors=True
@@ -32,15 +26,9 @@ ignore_errors=True
[mypy-pandas.core.arrays.interval]
ignore_errors=True
-[mypy-pandas.core.arrays.numpy_]
-ignore_errors=True
-
[mypy-pandas.core.arrays.period]
ignore_errors=True
-[mypy-pandas.core.arrays.sparse]
-ignore_errors=True
-
[mypy-pandas.core.arrays.timedeltas]
ignore_errors=True
@@ -98,9 +86,6 @@ ignore_errors=True
[mypy-pandas.core.series]
ignore_errors=True
-[mypy-pandas.core.sparse.frame]
-ignore_errors=True
-
[mypy-pandas.core.util.hashing]
ignore_errors=True
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index c5513765764a7..aeebe686c63cb 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -5,13 +5,14 @@
that can be mixed into or pinned onto other pandas classes.
"""
+from typing import Set
import warnings
from pandas.util._decorators import Appender
class DirNamesMixin:
- _accessors = frozenset()
+ _accessors = set() # type: Set[str]
_deprecations = frozenset(
['asobject', 'base', 'data', 'flags', 'itemsize', 'strides'])
| - [x] closes #26146
- [x] tests passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26147 | 2019-04-19T07:59:17Z | 2019-04-21T16:04:47Z | 2019-04-21T16:04:47Z | 2019-04-21T16:56:39Z |
Cython Unicode Method Cleanups | diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 1fd72cf763e6b..d80cc482be5b3 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -549,41 +549,6 @@ def astype_intsafe(ndarray[object] arr, new_dtype):
return result
-@cython.wraparound(False)
-@cython.boundscheck(False)
-def astype_unicode(arr: ndarray, skipna: bool=False) -> ndarray[object]:
- """
- Convert all elements in an array to unicode.
-
- Parameters
- ----------
- arr : ndarray
- The array whose elements we are casting.
- skipna : bool, default False
- Whether or not to coerce nulls to their stringified form
- (e.g. NaN becomes 'nan').
-
- Returns
- -------
- casted_arr : ndarray
- A new array with the input array's elements casted.
- """
- cdef:
- object arr_i
- Py_ssize_t i, n = arr.size
- ndarray[object] result = np.empty(n, dtype=object)
-
- for i in range(n):
- arr_i = arr[i]
-
- if not (skipna and checknull(arr_i)):
- arr_i = unicode(arr_i)
-
- result[i] = arr_i
-
- return result
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def astype_str(arr: ndarray, skipna: bool=False) -> ndarray[object]:
@@ -1321,10 +1286,6 @@ def infer_dtype(value: object, skipna: object=None) -> str:
if is_string_array(values, skipna=skipna):
return 'string'
- elif isinstance(val, unicode):
- if is_unicode_array(values, skipna=skipna):
- return 'unicode'
-
elif isinstance(val, bytes):
if is_bytes_array(values, skipna=skipna):
return 'bytes'
@@ -1596,22 +1557,6 @@ cpdef bint is_string_array(ndarray values, bint skipna=False):
return validator.validate(values)
-cdef class UnicodeValidator(Validator):
- cdef inline bint is_value_typed(self, object value) except -1:
- return isinstance(value, unicode)
-
- cdef inline bint is_array_typed(self) except -1:
- return issubclass(self.dtype.type, np.unicode_)
-
-
-cdef bint is_unicode_array(ndarray values, bint skipna=False):
- cdef:
- UnicodeValidator validator = UnicodeValidator(len(values),
- values.dtype,
- skipna=skipna)
- return validator.validate(values)
-
-
cdef class BytesValidator(Validator):
cdef inline bint is_value_typed(self, object value) except -1:
return isinstance(value, bytes)
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index a2509226bcd8e..adaddf844ea9c 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -636,10 +636,6 @@ def astype_nansafe(arr, dtype, copy=True, skipna=False):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, str):
- return lib.astype_unicode(arr.ravel(),
- skipna=skipna).reshape(arr.shape)
-
- elif issubclass(dtype.type, str):
return lib.astype_str(arr.ravel(),
skipna=skipna).reshape(arr.shape)
| Removal of some items that should no longer be required after Py2 drop | https://api.github.com/repos/pandas-dev/pandas/pulls/26138 | 2019-04-18T19:06:22Z | 2019-04-18T19:47:01Z | 2019-04-18T19:47:01Z | 2019-04-18T20:02:55Z |
Deprecate SparseDataFrame and SparseSeries | diff --git a/doc/source/user_guide/sparse.rst b/doc/source/user_guide/sparse.rst
index 20962749e2040..8fed29d7a6316 100644
--- a/doc/source/user_guide/sparse.rst
+++ b/doc/source/user_guide/sparse.rst
@@ -6,27 +6,27 @@
Sparse data structures
**********************
-We have implemented "sparse" versions of ``Series`` and ``DataFrame``. These are not sparse
-in the typical "mostly 0". Rather, you can view these objects as being "compressed"
-where any data matching a specific value (``NaN`` / missing value, though any value
-can be chosen) is omitted. A special ``SparseIndex`` object tracks where data has been
-"sparsified". This will make much more sense with an example. All of the standard pandas
-data structures have a ``to_sparse`` method:
+.. note::
-.. ipython:: python
-
- ts = pd.Series(np.random.randn(10))
- ts[2:-2] = np.nan
- sts = ts.to_sparse()
- sts
+ ``SparseSeries`` and ``SparseDataFrame`` have been deprecated. Their purpose
+ is served equally well by a :class:`Series` or :class:`DataFrame` with
+ sparse values. See :ref:`sparse.migration` for tips on migrating.
-The ``to_sparse`` method takes a ``kind`` argument (for the sparse index, see
-below) and a ``fill_value``. So if we had a mostly zero ``Series``, we could
-convert it to sparse with ``fill_value=0``:
+Pandas provides data structures for efficiently storing sparse data.
+These are not necessarily sparse in the typical "mostly 0". Rather, you can view these
+objects as being "compressed" where any data matching a specific value (``NaN`` / missing value, though any value
+can be chosen, including 0) is omitted. The compressed values are not actually stored in the array.
.. ipython:: python
- ts.fillna(0).to_sparse(fill_value=0)
+ arr = np.random.randn(10)
+ arr[2:-2] = np.nan
+ ts = pd.Series(pd.SparseArray(arr))
+ ts
+
+Notice the dtype, ``Sparse[float64, nan]``. The ``nan`` means that elements in the
+array that are ``nan`` aren't actually stored, only the non-``nan`` elements are.
+Those non-``nan`` elements have a ``float64`` dtype.
The sparse objects exist for memory efficiency reasons. Suppose you had a
large, mostly NA ``DataFrame``:
@@ -35,21 +35,82 @@ large, mostly NA ``DataFrame``:
df = pd.DataFrame(np.random.randn(10000, 4))
df.iloc[:9998] = np.nan
- sdf = df.to_sparse()
- sdf
- sdf.density
+ sdf = df.astype(pd.SparseDtype("float", np.nan))
+ sdf.head()
+ sdf.dtypes
+ sdf.sparse.density
As you can see, the density (% of values that have not been "compressed") is
extremely low. This sparse object takes up much less memory on disk (pickled)
-and in the Python interpreter. Functionally, their behavior should be nearly
+and in the Python interpreter.
+
+.. ipython:: python
+
+ 'dense : {:0.2f} bytes'.format(df.memory_usage().sum() / 1e3)
+ 'sparse: {:0.2f} bytes'.format(sdf.memory_usage().sum() / 1e3)
+
+Functionally, their behavior should be nearly
identical to their dense counterparts.
-Any sparse object can be converted back to the standard dense form by calling
-``to_dense``:
+.. _sparse.array:
+
+SparseArray
+-----------
+
+:class:`SparseArray` is a :class:`~pandas.api.extensions.ExtensionArray`
+for storing an array of sparse values (see :ref:`basics.dtypes` for more
+on extension arrays). It is a 1-dimensional ndarray-like object storing
+only values distinct from the ``fill_value``:
.. ipython:: python
- sts.to_dense()
+ arr = np.random.randn(10)
+ arr[2:5] = np.nan
+ arr[7:8] = np.nan
+ sparr = pd.SparseArray(arr)
+ sparr
+
+A sparse array can be converted to a regular (dense) ndarray with :meth:`numpy.asarray`
+
+.. ipython:: python
+
+ np.asarray(sparr)
+
+
+SparseDtype
+-----------
+
+The :attr:`SparseArray.dtype` property stores two pieces of information
+
+1. The dtype of the non-sparse values
+2. The scalar fill value
+
+
+.. ipython:: python
+
+ sparr.dtype
+
+
+A :class:`SparseDtype` may be constructed by passing each of these
+
+.. ipython:: python
+
+ pd.SparseDtype(np.dtype('datetime64[ns]'))
+
+The default fill value for a given NumPy dtype is the "missing" value for that dtype,
+though it may be overridden.
+
+.. ipython:: python
+
+ pd.SparseDtype(np.dtype('datetime64[ns]'),
+ fill_value=pd.Timestamp('2017-01-01'))
+
+Finally, the string alias ``'Sparse[dtype]'`` may be used to specify a sparse dtype
+in many places
+
+.. ipython:: python
+
+ pd.array([1, 0, 0, 2], dtype='Sparse[int]')
.. _sparse.accessor:
@@ -71,130 +132,146 @@ attributes and methods that are specific to sparse data.
This accessor is available only on data with ``SparseDtype``, and on the :class:`Series`
class itself for creating a Series with sparse data from a scipy COO matrix with.
-.. _sparse.array:
-SparseArray
------------
+.. versionadded:: 0.25.0
+
+A ``.sparse`` accessor has been added for :class:`DataFrame` as well.
+See :ref:`api.frame.sparse` for more.
-``SparseArray`` is the base layer for all of the sparse indexed data
-structures. It is a 1-dimensional ndarray-like object storing only values
-distinct from the ``fill_value``:
+.. _sparse.calculation:
+
+Sparse Calculation
+------------------
+
+You can apply NumPy `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
+to ``SparseArray`` and get a ``SparseArray`` as a result.
.. ipython:: python
- arr = np.random.randn(10)
- arr[2:5] = np.nan
- arr[7:8] = np.nan
- sparr = pd.SparseArray(arr)
- sparr
+ arr = pd.SparseArray([1., np.nan, np.nan, -2., np.nan])
+ np.abs(arr)
+
-Like the indexed objects (SparseSeries, SparseDataFrame), a ``SparseArray``
-can be converted back to a regular ndarray by calling ``to_dense``:
+The *ufunc* is also applied to ``fill_value``. This is needed to get
+the correct dense result.
.. ipython:: python
- sparr.to_dense()
+ arr = pd.SparseArray([1., -1, -1, -2., -1], fill_value=-1)
+ np.abs(arr)
+ np.abs(arr).to_dense()
+.. _sparse.migration:
-SparseIndex objects
--------------------
+Migrating
+---------
-Two kinds of ``SparseIndex`` are implemented, ``block`` and ``integer``. We
-recommend using ``block`` as it's more memory efficient. The ``integer`` format
-keeps an arrays of all of the locations where the data are not equal to the
-fill value. The ``block`` format tracks only the locations and sizes of blocks
-of data.
+In older versions of pandas, the ``SparseSeries`` and ``SparseDataFrame`` classes (documented below)
+were the preferred way to work with sparse data. With the advent of extension arrays, these subclasses
+are no longer needed. Their purpose is better served by using a regular Series or DataFrame with
+sparse values instead.
-.. _sparse.dtype:
+.. note::
-Sparse Dtypes
--------------
+ There's no performance or memory penalty to using a Series or DataFrame with sparse values,
+ rather than a SparseSeries or SparseDataFrame.
-Sparse data should have the same dtype as its dense representation. Currently,
-``float64``, ``int64`` and ``bool`` dtypes are supported. Depending on the original
-dtype, ``fill_value`` default changes:
+This section provides some guidance on migrating your code to the new style. As a reminder,
+you can use the python warnings module to control warnings. But we recommend modifying
+your code, rather than ignoring the warning.
-* ``float64``: ``np.nan``
-* ``int64``: ``0``
-* ``bool``: ``False``
+**Construction**
-.. ipython:: python
+From an array-like, use the regular :class:`Series` or
+:class:`DataFrame` constructors with :class:`SparseArray` values.
- s = pd.Series([1, np.nan, np.nan])
- s
- s.to_sparse()
+.. code-block:: python
- s = pd.Series([1, 0, 0])
- s
- s.to_sparse()
+ # Previous way
+ >>> pd.SparseDataFrame({"A": [0, 1]})
- s = pd.Series([True, False, True])
- s
- s.to_sparse()
+.. ipython:: python
+
+ # New way
+ pd.DataFrame({"A": pd.SparseArray([0, 1])})
+
+From a SciPy sparse matrix, use :meth:`DataFrame.sparse.from_spmatrix`,
-You can change the dtype using ``.astype()``, the result is also sparse. Note that
-``.astype()`` also affects to the ``fill_value`` to keep its dense representation.
+.. code-block:: python
+ # Previous way
+ >>> from scipy import sparse
+ >>> mat = sparse.eye(3)
+ >>> df = pd.SparseDataFrame(mat, columns=['A', 'B', 'C'])
.. ipython:: python
- s = pd.Series([1, 0, 0, 0, 0])
- s
- ss = s.to_sparse()
- ss
- ss.astype(np.float64)
+ # New way
+ from scipy import sparse
+ mat = sparse.eye(3)
+ df = pd.DataFrame.sparse.from_spmatrix(mat, columns=['A', 'B', 'C'])
+ df.dtypes
-It raises if any value cannot be coerced to specified dtype.
+**Conversion**
-.. code-block:: ipython
+From sparse to dense, use the ``.sparse`` accessors
- In [1]: ss = pd.Series([1, np.nan, np.nan]).to_sparse()
- Out[1]:
- 0 1.0
- 1 NaN
- 2 NaN
- dtype: float64
- BlockIndex
- Block locations: array([0], dtype=int32)
- Block lengths: array([1], dtype=int32)
+.. ipython:: python
- In [2]: ss.astype(np.int64)
- Out[2]:
- ValueError: unable to coerce current fill_value nan to int64 dtype
+ df.sparse.to_dense()
+ df.sparse.to_coo()
-.. _sparse.calculation:
+From dense to sparse, use :meth:`DataFrame.astype` with a :class:`SparseDtype`.
-Sparse Calculation
-------------------
+.. ipython:: python
+
+ dense = pd.DataFrame({"A": [1, 0, 0, 1]})
+ dtype = pd.SparseDtype(int, fill_value=0)
+ dense.astype(dtype)
+
+**Sparse Properties**
-You can apply NumPy *ufuncs* to ``SparseArray`` and get a ``SparseArray`` as a result.
+Sparse-specific properties, like ``density``, are available on the ``.sparse`` accessor.
.. ipython:: python
- arr = pd.SparseArray([1., np.nan, np.nan, -2., np.nan])
- np.abs(arr)
+ df.sparse.density
+**General Differences**
-The *ufunc* is also applied to ``fill_value``. This is needed to get
-the correct dense result.
+In a ``SparseDataFrame``, *all* columns were sparse. A :class:`DataFrame` can have a mixture of
+sparse and dense columns. As a consequence, assigning new columns to a ``DataFrame`` with sparse
+values will not automatically convert the input to be sparse.
+
+.. code-block:: python
+
+ # Previous Way
+ >>> df = pd.SparseDataFrame({"A": [0, 1]})
+ >>> df['B'] = [0, 0] # implicitly becomes Sparse
+ >>> df['B'].dtype
+ Sparse[int64, nan]
+
+Instead, you'll need to ensure that the values being assigned are sparse
.. ipython:: python
- arr = pd.SparseArray([1., -1, -1, -2., -1], fill_value=-1)
- np.abs(arr)
- np.abs(arr).to_dense()
+ df = pd.DataFrame({"A": pd.SparseArray([0, 1])})
+ df['B'] = [0, 0] # remains dense
+ df['B'].dtype
+ df['B'] = pd.SparseArray([0, 0])
+ df['B'].dtype
+
+The ``SparseDataFrame.default_kind`` and ``SparseDataFrame.default_fill_value`` attributes
+have no replacement.
.. _sparse.scipysparse:
Interaction with scipy.sparse
-----------------------------
-SparseDataFrame
-~~~~~~~~~~~~~~~
-
-.. versionadded:: 0.20.0
+Use :meth:`DataFrame.sparse.from_coo` to create a ``DataFrame`` with sparse values from a sparse matrix.
-Pandas supports creating sparse dataframes directly from ``scipy.sparse`` matrices.
+.. versionadded:: 0.25.0
.. ipython:: python
@@ -206,20 +283,18 @@ Pandas supports creating sparse dataframes directly from ``scipy.sparse`` matric
sp_arr = csr_matrix(arr)
sp_arr
- sdf = pd.SparseDataFrame(sp_arr)
- sdf
+ sdf = pd.DataFrame.sparse.from_spmatrix(sp_arr)
+ sdf.head()
+ sdf.dtypes
All sparse formats are supported, but matrices that are not in :mod:`COOrdinate <scipy.sparse>` format will be converted, copying data as needed.
-To convert a ``SparseDataFrame`` back to sparse SciPy matrix in COO format, you can use the :meth:`SparseDataFrame.to_coo` method:
+To convert back to sparse SciPy matrix in COO format, you can use the :meth:`DataFrame.sparse.to_coo` method:
.. ipython:: python
- sdf.to_coo()
+ sdf.sparse.to_coo()
-SparseSeries
-~~~~~~~~~~~~
-
-A :meth:`SparseSeries.to_coo` method is implemented for transforming a ``SparseSeries`` indexed by a ``MultiIndex`` to a ``scipy.sparse.coo_matrix``.
+meth:`Series.sparse.to_coo` is implemented for transforming a ``Series`` with sparse values indexed by a :class:`MultiIndex` to a :class:`scipy.sparse.coo_matrix`.
The method requires a ``MultiIndex`` with two or more levels.
@@ -233,19 +308,17 @@ The method requires a ``MultiIndex`` with two or more levels.
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
-
s
- # SparseSeries
- ss = s.to_sparse()
+ ss = s.astype('Sparse')
ss
-In the example below, we transform the ``SparseSeries`` to a sparse representation of a 2-d array by specifying that the first and second ``MultiIndex`` levels define labels for the rows and the third and fourth levels define labels for the columns. We also specify that the column and row labels should be sorted in the final sparse representation.
+In the example below, we transform the ``Series`` to a sparse representation of a 2-d array by specifying that the first and second ``MultiIndex`` levels define labels for the rows and the third and fourth levels define labels for the columns. We also specify that the column and row labels should be sorted in the final sparse representation.
.. ipython:: python
- A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
- column_levels=['C', 'D'],
- sort_labels=True)
+ A, rows, columns = ss.sparse.to_coo(row_levels=['A', 'B'],
+ column_levels=['C', 'D'],
+ sort_labels=True)
A
A.todense()
@@ -256,16 +329,16 @@ Specifying different row and column labels (and not sorting them) yields a diffe
.. ipython:: python
- A, rows, columns = ss.to_coo(row_levels=['A', 'B', 'C'],
- column_levels=['D'],
- sort_labels=False)
+ A, rows, columns = ss.sparse.to_coo(row_levels=['A', 'B', 'C'],
+ column_levels=['D'],
+ sort_labels=False)
A
A.todense()
rows
columns
-A convenience method :meth:`SparseSeries.from_coo` is implemented for creating a ``SparseSeries`` from a ``scipy.sparse.coo_matrix``.
+A convenience method :meth:`Series.sparse.from_coo` is implemented for creating a ``Series`` with sparse values from a ``scipy.sparse.coo_matrix``.
.. ipython:: python
@@ -275,12 +348,12 @@ A convenience method :meth:`SparseSeries.from_coo` is implemented for creating a
A
A.todense()
-The default behaviour (with ``dense_index=False``) simply returns a ``SparseSeries`` containing
+The default behaviour (with ``dense_index=False``) simply returns a ``Series`` containing
only the non-null entries.
.. ipython:: python
- ss = pd.SparseSeries.from_coo(A)
+ ss = pd.Series.sparse.from_coo(A)
ss
Specifying ``dense_index=True`` will result in an index that is the Cartesian product of the
@@ -289,5 +362,14 @@ row and columns coordinates of the matrix. Note that this will consume a signifi
.. ipython:: python
- ss_dense = pd.SparseSeries.from_coo(A, dense_index=True)
+ ss_dense = pd.Series.sparse.from_coo(A, dense_index=True)
ss_dense
+
+
+.. _sparse.subclasses:
+
+Sparse Subclasses
+-----------------
+
+The :class:`SparseSeries` and :class:`SparseDataFrame` classes are deprecated. Visit their
+API pages for usage.
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 6bcb71773183e..f9cf137285cd9 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -299,6 +299,32 @@ Other API Changes
Deprecations
~~~~~~~~~~~~
+Sparse Subclasses
+^^^^^^^^^^^^^^^^^
+
+The ``SparseSeries`` and ``SparseDataFrame`` subclasses are deprecated. Their functionality is better-provided
+by a ``Series`` or ``DataFrame`` with sparse values.
+
+**Previous Way**
+
+.. ipython:: python
+ :okwarning:
+
+ df = pd.SparseDataFrame({"A": [0, 0, 1, 2]})
+ df.dtypes
+
+**New Way**
+
+.. ipython:: python
+
+ df = pd.DataFrame({"A": pd.SparseArray([0, 0, 1, 2])})
+ df.dtypes
+
+The memory usage of the two approaches is identical. See :ref:`sparse.migration` for more (:issue:`19239`).
+
+Other Deprecations
+^^^^^^^^^^^^^^^^^^
+
- The deprecated ``.ix[]`` indexer now raises a more visible FutureWarning instead of DeprecationWarning (:issue:`26438`).
- Deprecated the ``units=M`` (months) and ``units=Y`` (year) parameters for ``units`` of :func:`pandas.to_timedelta`, :func:`pandas.Timedelta` and :func:`pandas.TimedeltaIndex` (:issue:`16344`)
- The :attr:`SparseArray.values` attribute is deprecated. You can use ``np.asarray(...)`` or
@@ -306,7 +332,6 @@ Deprecations
- The functions :func:`pandas.to_datetime` and :func:`pandas.to_timedelta` have deprecated the ``box`` keyword. Instead, use :meth:`to_numpy` or :meth:`Timestamp.to_datetime64` or :meth:`Timedelta.to_timedelta64`. (:issue:`24416`)
- The :meth:`DataFrame.compound` and :meth:`Series.compound` methods are deprecated and will be removed in a future version (:issue:`26405`).
-
.. _whatsnew_0250.prior_deprecations:
Removal of prior version deprecations/changes
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index b0236cb393c1c..ecc06db2bd07b 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -2014,9 +2014,9 @@ def from_coo(cls, A, dense_index=False):
from pandas.core.sparse.scipy_sparse import _coo_to_sparse_series
from pandas import Series
- result = _coo_to_sparse_series(A, dense_index=dense_index)
- # SparseSeries -> Series[sparse]
- result = Series(result.values, index=result.index, copy=False)
+ result = _coo_to_sparse_series(A, dense_index=dense_index,
+ sparse_series=False)
+ result = Series(result.array, index=result.index, copy=False)
return result
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 963da247fcaa5..5957b23535350 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1930,13 +1930,13 @@ def to_sparse(self, fill_value=None, kind='block'):
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
- >>> sdf = df.to_sparse()
- >>> sdf
+ >>> sdf = df.to_sparse() # doctest: +SKIP
+ >>> sdf # doctest: +SKIP
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
- >>> type(sdf)
+ >>> type(sdf) # doctest: +SKIP
<class 'pandas.core.sparse.frame.SparseDataFrame'>
"""
from pandas.core.sparse.api import SparseDataFrame
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 76c73fc40977c..87db069d94893 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5589,7 +5589,7 @@ def ftypes(self):
3 float64:dense
dtype: object
- >>> pd.SparseDataFrame(arr).ftypes
+ >>> pd.SparseDataFrame(arr).ftypes # doctest: +SKIP
0 float64:sparse
1 float64:sparse
2 float64:sparse
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 55b5bdcbf53f4..8fb6ad3e3ccc5 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1586,7 +1586,6 @@ def to_sparse(self, kind='block', fill_value=None):
SparseSeries
Sparse representation of the Series.
"""
- # TODO: deprecate
from pandas.core.sparse.series import SparseSeries
values = SparseArray(self, kind=kind, fill_value=fill_value)
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index d21a809d7246d..fa3cd781eaf88 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -28,6 +28,13 @@
from pandas.core.sparse.series import SparseSeries
_shared_doc_kwargs = dict(klass='SparseDataFrame')
+depr_msg = """\
+SparseDataFrame is deprecated and will be removed in a future version.
+Use a regular DataFrame whose columns are SparseArrays instead.
+
+See http://pandas.pydata.org/pandas-docs/stable/\
+user_guide/sparse.html#migrating for more.
+"""
class SparseDataFrame(DataFrame):
@@ -35,6 +42,10 @@ class SparseDataFrame(DataFrame):
DataFrame containing sparse floating point data in the form of SparseSeries
objects
+ .. deprectaed:: 0.25.0
+
+ Use a DataFrame with sparse values instead.
+
Parameters
----------
data : same types as can be passed to DataFrame or scipy.sparse.spmatrix
@@ -56,6 +67,7 @@ class SparseDataFrame(DataFrame):
def __init__(self, data=None, index=None, columns=None, default_kind=None,
default_fill_value=None, dtype=None, copy=False):
+ warnings.warn(depr_msg, FutureWarning, stacklevel=2)
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
diff --git a/pandas/core/sparse/scipy_sparse.py b/pandas/core/sparse/scipy_sparse.py
index 40b4452caa8dc..7630983421ff9 100644
--- a/pandas/core/sparse/scipy_sparse.py
+++ b/pandas/core/sparse/scipy_sparse.py
@@ -116,14 +116,32 @@ def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ),
return sparse_matrix, rows, columns
-def _coo_to_sparse_series(A, dense_index=False):
+def _coo_to_sparse_series(A, dense_index: bool = False,
+ sparse_series: bool = True):
"""
Convert a scipy.sparse.coo_matrix to a SparseSeries.
- Use the defaults given in the SparseSeries constructor.
+
+ Parameters
+ ----------
+ A : scipy.sparse.coo.coo_matrix
+ dense_index : bool, default False
+ sparse_series : bool, default True
+
+ Returns
+ -------
+ Series or SparseSeries
"""
+ from pandas import SparseDtype
+
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
s = s.sort_index()
- s = s.to_sparse() # TODO: specify kind?
+ if sparse_series:
+ # TODO(SparseSeries): remove this and the sparse_series keyword.
+ # This is just here to avoid a DeprecationWarning when
+ # _coo_to_sparse_series is called via Series.sparse.from_coo
+ s = s.to_sparse() # TODO: specify kind?
+ else:
+ s = s.astype(SparseDtype(s.dtype))
if dense_index:
# is there a better constructor method to use here?
i = range(A.shape[0])
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index eac59e2c0f5eb..e4f8579a398dd 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -32,9 +32,24 @@
optional_labels='', optional_axis='')
+depr_msg = """\
+SparseSeries is deprecated and will be removed in a future version.
+Use a Series with sparse values instead.
+
+ >>> series = pd.Series(pd.SparseArray(...))
+
+See http://pandas.pydata.org/pandas-docs/stable/\
+user_guide/sparse.html#migrating for more.
+"""
+
+
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
+ .. deprecated:: 0.25.0
+
+ Use a Series with sparse values instead.
+
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
@@ -60,6 +75,7 @@ class SparseSeries(Series):
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, copy=False,
fastpath=False):
+ warnings.warn(depr_msg, FutureWarning, stacklevel=2)
# TODO: Most of this should be refactored and shared with Series
# 1. BlockManager -> array
# 2. Series.index, Series.name, index, name reconciliation
diff --git a/pandas/tests/arrays/sparse/test_accessor.py b/pandas/tests/arrays/sparse/test_accessor.py
index 676f578dd2acc..370d222c1ab4e 100644
--- a/pandas/tests/arrays/sparse/test_accessor.py
+++ b/pandas/tests/arrays/sparse/test_accessor.py
@@ -101,3 +101,21 @@ def test_density(self):
res = df.sparse.density
expected = 0.75
assert res == expected
+
+ @pytest.mark.parametrize("dtype", ['int64', 'float64'])
+ @pytest.mark.parametrize("dense_index", [True, False])
+ @td.skip_if_no_scipy
+ def test_series_from_coo(self, dtype, dense_index):
+ import scipy.sparse
+
+ A = scipy.sparse.eye(3, format='coo', dtype=dtype)
+ result = pd.Series.sparse.from_coo(A, dense_index=dense_index)
+ index = pd.MultiIndex.from_tuples([(0, 0), (1, 1), (2, 2)])
+ expected = pd.Series(pd.SparseArray(np.array([1, 1, 1], dtype=dtype)),
+ index=index)
+ if dense_index:
+ expected = expected.reindex(
+ pd.MultiIndex.from_product(index.levels)
+ )
+
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py
index b2f254a556603..eb3af4e6dea73 100644
--- a/pandas/tests/arrays/sparse/test_arithmetics.py
+++ b/pandas/tests/arrays/sparse/test_arithmetics.py
@@ -8,6 +8,7 @@
import pandas.util.testing as tm
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseArrayArithmetics:
_base = np.array
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index aa364870c7e60..659f2b97485a9 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -215,6 +215,7 @@ def test_scalar_with_index_infer_dtype(self, scalar, dtype):
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
@@ -231,6 +232,7 @@ def test_sparse_series_round_trip(self, kind, fill):
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
@@ -1098,6 +1100,7 @@ def test_npoints(self):
assert arr.npoints == 1
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestAccessor:
@pytest.mark.parametrize('attr', [
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index c48fae5c26301..c7a62dfe77c37 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -15,6 +15,10 @@
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
+ignore_sparse_warning = pytest.mark.filterwarnings(
+ "ignore:Sparse:FutureWarning"
+)
+
# EA & Actual Dtypes
def to_ea_dtypes(dtypes):
@@ -146,6 +150,7 @@ def test_is_object():
@pytest.mark.parametrize("check_scipy", [
False, pytest.param(True, marks=td.skip_if_no_scipy)
])
+@ignore_sparse_warning
def test_is_sparse(check_scipy):
assert com.is_sparse(pd.SparseArray([1, 2, 3]))
assert com.is_sparse(pd.SparseSeries([1, 2, 3]))
@@ -158,6 +163,7 @@ def test_is_sparse(check_scipy):
@td.skip_if_no_scipy
+@ignore_sparse_warning
def test_is_scipy_sparse():
from scipy.sparse import bsr_matrix
assert com.is_scipy_sparse(bsr_matrix([1, 2, 3]))
@@ -529,6 +535,7 @@ def test_is_bool_dtype():
@pytest.mark.parametrize("check_scipy", [
False, pytest.param(True, marks=td.skip_if_no_scipy)
])
+@ignore_sparse_warning
def test_is_extension_type(check_scipy):
assert not com.is_extension_type([1, 2, 3])
assert not com.is_extension_type(np.array([1, 2, 3]))
@@ -595,8 +602,6 @@ def test_is_offsetlike():
(pd.DatetimeIndex([1, 2]).dtype, np.dtype('=M8[ns]')),
('<M8[ns]', np.dtype('<M8[ns]')),
('datetime64[ns, Europe/London]', DatetimeTZDtype('ns', 'Europe/London')),
- (pd.SparseSeries([1, 2], dtype='int32'), SparseDtype('int32')),
- (pd.SparseSeries([1, 2], dtype='int32').dtype, SparseDtype('int32')),
(PeriodDtype(freq='D'), PeriodDtype(freq='D')),
('period[D]', PeriodDtype(freq='D')),
(IntervalDtype(), IntervalDtype()),
@@ -605,6 +610,14 @@ def test__get_dtype(input_param, result):
assert com._get_dtype(input_param) == result
+@ignore_sparse_warning
+def test__get_dtype_sparse():
+ ser = pd.SparseSeries([1, 2], dtype='int32')
+ expected = SparseDtype('int32')
+ assert com._get_dtype(ser) == expected
+ assert com._get_dtype(ser.dtype) == expected
+
+
@pytest.mark.parametrize('input_param,expected_error_message', [
(None, "Cannot deduce dtype from null object"),
(1, "data type not understood"),
@@ -640,8 +653,7 @@ def test__get_dtype_fails(input_param, expected_error_message):
(pd.DatetimeIndex(['2000'], tz='Europe/London').dtype,
pd.Timestamp),
('datetime64[ns, Europe/London]', pd.Timestamp),
- (pd.SparseSeries([1, 2], dtype='int32'), np.int32),
- (pd.SparseSeries([1, 2], dtype='int32').dtype, np.int32),
+
(PeriodDtype(freq='D'), pd.Period),
('period[D]', pd.Period),
(IntervalDtype(), pd.Interval),
@@ -652,3 +664,11 @@ def test__get_dtype_fails(input_param, expected_error_message):
])
def test__is_dtype_type(input_param, result):
assert com._is_dtype_type(input_param, lambda tipo: tipo == result)
+
+
+@ignore_sparse_warning
+def test__is_dtype_type_sparse():
+ ser = pd.SparseSeries([1, 2], dtype='int32')
+ result = np.dtype('int32')
+ assert com._is_dtype_type(ser, lambda tipo: tipo == result)
+ assert com._is_dtype_type(ser.dtype, lambda tipo: tipo == result)
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index decd0091e2ce8..79ebfcc30a7e4 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -870,7 +870,6 @@ def test_registry_find(dtype, expected):
(pd.Series([1, 2]), False),
(np.array([True, False]), True),
(pd.Series([True, False]), True),
- (pd.SparseSeries([True, False]), True),
(pd.SparseArray([True, False]), True),
(SparseDtype(bool), True)
])
@@ -879,6 +878,12 @@ def test_is_bool_dtype(dtype, expected):
assert result is expected
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
+def test_is_bool_dtype_sparse():
+ result = is_bool_dtype(pd.SparseSeries([True, False]))
+ assert result is True
+
+
@pytest.mark.parametrize("check", [
is_categorical_dtype,
is_datetime64tz_dtype,
diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py
index e0590591c6899..142ed2f9fc24d 100644
--- a/pandas/tests/dtypes/test_generic.py
+++ b/pandas/tests/dtypes/test_generic.py
@@ -1,4 +1,4 @@
-from warnings import catch_warnings
+from warnings import catch_warnings, simplefilter
import numpy as np
@@ -17,9 +17,12 @@ class TestABCClasses:
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({'names': ['a', 'b', 'c']}, index=multi_index)
- sparse_series = pd.Series([1, 2, 3]).to_sparse()
+ with catch_warnings():
+ simplefilter('ignore', FutureWarning)
+ sparse_series = pd.Series([1, 2, 3]).to_sparse()
+ sparse_frame = pd.SparseDataFrame({'a': [1, -1, None]})
+
sparse_array = pd.SparseArray(np.random.randn(10))
- sparse_frame = pd.SparseDataFrame({'a': [1, -1, None]})
datetime_array = pd.core.arrays.DatetimeArray(datetime_index)
timedelta_array = pd.core.arrays.TimedeltaArray(timedelta_index)
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index ab0f17fb4ff13..303604ba7d7ea 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -13,6 +13,7 @@
import pandas.util.testing as tm
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
@@ -1376,6 +1377,7 @@ def test_droplevel(self):
tm.assert_frame_equal(result, expected)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestIntervalIndex:
def test_setitem(self):
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index adb8c97584463..40785c6a1d321 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -2073,6 +2073,7 @@ def test_loc_duplicates(self):
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_iloc_sparse_propegate_fill_value(self):
from pandas.core.sparse.api import SparseDataFrame
df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index 1e72140516182..45b13e5159bcd 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -190,6 +190,7 @@ def test_subclass_iterrows(self):
assert isinstance(row, tm.SubclassedSeries)
tm.assert_series_equal(row, df.loc[i])
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_subclass_sparse_slice(self):
rows = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
ssdf = tm.SubclassedSparseDataFrame(rows)
@@ -214,6 +215,7 @@ def test_subclass_sparse_slice(self):
check_names=False,
check_kind=False)
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_subclass_sparse_transpose(self):
ossdf = tm.SubclassedSparseDataFrame([[1, 2, 3],
[4, 5, 6]])
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 8b140263b12bc..6b4c6a398962a 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1012,6 +1012,7 @@ def test_datetime_tz(self):
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py
index a00efe949e2bb..f568d717211cc 100644
--- a/pandas/tests/io/test_packers.py
+++ b/pandas/tests/io/test_packers.py
@@ -550,6 +550,7 @@ def test_dataframe_duplicate_column_names(self):
assert_frame_equal(result_3, expected_3)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
@@ -840,12 +841,12 @@ def legacy_packer(request, datapath):
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestMsgpack:
"""
How to add msgpack tests:
1. Install pandas version intended to output the msgpack.
-TestPackers
2. Execute "generate_legacy_storage_files.py" to create the msgpack.
$ python generate_legacy_storage_files.py <output_dir> msgpack
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 6acf54ab73b2d..b115a08d3b0d3 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -196,6 +196,7 @@ def legacy_pickle(request, datapath):
# ---------------------
# tests
# ---------------------
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_pickles(current_pickle_data, legacy_pickle):
if not is_platform_little_endian():
pytest.skip("known failure on non-little endian")
@@ -206,6 +207,7 @@ def test_pickles(current_pickle_data, legacy_pickle):
compare(current_pickle_data, legacy_pickle, version)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_round_trip_current(current_pickle_data):
def python_pickler(obj, path):
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index d8f3283ae5c4c..8b5907b920cca 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -50,6 +50,7 @@
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
+ignore_sparse = pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
# contextmanager to ensure the file cleanup
@@ -2243,6 +2244,7 @@ def test_series(self):
self._check_roundtrip(ts3, tm.assert_series_equal,
check_index_type=False)
+ @ignore_sparse
def test_sparse_series(self):
s = tm.makeStringSeries()
@@ -2259,6 +2261,7 @@ def test_sparse_series(self):
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
+ @ignore_sparse
def test_sparse_frame(self):
s = tm.makeDataFrame()
@@ -2597,6 +2600,7 @@ def test_overwrite_node(self):
tm.assert_series_equal(store['a'], ts)
+ @ignore_sparse
def test_sparse_with_compression(self):
# GH 2931
@@ -3741,6 +3745,7 @@ def test_start_stop_multiple(self):
expected = df.loc[[0], ['foo', 'bar']]
tm.assert_frame_equal(result, expected)
+ @ignore_sparse
def test_start_stop_fixed(self):
with ensure_clean_store(self.path) as store:
diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py
index 552d71ac4fc38..283814d2375b1 100644
--- a/pandas/tests/reshape/test_reshape.py
+++ b/pandas/tests/reshape/test_reshape.py
@@ -13,6 +13,7 @@
from pandas.util.testing import assert_frame_equal
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestGetDummies:
@pytest.fixture
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 316c193e96a85..9b4f1f5fd0fe5 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -122,6 +122,7 @@ def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
assert result.name == self.ts.name
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
assert result.name == self.ts.name
@@ -194,9 +195,12 @@ def test_constructor_dict_timedelta_index(self):
)
self._assert_series_equal(result, expected)
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_from_array_deprecated(self):
- with tm.assert_produces_warning(FutureWarning):
+ # multiple FutureWarnings, so can't assert stacklevel
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
self.series_klass.from_array([1, 2, 3])
def test_sparse_accessor_updates_on_inplace(self):
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index ed5cf2d6b2c51..f11595febf6ed 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -211,6 +211,7 @@ def test_combine_first_dt_tz_values(self, tz_naive_fixture):
exp = pd.Series(exp_vals, name='ser1')
assert_series_equal(exp, result)
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_concat_empty_series_dtypes(self):
# booleans
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 11ad238eecd77..77b43c1414f77 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -780,6 +780,7 @@ def test_series_fillna_limit(self):
expected[:3] = np.nan
assert_series_equal(result, expected)
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
@@ -787,7 +788,8 @@ def test_sparse_series_fillna_limit(self):
ss = s[:2].reindex(index).to_sparse()
# TODO: what is this test doing? why are result an expected
# the same call to fillna?
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
# TODO: release-note fillna performance warning
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
@@ -797,7 +799,8 @@ def test_sparse_series_fillna_limit(self):
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
@@ -805,13 +808,15 @@ def test_sparse_series_fillna_limit(self):
expected = expected.to_sparse()
assert_series_equal(result, expected)
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
@@ -819,13 +824,15 @@ def test_sparse_series_pad_backfill_limit(self):
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
+ @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py
index aef5ccf535add..563a94f4588cb 100644
--- a/pandas/tests/series/test_subclass.py
+++ b/pandas/tests/series/test_subclass.py
@@ -1,4 +1,5 @@
import numpy as np
+import pytest
import pandas as pd
from pandas import SparseDtype
@@ -39,6 +40,7 @@ def test_subclass_unstack(self):
tm.assert_frame_equal(res, exp)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesSubclassing:
def test_subclass_sparse_slice(self):
diff --git a/pandas/tests/sparse/frame/test_analytics.py b/pandas/tests/sparse/frame/test_analytics.py
index 95c1c8c453d0a..ae97682f297ad 100644
--- a/pandas/tests/sparse/frame/test_analytics.py
+++ b/pandas/tests/sparse/frame/test_analytics.py
@@ -5,6 +5,7 @@
from pandas.util import testing as tm
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)')
def test_quantile():
# GH 17386
@@ -22,6 +23,7 @@ def test_quantile():
tm.assert_sp_series_equal(result, sparse_expected)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)')
def test_quantile_multi():
# GH 17386
diff --git a/pandas/tests/sparse/frame/test_apply.py b/pandas/tests/sparse/frame/test_apply.py
index b5ea0a5c90e1a..afb54a9fa6264 100644
--- a/pandas/tests/sparse/frame/test_apply.py
+++ b/pandas/tests/sparse/frame/test_apply.py
@@ -37,17 +37,21 @@ def fill_frame(frame):
index=frame.index)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_apply(frame):
applied = frame.apply(np.sqrt)
assert isinstance(applied, SparseDataFrame)
tm.assert_almost_equal(applied.values, np.sqrt(frame.values))
# agg / broadcast
- with tm.assert_produces_warning(FutureWarning):
+ # two FutureWarnings, so we can't check stacklevel properly.
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
broadcasted = frame.apply(np.sum, broadcast=True)
assert isinstance(broadcasted, SparseDataFrame)
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
exp = frame.to_dense().apply(np.sum, broadcast=True)
tm.assert_frame_equal(broadcasted.to_dense(), exp)
@@ -56,15 +60,18 @@ def test_apply(frame):
frame.to_dense().apply(nanops.nansum).to_sparse())
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_apply_fill(fill_frame):
applied = fill_frame.apply(np.sqrt)
assert applied['A'].fill_value == np.sqrt(2)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_apply_empty(empty):
assert empty.apply(np.sqrt) is empty
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_apply_nonuq():
orig = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=['a', 'a', 'c'])
@@ -88,12 +95,14 @@ def test_apply_nonuq():
# tm.assert_series_equal(res.to_dense(), exp)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_applymap(frame):
# just test that it works
result = frame.applymap(lambda x: x * 2)
assert isinstance(result, SparseDataFrame)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_apply_keep_sparse_dtype():
# GH 23744
sdf = SparseDataFrame(np.array([[0, 1, 0], [0, 0, 0], [0, 0, 1]]),
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index 67e50a733b2c6..050526aecd2bb 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -19,6 +19,12 @@
from pandas.tseries.offsets import BDay
+def test_deprecated():
+ with tm.assert_produces_warning(FutureWarning):
+ pd.SparseDataFrame({"A": [1, 2]})
+
+
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseDataFrame(SharedWithSparse):
klass = SparseDataFrame
@@ -668,7 +674,8 @@ def test_append(self, float_frame):
a = float_frame.iloc[:5, :3]
b = float_frame.iloc[5:]
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False,
+ raise_on_extra_warnings=False):
# Stacklevel is set for pd.concat, not append
appended = a.append(b)
tm.assert_sp_frame_equal(appended.iloc[:, :3], float_frame.iloc[:, :3],
@@ -683,12 +690,12 @@ def test_append(self, float_frame):
"A": [None, None, 2, 3],
"D": [None, None, 5, None],
}, index=a.index | b.index, columns=['B', 'C', 'A', 'D'])
- with tm.assert_produces_warning(None):
+ with tm.assert_produces_warning(None, raise_on_extra_warnings=False):
appended = a.append(b, sort=False)
tm.assert_frame_equal(appended, expected)
- with tm.assert_produces_warning(None):
+ with tm.assert_produces_warning(None, raise_on_extra_warnings=False):
appended = a.append(b, sort=True)
tm.assert_sp_frame_equal(appended, expected[['A', 'B', 'C', 'D']],
@@ -809,7 +816,8 @@ def test_sparse_frame_pad_backfill_limit(self):
result = sdf[:2].reindex(index, method='pad', limit=5)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
@@ -818,7 +826,8 @@ def test_sparse_frame_pad_backfill_limit(self):
result = sdf[-2:].reindex(index, method='backfill', limit=5)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
@@ -831,10 +840,12 @@ def test_sparse_frame_fillna_limit(self):
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
result = result.fillna(method='pad', limit=5)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
@@ -842,10 +853,12 @@ def test_sparse_frame_fillna_limit(self):
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
result = result.fillna(method='backfill', limit=5)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
@@ -1280,6 +1293,7 @@ def test_default_fill_value_with_no_data(self):
tm.assert_frame_equal(expected, result)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseDataFrameArithmetic:
def test_numeric_op_scalar(self):
@@ -1309,6 +1323,7 @@ def test_comparison_op_scalar(self):
tm.assert_frame_equal(res.to_dense(), df != 0)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseDataFrameAnalytics:
def test_cumsum(self, float_frame):
diff --git a/pandas/tests/sparse/frame/test_to_csv.py b/pandas/tests/sparse/frame/test_to_csv.py
index 8b2c1b951fdfe..0dda6b5cbbdae 100644
--- a/pandas/tests/sparse/frame/test_to_csv.py
+++ b/pandas/tests/sparse/frame/test_to_csv.py
@@ -5,6 +5,7 @@
from pandas.util import testing as tm
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseDataFrameToCsv:
fill_values = [np.nan, 0, None, 1]
diff --git a/pandas/tests/sparse/frame/test_to_from_scipy.py b/pandas/tests/sparse/frame/test_to_from_scipy.py
index a80a51a66017e..269d67976b567 100644
--- a/pandas/tests/sparse/frame/test_to_from_scipy.py
+++ b/pandas/tests/sparse/frame/test_to_from_scipy.py
@@ -19,6 +19,7 @@
@pytest.mark.parametrize('fill_value', [None, 0, np.nan])
@pytest.mark.parametrize('dtype', [bool, int, float, np.uint16])
@ignore_matrix_warning
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype):
# GH 4343
# Make one ndarray and from it one sparse matrix, both to be used for
@@ -69,6 +70,7 @@ def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype):
@pytest.mark.parametrize('fill_value', [None, 0, np.nan]) # noqa: F811
@ignore_matrix_warning
@pytest.mark.filterwarnings("ignore:object dtype is not supp:UserWarning")
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_from_to_scipy_object(spmatrix, fill_value):
# GH 4343
dtype = object
@@ -117,6 +119,7 @@ def test_from_to_scipy_object(spmatrix, fill_value):
@ignore_matrix_warning
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_from_scipy_correct_ordering(spmatrix):
# GH 16179
arr = np.arange(1, 5).reshape(2, 2)
@@ -136,6 +139,7 @@ def test_from_scipy_correct_ordering(spmatrix):
@ignore_matrix_warning
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_from_scipy_fillna(spmatrix):
# GH 16112
arr = np.eye(3)
@@ -169,6 +173,7 @@ def test_from_scipy_fillna(spmatrix):
tm.assert_sp_frame_equal(sdf, expected)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_index_names_multiple_nones():
# https://github.com/pandas-dev/pandas/pull/24092
sparse = pytest.importorskip("scipy.sparse")
diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py
index 004a382f9067c..b8d0fab1debbd 100644
--- a/pandas/tests/sparse/series/test_series.py
+++ b/pandas/tests/sparse/series/test_series.py
@@ -21,6 +21,11 @@
from pandas.tseries.offsets import BDay
+def test_deprecated():
+ with tm.assert_produces_warning(FutureWarning):
+ pd.SparseSeries([0, 1])
+
+
def _test_data1():
# nan-based
arr = np.arange(20, dtype=float)
@@ -55,6 +60,7 @@ def _test_data2_zero():
return arr, index
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeries(SharedWithSparse):
series_klass = SparseSeries
@@ -532,10 +538,13 @@ def _compare(idx):
exp = pd.Series(np.repeat(nan, 5))
tm.assert_series_equal(sp.take([0, 1, 2, 3, 4]), exp.to_sparse())
- with tm.assert_produces_warning(FutureWarning):
+ # multiple FutureWarnings, can't check stacklevel
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
sp.take([1, 5], convert=True)
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
sp.take([1, 5], convert=False)
def test_numpy_take(self):
@@ -1032,6 +1041,7 @@ def test_memory_usage_deep(self, deep, fill_value):
assert sparse_usage < dense_usage
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseHandlingMultiIndexes:
def setup_method(self, method):
@@ -1062,6 +1072,7 @@ def test_round_trip_preserve_multiindex_names(self):
@pytest.mark.filterwarnings(
"ignore:the matrix subclass:PendingDeprecationWarning"
)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesScipyInteraction:
# Issue 8048: add SparseSeries coo methods
@@ -1253,13 +1264,15 @@ def test_concat_different_fill(self):
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
@@ -1285,13 +1298,15 @@ def test_concat_different_kind(self):
sparse1 = pd.SparseSeries(val1, name='x', kind='integer')
sparse2 = pd.SparseSeries(val2, name='y', kind='block', fill_value=0)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind='integer')
tm.assert_sp_series_equal(res, exp)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind='block', fill_value=0)
@@ -1425,6 +1440,7 @@ def _dense_series_compare(s, f):
tm.assert_series_equal(result.to_dense(), dense_result)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesAnalytics:
def setup_method(self, method):
@@ -1484,16 +1500,20 @@ def test_deprecated_numpy_func_call(self):
for func in funcs:
for series in ('bseries', 'zbseries'):
with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
+ check_stacklevel=False,
+ raise_on_extra_warnings=False):
getattr(np, func)(getattr(self, series))
with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
+ check_stacklevel=False,
+ raise_on_extra_warnings=False):
getattr(getattr(self, series), func)()
def test_deprecated_reindex_axis(self):
# https://github.com/pandas-dev/pandas/issues/17833
- with tm.assert_produces_warning(FutureWarning) as m:
+ # Multiple FutureWarnings, can't check stacklevel
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False) as m:
self.bseries.reindex_axis([0, 1, 2])
assert 'reindex' in str(m[0].message)
@@ -1502,6 +1522,7 @@ def test_deprecated_reindex_axis(self):
'datetime_type', (np.datetime64,
pd.Timestamp,
lambda x: datetime.strptime(x, '%Y-%m-%d')))
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_constructor_dict_datetime64_index(datetime_type):
# GH 9456
dates = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
@@ -1513,6 +1534,7 @@ def test_constructor_dict_datetime64_index(datetime_type):
tm.assert_sp_series_equal(result, expected)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_to_sparse():
# https://github.com/pandas-dev/pandas/issues/22389
arr = pd.SparseArray([1, 2, None, 3])
@@ -1521,12 +1543,14 @@ def test_to_sparse():
tm.assert_sp_array_equal(result.values, arr, check_kind=False)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_constructor_mismatched_raises():
msg = "Length of passed values is 2, index implies 3"
with pytest.raises(ValueError, match=msg):
SparseSeries([1, 2], index=[1, 2, 3])
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_block_deprecated():
s = SparseSeries([1])
with tm.assert_produces_warning(FutureWarning):
diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/sparse/test_combine_concat.py
index dff5b51d7a967..ed29f24ae677f 100644
--- a/pandas/tests/sparse/test_combine_concat.py
+++ b/pandas/tests/sparse/test_combine_concat.py
@@ -35,6 +35,7 @@ def test_uses_first_kind(self, kind):
assert result.kind == kind
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesConcat:
@pytest.mark.parametrize('kind', [
@@ -82,14 +83,16 @@ def test_concat_different_fill(self):
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
@@ -176,6 +179,7 @@ def test_concat_sparse_dense(self, kind):
tm.assert_series_equal(res, exp)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseDataFrameConcat:
def setup_method(self, method):
@@ -245,12 +249,14 @@ def test_concat_different_fill_value(self):
sparse = self.dense1.to_sparse()
sparse2 = self.dense2.to_sparse(fill_value=0)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
res = pd.concat([sparse, sparse2])
exp = pd.concat([self.dense1, self.dense2]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
- with tm.assert_produces_warning(PerformanceWarning):
+ with tm.assert_produces_warning(PerformanceWarning,
+ raise_on_extra_warnings=False):
res = pd.concat([sparse2, sparse])
exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
@@ -260,9 +266,15 @@ def test_concat_different_columns_sort_warns(self):
sparse = self.dense1.to_sparse()
sparse3 = self.dense3.to_sparse()
- with tm.assert_produces_warning(FutureWarning):
+ # stacklevel is wrong since we have two FutureWarnings,
+ # one for depr, one for sorting.
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False,
+ raise_on_extra_warnings=False):
res = pd.concat([sparse, sparse3])
- with tm.assert_produces_warning(FutureWarning):
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False,
+ raise_on_extra_warnings=False,):
exp = pd.concat([self.dense1, self.dense3])
exp = exp.to_sparse()
diff --git a/pandas/tests/sparse/test_format.py b/pandas/tests/sparse/test_format.py
index 47e457e6ed519..37c2acc587cf6 100644
--- a/pandas/tests/sparse/test_format.py
+++ b/pandas/tests/sparse/test_format.py
@@ -1,4 +1,5 @@
import numpy as np
+import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
@@ -9,6 +10,7 @@
use_32bit_repr = is_platform_windows() or is_platform_32bit()
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesFormatting:
@property
@@ -105,6 +107,7 @@ def test_sparse_int(self):
assert result == exp
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseDataFrameFormatting:
def test_sparse_frame(self):
diff --git a/pandas/tests/sparse/test_groupby.py b/pandas/tests/sparse/test_groupby.py
index ba5c0c4b18b3d..7abc1530618b8 100644
--- a/pandas/tests/sparse/test_groupby.py
+++ b/pandas/tests/sparse/test_groupby.py
@@ -5,6 +5,7 @@
import pandas.util.testing as tm
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseGroupBy:
def setup_method(self, method):
@@ -59,6 +60,7 @@ def test_aggfuncs(self):
@pytest.mark.parametrize("fill_value", [0, np.nan])
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_groupby_includes_fill_value(fill_value):
# https://github.com/pandas-dev/pandas/issues/5078
df = pd.DataFrame({'a': [fill_value, 1, fill_value, fill_value],
diff --git a/pandas/tests/sparse/test_indexing.py b/pandas/tests/sparse/test_indexing.py
index e388d05fe112d..21c303fa2a064 100644
--- a/pandas/tests/sparse/test_indexing.py
+++ b/pandas/tests/sparse/test_indexing.py
@@ -6,6 +6,7 @@
import pandas.util.testing as tm
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesIndexing:
def setup_method(self, method):
@@ -454,6 +455,7 @@ def tests_indexing_with_sparse(self, kind, fill):
s.iloc[indexer]
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
def setup_method(self, method):
@@ -599,6 +601,7 @@ def test_reindex(self):
assert sparse is not res
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseDataFrameIndexing:
def test_getitem(self):
@@ -976,6 +979,7 @@ def test_reindex_fill_value(self):
tm.assert_sp_frame_equal(res, exp)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestMultitype:
def setup_method(self, method):
diff --git a/pandas/tests/sparse/test_pivot.py b/pandas/tests/sparse/test_pivot.py
index 98e16259d25d1..48d0719bc7f2b 100644
--- a/pandas/tests/sparse/test_pivot.py
+++ b/pandas/tests/sparse/test_pivot.py
@@ -1,9 +1,11 @@
import numpy as np
+import pytest
import pandas as pd
import pandas.util.testing as tm
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestPivotTable:
def setup_method(self, method):
diff --git a/pandas/tests/sparse/test_reshape.py b/pandas/tests/sparse/test_reshape.py
index 6830e40ce6533..37ec0bba2621d 100644
--- a/pandas/tests/sparse/test_reshape.py
+++ b/pandas/tests/sparse/test_reshape.py
@@ -15,12 +15,14 @@ def multi_index3():
return pd.MultiIndex.from_tuples([(0, 0), (1, 1), (2, 2)])
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_frame_stack(sparse_df, multi_index3):
ss = sparse_df.stack()
expected = pd.SparseSeries(np.ones(3), index=multi_index3)
tm.assert_sp_series_equal(ss, expected)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_frame_unstack(sparse_df):
mi = pd.MultiIndex.from_tuples([(0, 0), (1, 0), (1, 2)])
sparse_df.index = mi
@@ -33,6 +35,7 @@ def test_sparse_frame_unstack(sparse_df):
tm.assert_numpy_array_equal(unstacked_df.values, unstacked_sdf.values)
+@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_unstack(sparse_df, multi_index3):
frame = pd.SparseSeries(np.ones(3), index=multi_index3).unstack()
diff --git a/setup.cfg b/setup.cfg
index c0833c5609bea..68d042ecfc4b8 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -70,9 +70,9 @@ doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
addopts = --strict-data-files
xfail_strict = True
filterwarnings =
+ error:Sparse:FutureWarning
error:The SparseArray:FutureWarning
-
[coverage:run]
branch = False
omit = */tests/*
| Closes https://github.com/pandas-dev/pandas/issues/19239
This currently includes the changes from https://github.com/pandas-dev/pandas/pull/25682, which I think is mergeable.
I think this would be good to have for 0.25.0. I think it's close, but I may not have time to push this across the finish line. Anyone interested in finishing it off? | https://api.github.com/repos/pandas-dev/pandas/pulls/26137 | 2019-04-18T18:32:25Z | 2019-05-29T02:15:24Z | 2019-05-29T02:15:24Z | 2019-05-29T14:06:24Z |
BUG: errors and segfaults in groupby cython transforms (#16771) | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 20d4f46348be6..353af98f5b64d 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -387,7 +387,8 @@ Groupby/Resample/Rolling
- Ensured that ordering of outputs in ``groupby`` aggregation functions is consistent across all versions of Python (:issue:`25692`)
- Ensured that result group order is correct when grouping on an ordered ``Categorical`` and specifying ``observed=True`` (:issue:`25871`, :issue:`25167`)
- Bug in :meth:`pandas.core.window.Rolling.min` and :meth:`pandas.core.window.Rolling.max` that caused a memory leak (:issue:`25893`)
-- Bug in :func:`idxmax` and :func:`idxmin` on :meth:`DataFrame.groupby` with datetime column would return incorrect dtype (:issue:`25444`, :issue:`15306`)
+- Bug in :meth:`pandas.core.groupby.GroupBy.idxmax` and :meth:`pandas.core.groupby.GroupBy.idxmin` with datetime column would return incorrect dtype (:issue:`25444`, :issue:`15306`)
+- Bug in :meth:`pandas.core.groupby.GroupBy.cumsum`, :meth:`pandas.core.groupby.GroupBy.cumprod`, :meth:`pandas.core.groupby.GroupBy.cummin` and :meth:`pandas.core.groupby.GroupBy.cummax` with categorical column having absent categories, would return incorrect result or segfault (:issue:`16771`)
Reshaping
^^^^^^^^^
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index c1fc0062dff09..2498445e78fc3 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -142,11 +142,31 @@ def group_median_float64(ndarray[float64_t, ndim=2] out,
def group_cumprod_float64(float64_t[:, :] out,
const float64_t[:, :] values,
const int64_t[:] labels,
+ int ngroups,
bint is_datetimelike,
bint skipna=True):
+ """Cumulative product of columns of `values`, in row groups `labels`.
+
+ Parameters
+ ----------
+ out : float64 array
+ Array to store cumprod in.
+ values : float64 array
+ Values to take cumprod of.
+ labels : int64 array
+ Labels to group by.
+ ngroups : int
+ Number of groups, larger than all entries of `labels`.
+ is_datetimelike : bool
+ Always false, `values` is never datetime-like.
+ skipna : bool
+ If true, ignore nans in `values`.
+
+ Notes
+ -----
+ This method modifies the `out` parameter, rather than returning an object.
"""
- Only transforms on axis=0
- """
+
cdef:
Py_ssize_t i, j, N, K, size
float64_t val
@@ -154,7 +174,7 @@ def group_cumprod_float64(float64_t[:, :] out,
int64_t lab
N, K = (<object>values).shape
- accum = np.ones_like(values)
+ accum = np.ones((ngroups, K), dtype=np.float64)
with nogil:
for i in range(N):
@@ -179,11 +199,31 @@ def group_cumprod_float64(float64_t[:, :] out,
def group_cumsum(numeric[:, :] out,
numeric[:, :] values,
const int64_t[:] labels,
+ int ngroups,
is_datetimelike,
bint skipna=True):
+ """Cumulative sum of columns of `values`, in row groups `labels`.
+
+ Parameters
+ ----------
+ out : array
+ Array to store cumsum in.
+ values : array
+ Values to take cumsum of.
+ labels : int64 array
+ Labels to group by.
+ ngroups : int
+ Number of groups, larger than all entries of `labels`.
+ is_datetimelike : bool
+ True if `values` contains datetime-like entries.
+ skipna : bool
+ If true, ignore nans in `values`.
+
+ Notes
+ -----
+ This method modifies the `out` parameter, rather than returning an object.
"""
- Only transforms on axis=0
- """
+
cdef:
Py_ssize_t i, j, N, K, size
numeric val
@@ -191,7 +231,7 @@ def group_cumsum(numeric[:, :] out,
int64_t lab
N, K = (<object>values).shape
- accum = np.zeros_like(values)
+ accum = np.zeros((ngroups, K), dtype=np.asarray(values).dtype)
with nogil:
for i in range(N):
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in
index 63cd4d6ac6ff2..8e351244b7f43 100644
--- a/pandas/_libs/groupby_helper.pxi.in
+++ b/pandas/_libs/groupby_helper.pxi.in
@@ -474,10 +474,28 @@ def group_min(groupby_t[:, :] out,
def group_cummin(groupby_t[:, :] out,
groupby_t[:, :] values,
const int64_t[:] labels,
+ int ngroups,
bint is_datetimelike):
+ """Cumulative minimum of columns of `values`, in row groups `labels`.
+
+ Parameters
+ ----------
+ out : array
+ Array to store cummin in.
+ values : array
+ Values to take cummin of.
+ labels : int64 array
+ Labels to group by.
+ ngroups : int
+ Number of groups, larger than all entries of `labels`.
+ is_datetimelike : bool
+ True if `values` contains datetime-like entries.
+
+ Notes
+ -----
+ This method modifies the `out` parameter, rather than returning an object.
"""
- Only transforms on axis=0
- """
+
cdef:
Py_ssize_t i, j, N, K, size
groupby_t val, mval
@@ -485,7 +503,7 @@ def group_cummin(groupby_t[:, :] out,
int64_t lab
N, K = (<object>values).shape
- accum = np.empty_like(values)
+ accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype)
if groupby_t is int64_t:
accum[:] = _int64_max
else:
@@ -522,10 +540,28 @@ def group_cummin(groupby_t[:, :] out,
def group_cummax(groupby_t[:, :] out,
groupby_t[:, :] values,
const int64_t[:] labels,
+ int ngroups,
bint is_datetimelike):
+ """Cumulative maximum of columns of `values`, in row groups `labels`.
+
+ Parameters
+ ----------
+ out : array
+ Array to store cummax in.
+ values : array
+ Values to take cummax of.
+ labels : int64 array
+ Labels to group by.
+ ngroups : int
+ Number of groups, larger than all entries of `labels`.
+ is_datetimelike : bool
+ True if `values` contains datetime-like entries.
+
+ Notes
+ -----
+ This method modifies the `out` parameter, rather than returning an object.
"""
- Only transforms on axis=0
- """
+
cdef:
Py_ssize_t i, j, N, K, size
groupby_t val, mval
@@ -533,7 +569,7 @@ def group_cummax(groupby_t[:, :] out,
int64_t lab
N, K = (<object>values).shape
- accum = np.empty_like(values)
+ accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype)
if groupby_t is int64_t:
accum[:] = -_int64_max
else:
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 82b9d6c1269f9..d402ae3682f0e 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -361,8 +361,8 @@ def get_group_levels(self):
'cummax': 'group_cummax',
'rank': {
'name': 'group_rank',
- 'f': lambda func, a, b, c, d, **kwargs: func(
- a, b, c, d,
+ 'f': lambda func, a, b, c, d, e, **kwargs: func(
+ a, b, c, e,
kwargs.get('ties_method', 'average'),
kwargs.get('ascending', True),
kwargs.get('pct', False),
@@ -600,9 +600,10 @@ def _transform(self, result, values, comp_ids, transform_func,
for i, chunk in enumerate(values.transpose(2, 0, 1)):
transform_func(result[:, :, i], values,
- comp_ids, is_datetimelike, **kwargs)
+ comp_ids, ngroups, is_datetimelike, **kwargs)
else:
- transform_func(result, values, comp_ids, is_datetimelike, **kwargs)
+ transform_func(result, values, comp_ids, ngroups, is_datetimelike,
+ **kwargs)
return result
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py
index e865dc35c71b0..e330329644269 100644
--- a/pandas/tests/groupby/test_transform.py
+++ b/pandas/tests/groupby/test_transform.py
@@ -9,7 +9,8 @@
from pandas.core.dtypes.common import ensure_platform_int, is_timedelta64_dtype
import pandas as pd
-from pandas import DataFrame, MultiIndex, Series, Timestamp, concat, date_range
+from pandas import (
+ Categorical, DataFrame, MultiIndex, Series, Timestamp, concat, date_range)
from pandas.core.groupby.groupby import DataError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
@@ -470,7 +471,8 @@ def _check_cython_group_transform_cumulative(pd_op, np_op, dtype):
ans = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.int64)
- pd_op(ans, data, labels, is_datetimelike)
+ ngroups = 1
+ pd_op(ans, data, labels, ngroups, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), ans[:, 0],
check_dtype=False)
@@ -496,17 +498,19 @@ def test_cython_group_transform_algos():
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
+ ngroups = 1
data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
actual = np.zeros_like(data)
actual.fill(np.nan)
- groupby.group_cumprod_float64(actual, data, labels, is_datetimelike)
+ groupby.group_cumprod_float64(actual, data, labels, ngroups,
+ is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
- groupby.group_cumsum(actual, data, labels, is_datetimelike)
+ groupby.group_cumsum(actual, data, labels, ngroups, is_datetimelike)
expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
@@ -515,7 +519,7 @@ def test_cython_group_transform_algos():
data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
actual = np.zeros_like(data, dtype='int64')
groupby.group_cumsum(actual, data.view('int64'), labels,
- is_datetimelike)
+ ngroups, is_datetimelike)
expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
np.timedelta64(5, 'ns')])
@@ -863,3 +867,16 @@ def test_groupby_transform_with_datetimes(func, values):
index=dates, name="price")
tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize('func', ['cumsum', 'cumprod', 'cummin', 'cummax'])
+def test_transform_absent_categories(func):
+ # GH 16771
+ # cython transforms with more groups than rows
+ x_vals = [1]
+ x_cats = range(2)
+ y = [1]
+ df = DataFrame(dict(x=Categorical(x_vals, x_cats), y=y))
+ result = getattr(df.y.groupby(df.x), func)()
+ expected = df.y
+ assert_series_equal(result, expected)
| - [x] closes #16771
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26134 | 2019-04-18T13:02:18Z | 2019-04-20T16:45:02Z | 2019-04-20T16:45:02Z | 2019-04-20T16:45:06Z |
DOC: add deprecated to docstring of Series.data/itemsize/strides/flags/base | diff --git a/pandas/core/base.py b/pandas/core/base.py
index 1d3eb880f32e3..0b3d8b601864d 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -703,6 +703,8 @@ def item(self):
def data(self):
"""
Return the data pointer of the underlying data.
+
+ .. deprecated:: 0.23.0
"""
warnings.warn("{obj}.data is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
@@ -713,6 +715,8 @@ def data(self):
def itemsize(self):
"""
Return the size of the dtype of the item of the underlying data.
+
+ .. deprecated:: 0.23.0
"""
warnings.warn("{obj}.itemsize is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
@@ -730,6 +734,8 @@ def nbytes(self):
def strides(self):
"""
Return the strides of the underlying data.
+
+ .. deprecated:: 0.23.0
"""
warnings.warn("{obj}.strides is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
@@ -747,6 +753,8 @@ def size(self):
def flags(self):
"""
Return the ndarray.flags for the underlying data.
+
+ .. deprecated:: 0.23.0
"""
warnings.warn("{obj}.flags is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
@@ -757,6 +765,8 @@ def flags(self):
def base(self):
"""
Return the base object if the memory of the underlying data is shared.
+
+ .. deprecated:: 0.23.0
"""
warnings.warn("{obj}.base is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
| xref https://github.com/pandas-dev/pandas/issues/20419#issuecomment-481773575 | https://api.github.com/repos/pandas-dev/pandas/pulls/26132 | 2019-04-18T08:28:09Z | 2019-04-18T15:31:40Z | 2019-04-18T15:31:40Z | 2019-04-18T15:31:45Z |
CLN: rename test method parameter name 'object' to 'self' | diff --git a/pandas/tests/arrays/categorical/test_warnings.py b/pandas/tests/arrays/categorical/test_warnings.py
index 23d00585f950e..4a37d5097648f 100644
--- a/pandas/tests/arrays/categorical/test_warnings.py
+++ b/pandas/tests/arrays/categorical/test_warnings.py
@@ -18,14 +18,14 @@ def test_tab_complete_warning(self, ip):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('c.', 1))
- def test_CategoricalAccessor_categorical_deprecation(object):
+ def test_CategoricalAccessor_categorical_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
pd.Series(['a', 'b'], dtype='category').cat.categorical
- def test_CategoricalAccessor_name_deprecation(object):
+ def test_CategoricalAccessor_name_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
pd.Series(['a', 'b'], dtype='category').cat.name
- def test_CategoricalAccessor_index_deprecation(object):
+ def test_CategoricalAccessor_index_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
pd.Series(['a', 'b'], dtype='category').cat.index
| As part of #26128, I saw that in three test methods, their first parameter has been named ``object`` rather than ``self``. This rectifies that. | https://api.github.com/repos/pandas-dev/pandas/pulls/26129 | 2019-04-18T00:25:15Z | 2019-04-18T03:48:23Z | 2019-04-18T03:48:23Z | 2019-04-19T17:13:36Z |
CLN: remove unneeded inheritance from base object | diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py
index 74849d330f2bc..45ef47fde0a56 100644
--- a/asv_bench/benchmarks/algorithms.py
+++ b/asv_bench/benchmarks/algorithms.py
@@ -13,7 +13,7 @@
pass
-class Factorize(object):
+class Factorize:
params = [[True, False], ['int', 'uint', 'float', 'string']]
param_names = ['sort', 'dtype']
@@ -30,7 +30,7 @@ def time_factorize(self, sort, dtype):
self.idx.factorize(sort=sort)
-class FactorizeUnique(object):
+class FactorizeUnique:
params = [[True, False], ['int', 'uint', 'float', 'string']]
param_names = ['sort', 'dtype']
@@ -48,7 +48,7 @@ def time_factorize(self, sort, dtype):
self.idx.factorize(sort=sort)
-class Duplicated(object):
+class Duplicated:
params = [['first', 'last', False], ['int', 'uint', 'float', 'string']]
param_names = ['keep', 'dtype']
@@ -67,7 +67,7 @@ def time_duplicated(self, keep, dtype):
self.idx.duplicated(keep=keep)
-class DuplicatedUniqueIndex(object):
+class DuplicatedUniqueIndex:
params = ['int', 'uint', 'float', 'string']
param_names = ['dtype']
@@ -86,7 +86,7 @@ def time_duplicated_unique(self, dtype):
self.idx.duplicated()
-class Hashing(object):
+class Hashing:
def setup_cache(self):
N = 10**5
@@ -124,7 +124,7 @@ def time_series_dates(self, df):
hashing.hash_pandas_object(df['dates'])
-class Quantile(object):
+class Quantile:
params = [[0, 0.5, 1],
['linear', 'nearest', 'lower', 'higher', 'midpoint'],
['float', 'int', 'uint']]
diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py
index d061755208c9e..dd316a2bc88d0 100644
--- a/asv_bench/benchmarks/attrs_caching.py
+++ b/asv_bench/benchmarks/attrs_caching.py
@@ -6,7 +6,7 @@
from pandas.util.decorators import cache_readonly
-class DataFrameAttributes(object):
+class DataFrameAttributes:
def setup(self):
self.df = DataFrame(np.random.randn(10, 6))
@@ -19,7 +19,7 @@ def time_set_index(self):
self.df.index = self.cur_index
-class CacheReadonly(object):
+class CacheReadonly:
def setup(self):
diff --git a/asv_bench/benchmarks/binary_ops.py b/asv_bench/benchmarks/binary_ops.py
index 22b8ed80f3d07..26cd66284c41e 100644
--- a/asv_bench/benchmarks/binary_ops.py
+++ b/asv_bench/benchmarks/binary_ops.py
@@ -7,7 +7,7 @@
import pandas.computation.expressions as expr
-class Ops(object):
+class Ops:
params = [[True, False], ['default', 1]]
param_names = ['use_numexpr', 'threads']
@@ -38,7 +38,7 @@ def teardown(self, use_numexpr, threads):
expr.set_numexpr_threads()
-class Ops2(object):
+class Ops2:
def setup(self):
N = 10**3
@@ -88,7 +88,7 @@ def time_frame_series_dot(self):
self.df.dot(self.s)
-class Timeseries(object):
+class Timeseries:
params = [None, 'US/Eastern']
param_names = ['tz']
@@ -114,7 +114,7 @@ def time_timestamp_ops_diff_with_shift(self, tz):
self.s - self.s.shift()
-class AddOverflowScalar(object):
+class AddOverflowScalar:
params = [1, -1, 0]
param_names = ['scalar']
@@ -127,7 +127,7 @@ def time_add_overflow_scalar(self, scalar):
checked_add_with_arr(self.arr, scalar)
-class AddOverflowArray(object):
+class AddOverflowArray:
def setup(self):
N = 10**6
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index 4b5b2848f7e0f..790157497ca36 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -12,7 +12,7 @@
pass
-class Concat(object):
+class Concat:
def setup(self):
N = 10**5
@@ -28,7 +28,7 @@ def time_union(self):
union_categoricals([self.a, self.b])
-class Constructor(object):
+class Constructor:
def setup(self):
N = 10**5
@@ -77,7 +77,7 @@ def time_existing_series(self):
pd.Categorical(self.series)
-class ValueCounts(object):
+class ValueCounts:
params = [True, False]
param_names = ['dropna']
@@ -92,7 +92,7 @@ def time_value_counts(self, dropna):
self.ts.value_counts(dropna=dropna)
-class Repr(object):
+class Repr:
def setup(self):
self.sel = pd.Series(['s1234']).astype('category')
@@ -101,7 +101,7 @@ def time_rendering(self):
str(self.sel)
-class SetCategories(object):
+class SetCategories:
def setup(self):
n = 5 * 10**5
@@ -113,7 +113,7 @@ def time_set_categories(self):
self.ts.cat.set_categories(self.ts.cat.categories[::2])
-class RemoveCategories(object):
+class RemoveCategories:
def setup(self):
n = 5 * 10**5
@@ -125,7 +125,7 @@ def time_remove_categories(self):
self.ts.cat.remove_categories(self.ts.cat.categories[::2])
-class Rank(object):
+class Rank:
def setup(self):
N = 10**5
@@ -162,7 +162,7 @@ def time_rank_int_cat_ordered(self):
self.s_int_cat_ordered.rank()
-class Isin(object):
+class Isin:
params = ['object', 'int64']
param_names = ['dtype']
@@ -181,7 +181,7 @@ def time_isin_categorical(self, dtype):
self.series.isin(self.sample)
-class IsMonotonic(object):
+class IsMonotonic:
def setup(self):
N = 1000
@@ -201,7 +201,7 @@ def time_categorical_series_is_monotonic_decreasing(self):
self.s.is_monotonic_decreasing
-class Contains(object):
+class Contains:
def setup(self):
N = 10**5
@@ -216,7 +216,7 @@ def time_categorical_contains(self):
self.key in self.c
-class CategoricalSlicing(object):
+class CategoricalSlicing:
params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
param_names = ['index']
@@ -257,7 +257,7 @@ def time_getitem_bool_array(self, index):
self.data[self.data == self.cat_scalar]
-class Indexing(object):
+class Indexing:
def setup(self):
N = 10**5
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index 5715c4fb2d0d4..1c6841a296377 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -39,7 +39,7 @@ def list_of_lists_with_none(arr):
return [[i, -i] for i in arr][:-1] + [None]
-class SeriesConstructors(object):
+class SeriesConstructors:
param_names = ["data_fmt", "with_index", "dtype"]
params = [[no_change,
@@ -68,7 +68,7 @@ def time_series_constructor(self, data_fmt, with_index, dtype):
Series(self.data, index=self.index)
-class SeriesDtypesConstructors(object):
+class SeriesDtypesConstructors:
def setup(self):
N = 10**4
@@ -90,7 +90,7 @@ def time_dtindex_from_index_with_series(self):
Index(self.s)
-class MultiIndexConstructor(object):
+class MultiIndexConstructor:
def setup(self):
N = 10**4
diff --git a/asv_bench/benchmarks/dtypes.py b/asv_bench/benchmarks/dtypes.py
index e59154cd99965..9bfaaa8696009 100644
--- a/asv_bench/benchmarks/dtypes.py
+++ b/asv_bench/benchmarks/dtypes.py
@@ -12,7 +12,7 @@
_dtypes = _numpy_dtypes + extension_dtypes
-class Dtypes(object):
+class Dtypes:
params = (_dtypes +
list(map(lambda dt: dt.name, _dtypes)))
param_names = ['dtype']
@@ -21,7 +21,7 @@ def time_pandas_dtype(self, dtype):
pandas_dtype(dtype)
-class DtypesInvalid(object):
+class DtypesInvalid:
param_names = ['dtype']
params = ['scalar-string', 'scalar-int', 'list-string', 'array-string']
data_dict = {'scalar-string': 'foo',
diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py
index 68df38cd50742..be47d35f2cad1 100644
--- a/asv_bench/benchmarks/eval.py
+++ b/asv_bench/benchmarks/eval.py
@@ -6,7 +6,7 @@
import pandas.computation.expressions as expr
-class Eval(object):
+class Eval:
params = [['numexpr', 'python'], [1, 'all']]
param_names = ['engine', 'threads']
@@ -37,7 +37,7 @@ def teardown(self, engine, threads):
expr.set_numexpr_threads()
-class Query(object):
+class Query:
def setup(self):
N = 10**6
diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index dfb6ab5b189b2..19c2a913e8494 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -8,7 +8,7 @@
from pandas.core.datetools import * # noqa
-class FromDicts(object):
+class FromDicts:
def setup(self):
N, K = 5000, 50
@@ -41,7 +41,7 @@ def time_nested_dict_int64(self):
DataFrame(self.data2)
-class FromSeries(object):
+class FromSeries:
def setup(self):
mi = MultiIndex.from_product([range(100), range(100)])
@@ -51,7 +51,7 @@ def time_mi_series(self):
DataFrame(self.s)
-class FromDictwithTimestamp(object):
+class FromDictwithTimestamp:
params = [Nano(1), Hour(1)]
param_names = ['offset']
@@ -67,7 +67,7 @@ def time_dict_with_timestamp_offsets(self, offset):
DataFrame(self.d)
-class FromRecords(object):
+class FromRecords:
params = [None, 1000]
param_names = ['nrows']
@@ -81,7 +81,7 @@ def time_frame_from_records_generator(self, nrows):
self.df = DataFrame.from_records(self.gen, nrows=nrows)
-class FromNDArray(object):
+class FromNDArray:
def setup(self):
N = 100000
@@ -91,7 +91,7 @@ def time_frame_from_ndarray(self):
self.df = DataFrame(self.data)
-class FromLists(object):
+class FromLists:
goal_time = 0.2
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index ba2e63c20d3f8..0c1d861ce0839 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -7,7 +7,7 @@
import pandas.util.testing as tm
-class GetNumericData(object):
+class GetNumericData:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 25))
@@ -19,7 +19,7 @@ def time_frame_get_numeric_data(self):
self.df._get_numeric_data()
-class Lookup(object):
+class Lookup:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 8),
@@ -39,7 +39,7 @@ def time_frame_fancy_lookup_all(self):
self.df.lookup(self.row_labels_all, self.col_labels_all)
-class Reindex(object):
+class Reindex:
def setup(self):
N = 10**3
@@ -65,7 +65,7 @@ def time_reindex_upcast(self):
self.df2.reindex(np.random.permutation(range(1200)))
-class Rename(object):
+class Rename:
def setup(self):
N = 10**3
@@ -95,7 +95,7 @@ def time_dict_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
-class Iteration(object):
+class Iteration:
def setup(self):
N = 1000
@@ -189,7 +189,7 @@ def time_iterrows(self):
pass
-class ToString(object):
+class ToString:
def setup(self):
self.df = DataFrame(np.random.randn(100, 10))
@@ -198,7 +198,7 @@ def time_to_string_floats(self):
self.df.to_string()
-class ToHTML(object):
+class ToHTML:
def setup(self):
nrows = 500
@@ -210,7 +210,7 @@ def time_to_html_mixed(self):
self.df2.to_html()
-class Repr(object):
+class Repr:
def setup(self):
nrows = 10000
@@ -235,7 +235,7 @@ def time_frame_repr_wide(self):
repr(self.df_wide)
-class MaskBool(object):
+class MaskBool:
def setup(self):
data = np.random.randn(1000, 500)
@@ -251,7 +251,7 @@ def time_frame_mask_floats(self):
self.bools.astype(float).mask(self.mask)
-class Isnull(object):
+class Isnull:
def setup(self):
N = 10**3
@@ -283,7 +283,7 @@ def time_isnull_obj(self):
isnull(self.df_obj)
-class Fillna(object):
+class Fillna:
params = ([True, False], ['pad', 'bfill'])
param_names = ['inplace', 'method']
@@ -297,7 +297,7 @@ def time_frame_fillna(self, inplace, method):
self.df.fillna(inplace=inplace, method=method)
-class Dropna(object):
+class Dropna:
params = (['all', 'any'], [0, 1])
param_names = ['how', 'axis']
@@ -317,7 +317,7 @@ def time_dropna_axis_mixed_dtypes(self, how, axis):
self.df_mixed.dropna(how=how, axis=axis)
-class Count(object):
+class Count:
params = [0, 1]
param_names = ['axis']
@@ -345,7 +345,7 @@ def time_count_level_mixed_dtypes_multi(self, axis):
self.df_mixed.count(axis=axis, level=1)
-class Apply(object):
+class Apply:
def setup(self):
self.df = DataFrame(np.random.randn(1000, 100))
@@ -373,7 +373,7 @@ def time_apply_ref_by_name(self):
self.df3.apply(lambda x: x['A'] + x['B'], axis=1)
-class Dtypes(object):
+class Dtypes:
def setup(self):
self.df = DataFrame(np.random.randn(1000, 1000))
@@ -382,7 +382,7 @@ def time_frame_dtypes(self):
self.df.dtypes
-class Equals(object):
+class Equals:
def setup(self):
N = 10**3
@@ -418,7 +418,7 @@ def time_frame_object_unequal(self):
self.object_df.equals(self.object_df_nan)
-class Interpolate(object):
+class Interpolate:
params = [None, 'infer']
param_names = ['downcast']
@@ -443,7 +443,7 @@ def time_interpolate_some_good(self, downcast):
self.df2.interpolate(downcast=downcast)
-class Shift(object):
+class Shift:
# frame shift speedup issue-5609
params = [0, 1]
param_names = ['axis']
@@ -455,7 +455,7 @@ def time_shift(self, axis):
self.df.shift(1, axis=axis)
-class Nunique(object):
+class Nunique:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 1000))
@@ -464,7 +464,7 @@ def time_frame_nunique(self):
self.df.nunique()
-class Duplicated(object):
+class Duplicated:
def setup(self):
n = (1 << 20)
@@ -482,7 +482,7 @@ def time_frame_duplicated_wide(self):
self.df2.duplicated()
-class XS(object):
+class XS:
params = [0, 1]
param_names = ['axis']
@@ -495,7 +495,7 @@ def time_frame_xs(self, axis):
self.df.xs(self.N / 2, axis=axis)
-class SortValues(object):
+class SortValues:
params = [True, False]
param_names = ['ascending']
@@ -507,7 +507,7 @@ def time_frame_sort_values(self, ascending):
self.df.sort_values(by='A', ascending=ascending)
-class SortIndexByColumns(object):
+class SortIndexByColumns:
def setup(self):
N = 10000
@@ -520,7 +520,7 @@ def time_frame_sort_values_by_columns(self):
self.df.sort_values(by=['key1', 'key2'])
-class Quantile(object):
+class Quantile:
params = [0, 1]
param_names = ['axis']
@@ -532,7 +532,7 @@ def time_frame_quantile(self, axis):
self.df.quantile([0.1, 0.5], axis=axis)
-class GetDtypeCounts(object):
+class GetDtypeCounts:
# 2807
def setup(self):
self.df = DataFrame(np.random.randn(10, 10000))
@@ -544,7 +544,7 @@ def time_info(self):
self.df.info()
-class NSort(object):
+class NSort:
params = ['first', 'last', 'all']
param_names = ['keep']
@@ -566,7 +566,7 @@ def time_nsmallest_two_columns(self, keep):
self.df.nsmallest(100, ['A', 'B'], keep=keep)
-class Describe(object):
+class Describe:
def setup(self):
self.df = DataFrame({
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index 6819a296c81df..65a03bfda48c5 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -26,7 +26,7 @@ def wrapper(fname):
from .pandas_vb_common import BaseIO
-class ParallelGroupbyMethods(object):
+class ParallelGroupbyMethods:
params = ([2, 4, 8], ['count', 'last', 'max', 'mean', 'min', 'prod',
'sum', 'var'])
@@ -57,7 +57,7 @@ def time_loop(self, threads, method):
self.loop()
-class ParallelGroups(object):
+class ParallelGroups:
params = [2, 4, 8]
param_names = ['threads']
@@ -78,7 +78,7 @@ def time_get_groups(self, threads):
self.get_groups()
-class ParallelTake1D(object):
+class ParallelTake1D:
params = ['int64', 'float64']
param_names = ['dtype']
@@ -99,7 +99,7 @@ def time_take1d(self, dtype):
self.parallel_take1d()
-class ParallelKth(object):
+class ParallelKth:
number = 1
repeat = 5
@@ -121,7 +121,7 @@ def time_kth_smallest(self):
self.parallel_kth_smallest()
-class ParallelDatetimeFields(object):
+class ParallelDatetimeFields:
def setup(self):
if not have_real_test_parallel:
@@ -167,7 +167,7 @@ def run(period):
run(self.period)
-class ParallelRolling(object):
+class ParallelRolling:
params = ['median', 'mean', 'min', 'max', 'var', 'skew', 'kurt', 'std']
param_names = ['method']
@@ -239,7 +239,7 @@ def time_read_csv(self, dtype):
self.parallel_read_csv()
-class ParallelFactorize(object):
+class ParallelFactorize:
number = 1
repeat = 5
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 27d279bb90a31..4dfce079dd09c 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -21,7 +21,7 @@
}
-class ApplyDictReturn(object):
+class ApplyDictReturn:
def setup(self):
self.labels = np.arange(1000).repeat(10)
self.data = Series(np.random.randn(len(self.labels)))
@@ -31,7 +31,7 @@ def time_groupby_apply_dict_return(self):
'last': x.values[-1]})
-class Apply(object):
+class Apply:
def setup_cache(self):
N = 10**4
@@ -63,7 +63,7 @@ def time_copy_overhead_single_col(self, df):
df.groupby('key').apply(self.df_copy_function)
-class Groups(object):
+class Groups:
param_names = ['key']
params = ['int64_small', 'int64_large', 'object_small', 'object_large']
@@ -87,7 +87,7 @@ def time_series_groups(self, data, key):
self.ser.groupby(self.ser).groups
-class GroupManyLabels(object):
+class GroupManyLabels:
params = [1, 1000]
param_names = ['ncols']
@@ -102,7 +102,7 @@ def time_sum(self, ncols):
self.df.groupby(self.labels).sum()
-class Nth(object):
+class Nth:
param_names = ['dtype']
params = ['float32', 'float64', 'datetime', 'object']
@@ -140,7 +140,7 @@ def time_series_nth(self, dtype):
self.df['values'].groupby(self.df['key']).nth(0)
-class DateAttributes(object):
+class DateAttributes:
def setup(self):
rng = date_range('1/1/2000', '12/31/2005', freq='H')
@@ -151,7 +151,7 @@ def time_len_groupby_object(self):
len(self.ts.groupby([self.year, self.month, self.day]))
-class Int64(object):
+class Int64:
def setup(self):
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 17, 5))
@@ -167,7 +167,7 @@ def time_overflow(self):
self.df.groupby(self.cols).max()
-class CountMultiDtype(object):
+class CountMultiDtype:
def setup_cache(self):
n = 10000
@@ -193,7 +193,7 @@ def time_multi_count(self, df):
df.groupby(['key1', 'key2']).count()
-class CountMultiInt(object):
+class CountMultiInt:
def setup_cache(self):
n = 10000
@@ -210,7 +210,7 @@ def time_multi_int_nunique(self, df):
df.groupby(['key1', 'key2']).nunique()
-class AggFunctions(object):
+class AggFunctions:
def setup_cache(self):
N = 10**5
@@ -240,7 +240,7 @@ def time_different_python_functions_singlecol(self, df):
df.groupby('key1').agg([sum, min, max])
-class GroupStrings(object):
+class GroupStrings:
def setup(self):
n = 2 * 10**5
@@ -255,7 +255,7 @@ def time_multi_columns(self):
self.df.groupby(list('abcd')).max()
-class MultiColumn(object):
+class MultiColumn:
def setup_cache(self):
N = 10**5
@@ -282,7 +282,7 @@ def time_col_select_numpy_sum(self, df):
df.groupby(['key1', 'key2'])['data1'].agg(np.sum)
-class Size(object):
+class Size:
def setup(self):
n = 10**5
@@ -309,7 +309,7 @@ def time_category_size(self):
self.draws.groupby(self.cats).size()
-class GroupByMethods(object):
+class GroupByMethods:
param_names = ['dtype', 'method', 'application']
params = [['int', 'float', 'object', 'datetime'],
@@ -359,7 +359,7 @@ def time_dtype_as_field(self, dtype, method, application):
self.as_field_method()
-class RankWithTies(object):
+class RankWithTies:
# GH 21237
param_names = ['dtype', 'tie_method']
params = [['float64', 'float32', 'int64', 'datetime64'],
@@ -377,7 +377,7 @@ def time_rank_ties(self, dtype, tie_method):
self.df.groupby('key').rank(method=tie_method)
-class Float32(object):
+class Float32:
# GH 13335
def setup(self):
tmp1 = (np.random.random(10000) * 0.1).astype(np.float32)
@@ -390,7 +390,7 @@ def time_sum(self):
self.df.groupby(['a'])['b'].sum()
-class Categories(object):
+class Categories:
def setup(self):
N = 10**5
@@ -426,7 +426,7 @@ def time_groupby_extra_cat_nosort(self):
self.df_extra_cat.groupby('a', sort=False)['b'].count()
-class Datelike(object):
+class Datelike:
# GH 14338
params = ['period_range', 'date_range', 'date_range_tz']
param_names = ['grouper']
@@ -443,7 +443,7 @@ def time_sum(self, grouper):
self.df.groupby(self.grouper).sum()
-class SumBools(object):
+class SumBools:
# GH 2692
def setup(self):
N = 500
@@ -454,7 +454,7 @@ def time_groupby_sum_booleans(self):
self.df.groupby('ii').sum()
-class SumMultiLevel(object):
+class SumMultiLevel:
# GH 9049
timeout = 120.0
@@ -468,7 +468,7 @@ def time_groupby_sum_multiindex(self):
self.df.groupby(level=[0, 1]).sum()
-class Transform(object):
+class Transform:
def setup(self):
n1 = 400
@@ -514,7 +514,7 @@ def time_transform_multi_key4(self):
self.df4.groupby(['jim', 'joe'])['jolie'].transform('max')
-class TransformBools(object):
+class TransformBools:
def setup(self):
N = 120000
@@ -528,7 +528,7 @@ def time_transform_mean(self):
self.df['signal'].groupby(self.g).transform(np.mean)
-class TransformNaN(object):
+class TransformNaN:
# GH 12737
def setup(self):
self.df_nans = DataFrame({'key': np.repeat(np.arange(1000), 10),
diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py
index bbe164d4858ab..fb3001319d96c 100644
--- a/asv_bench/benchmarks/index_object.py
+++ b/asv_bench/benchmarks/index_object.py
@@ -4,7 +4,7 @@
Float64Index)
-class SetOperations(object):
+class SetOperations:
params = (['datetime', 'date_string', 'int', 'strings'],
['intersection', 'union', 'symmetric_difference'])
@@ -29,7 +29,7 @@ def time_operation(self, dtype, method):
getattr(self.left, method)(self.right)
-class SetDisjoint(object):
+class SetDisjoint:
def setup(self):
N = 10**5
@@ -41,7 +41,7 @@ def time_datetime_difference_disjoint(self):
self.datetime_left.difference(self.datetime_right)
-class Datetime(object):
+class Datetime:
def setup(self):
self.dr = date_range('20000101', freq='D', periods=10000)
@@ -50,7 +50,7 @@ def time_is_dates_only(self):
self.dr._is_dates_only
-class Ops(object):
+class Ops:
sample_time = 0.2
params = ['float', 'int']
@@ -77,7 +77,7 @@ def time_modulo(self, dtype):
self.index % 2
-class Range(object):
+class Range:
def setup(self):
self.idx_inc = RangeIndex(start=0, stop=10**7, step=3)
@@ -96,7 +96,7 @@ def time_min_trivial(self):
self.idx_inc.min()
-class IndexAppend(object):
+class IndexAppend:
def setup(self):
@@ -125,7 +125,7 @@ def time_append_obj_list(self):
self.obj_idx.append(self.object_idxs)
-class Indexing(object):
+class Indexing:
params = ['String', 'Float', 'Int']
param_names = ['dtype']
@@ -170,7 +170,7 @@ def time_get_loc_non_unique_sorted(self, dtype):
self.non_unique_sorted.get_loc(self.key)
-class Float64IndexMethod(object):
+class Float64IndexMethod:
# GH 13166
def setup(self):
N = 100000
diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index b8e983c60b8b5..4c932cf3600e8 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -8,7 +8,7 @@
IndexSlice, concat, date_range)
-class NumericSeriesIndexing(object):
+class NumericSeriesIndexing:
params = [
(Int64Index, UInt64Index, Float64Index),
@@ -79,7 +79,7 @@ def time_loc_slice(self, index, index_structure):
self.data.loc[:800000]
-class NonNumericSeriesIndexing(object):
+class NonNumericSeriesIndexing:
params = [
('string', 'datetime'),
@@ -114,7 +114,7 @@ def time_getitem_list_like(self, index, index_structure):
self.s[[self.lbl]]
-class DataFrameStringIndexing(object):
+class DataFrameStringIndexing:
def setup(self):
index = tm.makeStringIndex(1000)
@@ -146,7 +146,7 @@ def time_boolean_rows_object(self):
self.df[self.bool_obj_indexer]
-class DataFrameNumericIndexing(object):
+class DataFrameNumericIndexing:
def setup(self):
self.idx_dupe = np.array(range(30)) * 99
@@ -170,7 +170,7 @@ def time_bool_indexer(self):
self.df[self.bool_indexer]
-class Take(object):
+class Take:
params = ['int', 'datetime']
param_names = ['index']
@@ -187,7 +187,7 @@ def time_take(self, index):
self.s.take(self.indexer)
-class MultiIndexing(object):
+class MultiIndexing:
def setup(self):
mi = MultiIndex.from_product([range(1000), range(1000)])
@@ -215,7 +215,7 @@ def time_index_slice(self):
self.mdt.loc[self.idx, :]
-class IntervalIndexing(object):
+class IntervalIndexing:
def setup_cache(self):
idx = IntervalIndex.from_breaks(np.arange(1000001))
@@ -235,7 +235,7 @@ def time_loc_list(self, monotonic):
monotonic.loc[80000:]
-class CategoricalIndexIndexing(object):
+class CategoricalIndexIndexing:
params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
param_names = ['index']
@@ -277,7 +277,7 @@ def time_get_indexer_list(self, index):
self.data.get_indexer(self.cat_list)
-class MethodLookup(object):
+class MethodLookup:
def setup_cache(self):
s = Series()
@@ -293,7 +293,7 @@ def time_lookup_loc(self, s):
s.loc
-class GetItemSingleColumn(object):
+class GetItemSingleColumn:
def setup(self):
self.df_string_col = DataFrame(np.random.randn(3000, 1), columns=['A'])
@@ -306,7 +306,7 @@ def time_frame_getitem_single_column_int(self):
self.df_int_col[0]
-class AssignTimeseriesIndex(object):
+class AssignTimeseriesIndex:
def setup(self):
N = 100000
@@ -317,7 +317,7 @@ def time_frame_assign_timeseries_index(self):
self.df['date'] = self.df.index
-class InsertColumns(object):
+class InsertColumns:
def setup(self):
self.N = 10**3
diff --git a/asv_bench/benchmarks/indexing_engines.py b/asv_bench/benchmarks/indexing_engines.py
index f3d063ee31bc8..5655701781846 100644
--- a/asv_bench/benchmarks/indexing_engines.py
+++ b/asv_bench/benchmarks/indexing_engines.py
@@ -16,7 +16,7 @@ def _get_numeric_engines():
if hasattr(libindex, engine_name)]
-class NumericEngineIndexing(object):
+class NumericEngineIndexing:
params = [_get_numeric_engines(),
['monotonic_incr', 'monotonic_decr', 'non_monotonic'],
@@ -42,7 +42,7 @@ def time_get_loc(self, engine_and_dtype, index_type):
self.data.get_loc(2)
-class ObjectEngineIndexing(object):
+class ObjectEngineIndexing:
params = [('monotonic_incr', 'monotonic_decr', 'non_monotonic')]
param_names = ['index_type']
diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py
index 423bd02b93596..065c82207d251 100644
--- a/asv_bench/benchmarks/inference.py
+++ b/asv_bench/benchmarks/inference.py
@@ -5,7 +5,7 @@
from .pandas_vb_common import numeric_dtypes, lib
-class NumericInferOps(object):
+class NumericInferOps:
# from GH 7332
params = numeric_dtypes
param_names = ['dtype']
@@ -31,7 +31,7 @@ def time_modulo(self, dtype):
self.df['A'] % self.df['B']
-class DateInferOps(object):
+class DateInferOps:
# from GH 7332
def setup_cache(self):
N = 5 * 10**5
@@ -49,7 +49,7 @@ def time_add_timedeltas(self, df):
df['timedelta'] + df['timedelta']
-class ToNumeric(object):
+class ToNumeric:
params = ['ignore', 'coerce']
param_names = ['errors']
@@ -70,7 +70,7 @@ def time_from_str(self, errors):
to_numeric(self.str, errors=errors)
-class ToNumericDowncast(object):
+class ToNumericDowncast:
param_names = ['dtype', 'downcast']
params = [['string-float', 'string-int', 'string-nint', 'datetime64',
@@ -95,7 +95,7 @@ def time_downcast(self, dtype, downcast):
to_numeric(self.data, downcast=downcast)
-class MaybeConvertNumeric(object):
+class MaybeConvertNumeric:
def setup_cache(self):
N = 10**6
diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py
index 0d4c5f1368052..d48f37cc45e99 100644
--- a/asv_bench/benchmarks/io/csv.py
+++ b/asv_bench/benchmarks/io/csv.py
@@ -50,7 +50,7 @@ def time_frame_date_formatting(self):
self.data.to_csv(self.fname, date_format='%Y%m%d')
-class StringIORewind(object):
+class StringIORewind:
def data(self, stringio_object):
stringio_object.seek(0)
diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
index 2664cac89f2cf..1decb83f2f723 100644
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -4,7 +4,7 @@
import pandas.util.testing as tm
-class Excel(object):
+class Excel:
params = ['openpyxl', 'xlsxwriter', 'xlwt']
param_names = ['engine']
diff --git a/asv_bench/benchmarks/io/sas.py b/asv_bench/benchmarks/io/sas.py
index 2783f42cad895..8181f1d41ac70 100644
--- a/asv_bench/benchmarks/io/sas.py
+++ b/asv_bench/benchmarks/io/sas.py
@@ -3,7 +3,7 @@
from pandas import read_sas
-class SAS(object):
+class SAS:
params = ['sas7bdat', 'xport']
param_names = ['format']
diff --git a/asv_bench/benchmarks/io/sql.py b/asv_bench/benchmarks/io/sql.py
index 075d3bdda5ed9..ee48f3bd0a3ab 100644
--- a/asv_bench/benchmarks/io/sql.py
+++ b/asv_bench/benchmarks/io/sql.py
@@ -6,7 +6,7 @@
from sqlalchemy import create_engine
-class SQL(object):
+class SQL:
params = ['sqlalchemy', 'sqlite']
param_names = ['connection']
@@ -38,7 +38,7 @@ def time_read_sql_query(self, connection):
read_sql_query(self.query_all, self.con)
-class WriteSQLDtypes(object):
+class WriteSQLDtypes:
params = (['sqlalchemy', 'sqlite'],
['float', 'float_with_nan', 'string', 'bool', 'int', 'datetime'])
@@ -71,7 +71,7 @@ def time_read_sql_query_select_column(self, connection, dtype):
read_sql_query(self.query_col, self.con)
-class ReadSQLTable(object):
+class ReadSQLTable:
def setup(self):
N = 10000
@@ -98,7 +98,7 @@ def time_read_sql_table_parse_dates(self):
parse_dates=['datetime_string'])
-class ReadSQLTableDtypes(object):
+class ReadSQLTableDtypes:
params = ['float', 'float_with_nan', 'string', 'bool', 'int', 'datetime']
param_names = ['dtype']
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py
index baad8b61bfd19..bbaba9909966e 100644
--- a/asv_bench/benchmarks/join_merge.py
+++ b/asv_bench/benchmarks/join_merge.py
@@ -11,7 +11,7 @@
from pandas import ordered_merge as merge_ordered
-class Append(object):
+class Append:
def setup(self):
self.df1 = DataFrame(np.random.randn(10000, 4),
@@ -33,7 +33,7 @@ def time_append_mixed(self):
self.mdf1.append(self.mdf2)
-class Concat(object):
+class Concat:
params = [0, 1]
param_names = ['axis']
@@ -65,7 +65,7 @@ def time_concat_mixed_ndims(self, axis):
concat(self.mixed_ndims, axis=axis)
-class ConcatDataFrames(object):
+class ConcatDataFrames:
params = ([0, 1], [True, False])
param_names = ['axis', 'ignore_index']
@@ -85,7 +85,7 @@ def time_f_ordered(self, axis, ignore_index):
concat(self.frame_f, axis=axis, ignore_index=ignore_index)
-class Join(object):
+class Join:
params = [True, False]
param_names = ['sort']
@@ -132,7 +132,7 @@ def time_join_dataframe_index_shuffle_key_bigger_sort(self, sort):
self.df_shuf.join(self.df_key2, on='key2', sort=sort)
-class JoinIndex(object):
+class JoinIndex:
def setup(self):
N = 50000
@@ -145,7 +145,7 @@ def time_left_outer_join_index(self):
self.left.join(self.right, on='jim')
-class JoinNonUnique(object):
+class JoinNonUnique:
# outer join of non-unique
# GH 6329
def setup(self):
@@ -162,7 +162,7 @@ def time_join_non_unique_equal(self):
self.fracofday * self.temp
-class Merge(object):
+class Merge:
params = [True, False]
param_names = ['sort']
@@ -196,7 +196,7 @@ def time_merge_dataframe_integer_key(self, sort):
merge(self.df, self.df2, on='key1', sort=sort)
-class I8Merge(object):
+class I8Merge:
params = ['inner', 'outer', 'left', 'right']
param_names = ['how']
@@ -214,7 +214,7 @@ def time_i8merge(self, how):
merge(self.left, self.right, how=how)
-class MergeCategoricals(object):
+class MergeCategoricals:
def setup(self):
self.left_object = DataFrame(
@@ -237,7 +237,7 @@ def time_merge_cat(self):
merge(self.left_cat, self.right_cat, on='X')
-class MergeOrdered(object):
+class MergeOrdered:
def setup(self):
groups = tm.makeStringIndex(10).values
@@ -251,7 +251,7 @@ def time_merge_ordered(self):
merge_ordered(self.left, self.right, on='key', left_by='group')
-class MergeAsof(object):
+class MergeAsof:
params = [['backward', 'forward', 'nearest']]
param_names = ['direction']
@@ -306,7 +306,7 @@ def time_multiby(self, direction):
direction=direction)
-class Align(object):
+class Align:
def setup(self):
size = 5 * 10**5
diff --git a/asv_bench/benchmarks/multiindex_object.py b/asv_bench/benchmarks/multiindex_object.py
index adc6730dcd946..ca2bdc45dc2cb 100644
--- a/asv_bench/benchmarks/multiindex_object.py
+++ b/asv_bench/benchmarks/multiindex_object.py
@@ -5,7 +5,7 @@
from pandas import date_range, MultiIndex
-class GetLoc(object):
+class GetLoc:
def setup(self):
self.mi_large = MultiIndex.from_product(
@@ -40,7 +40,7 @@ def time_small_get_loc_warm(self):
self.mi_small.get_loc((99, 'A', 'A'))
-class Duplicates(object):
+class Duplicates:
def setup(self):
size = 65536
@@ -54,7 +54,7 @@ def time_remove_unused_levels(self):
self.mi_unused_levels.remove_unused_levels()
-class Integer(object):
+class Integer:
def setup(self):
self.mi_int = MultiIndex.from_product([np.arange(1000),
@@ -72,7 +72,7 @@ def time_is_monotonic(self):
self.mi_int.is_monotonic
-class Duplicated(object):
+class Duplicated:
def setup(self):
n, k = 200, 5000
@@ -86,7 +86,7 @@ def time_duplicated(self):
self.mi.duplicated()
-class Sortlevel(object):
+class Sortlevel:
def setup(self):
n = 1182720
@@ -110,7 +110,7 @@ def time_sortlevel_one(self):
self.mi.sortlevel(1)
-class Values(object):
+class Values:
def setup_cache(self):
diff --git a/asv_bench/benchmarks/offset.py b/asv_bench/benchmarks/offset.py
index 4570e73cccc71..6811e3c9841e9 100644
--- a/asv_bench/benchmarks/offset.py
+++ b/asv_bench/benchmarks/offset.py
@@ -32,7 +32,7 @@
offsets = non_apply + other_offsets
-class ApplyIndex(object):
+class ApplyIndex:
params = other_offsets
param_names = ['offset']
@@ -45,7 +45,7 @@ def time_apply_index(self, offset):
offset.apply_index(self.rng)
-class OnOffset(object):
+class OnOffset:
params = offsets
param_names = ['offset']
@@ -61,7 +61,7 @@ def time_on_offset(self, offset):
offset.onOffset(date)
-class OffsetSeriesArithmetic(object):
+class OffsetSeriesArithmetic:
params = offsets
param_names = ['offset']
@@ -76,7 +76,7 @@ def time_add_offset(self, offset):
self.data + offset
-class OffsetDatetimeIndexArithmetic(object):
+class OffsetDatetimeIndexArithmetic:
params = offsets
param_names = ['offset']
@@ -90,7 +90,7 @@ def time_add_offset(self, offset):
self.data + offset
-class OffestDatetimeArithmetic(object):
+class OffestDatetimeArithmetic:
params = offsets
param_names = ['offset']
diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
index d479952cbfbf6..59b1638920666 100644
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -36,7 +36,7 @@ def setup(*args, **kwargs):
np.random.seed(1234)
-class BaseIO(object):
+class BaseIO:
"""
Base class for IO benchmarks
"""
diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py
index 6d2c7156a0a3d..c8ba6c382cb64 100644
--- a/asv_bench/benchmarks/period.py
+++ b/asv_bench/benchmarks/period.py
@@ -3,7 +3,7 @@
from pandas.tseries.frequencies import to_offset
-class PeriodProperties(object):
+class PeriodProperties:
params = (['M', 'min'],
['year', 'month', 'day', 'hour', 'minute', 'second',
@@ -18,7 +18,7 @@ def time_property(self, freq, attr):
getattr(self.per, attr)
-class PeriodUnaryMethods(object):
+class PeriodUnaryMethods:
params = ['M', 'min']
param_names = ['freq']
@@ -36,7 +36,7 @@ def time_asfreq(self, freq):
self.per.asfreq('A')
-class PeriodConstructor(object):
+class PeriodConstructor:
params = [['D'], [True, False]]
param_names = ['freq', 'is_offset']
@@ -50,7 +50,7 @@ def time_period_constructor(self, freq, is_offset):
Period('2012-06-01', freq=freq)
-class PeriodIndexConstructor(object):
+class PeriodIndexConstructor:
params = [['D'], [True, False]]
param_names = ['freq', 'is_offset']
@@ -79,7 +79,7 @@ def time_from_ints_daily(self, freq, is_offset):
PeriodIndex(self.daily_ints, freq=freq)
-class DataFramePeriodColumn(object):
+class DataFramePeriodColumn:
def setup(self):
self.rng = period_range(start='1/1/1990', freq='S', periods=20000)
@@ -94,7 +94,7 @@ def time_set_index(self):
self.df.set_index('col2', append=True)
-class Algorithms(object):
+class Algorithms:
params = ['index', 'series']
param_names = ['typ']
@@ -115,7 +115,7 @@ def time_value_counts(self, typ):
self.vector.value_counts()
-class Indexing(object):
+class Indexing:
def setup(self):
self.index = period_range(start='1985', periods=1000, freq='D')
diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py
index 8a67af0bdabd1..9e3bc87c32987 100644
--- a/asv_bench/benchmarks/plotting.py
+++ b/asv_bench/benchmarks/plotting.py
@@ -8,7 +8,7 @@
matplotlib.use('Agg')
-class SeriesPlotting(object):
+class SeriesPlotting:
params = [['line', 'bar', 'area', 'barh', 'hist', 'kde', 'pie']]
param_names = ['kind']
@@ -28,7 +28,7 @@ def time_series_plot(self, kind):
self.s.plot(kind=kind)
-class FramePlotting(object):
+class FramePlotting:
params = [['line', 'bar', 'area', 'barh', 'hist', 'kde', 'pie', 'scatter',
'hexbin']]
param_names = ['kind']
@@ -52,7 +52,7 @@ def time_frame_plot(self, kind):
self.df.plot(x='x', y='y', kind=kind)
-class TimeseriesPlotting(object):
+class TimeseriesPlotting:
def setup(self):
N = 2000
@@ -78,7 +78,7 @@ def time_plot_table(self):
self.df.plot(table=True)
-class Misc(object):
+class Misc:
def setup(self):
N = 500
diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py
index 3080b34024a33..a6ceb0e93a089 100644
--- a/asv_bench/benchmarks/reindex.py
+++ b/asv_bench/benchmarks/reindex.py
@@ -5,7 +5,7 @@
from .pandas_vb_common import lib
-class Reindex(object):
+class Reindex:
def setup(self):
rng = date_range(start='1/1/1970', periods=10000, freq='1min')
@@ -33,7 +33,7 @@ def time_reindex_multiindex(self):
self.s.reindex(self.s_subset.index)
-class ReindexMethod(object):
+class ReindexMethod:
params = [['pad', 'backfill'], [date_range, period_range]]
param_names = ['method', 'constructor']
@@ -47,7 +47,7 @@ def time_reindex_method(self, method, constructor):
self.ts.reindex(self.idx, method=method)
-class Fillna(object):
+class Fillna:
params = ['pad', 'backfill']
param_names = ['method']
@@ -66,7 +66,7 @@ def time_float_32(self, method):
self.ts_float32.fillna(method=method)
-class LevelAlign(object):
+class LevelAlign:
def setup(self):
self.index = MultiIndex(
@@ -86,7 +86,7 @@ def time_reindex_level(self):
self.df_level.reindex(self.index, level=1)
-class DropDuplicates(object):
+class DropDuplicates:
params = [True, False]
param_names = ['inplace']
@@ -130,7 +130,7 @@ def time_frame_drop_dups_bool(self, inplace):
self.df_bool.drop_duplicates(inplace=inplace)
-class Align(object):
+class Align:
# blog "pandas escaped the zoo"
def setup(self):
n = 50000
@@ -145,7 +145,7 @@ def time_align_series_irregular_string(self):
self.x + self.y
-class LibFastZip(object):
+class LibFastZip:
def setup(self):
N = 10000
diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py
index d8efaf99e2c4d..9dff1778f8e56 100644
--- a/asv_bench/benchmarks/replace.py
+++ b/asv_bench/benchmarks/replace.py
@@ -2,7 +2,7 @@
import pandas as pd
-class FillNa(object):
+class FillNa:
params = [True, False]
param_names = ['inplace']
@@ -21,7 +21,7 @@ def time_replace(self, inplace):
self.ts.replace(np.nan, 0.0, inplace=inplace)
-class ReplaceDict(object):
+class ReplaceDict:
params = [True, False]
param_names = ['inplace']
@@ -36,7 +36,7 @@ def time_replace_series(self, inplace):
self.s.replace(self.to_rep, inplace=inplace)
-class Convert(object):
+class Convert:
params = (['DataFrame', 'Series'], ['Timestamp', 'Timedelta'])
param_names = ['constructor', 'replace_data']
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index f6ee107ab618e..bead5a5996d1a 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -6,7 +6,7 @@
import pandas as pd
-class Melt(object):
+class Melt:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 3), columns=['A', 'B', 'C'])
@@ -17,7 +17,7 @@ def time_melt_dataframe(self):
melt(self.df, id_vars=['id1', 'id2'])
-class Pivot(object):
+class Pivot:
def setup(self):
N = 10000
@@ -31,7 +31,7 @@ def time_reshape_pivot_time_series(self):
self.df.pivot('date', 'variable', 'value')
-class SimpleReshape(object):
+class SimpleReshape:
def setup(self):
arrays = [np.arange(100).repeat(100),
@@ -47,7 +47,7 @@ def time_unstack(self):
self.df.unstack(1)
-class Unstack(object):
+class Unstack:
params = ['int', 'category']
@@ -79,7 +79,7 @@ def time_without_last_row(self, dtype):
self.df2.unstack()
-class SparseIndex(object):
+class SparseIndex:
def setup(self):
NUM_ROWS = 1000
@@ -95,7 +95,7 @@ def time_unstack(self):
self.df.unstack()
-class WideToLong(object):
+class WideToLong:
def setup(self):
nyrs = 20
@@ -113,7 +113,7 @@ def time_wide_to_long_big(self):
wide_to_long(self.df, self.letters, i='id', j='year')
-class PivotTable(object):
+class PivotTable:
def setup(self):
N = 100000
@@ -140,7 +140,7 @@ def time_pivot_table_margins(self):
margins=True)
-class Crosstab(object):
+class Crosstab:
def setup(self):
N = 100000
@@ -164,7 +164,7 @@ def time_crosstab_normalize_margins(self):
pd.crosstab(self.vec1, self.vec2, normalize=True, margins=True)
-class GetDummies(object):
+class GetDummies:
def setup(self):
categories = list(string.ascii_letters[:12])
s = pd.Series(np.random.choice(categories, size=1000000),
@@ -178,7 +178,7 @@ def time_get_dummies_1d_sparse(self):
pd.get_dummies(self.s, sparse=True)
-class Cut(object):
+class Cut:
params = [[4, 10, 1000]]
param_names = ['bins']
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py
index 7aefad6e2929b..2532d326dff4b 100644
--- a/asv_bench/benchmarks/rolling.py
+++ b/asv_bench/benchmarks/rolling.py
@@ -2,7 +2,7 @@
import numpy as np
-class Methods(object):
+class Methods:
sample_time = 0.2
params = (['DataFrame', 'Series'],
@@ -21,7 +21,7 @@ def time_rolling(self, constructor, window, dtype, method):
getattr(self.roll, method)()
-class ExpandingMethods(object):
+class ExpandingMethods:
sample_time = 0.2
params = (['DataFrame', 'Series'],
@@ -39,7 +39,7 @@ def time_expanding(self, constructor, dtype, method):
getattr(self.expanding, method)()
-class EWMMethods(object):
+class EWMMethods:
sample_time = 0.2
params = (['DataFrame', 'Series'],
@@ -73,7 +73,7 @@ def setup(self, constructor, window, dtype, method):
self.roll = getattr(pd, constructor)(arr, index=index).rolling(window)
-class Pairwise(object):
+class Pairwise:
sample_time = 0.2
params = ([10, 1000, None],
@@ -94,7 +94,7 @@ def time_pairwise(self, window, method, pairwise):
getattr(r, method)(self.df, pairwise=pairwise)
-class Quantile(object):
+class Quantile:
sample_time = 0.2
params = (['DataFrame', 'Series'],
[10, 1000],
@@ -113,7 +113,7 @@ def time_quantile(self, constructor, window, dtype, percentile,
self.roll.quantile(percentile, interpolation=interpolation)
-class PeakMemFixed(object):
+class PeakMemFixed:
def setup(self):
N = 10
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index 3303483c50e20..146e5d5996135 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -5,7 +5,7 @@
from pandas import Series, date_range, NaT
-class SeriesConstructor(object):
+class SeriesConstructor:
params = [None, 'dict']
param_names = ['data']
@@ -21,7 +21,7 @@ def time_constructor(self, data):
Series(data=self.data, index=self.idx)
-class IsIn(object):
+class IsIn:
params = ['int64', 'uint64', 'object']
param_names = ['dtype']
@@ -34,7 +34,7 @@ def time_isin(self, dtypes):
self.s.isin(self.values)
-class IsInFloat64(object):
+class IsInFloat64:
def setup(self):
self.small = Series([1, 2], dtype=np.float64)
@@ -55,7 +55,7 @@ def time_isin_nan_values(self):
self.small.isin(self.few_different_values)
-class IsInForObjects(object):
+class IsInForObjects:
def setup(self):
self.s_nans = Series(np.full(10**4, np.nan)).astype(np.object)
@@ -92,7 +92,7 @@ def time_isin_long_series_long_values_floats(self):
self.s_long_floats.isin(self.vals_long_floats)
-class NSort(object):
+class NSort:
params = ['first', 'last', 'all']
param_names = ['keep']
@@ -107,7 +107,7 @@ def time_nsmallest(self, keep):
self.s.nsmallest(3, keep=keep)
-class Dropna(object):
+class Dropna:
params = ['int', 'datetime']
param_names = ['dtype']
@@ -124,7 +124,7 @@ def time_dropna(self, dtype):
self.s.dropna()
-class SearchSorted(object):
+class SearchSorted:
goal_time = 0.2
params = ['int8', 'int16', 'int32', 'int64',
@@ -143,7 +143,7 @@ def time_searchsorted(self, dtype):
self.s.searchsorted(key)
-class Map(object):
+class Map:
params = ['dict', 'Series']
param_names = 'mapper'
@@ -158,7 +158,7 @@ def time_map(self, mapper):
self.s.map(self.map_data)
-class Clip(object):
+class Clip:
params = [50, 1000, 10**5]
param_names = ['n']
@@ -169,7 +169,7 @@ def time_clip(self, n):
self.s.clip(0, 1)
-class ValueCounts(object):
+class ValueCounts:
params = ['int', 'uint', 'float', 'object']
param_names = ['dtype']
@@ -181,7 +181,7 @@ def time_value_counts(self, dtype):
self.s.value_counts()
-class Dir(object):
+class Dir:
def setup(self):
self.s = Series(index=tm.makeStringIndex(10000))
@@ -190,7 +190,7 @@ def time_dir_strings(self):
dir(self.s)
-class SeriesGetattr(object):
+class SeriesGetattr:
# https://github.com/pandas-dev/pandas/issues/19764
def setup(self):
self.s = Series(1,
diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py
index 64f87c1670170..ca4469e64c335 100644
--- a/asv_bench/benchmarks/sparse.py
+++ b/asv_bench/benchmarks/sparse.py
@@ -14,7 +14,7 @@ def make_array(size, dense_proportion, fill_value, dtype):
return arr
-class SparseSeriesToFrame(object):
+class SparseSeriesToFrame:
def setup(self):
K = 50
@@ -31,7 +31,7 @@ def time_series_to_frame(self):
SparseDataFrame(self.series)
-class SparseArrayConstructor(object):
+class SparseArrayConstructor:
params = ([0.1, 0.01], [0, np.nan],
[np.int64, np.float64, np.object])
@@ -45,7 +45,7 @@ def time_sparse_array(self, dense_proportion, fill_value, dtype):
SparseArray(self.array, fill_value=fill_value, dtype=dtype)
-class SparseDataFrameConstructor(object):
+class SparseDataFrameConstructor:
def setup(self):
N = 1000
@@ -63,7 +63,7 @@ def time_from_dict(self):
SparseDataFrame(self.dict)
-class FromCoo(object):
+class FromCoo:
def setup(self):
self.matrix = scipy.sparse.coo_matrix(([3.0, 1.0, 2.0],
@@ -74,7 +74,7 @@ def time_sparse_series_from_coo(self):
SparseSeries.from_coo(self.matrix)
-class ToCoo(object):
+class ToCoo:
def setup(self):
s = Series([np.nan] * 10000)
@@ -90,7 +90,7 @@ def time_sparse_series_to_coo(self):
sort_labels=True)
-class Arithmetic(object):
+class Arithmetic:
params = ([0.1, 0.01], [0, np.nan])
param_names = ['dense_proportion', 'fill_value']
@@ -115,7 +115,7 @@ def time_divide(self, dense_proportion, fill_value):
self.array1 / self.array2
-class ArithmeticBlock(object):
+class ArithmeticBlock:
params = [np.nan, 0]
param_names = ['fill_value']
diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py
index 7fdc713f076ed..3514335f92e77 100644
--- a/asv_bench/benchmarks/stat_ops.py
+++ b/asv_bench/benchmarks/stat_ops.py
@@ -6,7 +6,7 @@
'var']
-class FrameOps(object):
+class FrameOps:
params = [ops, ['float', 'int'], [0, 1], [True, False]]
param_names = ['op', 'dtype', 'axis', 'use_bottleneck']
@@ -24,7 +24,7 @@ def time_op(self, op, dtype, axis, use_bottleneck):
self.df_func(axis=axis)
-class FrameMultiIndexOps(object):
+class FrameMultiIndexOps:
params = ([0, 1, [0, 1]], ops)
param_names = ['level', 'op']
@@ -42,7 +42,7 @@ def time_op(self, level, op):
self.df_func(level=level)
-class SeriesOps(object):
+class SeriesOps:
params = [ops, ['float', 'int'], [True, False]]
param_names = ['op', 'dtype', 'use_bottleneck']
@@ -60,7 +60,7 @@ def time_op(self, op, dtype, use_bottleneck):
self.s_func()
-class SeriesMultiIndexOps(object):
+class SeriesMultiIndexOps:
params = ([0, 1, [0, 1]], ops)
param_names = ['level', 'op']
@@ -78,7 +78,7 @@ def time_op(self, level, op):
self.s_func(level=level)
-class Rank(object):
+class Rank:
params = [['DataFrame', 'Series'], [True, False]]
param_names = ['constructor', 'pct']
@@ -94,7 +94,7 @@ def time_average_old(self, constructor, pct):
self.data.rank(pct=pct) / len(self.data)
-class Correlation(object):
+class Correlation:
params = [['spearman', 'kendall', 'pearson'], [True, False]]
param_names = ['method', 'use_bottleneck']
@@ -123,7 +123,7 @@ def time_corrwith_rows(self, method, use_bottleneck):
self.df.corrwith(self.df2, axis=1, method=method)
-class Covariance(object):
+class Covariance:
params = [[True, False]]
param_names = ['use_bottleneck']
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py
index b5b2c955f0133..5dbcc71b7455e 100644
--- a/asv_bench/benchmarks/strings.py
+++ b/asv_bench/benchmarks/strings.py
@@ -5,7 +5,7 @@
import pandas.util.testing as tm
-class Methods(object):
+class Methods:
def setup(self):
self.s = Series(tm.makeStringIndex(10**5))
@@ -93,7 +93,7 @@ def time_zfill(self):
self.s.str.zfill(10)
-class Repeat(object):
+class Repeat:
params = ['int', 'array']
param_names = ['repeats']
@@ -108,7 +108,7 @@ def time_repeat(self, repeats):
self.s.str.repeat(self.values)
-class Cat(object):
+class Cat:
params = ([0, 3], [None, ','], [None, '-'], [0.0, 0.001, 0.15])
param_names = ['other_cols', 'sep', 'na_rep', 'na_frac']
@@ -133,7 +133,7 @@ def time_cat(self, other_cols, sep, na_rep, na_frac):
self.s.str.cat(others=self.others, sep=sep, na_rep=na_rep)
-class Contains(object):
+class Contains:
params = [True, False]
param_names = ['regex']
@@ -145,7 +145,7 @@ def time_contains(self, regex):
self.s.str.contains('A', regex=regex)
-class Split(object):
+class Split:
params = [True, False]
param_names = ['expand']
@@ -160,7 +160,7 @@ def time_rsplit(self, expand):
self.s.str.rsplit('--', expand=expand)
-class Dummies(object):
+class Dummies:
def setup(self):
self.s = Series(tm.makeStringIndex(10**5)).str.join('|')
@@ -169,7 +169,7 @@ def time_get_dummies(self):
self.s.str.get_dummies('|')
-class Encode(object):
+class Encode:
def setup(self):
self.ser = Series(tm.makeUnicodeIndex())
@@ -178,7 +178,7 @@ def time_encode_decode(self):
self.ser.str.encode('utf-8').str.decode('utf-8')
-class Slice(object):
+class Slice:
def setup(self):
self.s = Series(['abcdefg', np.nan] * 500000)
diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py
index 0cfbbd536bc8b..c4fe462944a2a 100644
--- a/asv_bench/benchmarks/timedelta.py
+++ b/asv_bench/benchmarks/timedelta.py
@@ -6,7 +6,7 @@
DataFrame, Series, Timedelta, Timestamp, timedelta_range, to_timedelta)
-class TimedeltaConstructor(object):
+class TimedeltaConstructor:
def time_from_int(self):
Timedelta(123456789)
@@ -34,7 +34,7 @@ def time_from_missing(self):
Timedelta('nat')
-class ToTimedelta(object):
+class ToTimedelta:
def setup(self):
self.ints = np.random.randint(0, 60, size=10000)
@@ -54,7 +54,7 @@ def time_convert_string_seconds(self):
to_timedelta(self.str_seconds)
-class ToTimedeltaErrors(object):
+class ToTimedeltaErrors:
params = ['coerce', 'ignore']
param_names = ['errors']
@@ -68,7 +68,7 @@ def time_convert(self, errors):
to_timedelta(self.arr, errors=errors)
-class TimedeltaOps(object):
+class TimedeltaOps:
def setup(self):
self.td = to_timedelta(np.arange(1000000))
@@ -78,7 +78,7 @@ def time_add_td_ts(self):
self.td + self.ts
-class TimedeltaProperties(object):
+class TimedeltaProperties:
def setup_cache(self):
td = Timedelta(days=365, minutes=35, seconds=25, milliseconds=35)
@@ -97,7 +97,7 @@ def time_timedelta_nanoseconds(self, td):
td.nanoseconds
-class DatetimeAccessor(object):
+class DatetimeAccessor:
def setup_cache(self):
N = 100000
@@ -120,7 +120,7 @@ def time_timedelta_nanoseconds(self, series):
series.dt.nanoseconds
-class TimedeltaIndexing(object):
+class TimedeltaIndexing:
def setup(self):
self.index = timedelta_range(start='1985', periods=1000, freq='D')
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 6efd720d1acdd..eea1df35c7711 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -10,7 +10,7 @@
from pandas.tseries.converter import DatetimeConverter
-class DatetimeIndex(object):
+class DatetimeIndex:
params = ['dst', 'repeated', 'tz_aware', 'tz_local', 'tz_naive']
param_names = ['index_type']
@@ -60,7 +60,7 @@ def time_to_pydatetime(self, index_type):
self.index.to_pydatetime()
-class TzLocalize(object):
+class TzLocalize:
params = [None, 'US/Eastern', 'UTC', dateutil.tz.tzutc()]
param_names = 'tz'
@@ -80,7 +80,7 @@ def time_infer_dst(self, tz):
self.index.tz_localize(tz, ambiguous='infer')
-class ResetIndex(object):
+class ResetIndex:
params = [None, 'US/Eastern']
param_names = 'tz'
@@ -93,7 +93,7 @@ def time_reest_datetimeindex(self, tz):
self.df.reset_index()
-class Factorize(object):
+class Factorize:
params = [None, 'Asia/Tokyo']
param_names = 'tz'
@@ -107,7 +107,7 @@ def time_factorize(self, tz):
self.dti.factorize()
-class InferFreq(object):
+class InferFreq:
params = [None, 'D', 'B']
param_names = ['freq']
@@ -123,7 +123,7 @@ def time_infer_freq(self, freq):
infer_freq(self.idx)
-class TimeDatetimeConverter(object):
+class TimeDatetimeConverter:
def setup(self):
N = 100000
@@ -133,7 +133,7 @@ def time_convert(self):
DatetimeConverter.convert(self.rng, None, None)
-class Iteration(object):
+class Iteration:
params = [date_range, period_range]
param_names = ['time_index']
@@ -153,7 +153,7 @@ def time_iter_preexit(self, time_index):
break
-class ResampleDataFrame(object):
+class ResampleDataFrame:
params = ['max', 'mean', 'min']
param_names = ['method']
@@ -167,7 +167,7 @@ def time_method(self, method):
self.resample()
-class ResampleSeries(object):
+class ResampleSeries:
params = (['period', 'datetime'], ['5min', '1D'], ['mean', 'ohlc'])
param_names = ['index', 'freq', 'method']
@@ -187,7 +187,7 @@ def time_resample(self, index, freq, method):
self.resample()
-class ResampleDatetetime64(object):
+class ResampleDatetetime64:
# GH 7754
def setup(self):
rng3 = date_range(start='2000-01-01 00:00:00',
@@ -198,7 +198,7 @@ def time_resample(self):
self.dt_ts.resample('1S').last()
-class AsOf(object):
+class AsOf:
params = ['DataFrame', 'Series']
param_names = ['constructor']
@@ -245,7 +245,7 @@ def time_asof_nan_single(self, constructor):
self.ts3.asof(self.date_last)
-class SortIndex(object):
+class SortIndex:
params = [True, False]
param_names = ['monotonic']
@@ -264,7 +264,7 @@ def time_get_slice(self, monotonic):
self.s[:10000]
-class IrregularOps(object):
+class IrregularOps:
def setup(self):
N = 10**5
@@ -277,7 +277,7 @@ def time_add(self):
self.left + self.right
-class Lookup(object):
+class Lookup:
def setup(self):
N = 1500000
@@ -290,7 +290,7 @@ def time_lookup_and_cleanup(self):
self.ts.index._cleanup()
-class ToDatetimeYYYYMMDD(object):
+class ToDatetimeYYYYMMDD:
def setup(self):
rng = date_range(start='1/1/2000', periods=10000, freq='D')
@@ -300,7 +300,7 @@ def time_format_YYYYMMDD(self):
to_datetime(self.stringsD, format='%Y%m%d')
-class ToDatetimeISO8601(object):
+class ToDatetimeISO8601:
def setup(self):
rng = date_range(start='1/1/2000', periods=20000, freq='H')
@@ -325,7 +325,7 @@ def time_iso8601_tz_spaceformat(self):
to_datetime(self.strings_tz_space)
-class ToDatetimeNONISO8601(object):
+class ToDatetimeNONISO8601:
def setup(self):
N = 10000
@@ -342,7 +342,7 @@ def time_different_offset(self):
to_datetime(self.diff_offset)
-class ToDatetimeFormatQuarters(object):
+class ToDatetimeFormatQuarters:
def setup(self):
self.s = Series(['2Q2005', '2Q05', '2005Q1', '05Q1'] * 10000)
@@ -351,7 +351,7 @@ def time_infer_quarter(self):
to_datetime(self.s)
-class ToDatetimeFormat(object):
+class ToDatetimeFormat:
def setup(self):
self.s = Series(['19MAY11', '19MAY11:00:00:00'] * 100000)
@@ -364,7 +364,7 @@ def time_no_exact(self):
to_datetime(self.s, format='%d%b%y', exact=False)
-class ToDatetimeCache(object):
+class ToDatetimeCache:
params = [True, False]
param_names = ['cache']
@@ -392,7 +392,7 @@ def time_dup_string_tzoffset_dates(self, cache):
to_datetime(self.dup_string_with_tz, cache=cache)
-class DatetimeAccessor(object):
+class DatetimeAccessor:
params = [None, 'US/Eastern', 'UTC', dateutil.tz.tzutc()]
param_names = 'tz'
diff --git a/asv_bench/benchmarks/timestamp.py b/asv_bench/benchmarks/timestamp.py
index b45ae22650e17..c6e56804c7b21 100644
--- a/asv_bench/benchmarks/timestamp.py
+++ b/asv_bench/benchmarks/timestamp.py
@@ -6,7 +6,7 @@
from pandas import Timestamp
-class TimestampConstruction(object):
+class TimestampConstruction:
def time_parse_iso8601_no_tz(self):
Timestamp('2017-08-25 08:16:14')
@@ -30,7 +30,7 @@ def time_fromtimestamp(self):
Timestamp.fromtimestamp(1515448538)
-class TimestampProperties(object):
+class TimestampProperties:
_tzs = [None, pytz.timezone('Europe/Amsterdam'), pytz.UTC,
dateutil.tz.tzutc()]
_freqs = [None, 'B']
@@ -92,7 +92,7 @@ def time_month_name(self, tz, freq):
self.ts.month_name()
-class TimestampOps(object):
+class TimestampOps:
params = [None, 'US/Eastern', pytz.UTC,
dateutil.tz.tzutc()]
param_names = ['tz']
@@ -130,7 +130,7 @@ def time_ceil(self, tz):
self.ts.ceil('5T')
-class TimestampAcrossDst(object):
+class TimestampAcrossDst:
def setup(self):
dt = datetime.datetime(2016, 3, 27, 1)
self.tzinfo = pytz.timezone('CET').localize(dt, is_dst=False).tzinfo
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 99b408bac8a9f..9a95b7887cfab 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -140,8 +140,8 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
invgrep -R --include="*.py" --include="*.pyx" -E "(DEPRECATED|DEPRECATE|Deprecated)(:|,|\.)" pandas
RET=$(($RET + $?)) ; echo $MSG "DONE"
- MSG='Check for old-style classes' ; echo $MSG
- invgrep -R --include="*.py" -E "class\s\S*[^)]:" pandas scripts
+ MSG='Check for python2 new-style classes' ; echo $MSG
+ invgrep -R --include="*.py" -E "class\s\S*\(object\):" pandas scripts
RET=$(($RET + $?)) ; echo $MSG "DONE"
MSG='Check for backticks incorrectly rendering because of missing spaces' ; echo $MSG
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst
index 434df772ae9d1..efc63d6c6e633 100644
--- a/doc/source/development/contributing.rst
+++ b/doc/source/development/contributing.rst
@@ -773,7 +773,7 @@ Transitioning to ``pytest``
.. code-block:: python
- class TestReallyCoolFeature(object):
+ class TestReallyCoolFeature:
pass
Going forward, we are moving to a more *functional* style using the `pytest <http://docs.pytest.org/en/latest/>`__ framework, which offers a richer testing
diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst
index 0bc54f99482ca..8bee0452c2207 100644
--- a/doc/source/development/extending.rst
+++ b/doc/source/development/extending.rst
@@ -26,7 +26,7 @@ decorate a class, providing the name of attribute to add. The class's
.. code-block:: python
@pd.api.extensions.register_dataframe_accessor("geo")
- class GeoAccessor(object):
+ class GeoAccessor:
def __init__(self, pandas_obj):
self._validate(pandas_obj)
self._obj = pandas_obj
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index 0bbc6ba8c519a..6b685a0ce962a 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -177,7 +177,7 @@ def get_default_val(pat):
return _get_registered_option(key).defval
-class DictWrapper(object):
+class DictWrapper:
""" provide attribute-style access to a nested dict"""
def __init__(self, d, prefix=""):
@@ -222,7 +222,7 @@ def __dir__(self):
# of options, and option descriptions.
-class CallableDynamicDoc(object):
+class CallableDynamicDoc:
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
@@ -379,7 +379,7 @@ def __doc__(self):
# Functions for use by pandas developers, in addition to User - api
-class option_context(object):
+class option_context:
"""
Context manager to temporarily set options in the `with` statement context.
diff --git a/pandas/_version.py b/pandas/_version.py
index 91f6c5d788d0a..5031f411270a1 100644
--- a/pandas/_version.py
+++ b/pandas/_version.py
@@ -26,7 +26,7 @@ def get_keywords():
return keywords
-class VersioneerConfig(object):
+class VersioneerConfig:
pass
diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py
index 0ce0f1da483f1..d318a07e03885 100644
--- a/pandas/compat/numpy/function.py
+++ b/pandas/compat/numpy/function.py
@@ -27,7 +27,7 @@
validate_args, validate_args_and_kwargs, validate_kwargs)
-class CompatValidator(object):
+class CompatValidator:
def __init__(self, defaults, fname=None, method=None,
max_fname_arg_count=None):
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index 050749741e7bd..c5513765764a7 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -10,7 +10,7 @@
from pandas.util._decorators import Appender
-class DirNamesMixin(object):
+class DirNamesMixin:
_accessors = frozenset()
_deprecations = frozenset(
['asobject', 'base', 'data', 'flags', 'itemsize', 'strides'])
@@ -44,7 +44,7 @@ def __dir__(self):
return sorted(rv)
-class PandasDelegate(object):
+class PandasDelegate:
"""
An abstract base class for delegating methods/properties.
"""
@@ -151,7 +151,7 @@ def add_delegate_accessors(cls):
# 1. We don't need to catch and re-raise AttributeErrors as RuntimeErrors
# 2. We use a UserWarning instead of a custom Warning
-class CachedAccessor(object):
+class CachedAccessor:
"""
Custom property-like object (descriptor) for caching accessors.
@@ -235,7 +235,7 @@ def __init__(self, pandas_object): # noqa: E999
import pandas as pd
@pd.api.extensions.register_dataframe_accessor("geo")
- class GeoAccessor(object):
+ class GeoAccessor:
def __init__(self, pandas_obj):
self._obj = pandas_obj
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 80bf7214e7a27..df40461c96399 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -1047,7 +1047,7 @@ def _get_score(at):
# select n #
# --------------- #
-class SelectN(object):
+class SelectN:
def __init__(self, obj, n, keep):
self.obj = obj
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 4b46e14d950a1..96f623bda9a8a 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -48,7 +48,7 @@
# Deprecation: xref gh-16747
-class TimeGrouper(object):
+class TimeGrouper:
def __new__(cls, *args, **kwargs):
from pandas.core.resample import TimeGrouper
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 3099548afe787..a7aa9deaf99fe 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -31,7 +31,7 @@ def frame_apply(obj, func, axis=0, broadcast=None,
args=args, kwds=kwds)
-class FrameApply(object):
+class FrameApply:
def __init__(self, obj, func, broadcast, raw, reduce, result_type,
ignore_failures, args, kwds):
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index b487cbfe78a86..18c02f76a7911 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -29,7 +29,7 @@
_extension_array_shared_docs = dict()
-class ExtensionArray(object):
+class ExtensionArray:
"""
Abstract base class for custom 1-D array types.
@@ -964,7 +964,7 @@ def _reduce(self, name, skipna=True, **kwargs):
name=name, dtype=self.dtype))
-class ExtensionOpsMixin(object):
+class ExtensionOpsMixin:
"""
A base class for linking the operators to their dunder names.
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 6225dfcbe5c14..79cc2eb56aa77 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -39,7 +39,7 @@
from .base import ExtensionArray, ExtensionOpsMixin
-class AttributesMixin(object):
+class AttributesMixin:
@property
def _attributes(self):
@@ -135,7 +135,7 @@ def _check_compatible_with(
raise AbstractMethodError(self)
-class DatelikeOps(object):
+class DatelikeOps:
"""
Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
"""
@@ -181,7 +181,7 @@ def strftime(self, date_format):
return Index(self._format_native_types(date_format=date_format))
-class TimelikeOps(object):
+class TimelikeOps:
"""
Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
"""
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 1d3eb880f32e3..426cae5779118 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -32,7 +32,7 @@
unique='IndexOpsMixin', duplicated='IndexOpsMixin')
-class StringMixin(object):
+class StringMixin:
"""
Implements string methods so long as object defines a `__unicode__` method.
"""
@@ -110,7 +110,7 @@ def __sizeof__(self):
return super(PandasObject, self).__sizeof__()
-class NoNewAttributesMixin(object):
+class NoNewAttributesMixin:
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
@@ -153,7 +153,7 @@ class SpecificationError(GroupByError):
pass
-class SelectionMixin(object):
+class SelectionMixin:
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
@@ -645,7 +645,7 @@ def _is_builtin_func(self, arg):
return self._builtin_table.get(arg, arg)
-class IndexOpsMixin(object):
+class IndexOpsMixin:
""" common ops mixin to support a unified interface / docs for Series /
Index
"""
diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py
index 505589db09731..85dfbc56d5187 100644
--- a/pandas/core/computation/engines.py
+++ b/pandas/core/computation/engines.py
@@ -35,7 +35,7 @@ def _check_ne_builtin_clash(expr):
.format(expr=expr, s=s))
-class AbstractEngine(object):
+class AbstractEngine:
"""Object serving as a base class for all engines."""
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index baeb2af7d843e..9061fa0308830 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -541,7 +541,7 @@ def __unicode__(self):
return pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
-class FuncNode(object):
+class FuncNode:
def __init__(self, name):
from pandas.core.computation.check import (_NUMEXPR_INSTALLED,
_NUMEXPR_VERSION)
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 6b03237d5991e..c957c8c85e3ad 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -572,7 +572,7 @@ def evaluate(self):
return self.condition, self.filter
-class TermValue(object):
+class TermValue:
""" hold a term value the we use to construct a condition/filter """
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index d08b663fbb538..3fc5acf7de2fe 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -8,7 +8,7 @@
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
-class _DtypeOpsMixin(object):
+class _DtypeOpsMixin:
# Not all of pandas' extension dtypes are compatibile with
# the new ExtensionArray interface. This means PandasExtensionDtype
# can't subclass ExtensionDtype yet, as is_extension_array_dtype would
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 2f1b4cc8f214d..7a5723b973eb0 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -39,7 +39,7 @@ def register_extension_dtype(cls):
return cls
-class Registry(object):
+class Registry:
"""
Registry for dtype inference
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index a5804586bdf11..823a4155bc2b8 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -11,7 +11,7 @@
from pandas.core.dtypes.common import is_list_like, is_scalar
-class GroupByMixin(object):
+class GroupByMixin:
"""
Provide the groupby facilities to the mixed object.
"""
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 561856ace2d9f..8145e5000c056 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -25,7 +25,7 @@
from pandas.io.formats.printing import pprint_thing
-class Grouper(object):
+class Grouper:
"""
A Grouper allows the user to specify a groupby instruction for a target
object
@@ -204,7 +204,7 @@ def __repr__(self):
return "{}({})".format(cls_name, attrs)
-class Grouping(object):
+class Grouping:
"""
Holds the grouping information for a single key
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index ec22548de6da3..8a801a4d0c164 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -87,7 +87,7 @@ def generate_bins_generic(values, binner, closed):
return bins
-class BaseGrouper(object):
+class BaseGrouper:
"""
This is an internal Grouper class, which actually holds
the generated groups
@@ -808,7 +808,7 @@ def _is_indexed_like(obj, axes):
# Splitting / application
-class DataSplitter(object):
+class DataSplitter:
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 0b7958a4e7b67..68d4e746f72ad 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -35,7 +35,7 @@ def get_indexers_list():
# the public IndexSlicerMaker
-class _IndexSlice(object):
+class _IndexSlice:
"""
Create an object to more easily perform multi-index slicing
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index f10252f0261d8..97d16de6ad088 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1519,7 +1519,7 @@ def _replace_coerce(self, to_replace, value, inplace=True, regex=False,
return self
-class NonConsolidatableMixIn(object):
+class NonConsolidatableMixIn:
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
@@ -2033,7 +2033,7 @@ def should_store(self, value):
return is_integer_dtype(value) and value.dtype == self.dtype
-class DatetimeLikeBlockMixin(object):
+class DatetimeLikeBlockMixin:
"""Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock."""
@property
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index cb98274962656..8769c150f019b 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -99,7 +99,7 @@ def get_mgr_concatenation_plan(mgr, indexers):
return plan
-class JoinUnit(object):
+class JoinUnit:
def __init__(self, block, shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 4ba6e04495fbb..95516aec060b7 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -54,7 +54,7 @@ def set_use_bottleneck(v=True):
set_use_bottleneck(get_option('compute.use_bottleneck'))
-class disallow(object):
+class disallow:
def __init__(self, *dtypes):
super(disallow, self).__init__()
@@ -86,7 +86,7 @@ def _f(*args, **kwargs):
return _f
-class bottleneck_switch(object):
+class bottleneck_switch:
def __init__(self, **kwargs):
self.kwargs = kwargs
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index 1c2c97d6680a3..2067e86eb75fa 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -229,7 +229,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
return op.get_result()
-class _Concatenator(object):
+class _Concatenator:
"""
Orchestrates a concatenation operation for BlockManagers
"""
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 645b394c0e04f..f779c98668eec 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -469,7 +469,7 @@ def merge_asof(left, right, on=None,
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
-class _MergeOperation(object):
+class _MergeOperation:
"""
Perform a database (SQL) merge operation between two DataFrame objects
using either columns as keys or their row indexes
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index b88e3d8ee828c..beb497fb96e7e 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -24,7 +24,7 @@
get_group_index)
-class _Unstacker(object):
+class _Unstacker:
"""
Helper class to unstack data / pivot with multi-level index
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 22715c4f21a38..0738bb0d66dbc 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -283,7 +283,7 @@ def nargsort(items, kind='quicksort', ascending=True, na_position='last'):
return indexer
-class _KeyMapper(object):
+class _KeyMapper:
"""
Ease my suffering. Map compressed group id -> key tuple
diff --git a/pandas/io/clipboard/clipboards.py b/pandas/io/clipboard/clipboards.py
index 3c8abe74912fd..66e2e35bf0c59 100644
--- a/pandas/io/clipboard/clipboards.py
+++ b/pandas/io/clipboard/clipboards.py
@@ -124,7 +124,7 @@ def paste_klipper():
def init_no_clipboard():
- class ClipboardUnavailable(object):
+ class ClipboardUnavailable:
def __call__(self, *args, **kwargs):
raise PyperclipException(EXCEPT_MSG)
diff --git a/pandas/io/clipboard/windows.py b/pandas/io/clipboard/windows.py
index 4f5275af693b7..ecf4598a505e0 100644
--- a/pandas/io/clipboard/windows.py
+++ b/pandas/io/clipboard/windows.py
@@ -9,7 +9,7 @@
from .exceptions import PyperclipWindowsException
-class CheckedCall(object):
+class CheckedCall:
def __init__(self, f):
super(CheckedCall, self).__setattr__("f", f)
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 3bdfe52a27e7e..fb254b1f3c74e 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -37,7 +37,7 @@
_VALID_URLS.discard('')
-class BaseIterator(object):
+class BaseIterator:
"""Subclass this and provide a "__next__()" method to obtain an iterator.
Useful only when the object being iterated is non-reusable (e.g. OK for a
parser, not for an in-memory table, yes for its iterator)."""
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index b6b34b60c2ffb..b99908fd902c8 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -329,7 +329,7 @@ def read_excel(io,
@add_metaclass(abc.ABCMeta)
-class _BaseExcelReader(object):
+class _BaseExcelReader:
@property
@abc.abstractmethod
@@ -488,7 +488,7 @@ def parse(self,
@add_metaclass(abc.ABCMeta)
-class ExcelWriter(object):
+class ExcelWriter:
"""
Class for writing DataFrame objects into excel sheets, default is to use
xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage.
@@ -732,7 +732,7 @@ def close(self):
return self.save()
-class ExcelFile(object):
+class ExcelFile:
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd. See read_excel for more documentation
diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py
index a7c9884e993a7..5504665c6bfb1 100644
--- a/pandas/io/excel/_xlsxwriter.py
+++ b/pandas/io/excel/_xlsxwriter.py
@@ -4,7 +4,7 @@
from pandas.io.excel._util import _validate_freeze_panes
-class _XlsxStyler(object):
+class _XlsxStyler:
# Map from openpyxl-oriented styles to flatter xlsxwriter representation
# Ordering necessary for both determinism and because some are keyed by
# prefixes of others.
diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py
index be6b7b18975e5..2527e45650ea3 100644
--- a/pandas/io/formats/css.py
+++ b/pandas/io/formats/css.py
@@ -10,7 +10,7 @@ class CSSWarning(UserWarning):
pass
-class CSSResolver(object):
+class CSSResolver:
"""A callable for parsing and resolving CSS to atomic properties
"""
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 37f1372366545..16032a1257ed4 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -21,7 +21,7 @@
UnicodeWriter, _get_handle, _infer_compression, get_filepath_or_buffer)
-class CSVFormatter(object):
+class CSVFormatter:
def __init__(self, obj, path_or_buf=None, sep=",", na_rep='',
float_format=None, cols=None, header=True, index=True,
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index aa88f6c520c0d..fd6e3304ec4ef 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -20,7 +20,7 @@
from pandas.io.formats.printing import pprint_thing
-class ExcelCell(object):
+class ExcelCell:
__fields__ = ('row', 'col', 'val', 'style', 'mergestart', 'mergeend')
__slots__ = __fields__
@@ -34,7 +34,7 @@ def __init__(self, row, col, val, style=None, mergestart=None,
self.mergeend = mergeend
-class CSSToExcelConverter(object):
+class CSSToExcelConverter:
"""A callable for converting CSS declarations to ExcelWriter styles
Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow),
@@ -308,7 +308,7 @@ def build_number_format(self, props):
return {'format_code': props.get('number-format')}
-class ExcelFormatter(object):
+class ExcelFormatter:
"""
Class for formatting a DataFrame to a list of ExcelCells,
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index c7524a10e577c..287672d40c9a5 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -103,7 +103,7 @@
"""
-class CategoricalFormatter(object):
+class CategoricalFormatter:
def __init__(self, categorical, buf=None, length=True, na_rep='NaN',
footer=True):
@@ -157,7 +157,7 @@ def to_string(self):
return str('\n'.join(result))
-class SeriesFormatter(object):
+class SeriesFormatter:
def __init__(self, series, buf=None, length=True, header=True, index=True,
na_rep='NaN', name=False, float_format=None, dtype=True,
@@ -290,7 +290,7 @@ def to_string(self):
return str(''.join(result))
-class TextAdjustment(object):
+class TextAdjustment:
def __init__(self):
self.encoding = get_option("display.encoding")
@@ -351,7 +351,7 @@ def _get_adjustment():
return TextAdjustment()
-class TableFormatter(object):
+class TableFormatter:
is_truncated = False
show_dimensions = None
@@ -913,7 +913,7 @@ def format_array(values, formatter, float_format=None, na_rep='NaN',
return fmt_obj.get_result()
-class GenericArrayFormatter(object):
+class GenericArrayFormatter:
def __init__(self, values, digits=7, formatter=None, na_rep='NaN',
space=12, float_format=None, justify='right', decimal='.',
@@ -1469,7 +1469,7 @@ def _has_names(index):
return index.name is not None
-class EngFormatter(object):
+class EngFormatter:
"""
Formats float values according to engineering format.
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 11d30f6b1d10d..1e80677e1a597 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -52,7 +52,7 @@ def _mpl(func):
raise ImportError(no_mpl_message.format(func.__name__))
-class Styler(object):
+class Styler:
"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
diff --git a/pandas/io/html.py b/pandas/io/html.py
index e449bf223ba94..641dfe73e24af 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -139,7 +139,7 @@ def _read(obj):
return text
-class _HtmlFrameParser(object):
+class _HtmlFrameParser:
"""Base class for parsers that parse HTML into DataFrames.
Parameters
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 867f72ed218a9..dbf7f4f49ce86 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -76,7 +76,7 @@ def to_json(path_or_buf, obj, orient=None, date_format='epoch',
path_or_buf.write(s)
-class Writer(object):
+class Writer:
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, index, default_handler=None):
self.obj = obj
@@ -615,7 +615,7 @@ def __next__(self):
raise StopIteration
-class Parser(object):
+class Parser:
_STAMP_UNITS = ('s', 'ms', 'us', 'ns')
_MIN_STAMPS = {
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index ac9b132b191b6..b67685c37de90 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -777,7 +777,7 @@ def __init__(self, file_like=None, read_size=0, use_list=False,
ext_hook=ext_hook)
-class Iterator(object):
+class Iterator:
""" manage the unpacking iteration,
close the file on completion """
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index c126315cec63f..6f3d70836af47 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -42,7 +42,7 @@ def get_engine(engine):
return FastParquetImpl()
-class BaseImpl(object):
+class BaseImpl:
api = None # module
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index a21c910979f9d..27877300a6b4c 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1351,7 +1351,7 @@ def _validate_parse_dates_arg(parse_dates):
return parse_dates
-class ParserBase(object):
+class ParserBase:
def __init__(self, kwds):
self.names = kwds.get('names')
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index fed73cf73a8aa..70fa8a48cafe4 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1410,7 +1410,7 @@ def _read_group(self, group, **kwargs):
return s.read(**kwargs)
-class TableIterator(object):
+class TableIterator:
""" define the iteration interface on a table
@@ -4618,7 +4618,7 @@ def _need_convert(kind):
return False
-class Selection(object):
+class Selection:
"""
Carries out a selection operation on a tables.Table object.
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 87089e204eef0..1aaf8547cb0a4 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -27,11 +27,11 @@
import pandas.io.sas.sas_constants as const
-class _subheader_pointer(object):
+class _subheader_pointer:
pass
-class _column(object):
+class _column:
pass
diff --git a/pandas/io/sas/sas_constants.py b/pandas/io/sas/sas_constants.py
index 98502d32d39e8..c37a26cd62ad2 100644
--- a/pandas/io/sas/sas_constants.py
+++ b/pandas/io/sas/sas_constants.py
@@ -102,7 +102,7 @@
61: "wcyrillic", 62: "wlatin1", 90: "ebcdic870"}
-class SASIndex(object):
+class SASIndex:
row_size_index = 0
column_size_index = 1
subheader_counts_index = 2
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index f582c3c7de284..2545cb38b1de9 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -592,7 +592,7 @@ def _cast_to_stata_types(data):
return data
-class StataValueLabel(object):
+class StataValueLabel:
"""
Parse a categorical column and prepare formatted output
@@ -833,7 +833,7 @@ def get_base_missing_value(cls, dtype):
return value
-class StataParser(object):
+class StataParser:
def __init__(self):
@@ -2483,7 +2483,7 @@ def _pad_bytes_new(name, length):
return name + b'\x00' * (length - len(name))
-class StataStrLWriter(object):
+class StataStrLWriter:
"""
Converter for Stata StrLs
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index c6ae933bbbf10..3fbb503fe0df7 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -60,7 +60,7 @@ def _gcf():
return plt.gcf()
-class MPLPlot(object):
+class MPLPlot:
"""
Base class for assembling a pandas plot using matplotlib
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index d450f5b9ce101..dddf7896fe398 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -4,7 +4,7 @@
from pandas.util import testing as tm
-class Base(object):
+class Base:
def check(self, namespace, expected, ignored=None):
# see which names are in the namespace, minus optional
@@ -133,7 +133,7 @@ def test_testing(self):
self.check(testing, self.funcs)
-class TestTopLevelDeprecations(object):
+class TestTopLevelDeprecations:
# top-level API deprecations
# GH 13790
@@ -144,7 +144,7 @@ def test_TimeGrouper(self):
pd.TimeGrouper(freq='D')
-class TestCDateRange(object):
+class TestCDateRange:
def test_deprecation_cdaterange(self):
# GH17596
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index c1452bda4be9d..333c7c614db15 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -38,7 +38,7 @@ def assert_all(obj):
# ------------------------------------------------------------------
# Comparisons
-class TestDatetime64DataFrameComparison(object):
+class TestDatetime64DataFrameComparison:
@pytest.mark.parametrize('timestamps', [
[pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2,
[pd.Timestamp('2012-01-01 13:00:00')] * 2])
@@ -58,7 +58,7 @@ def test_dt64_nat_comparison(self):
tm.assert_frame_equal(result, expected)
-class TestDatetime64SeriesComparison(object):
+class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize('pair', [
@@ -337,7 +337,7 @@ def test_comparison_tzawareness_compat(self, op):
# comparison with the Series on the left-hand side
-class TestDatetimeIndexComparisons(object):
+class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
@pytest.mark.parametrize("op", [
@@ -789,7 +789,7 @@ def test_dti_cmp_object_dtype(self):
# ------------------------------------------------------------------
# Arithmetic
-class TestDatetime64Arithmetic(object):
+class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
@@ -1125,7 +1125,7 @@ def test_dt64arr_add_sub_period_scalar(self, dti_freq, box_with_array):
per - dtarr
-class TestDatetime64DateOffsetArithmetic(object):
+class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
@@ -1437,7 +1437,7 @@ def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture,
tm.assert_equal(res, expected)
-class TestDatetime64OverflowHandling(object):
+class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
@@ -1555,7 +1555,7 @@ def test_datetimeindex_sub_datetimeindex_overflow(self):
tmax - t2
-class TestTimestampSeriesArithmetic(object):
+class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
@@ -1843,7 +1843,7 @@ def test_operators_datetimelike_with_timezones(self):
td2 - dt2
-class TestDatetimeIndexArithmetic(object):
+class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 0a1c79292b1d4..617974a8d3831 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -19,7 +19,7 @@
# Comparisons
-class TestNumericComparisons(object):
+class TestNumericComparisons:
def test_operator_series_comparison_zerorank(self):
# GH#13006
result = np.float64(0) > pd.Series([1, 2, 3])
@@ -60,7 +60,7 @@ def test_compare_invalid(self):
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
-class TestNumericArraylikeArithmeticWithTimedeltaLike(object):
+class TestNumericArraylikeArithmeticWithTimedeltaLike:
# TODO: also check name retentention
@pytest.mark.parametrize('box_cls', [np.array, pd.Index, pd.Series])
@@ -185,7 +185,7 @@ def test_add_sub_timedeltalike_invalid(self, numeric_idx, other, box):
# ------------------------------------------------------------------
# Arithmetic
-class TestDivisionByZero(object):
+class TestDivisionByZero:
def test_div_zero(self, zero, numeric_idx):
idx = numeric_idx
@@ -395,7 +395,7 @@ def test_df_mod_zero_series_does_not_commute(self):
assert not res.fillna(0).equals(res2.fillna(0))
-class TestMultiplicationDivision(object):
+class TestMultiplicationDivision:
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# for non-timestamp/timedelta/period dtypes
@@ -627,7 +627,7 @@ def test_modulo2(self):
tm.assert_series_equal(result, expected)
-class TestAdditionSubtraction(object):
+class TestAdditionSubtraction:
# __add__, __sub__, __radd__, __rsub__, __iadd__, __isub__
# for non-timestamp/timedelta/period dtypes
@@ -834,7 +834,7 @@ def check(series, other):
check(tser, 5)
-class TestUFuncCompat(object):
+class TestUFuncCompat:
@pytest.mark.parametrize('holder', [pd.Int64Index, pd.UInt64Index,
pd.Float64Index, pd.RangeIndex,
@@ -894,7 +894,7 @@ def test_ufunc_coercions(self, holder):
tm.assert_equal(result, exp)
-class TestObjectDtypeEquivalence(object):
+class TestObjectDtypeEquivalence:
# Tests that arithmetic operations match operations executed elementwise
@pytest.mark.parametrize('dtype', [None, object])
@@ -937,7 +937,7 @@ def test_operators_reverse_object(self, op):
tm.assert_series_equal(result.astype(float), expected)
-class TestNumericArithmeticUnsorted(object):
+class TestNumericArithmeticUnsorted:
# Tests in this class have been moved from type-specific test modules
# but not yet sorted, parametrized, and de-duplicated
diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py
index 29063ae3f50e3..b5438b8eea10b 100644
--- a/pandas/tests/arithmetic/test_object.py
+++ b/pandas/tests/arithmetic/test_object.py
@@ -17,7 +17,7 @@
# Comparisons
-class TestObjectComparisons(object):
+class TestObjectComparisons:
def test_comparison_object_numeric_nas(self):
ser = Series(np.random.randn(10), dtype=object)
@@ -71,7 +71,7 @@ def test_more_na_comparisons(self, dtype):
# ------------------------------------------------------------------
# Arithmetic
-class TestArithmetic(object):
+class TestArithmetic:
# TODO: parametrize
def test_pow_ops_object(self):
diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index b8ae0895bda06..81df0172c1807 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -21,7 +21,7 @@
# Comparisons
-class TestPeriodIndexComparisons(object):
+class TestPeriodIndexComparisons:
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
@@ -260,7 +260,7 @@ def test_comp_nat(self, dtype):
tm.assert_numpy_array_equal(pd.NaT > left, expected)
-class TestPeriodSeriesComparisons(object):
+class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series([Period('2011', freq='A'),
@@ -292,7 +292,7 @@ def test_cmp_series_period_series_mixed_freq(self):
tm.assert_series_equal(base <= ser, exp)
-class TestPeriodIndexSeriesComparisonConsistency(object):
+class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
@@ -388,7 +388,7 @@ def test_pi_comp_period_nat(self):
# ------------------------------------------------------------------
# Arithmetic
-class TestPeriodFrameArithmetic(object):
+class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
@@ -420,7 +420,7 @@ def test_ops_frame_period(self):
tm.assert_frame_equal(df - df2, -1 * exp)
-class TestPeriodIndexArithmetic(object):
+class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
@@ -967,7 +967,7 @@ def test_parr_add_sub_td64_nat(self, box_transpose_fail):
other - obj
-class TestPeriodSeriesArithmetic(object):
+class TestPeriodSeriesArithmetic:
def test_ops_series_timedelta(self):
# GH#13043
ser = pd.Series([pd.Period('2015-01-01', freq='D'),
@@ -1011,7 +1011,7 @@ def test_ops_series_period(self):
tm.assert_series_equal(ser - s2, -1 * expected)
-class TestPeriodIndexSeriesMethods(object):
+class TestPeriodIndexSeriesMethods:
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 0faed74d4a021..8d2afe0b10d7f 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -31,7 +31,7 @@ def get_upcast_box(box, vector):
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
-class TestTimedelta64ArrayComparisons(object):
+class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
@@ -157,7 +157,7 @@ def test_comparisons_coverage(self):
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
-class TestTimedelta64ArithmeticUnsorted(object):
+class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
@@ -442,7 +442,7 @@ def test_timedelta(self, freq):
tm.assert_index_equal(result2, result3)
-class TestAddSubNaTMasking(object):
+class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
@@ -502,7 +502,7 @@ def test_tdi_add_overflow(self):
tm.assert_index_equal(result, exp)
-class TestTimedeltaArraylikeAddSubOps(object):
+class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
@@ -1399,7 +1399,7 @@ def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
anchored - tdi
-class TestTimedeltaArraylikeMulDivOps(object):
+class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
@@ -1990,7 +1990,7 @@ def test_float_series_rdiv_td64arr(self, box_with_array, names):
tm.assert_equal(result, expected)
-class TestTimedeltaArraylikeInvalidArithmeticOps(object):
+class TestTimedeltaArraylikeInvalidArithmeticOps:
def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
diff --git a/pandas/tests/arrays/categorical/common.py b/pandas/tests/arrays/categorical/common.py
index 9462482553ed8..f1029b46ea017 100644
--- a/pandas/tests/arrays/categorical/common.py
+++ b/pandas/tests/arrays/categorical/common.py
@@ -3,7 +3,7 @@
from pandas import Categorical
-class TestCategorical(object):
+class TestCategorical:
def setup_method(self, method):
self.factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'],
diff --git a/pandas/tests/arrays/categorical/test_algos.py b/pandas/tests/arrays/categorical/test_algos.py
index 09643c06aa56e..6b75d06438889 100644
--- a/pandas/tests/arrays/categorical/test_algos.py
+++ b/pandas/tests/arrays/categorical/test_algos.py
@@ -71,7 +71,7 @@ def test_isin_empty(empty):
tm.assert_numpy_array_equal(expected, result)
-class TestTake(object):
+class TestTake:
# https://github.com/pandas-dev/pandas/issues/20664
def test_take_warns(self):
diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py
index 7ce82d5bcdded..211b2e04b93cc 100644
--- a/pandas/tests/arrays/categorical/test_analytics.py
+++ b/pandas/tests/arrays/categorical/test_analytics.py
@@ -12,7 +12,7 @@
import pandas.util.testing as tm
-class TestCategoricalAnalytics(object):
+class TestCategoricalAnalytics:
def test_min_max(self):
diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py
index 86dbc5ebf9fe1..4e12eba805ab6 100644
--- a/pandas/tests/arrays/categorical/test_api.py
+++ b/pandas/tests/arrays/categorical/test_api.py
@@ -9,7 +9,7 @@
import pandas.util.testing as tm
-class TestCategoricalAPI(object):
+class TestCategoricalAPI:
def test_ordered_api(self):
# GH 9347
@@ -448,7 +448,7 @@ def test_set_categories_inplace(self):
tm.assert_index_equal(cat.categories, Index(['a', 'b', 'c', 'd']))
-class TestPrivateCategoricalAPI(object):
+class TestPrivateCategoricalAPI:
def test_codes_immutable(self):
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index f07e3aba53cd4..2c5a1facbd7cc 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -16,7 +16,7 @@
import pandas.util.testing as tm
-class TestCategoricalConstructors(object):
+class TestCategoricalConstructors:
def test_validate_ordered(self):
# see gh-14058
diff --git a/pandas/tests/arrays/categorical/test_dtypes.py b/pandas/tests/arrays/categorical/test_dtypes.py
index 3c7f2eac23ae5..a63266350404e 100644
--- a/pandas/tests/arrays/categorical/test_dtypes.py
+++ b/pandas/tests/arrays/categorical/test_dtypes.py
@@ -8,7 +8,7 @@
import pandas.util.testing as tm
-class TestCategoricalDtypes(object):
+class TestCategoricalDtypes:
def test_is_equal_dtype(self):
diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py
index 294344da7c95e..337da2dbbca19 100644
--- a/pandas/tests/arrays/categorical/test_indexing.py
+++ b/pandas/tests/arrays/categorical/test_indexing.py
@@ -84,7 +84,7 @@ def test_setitem_same_ordered_rasies(self, other):
target[mask] = other[mask]
-class TestCategoricalIndexing(object):
+class TestCategoricalIndexing:
def test_getitem_listlike(self):
diff --git a/pandas/tests/arrays/categorical/test_missing.py b/pandas/tests/arrays/categorical/test_missing.py
index b4b361dabac61..5218299a80545 100644
--- a/pandas/tests/arrays/categorical/test_missing.py
+++ b/pandas/tests/arrays/categorical/test_missing.py
@@ -12,7 +12,7 @@
import pandas.util.testing as tm
-class TestCategoricalMissing(object):
+class TestCategoricalMissing:
def test_na_flags_int_categories(self):
# #1457
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index c7a4e0a5fe380..7b7678cdef777 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -127,7 +127,7 @@ def test_comparisons(self):
tm.assert_numpy_array_equal(res, exp)
-class TestCategoricalOps(object):
+class TestCategoricalOps:
def test_compare_frame(self):
# GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame
diff --git a/pandas/tests/arrays/categorical/test_repr.py b/pandas/tests/arrays/categorical/test_repr.py
index 98e255692acc8..0d06cdcd3e010 100644
--- a/pandas/tests/arrays/categorical/test_repr.py
+++ b/pandas/tests/arrays/categorical/test_repr.py
@@ -18,7 +18,7 @@ def test_print(self):
assert actual == expected
-class TestCategoricalRepr(object):
+class TestCategoricalRepr:
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
diff --git a/pandas/tests/arrays/categorical/test_sorting.py b/pandas/tests/arrays/categorical/test_sorting.py
index 3d55862cd2cc0..0c8ed7a31aeb6 100644
--- a/pandas/tests/arrays/categorical/test_sorting.py
+++ b/pandas/tests/arrays/categorical/test_sorting.py
@@ -7,7 +7,7 @@
import pandas.util.testing as tm
-class TestCategoricalSort(object):
+class TestCategoricalSort:
def test_argsort(self):
c = Categorical([5, 3, 1, 4, 2], ordered=True)
diff --git a/pandas/tests/arrays/categorical/test_subclass.py b/pandas/tests/arrays/categorical/test_subclass.py
index 7e90f8d51a3ef..f19ac006d39e7 100644
--- a/pandas/tests/arrays/categorical/test_subclass.py
+++ b/pandas/tests/arrays/categorical/test_subclass.py
@@ -4,7 +4,7 @@
import pandas.util.testing as tm
-class TestCategoricalSubclassing(object):
+class TestCategoricalSubclassing:
def test_constructor(self):
sc = tm.SubclassedCategorical(['a', 'b', 'c'])
diff --git a/pandas/tests/arrays/categorical/test_warnings.py b/pandas/tests/arrays/categorical/test_warnings.py
index 23d00585f950e..147ad5f8b3ecf 100644
--- a/pandas/tests/arrays/categorical/test_warnings.py
+++ b/pandas/tests/arrays/categorical/test_warnings.py
@@ -6,7 +6,7 @@
import pandas.util.testing as tm
-class TestCategoricalWarnings(object):
+class TestCategoricalWarnings:
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip('IPython', minversion="6.0.0")
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index e81e64d90ff5f..30b4d0f89ae7e 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -24,7 +24,7 @@ def left_right_dtypes(request):
return request.param
-class TestMethods(object):
+class TestMethods:
@pytest.mark.parametrize('new_closed', [
'left', 'right', 'both', 'neither'])
@@ -47,7 +47,7 @@ def test_where_raises(self, other):
ser.where([True, False, True], other=other)
-class TestSetitem(object):
+class TestSetitem:
def test_set_na(self, left_right_dtypes):
left, right = left_right_dtypes
diff --git a/pandas/tests/arrays/interval/test_ops.py b/pandas/tests/arrays/interval/test_ops.py
index bdbd145ed2a80..7f53c40d7bf4e 100644
--- a/pandas/tests/arrays/interval/test_ops.py
+++ b/pandas/tests/arrays/interval/test_ops.py
@@ -27,7 +27,7 @@ def start_shift(request):
return request.param
-class TestOverlaps(object):
+class TestOverlaps:
def test_overlaps_interval(
self, constructor, start_shift, closed, other_closed):
diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py
index 42a29654b44d5..b2f254a556603 100644
--- a/pandas/tests/arrays/sparse/test_arithmetics.py
+++ b/pandas/tests/arrays/sparse/test_arithmetics.py
@@ -8,7 +8,7 @@
import pandas.util.testing as tm
-class TestSparseArrayArithmetics(object):
+class TestSparseArrayArithmetics:
_base = np.array
_klass = pd.SparseArray
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index 88758c5d5c959..2fb675ea74fa8 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -20,7 +20,7 @@ def kind(request):
return request.param
-class TestSparseArray(object):
+class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
@@ -814,7 +814,7 @@ def test_nonzero(self):
tm.assert_numpy_array_equal(expected, result)
-class TestSparseArrayAnalytics(object):
+class TestSparseArrayAnalytics:
@pytest.mark.parametrize('data,pos,neg', [
([True, True, True], True, False),
@@ -1071,7 +1071,7 @@ def test_npoints(self):
assert arr.npoints == 1
-class TestAccessor(object):
+class TestAccessor:
@pytest.mark.parametrize('attr', [
'npoints', 'density', 'fill_value', 'sp_values',
diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py
index c1c72f823c8b6..44bda995f9a78 100644
--- a/pandas/tests/arrays/sparse/test_libsparse.py
+++ b/pandas/tests/arrays/sparse/test_libsparse.py
@@ -42,7 +42,7 @@ def _check_case_dict(case):
_check_case([], [], [], [], [], [])
-class TestSparseIndexUnion(object):
+class TestSparseIndexUnion:
def test_index_make_union(self):
def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
@@ -190,7 +190,7 @@ def test_int_index_make_union(self):
a.make_union(b)
-class TestSparseIndexIntersect(object):
+class TestSparseIndexIntersect:
@td.skip_if_windows
def test_intersect(self):
@@ -242,7 +242,7 @@ def test_intersect_identical(self):
assert case.intersect(case).equals(case)
-class TestSparseIndexCommon(object):
+class TestSparseIndexCommon:
def test_int_internal(self):
idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind='integer')
@@ -390,7 +390,7 @@ def _check(index):
# corner cases
-class TestBlockIndex(object):
+class TestBlockIndex:
def test_block_internal(self):
idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind='block')
@@ -477,7 +477,7 @@ def test_to_block_index(self):
assert index.to_block_index() is index
-class TestIntIndex(object):
+class TestIntIndex:
def test_check_integrity(self):
@@ -562,7 +562,7 @@ def test_to_int_index(self):
assert index.to_int_index() is index
-class TestSparseOperators(object):
+class TestSparseOperators:
def _op_tests(self, sparse_op, python_op):
def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py
index b68ec2bf348b4..cca421482bb66 100644
--- a/pandas/tests/arrays/test_array.py
+++ b/pandas/tests/arrays/test_array.py
@@ -257,7 +257,7 @@ def test_array_not_registered(registry_without_decimal):
tm.assert_equal(result, expected)
-class TestArrayAnalytics(object):
+class TestArrayAnalytics:
def test_searchsorted(self, string_dtype):
arr = pd.array(['a', 'b', 'c'], dtype=string_dtype)
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 2a442f71f854b..49eaab1cc0170 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -54,7 +54,7 @@ def timedelta_index(request):
return pd.TimedeltaIndex(['1 Day', '3 Hours', 'NaT'])
-class SharedTests(object):
+class SharedTests:
index_cls = None
def test_compare_len1_raises(self):
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py
index 60caf61782bbf..53dd960c86edb 100644
--- a/pandas/tests/arrays/test_datetimes.py
+++ b/pandas/tests/arrays/test_datetimes.py
@@ -15,7 +15,7 @@
import pandas.util.testing as tm
-class TestDatetimeArrayConstructor(object):
+class TestDatetimeArrayConstructor:
def test_freq_validation(self):
# GH#24623 check that invalid instances cannot be created with the
# public constructor
@@ -85,7 +85,7 @@ def test_copy(self):
assert arr._data is not data
-class TestDatetimeArrayComparisons(object):
+class TestDatetimeArrayComparisons:
# TODO: merge this into tests/arithmetic/test_datetime64 once it is
# sufficiently robust
@@ -118,7 +118,7 @@ def test_cmp_dt64_arraylike_tznaive(self, all_compare_operators):
tm.assert_numpy_array_equal(result, expected)
-class TestDatetimeArray(object):
+class TestDatetimeArray:
def test_astype_to_same(self):
arr = DatetimeArray._from_sequence(['2000'], tz='US/Central')
result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False)
@@ -240,7 +240,7 @@ def test_array_interface(self):
tm.assert_numpy_array_equal(result, expected)
-class TestSequenceToDT64NS(object):
+class TestSequenceToDT64NS:
def test_tz_dtype_mismatch_raises(self):
arr = DatetimeArray._from_sequence(['2000'], tz='US/Central')
@@ -254,7 +254,7 @@ def test_tz_dtype_matches(self):
tm.assert_numpy_array_equal(arr._data, result)
-class TestReductions(object):
+class TestReductions:
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_min_max(self, tz):
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index 3dae2c5ce61c9..4512e98ebe0cf 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -94,7 +94,7 @@ def test_repr_array_long():
assert result == expected
-class TestConstructors(object):
+class TestConstructors:
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
@@ -365,7 +365,7 @@ def test_compare_array(self, data, all_compare_operators):
self._compare_other(data, op_name, other)
-class TestCasting(object):
+class TestCasting:
pass
@pytest.mark.parametrize('dropna', [True, False])
diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py
index 99255d819d28e..c27200e3273ee 100644
--- a/pandas/tests/arrays/test_period.py
+++ b/pandas/tests/arrays/test_period.py
@@ -281,7 +281,7 @@ def test_repr_large():
# ----------------------------------------------------------------------------
# Reductions
-class TestReductions(object):
+class TestReductions:
def test_min_max(self):
arr = period_array([
diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py
index 1fec533a14a6f..e67227b1236ed 100644
--- a/pandas/tests/arrays/test_timedeltas.py
+++ b/pandas/tests/arrays/test_timedeltas.py
@@ -8,7 +8,7 @@
import pandas.util.testing as tm
-class TestTimedeltaArrayConstructor(object):
+class TestTimedeltaArrayConstructor:
def test_only_1dim_accepted(self):
# GH#25282
arr = np.array([0, 1, 2, 3], dtype='m8[h]').astype('m8[ns]')
@@ -62,7 +62,7 @@ def test_copy(self):
assert arr._data.base is not data
-class TestTimedeltaArray(object):
+class TestTimedeltaArray:
def test_np_sum(self):
# GH#25282
vals = np.arange(5, dtype=np.int64).view('m8[h]').astype('m8[ns]')
@@ -130,7 +130,7 @@ def test_setitem_clears_freq(self):
assert a.freq is None
-class TestReductions(object):
+class TestReductions:
def test_min_max(self):
arr = TimedeltaArray._from_sequence([
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 3908a7f1f99aa..00e95da123f54 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -110,7 +110,7 @@ def _is_py3_complex_incompat(result, expected):
@td.skip_if_no_ne
-class TestEvalNumexprPandas(object):
+class TestEvalNumexprPandas:
@classmethod
def setup_class(cls):
@@ -784,7 +784,7 @@ def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
# gh-12388: Typecasting rules consistency with python
-class TestTypeCasting(object):
+class TestTypeCasting:
@pytest.mark.parametrize('op', ['+', '-', '*', '**', '/'])
# maybe someday... numexpr has too many upcasting rules now
# chain(*(np.sctypes[x] for x in ['uint', 'int', 'float']))
@@ -817,7 +817,7 @@ def should_warn(*args):
return not_mono and only_one_dt
-class TestAlignment(object):
+class TestAlignment:
index_types = 'i', 'u', 'dt'
lhs_index_types = index_types + ('s',) # 'p'
@@ -1061,7 +1061,7 @@ def test_performance_warning_for_poor_alignment(self, engine, parser):
# Slightly more complex ops
@td.skip_if_no_ne
-class TestOperationsNumExprPandas(object):
+class TestOperationsNumExprPandas:
@classmethod
def setup_class(cls):
@@ -1588,7 +1588,7 @@ def setup_class(cls):
@td.skip_if_no_ne
-class TestMathPythonPython(object):
+class TestMathPythonPython:
@classmethod
def setup_class(cls):
@@ -1734,7 +1734,7 @@ def setup_class(cls):
_var_s = randn(10)
-class TestScope(object):
+class TestScope:
def test_global_scope(self, engine, parser):
e = '_var_s * 2'
@@ -1882,7 +1882,7 @@ def test_negate_lt_eq_le(engine, parser):
tm.assert_frame_equal(result, expected)
-class TestValidate(object):
+class TestValidate:
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
diff --git a/pandas/tests/config/test_config.py b/pandas/tests/config/test_config.py
index 639d4adb3a4d3..5131459495edd 100644
--- a/pandas/tests/config/test_config.py
+++ b/pandas/tests/config/test_config.py
@@ -9,7 +9,7 @@
import pandas as pd
-class TestConfig(object):
+class TestConfig:
@classmethod
def setup_class(cls):
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py
index 5c1f6ff405b3b..3634b59047f76 100644
--- a/pandas/tests/dtypes/test_common.py
+++ b/pandas/tests/dtypes/test_common.py
@@ -29,7 +29,7 @@ def to_numpy_dtypes(dtypes):
return [getattr(np, dt) for dt in dtypes if isinstance(dt, str)]
-class TestPandasDtype(object):
+class TestPandasDtype:
# Passing invalid dtype, both as a string or object, must raise TypeError
# Per issue GH15520
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 951a87ab7e962..81da9072909c0 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -20,7 +20,7 @@
import pandas.util.testing as tm
-class Base(object):
+class Base:
def setup_method(self, method):
self.dtype = self.create()
@@ -646,7 +646,7 @@ def test_caching(self):
assert len(IntervalDtype._cache) == 0
-class TestCategoricalDtypeParametrized(object):
+class TestCategoricalDtypeParametrized:
@pytest.mark.parametrize('categories', [
list('abcd'),
diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py
index 2bb3559d56d61..f0642a0a9f1b9 100644
--- a/pandas/tests/dtypes/test_generic.py
+++ b/pandas/tests/dtypes/test_generic.py
@@ -10,7 +10,7 @@
from pandas.util import testing as tm
-class TestABCClasses(object):
+class TestABCClasses:
tuples = [[1, 2, 2], ['red', 'blue', 'red']]
multi_index = pd.MultiIndex.from_arrays(tuples, names=('number', 'color'))
datetime_index = pd.to_datetime(['2000/1/1', '2010/1/1'])
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index ba5ac744ebf05..3db958e2c64aa 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -111,7 +111,7 @@ def test_is_sequence():
assert (not is_seq("abcd"))
assert (not is_seq(np.int64))
- class A(object):
+ class A:
def __getitem__(self):
return 1
@@ -175,7 +175,7 @@ def test_is_dict_like_fails(ll):
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
- class DictLike(object):
+ class DictLike:
def __init__(self, d):
self.d = d
@@ -199,7 +199,7 @@ def __contains__(self, key):
def test_is_file_like():
- class MockFile(object):
+ class MockFile:
pass
is_file = inference.is_file_like
@@ -253,13 +253,13 @@ def test_is_names_tuple_fails(ll):
def test_is_hashable():
# all new-style classes are hashable by default
- class HashableClass(object):
+ class HashableClass:
pass
- class UnhashableClass1(object):
+ class UnhashableClass1:
__hash__ = None
- class UnhashableClass2(object):
+ class UnhashableClass2:
def __hash__(self):
raise TypeError("Not hashable")
@@ -315,7 +315,7 @@ def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
-class TestInference(object):
+class TestInference:
def test_infer_dtype_bytes(self):
compare = 'bytes'
@@ -497,7 +497,7 @@ def test_mixed_dtypes_remain_object_array(self):
tm.assert_numpy_array_equal(result, array)
-class TestTypeInference(object):
+class TestTypeInference:
# Dummy class used for testing with Python objects
class Dummy():
@@ -1084,7 +1084,7 @@ def test_categorical(self):
assert result == 'categorical'
-class TestNumberScalar(object):
+class TestNumberScalar:
def test_is_number(self):
@@ -1227,7 +1227,7 @@ def test_is_timedelta(self):
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]'))
-class TestIsScalar(object):
+class TestIsScalar:
def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index 1f774b2c0cb05..623e1f95c1897 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -53,7 +53,7 @@ def test_notna_notnull(notna_f):
assert (isinstance(notna_f(s), Series))
-class TestIsNA(object):
+class TestIsNA:
def test_0d_array(self):
assert isna(np.array(np.nan))
@@ -337,7 +337,7 @@ def test_na_value_for_dtype(dtype, na_value):
assert result is na_value
-class TestNAObj(object):
+class TestNAObj:
_1d_methods = ['isnaobj', 'isnaobj_old']
_2d_methods = ['isnaobj2d', 'isnaobj2d_old']
@@ -435,7 +435,7 @@ def test_empty_like(self):
]
-class TestLibMissing(object):
+class TestLibMissing:
def test_checknull(self):
for value in na_vals:
assert libmissing.checknull(value)
diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py
index 15ceb6adff59c..2aece66d94150 100644
--- a/pandas/tests/extension/arrow/test_bool.py
+++ b/pandas/tests/extension/arrow/test_bool.py
@@ -26,7 +26,7 @@ def data_missing():
return ArrowBoolArray.from_scalars([None, True])
-class BaseArrowTests(object):
+class BaseArrowTests:
pass
diff --git a/pandas/tests/extension/base/base.py b/pandas/tests/extension/base/base.py
index 2a4a1b9c4668b..b11603c0e185a 100644
--- a/pandas/tests/extension/base/base.py
+++ b/pandas/tests/extension/base/base.py
@@ -1,7 +1,7 @@
import pandas.util.testing as tm
-class BaseExtensionTests(object):
+class BaseExtensionTests:
assert_equal = staticmethod(tm.assert_equal)
assert_series_equal = staticmethod(tm.assert_series_equal)
assert_frame_equal = staticmethod(tm.assert_frame_equal)
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 1fa874e7341ca..8c9d7fd756377 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -65,7 +65,7 @@ def data_for_grouping():
return DecimalArray([b, b, na, na, a, a, b, c])
-class BaseDecimal(object):
+class BaseDecimal:
def assert_series_equal(self, left, right, *args, **kwargs):
def convert(x):
@@ -149,7 +149,7 @@ class TestMissing(BaseDecimal, base.BaseMissingTests):
pass
-class Reduce(object):
+class Reduce:
def check_reduce(self, s, op_name, skipna):
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 5b001325216bf..5eb6aba710234 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -71,7 +71,7 @@ def data_for_grouping():
])
-class BaseJSON(object):
+class BaseJSON:
# NumPy doesn't handle an array of equal-length UserDicts.
# The default assert_series_equal eventually does a
# Series.values, which raises. We work around it by
diff --git a/pandas/tests/extension/test_common.py b/pandas/tests/extension/test_common.py
index db3f3b80bca6b..14db04e1bcd61 100644
--- a/pandas/tests/extension/test_common.py
+++ b/pandas/tests/extension/test_common.py
@@ -35,7 +35,7 @@ def astype(self, dtype, copy=True):
return np.array(self, dtype=dtype, copy=copy)
-class TestExtensionArrayDtype(object):
+class TestExtensionArrayDtype:
@pytest.mark.parametrize('values', [
pd.Categorical([]),
diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index e3fdd0db3e8b4..b228f44129623 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -74,7 +74,7 @@ def na_value():
# ----------------------------------------------------------------------------
-class BaseDatetimeTests(object):
+class BaseDatetimeTests:
pass
diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py
index 6eedbfb4aba39..221bf0b7d0648 100644
--- a/pandas/tests/extension/test_interval.py
+++ b/pandas/tests/extension/test_interval.py
@@ -70,7 +70,7 @@ def data_for_grouping():
return IntervalArray.from_tuples([b, b, None, None, a, a, b, c])
-class BaseInterval(object):
+class BaseInterval:
pass
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 84e20232c4116..f7a312c00d193 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -136,7 +136,7 @@ def skip_numpy_object(dtype):
skip_nested = pytest.mark.usefixtures('skip_numpy_object')
-class BaseNumPyTests(object):
+class BaseNumPyTests:
pass
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index fb3c4e87abcf5..4e86e8ee8b24e 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -54,7 +54,7 @@ def na_value():
return pd.NaT
-class BasePeriodTests(object):
+class BasePeriodTests:
pass
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 3e1186f59478f..1fdca8799c44c 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -81,7 +81,7 @@ def data_for_grouping(request):
fill_value=request.param)
-class BaseSparseTests(object):
+class BaseSparseTests:
def _check_unsupported(self, data):
if data.dtype == SparseDtype(int, 0):
diff --git a/pandas/tests/frame/common.py b/pandas/tests/frame/common.py
index 0485ddb0e6f43..4b71405e20d32 100644
--- a/pandas/tests/frame/common.py
+++ b/pandas/tests/frame/common.py
@@ -18,7 +18,7 @@
_mixed_frame['foo'] = 'bar'
-class TestData(object):
+class TestData:
@cache_readonly
def frame(self):
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index f2da432e9d135..a0651d7fe6602 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -300,7 +300,7 @@ def test_set_index_raise_on_len(self, frame_of_index_cols, box, length,
def test_set_index_custom_label_type(self):
# GH 24969
- class Thing(object):
+ class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
@@ -1376,7 +1376,7 @@ def test_droplevel(self):
tm.assert_frame_equal(result, expected)
-class TestIntervalIndex(object):
+class TestIntervalIndex:
def test_setitem(self):
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index e405a21ca71db..57a1e8f5d675f 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -231,7 +231,7 @@ def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
-class TestDataFrameAnalytics(object):
+class TestDataFrameAnalytics:
# ---------------------------------------------------------------------
# Correlation and covariance
@@ -2242,7 +2242,7 @@ def df_main_dtypes():
'timedelta'])
-class TestNLargestNSmallest(object):
+class TestNLargestNSmallest:
dtype_error_msg_template = ("Column {column!r} has dtype {dtype}, cannot "
"use method {method!r} with this dtype")
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 04418779b7062..66f99825abc58 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -16,7 +16,7 @@
assert_almost_equal, assert_frame_equal, assert_series_equal)
-class SharedWithSparse(object):
+class SharedWithSparse:
"""
A collection of tests DataFrame and SparseDataFrame can share.
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index b2f531bfea249..1c1da2bdbcea6 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -579,7 +579,7 @@ def test_apply_dup_names_multi_agg(self):
tm.assert_frame_equal(result, expected)
-class TestInferOutputShape(object):
+class TestInferOutputShape:
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index 307436e1e4786..877f0ed0201d3 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -14,7 +14,7 @@
# Comparisons
-class TestFrameComparisons(object):
+class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_comparison_invalid(self):
@@ -138,7 +138,7 @@ def test_df_string_comparison(self):
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
-class TestFrameFlexComparisons(object):
+class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
@@ -291,7 +291,7 @@ def test_df_flex_cmp_constant_return_types_empty(self, opname):
# -------------------------------------------------------------------
# Arithmetic
-class TestFrameFlexArithmetic(object):
+class TestFrameFlexArithmetic:
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
@@ -445,7 +445,7 @@ def test_arith_flex_zero_len_raises(self):
df_len0.sub(df['A'], axis=None, fill_value=3)
-class TestFrameArithmetic(object):
+class TestFrameArithmetic:
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index d071e13599e5d..695b5f88c0c12 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2215,7 +2215,7 @@ def test_from_records_bad_index_column(self):
DataFrame.from_records(df, index=2)
def test_from_records_non_tuple(self):
- class Record(object):
+ class Record:
def __init__(self, *args):
self.args = args
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py
index 9aad010a899d2..67c449026a531 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/test_convert_to.py
@@ -337,7 +337,7 @@ def test_to_records_dtype_mi(self, df, kwargs, expected):
def test_to_records_dict_like(self):
# see gh-18146
- class DictLike(object):
+ class DictLike:
def __init__(self, **kwargs):
self.d = kwargs.copy()
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index f58fe85cad258..53d920e116cd8 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -3363,7 +3363,7 @@ def test_transpose(self):
assert_frame_equal(result, expected)
-class TestDataFrameIndexingCategorical(object):
+class TestDataFrameIndexingCategorical:
def test_assignment(self):
# assignment
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index edce25566e361..30383f7ff8426 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -15,7 +15,7 @@
assert_frame_equal, assert_numpy_array_equal, assert_series_equal)
-class TestDataFrameUnaryOperators(object):
+class TestDataFrameUnaryOperators:
# __pos__, __neg__, __inv__
@pytest.mark.parametrize('df,expected', [
@@ -88,7 +88,7 @@ def test_pos_raises(self, df):
(+ df['a'])
-class TestDataFrameLogicalOperators(object):
+class TestDataFrameLogicalOperators:
# &, |, ^
def test_logical_ops_empty_frame(self):
@@ -203,7 +203,7 @@ def test_logical_with_nas(self):
assert_series_equal(result, expected)
-class TestDataFrameOperators(object):
+class TestDataFrameOperators:
@pytest.mark.parametrize('op', [operator.add, operator.sub,
operator.mul, operator.truediv])
diff --git a/pandas/tests/frame/test_period.py b/pandas/tests/frame/test_period.py
index 8b37d4ff2cf9e..e36f8107ba9fd 100644
--- a/pandas/tests/frame/test_period.py
+++ b/pandas/tests/frame/test_period.py
@@ -14,7 +14,7 @@ def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
-class TestPeriodIndex(object):
+class TestPeriodIndex:
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index d323a732d4895..7d5b3c9c67d8e 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -34,7 +34,7 @@ def skip_if_no_pandas_parser(parser):
pytest.skip("cannot evaluate with parser {0!r}".format(parser))
-class TestCompat(object):
+class TestCompat:
def setup_method(self, method):
self.df = DataFrame({'A': [1, 2, 3]})
@@ -158,7 +158,7 @@ def test_eval_resolvers_as_list(self):
dict1['a'] + dict2['b'])
-class TestDataFrameQueryWithMultiIndex(object):
+class TestDataFrameQueryWithMultiIndex:
def test_query_with_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
@@ -354,7 +354,7 @@ def to_series(mi, level):
@td.skip_if_no_ne
-class TestDataFrameQueryNumExprPandas(object):
+class TestDataFrameQueryNumExprPandas:
@classmethod
def setup_class(cls):
@@ -831,7 +831,7 @@ def test_query_builtin(self):
assert_frame_equal(expected, result)
-class TestDataFrameQueryStrings(object):
+class TestDataFrameQueryStrings:
def test_str_query_method(self, parser, engine):
df = DataFrame(np.random.randn(10, 1), columns=['b'])
@@ -1004,7 +1004,7 @@ def test_query_string_scalar_variable(self, parser, engine):
assert_frame_equal(e, r)
-class TestDataFrameEvalWithFrame(object):
+class TestDataFrameEvalWithFrame:
def setup_method(self, method):
self.frame = DataFrame(np.random.randn(10, 3), columns=list('abc'))
@@ -1031,7 +1031,7 @@ def test_invalid_type_for_operator_raises(self, parser, engine, op):
df.eval('a {0} b'.format(op), engine=engine, parser=parser)
-class TestDataFrameQueryBacktickQuoting(object):
+class TestDataFrameQueryBacktickQuoting:
@pytest.fixture(scope='class')
def df(self):
diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py
index fd6587c73b8fa..c5077f0c49112 100644
--- a/pandas/tests/frame/test_timezones.py
+++ b/pandas/tests/frame/test_timezones.py
@@ -18,7 +18,7 @@
import pandas.util.testing as tm
-class TestDataFrameTimezones(object):
+class TestDataFrameTimezones:
def test_frame_values_with_tz(self):
tz = "US/Central"
diff --git a/pandas/tests/frame/test_validate.py b/pandas/tests/frame/test_validate.py
index 6513c332c6798..8597d91550c77 100644
--- a/pandas/tests/frame/test_validate.py
+++ b/pandas/tests/frame/test_validate.py
@@ -8,7 +8,7 @@ def dataframe():
return DataFrame({'a': [1, 2], 'b': [3, 4]})
-class TestDataFrameValidate(object):
+class TestDataFrameValidate:
"""Tests for error handling related to data types of method arguments."""
@pytest.mark.parametrize("func", ["query", "eval", "set_index",
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index fdcf0e2172708..87eb1cfe9ec57 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -15,7 +15,7 @@
# Generic types test cases
-class Generic(object):
+class Generic:
@property
def _ndim(self):
@@ -578,7 +578,7 @@ def test_pct_change(self, periods, fill_method, limit, exp):
tm.assert_series_equal(res, Series(exp))
-class TestNDFrame(object):
+class TestNDFrame:
# tests that don't fit elsewhere
def test_sample(sel):
diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py
index 04349ba0bd9b3..cca215588f230 100644
--- a/pandas/tests/groupby/aggregate/test_other.py
+++ b/pandas/tests/groupby/aggregate/test_other.py
@@ -361,7 +361,7 @@ def test_agg_callables():
# GH 7929
df = DataFrame({'foo': [1, 2], 'bar': [3, 4]}).astype(np.int64)
- class fn_class(object):
+ class fn_class:
def __call__(self, x):
return sum(x)
diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py
index d7ea9bdf9209b..afa4df21e21e9 100644
--- a/pandas/tests/groupby/test_bin_groupby.py
+++ b/pandas/tests/groupby/test_bin_groupby.py
@@ -48,7 +48,7 @@ def test_series_bin_grouper():
assert_almost_equal(counts, exp_counts)
-class TestBinGroupers(object):
+class TestBinGroupers:
def setup_method(self, method):
self.obj = np.random.randn(10, 1)
@@ -121,11 +121,11 @@ def _ohlc(group):
_check('float64')
-class TestMoments(object):
+class TestMoments:
pass
-class TestReducer(object):
+class TestReducer:
def test_int_index(self):
from pandas.core.series import Series
diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py
index af3051f5eb266..e83f8e5216cae 100644
--- a/pandas/tests/groupby/test_counting.py
+++ b/pandas/tests/groupby/test_counting.py
@@ -8,7 +8,7 @@
from pandas.util.testing import assert_frame_equal, assert_series_equal
-class TestCounting(object):
+class TestCounting:
def test_cumcount(self):
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'])
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 187fea5403aea..53ecebdbcb524 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1041,7 +1041,7 @@ def test_count_uses_size_on_exception():
class RaisingObjectException(Exception):
pass
- class RaisingObject(object):
+ class RaisingObject:
def __init__(self, msg='I will raise inside Cython'):
super(RaisingObject, self).__init__()
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 867cb8365476e..013c370899de1 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -20,7 +20,7 @@
# --------------------------------
-class TestSelection(object):
+class TestSelection:
def test_select_bad_cols(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index 62e1a087a41aa..21c71154c95ef 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -15,7 +15,7 @@
from pandas.util.testing import assert_frame_equal, assert_series_equal
-class TestGroupBy(object):
+class TestGroupBy:
def test_groupby_with_timegrouper(self):
# GH 4161
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 3f0656615545c..b694d2c2dc44c 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -17,7 +17,7 @@
import pandas.util.testing as tm
-class Base(object):
+class Base:
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'nbytes']
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py
index 1b75d6bd34764..b7f411143a111 100644
--- a/pandas/tests/indexes/datetimes/test_arithmetic.py
+++ b/pandas/tests/indexes/datetimes/test_arithmetic.py
@@ -11,7 +11,7 @@
import pandas.util.testing as tm
-class TestDatetimeIndexArithmetic(object):
+class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# DatetimeIndex.shift is used in integer addition
diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py
index ddf6a6ded69f8..38a060bb0d1d3 100644
--- a/pandas/tests/indexes/datetimes/test_astype.py
+++ b/pandas/tests/indexes/datetimes/test_astype.py
@@ -13,7 +13,7 @@
import pandas.util.testing as tm
-class TestDatetimeIndex(object):
+class TestDatetimeIndex:
def test_astype(self):
# GH 13149, GH 13209
@@ -245,7 +245,7 @@ def test_integer_index_astype_datetime(self, tz, dtype):
tm.assert_index_equal(result, expected)
-class TestToPeriod(object):
+class TestToPeriod:
def setup_method(self, method):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index fa9d5b858a092..0441a48255f22 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -17,7 +17,7 @@
import pandas.util.testing as tm
-class TestDatetimeIndex(object):
+class TestDatetimeIndex:
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
@@ -667,7 +667,7 @@ def test_constructor_wrong_precision_raises(self):
pd.DatetimeIndex(['2000'], dtype='datetime64[us]')
-class TestTimeSeries(object):
+class TestTimeSeries:
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index a741023d1d8dd..7f03793d880b0 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -23,7 +23,7 @@
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
-class TestTimestampEquivDateRange(object):
+class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
@@ -598,7 +598,7 @@ def test_negative_non_tick_frequency_descending_dates(self,
tm.assert_index_equal(result, expected)
-class TestGenRangeGeneration(object):
+class TestGenRangeGeneration:
def test_generate(self):
rng1 = list(generate_range(START, END, offset=BDay()))
@@ -666,7 +666,7 @@ def test_mismatching_tz_raises_err(self, start, end):
pd.date_range(start, end, freq=BDay())
-class TestBusinessDateRange(object):
+class TestBusinessDateRange:
def test_constructor(self):
bdate_range(START, END, freq=BDay())
@@ -741,7 +741,7 @@ def test_bdays_and_open_boundaries(self, closed):
tm.assert_index_equal(result, expected)
-class TestCustomDateRange(object):
+class TestCustomDateRange:
def test_constructor(self):
bdate_range(START, END, freq=CDay())
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index c7147e6fe7063..834dbd73cdea5 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -15,7 +15,7 @@
randn = np.random.randn
-class TestDatetimeIndex(object):
+class TestDatetimeIndex:
def test_roundtrip_pickle_with_tz(self):
diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py
index df0a5742e7a49..ddc79a5aaa8fa 100644
--- a/pandas/tests/indexes/datetimes/test_formats.py
+++ b/pandas/tests/indexes/datetimes/test_formats.py
@@ -51,7 +51,7 @@ def test_to_native_types():
tm.assert_numpy_array_equal(result, expected)
-class TestDatetimeIndexRendering(object):
+class TestDatetimeIndexRendering:
def test_dti_repr_short(self):
dr = pd.date_range(start='1/1/2012', periods=1)
repr(dr)
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 8bdf4d84427ba..0e45f17849407 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -13,7 +13,7 @@
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
-class TestGetItem(object):
+class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D',
@@ -107,7 +107,7 @@ def test_dti_custom_getitem_matplotlib_hackaround(self):
tm.assert_numpy_array_equal(values, expected)
-class TestWhere(object):
+class TestWhere:
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
@@ -140,7 +140,7 @@ def test_where_tz(self):
tm.assert_index_equal(result, expected)
-class TestTake(object):
+class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
@@ -284,7 +284,7 @@ def test_take_fill_value_with_timezone(self):
idx.take(np.array([1, -5]))
-class TestDatetimeIndex(object):
+class TestDatetimeIndex:
@pytest.mark.parametrize('null', [None, np.nan, pd.NaT])
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern'])
def test_insert_nat(self, tz, null):
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index fd52a70e29848..91e614cd516b9 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -11,7 +11,7 @@
import pandas.util.testing as tm
-class TestTimeSeries(object):
+class TestTimeSeries:
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
@@ -88,7 +88,7 @@ def test_range_edges(self):
tm.assert_index_equal(idx, exp)
-class TestDatetime64(object):
+class TestDatetime64:
def test_datetimeindex_accessors(self):
dti_naive = pd.date_range(freq='D', start=datetime(1998, 1, 1),
diff --git a/pandas/tests/indexes/datetimes/test_missing.py b/pandas/tests/indexes/datetimes/test_missing.py
index c8d47caa7e947..5a6f2fa86b11f 100644
--- a/pandas/tests/indexes/datetimes/test_missing.py
+++ b/pandas/tests/indexes/datetimes/test_missing.py
@@ -4,7 +4,7 @@
import pandas.util.testing as tm
-class TestDatetimeIndex(object):
+class TestDatetimeIndex:
@pytest.mark.parametrize('tz', ['US/Eastern', 'Asia/Tokyo'])
def test_fillna_datetime64(self, tz):
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 84085141fcf92..799bec267dfb4 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -390,7 +390,7 @@ def test_offset_deprecated(self):
idx.offset = BDay()
-class TestBusinessDatetimeIndex(object):
+class TestBusinessDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END)
@@ -449,7 +449,7 @@ def test_identical(self):
assert not t1.identical(t2v)
-class TestCustomDatetimeIndex(object):
+class TestCustomDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END, freq='C')
diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py
index 64693324521b3..085e62ed9341e 100644
--- a/pandas/tests/indexes/datetimes/test_partial_slicing.py
+++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py
@@ -13,7 +13,7 @@
from pandas.util import testing as tm
-class TestSlicing(object):
+class TestSlicing:
def test_dti_slicing(self):
dti = date_range(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index 42338a751e0fc..985b5676640bf 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -16,7 +16,7 @@
from pandas.tseries.frequencies import to_offset
-class TestDatetimeIndexOps(object):
+class TestDatetimeIndexOps:
def test_dti_time(self):
rng = date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
@@ -254,7 +254,7 @@ def test_normalize_nat(self):
tm.assert_index_equal(result, expected)
-class TestDateTimeIndexToJulianDate(object):
+class TestDateTimeIndexToJulianDate:
def test_1700(self):
dr = date_range(start=Timestamp('1710-10-01'), periods=5, freq='D')
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index 9aa03f1e1c7bf..45a3a64216cab 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -16,7 +16,7 @@
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
-class TestDatetimeIndexSetOps(object):
+class TestDatetimeIndexSetOps:
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
@@ -318,7 +318,7 @@ def test_join_nonunique(self):
assert rs.is_monotonic
-class TestBusinessDatetimeIndex(object):
+class TestBusinessDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END)
@@ -472,7 +472,7 @@ def test_month_range_union_tz_dateutil(self, sort):
early_dr.union(late_dr, sort=sort)
-class TestCustomDatetimeIndex(object):
+class TestCustomDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END, freq='C')
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 760c4cd01c5b2..86ec8d2d6d1e3 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -43,7 +43,7 @@ def dst(self, dt):
fixed_off_no_name = FixedOffset(-330, None)
-class TestDatetimeIndexTimezones(object):
+class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
@@ -1092,7 +1092,7 @@ def test_iteration_preserves_nanoseconds(self, tz):
assert ts == index[i]
-class TestDateRange(object):
+class TestDateRange:
"""Tests for date_range with timezones"""
def test_hongkong_tz_convert(self):
# GH#1673 smoke test
@@ -1145,7 +1145,7 @@ def test_date_range_with_tz(self, tzstr):
assert stamp == rng[1]
-class TestToDatetime(object):
+class TestToDatetime:
"""Tests for the to_datetime constructor with timezones"""
def test_to_datetime_utc(self):
arr = np.array([dateutil.parser.parse('2012-06-13T01:39:00Z')],
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index ed1028b45f5db..27614d4f48e95 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -30,7 +30,7 @@
from pandas.util.testing import assert_series_equal
-class TestTimeConversionFormats(object):
+class TestTimeConversionFormats:
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_format(self, cache):
@@ -243,7 +243,7 @@ def test_to_datetime_parse_timezone_keeps_name(self):
tm.assert_index_equal(result, expected)
-class TestToDatetime(object):
+class TestToDatetime:
@pytest.mark.parametrize("s, _format, dt", [
['2015-1-1', '%G-%V-%u', datetime(2014, 12, 29, 0, 0)],
['2015-1-4', '%G-%V-%u', datetime(2015, 1, 1, 0, 0)],
@@ -824,7 +824,7 @@ def test_to_datetime_box_deprecated(self):
assert result == expected
-class TestToDatetimeUnit(object):
+class TestToDatetimeUnit:
@pytest.mark.parametrize('cache', [True, False])
def test_unit(self, cache):
# GH 11758
@@ -1147,7 +1147,7 @@ def test_to_datetime_errors_ignore_utc_true(self):
tm.assert_index_equal(result, expected)
-class TestToDatetimeMisc(object):
+class TestToDatetimeMisc:
def test_to_datetime_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
@@ -1396,7 +1396,7 @@ def test_dayfirst(self, cache):
tm.assert_index_equal(expected, idx6)
-class TestGuessDatetimeFormat(object):
+class TestGuessDatetimeFormat:
@td.skip_if_not_us_locale
def test_guess_datetime_format_for_array(self):
@@ -1419,7 +1419,7 @@ def test_guess_datetime_format_for_array(self):
assert format_for_string_of_nans is None
-class TestToDatetimeInferFormat(object):
+class TestToDatetimeInferFormat:
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_infer_datetime_format_consistent_format(self, cache):
@@ -1498,7 +1498,7 @@ def test_to_datetime_iso8601_noleading_0s(self, cache):
cache=cache), expected)
-class TestDaysInMonth(object):
+class TestDaysInMonth:
# tests for issue #10154
@pytest.mark.parametrize('cache', [True, False])
@@ -1544,7 +1544,7 @@ def test_day_not_in_month_ignore(self, cache):
format="%Y-%m-%d", cache=cache) == '2015-04-31'
-class TestDatetimeParsingWrappers(object):
+class TestDatetimeParsingWrappers:
@pytest.mark.parametrize('date_str,expected', list({
'2011-01-01': datetime(2011, 1, 1),
@@ -1857,7 +1857,7 @@ def julian_dates():
return pd.date_range('2014-1-1', periods=10).to_julian_date().values
-class TestOrigin(object):
+class TestOrigin:
def test_to_basic(self, julian_dates):
# gh-11276, gh-11745
diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py
index 0e82c85f2b917..16bcb459a22f9 100644
--- a/pandas/tests/indexes/interval/test_astype.py
+++ b/pandas/tests/indexes/interval/test_astype.py
@@ -9,7 +9,7 @@
import pandas.util.testing as tm
-class Base(object):
+class Base:
"""Tests common to IntervalIndex with any subtype"""
def test_astype_idempotent(self, index):
diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py
index 157b610114ce2..bd0fb4639fc3f 100644
--- a/pandas/tests/indexes/interval/test_construction.py
+++ b/pandas/tests/indexes/interval/test_construction.py
@@ -21,7 +21,7 @@ def name(request):
return request.param
-class Base(object):
+class Base:
"""
Common tests for all variations of IntervalIndex construction. Input data
to be supplied in breaks format, then converted by the subclass method
diff --git a/pandas/tests/indexes/interval/test_interval_new.py b/pandas/tests/indexes/interval/test_interval_new.py
index aa2c28e480247..5599009dbc898 100644
--- a/pandas/tests/indexes/interval/test_interval_new.py
+++ b/pandas/tests/indexes/interval/test_interval_new.py
@@ -7,7 +7,7 @@
pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316")
-class TestIntervalIndex(object):
+class TestIntervalIndex:
@pytest.mark.parametrize("side", ['right', 'left', 'both', 'neither'])
def test_get_loc_interval(self, closed, side):
diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py
index 5f0450da652dd..572fe5fbad100 100644
--- a/pandas/tests/indexes/interval/test_interval_range.py
+++ b/pandas/tests/indexes/interval/test_interval_range.py
@@ -18,7 +18,7 @@ def name(request):
return request.param
-class TestIntervalRange(object):
+class TestIntervalRange:
@pytest.mark.parametrize('freq, periods', [
(1, 100), (2.5, 40), (5, 20), (25, 4)])
diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py
index 3be56efd60c60..a3868a5675177 100644
--- a/pandas/tests/indexes/interval/test_interval_tree.py
+++ b/pandas/tests/indexes/interval/test_interval_tree.py
@@ -47,7 +47,7 @@ def tree(request, leaf_size):
return IntervalTree(left, left + 2, leaf_size=leaf_size)
-class TestIntervalTree(object):
+class TestIntervalTree:
def test_get_loc(self, tree):
result = tree.get_loc(1)
diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py
index 67b642e013880..226b8994aa328 100644
--- a/pandas/tests/indexes/period/test_arithmetic.py
+++ b/pandas/tests/indexes/period/test_arithmetic.py
@@ -8,7 +8,7 @@
import pandas.util.testing as tm
-class TestPeriodIndexArithmetic(object):
+class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# PeriodIndex.shift is used by __add__ and __sub__
diff --git a/pandas/tests/indexes/period/test_asfreq.py b/pandas/tests/indexes/period/test_asfreq.py
index 30b416e3fe9dd..373f42b930425 100644
--- a/pandas/tests/indexes/period/test_asfreq.py
+++ b/pandas/tests/indexes/period/test_asfreq.py
@@ -6,7 +6,7 @@
from pandas.util import testing as tm
-class TestPeriodIndex(object):
+class TestPeriodIndex:
def test_asfreq(self):
pi1 = period_range(freq='A', start='1/1/2001', end='1/1/2001')
diff --git a/pandas/tests/indexes/period/test_astype.py b/pandas/tests/indexes/period/test_astype.py
index 6abdf5962d6cf..3b3cc43eafdd3 100644
--- a/pandas/tests/indexes/period/test_astype.py
+++ b/pandas/tests/indexes/period/test_astype.py
@@ -8,7 +8,7 @@
import pandas.util.testing as tm
-class TestPeriodIndexAsType(object):
+class TestPeriodIndexAsType:
@pytest.mark.parametrize('dtype', [
float, 'timedelta64', 'timedelta64[ns]'])
def test_astype_raises(self, dtype):
diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py
index 536aa6fcef214..1c3125571a27d 100644
--- a/pandas/tests/indexes/period/test_construction.py
+++ b/pandas/tests/indexes/period/test_construction.py
@@ -13,7 +13,7 @@
import pandas.util.testing as tm
-class TestPeriodIndex(object):
+class TestPeriodIndex:
def setup_method(self, method):
pass
@@ -527,7 +527,7 @@ def test_map_with_string_constructor(self):
tm.assert_index_equal(res, expected)
-class TestSeriesPeriod(object):
+class TestSeriesPeriod:
def setup_method(self, method):
self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py
index 5b2940372b9d7..fbfafd8b46d31 100644
--- a/pandas/tests/indexes/period/test_formats.py
+++ b/pandas/tests/indexes/period/test_formats.py
@@ -48,7 +48,7 @@ def test_to_native_types():
tm.assert_numpy_array_equal(result, expected)
-class TestPeriodIndexRendering(object):
+class TestPeriodIndexRendering:
def test_frame_repr(self):
df = pd.DataFrame({"A": [1, 2, 3]},
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index fa8199b4e6163..922c6e8908ad9 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -12,7 +12,7 @@
from pandas.util import testing as tm
-class TestGetItem(object):
+class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range('2011-01-01', '2011-01-31', freq='D',
@@ -199,7 +199,7 @@ def test_getitem_day(self):
s[v]
-class TestWhere(object):
+class TestWhere:
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range('20130101', periods=5, freq='D')
@@ -233,7 +233,7 @@ def test_where_other(self):
tm.assert_index_equal(result, i2)
-class TestTake(object):
+class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
@@ -332,7 +332,7 @@ def test_take_fill_value(self):
idx.take(np.array([1, -5]))
-class TestIndexing(object):
+class TestIndexing:
def test_get_loc_msg(self):
idx = period_range('2000-1-1', freq='A', periods=10)
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index 0a1e7225463be..dbde7ecf3826d 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -6,7 +6,7 @@
from pandas.util import testing as tm
-class TestPeriodIndex(object):
+class TestPeriodIndex:
def setup_method(self, method):
pass
diff --git a/pandas/tests/indexes/period/test_period_range.py b/pandas/tests/indexes/period/test_period_range.py
index aa300111ba67a..ca75635e56161 100644
--- a/pandas/tests/indexes/period/test_period_range.py
+++ b/pandas/tests/indexes/period/test_period_range.py
@@ -4,7 +4,7 @@
import pandas.util.testing as tm
-class TestPeriodRange(object):
+class TestPeriodRange:
@pytest.mark.parametrize('freq', ['D', 'W', 'M', 'Q', 'A'])
def test_construction_from_string(self, freq):
diff --git a/pandas/tests/indexes/period/test_scalar_compat.py b/pandas/tests/indexes/period/test_scalar_compat.py
index b140a1f3c5b8b..92be4519cdefc 100644
--- a/pandas/tests/indexes/period/test_scalar_compat.py
+++ b/pandas/tests/indexes/period/test_scalar_compat.py
@@ -5,7 +5,7 @@
import pandas.util.testing as tm
-class TestPeriodIndexOps(object):
+class TestPeriodIndexOps:
def test_start_time(self):
index = period_range(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='MS')
diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py
index f7f1a7fadb6e0..29d07a0985574 100644
--- a/pandas/tests/indexes/period/test_setops.py
+++ b/pandas/tests/indexes/period/test_setops.py
@@ -11,7 +11,7 @@ def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
-class TestPeriodIndex(object):
+class TestPeriodIndex:
def test_joins(self, join_type):
index = period_range('1/1/2000', '1/20/2000', freq='D')
diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py
index 641400ebec925..00158d419f15f 100644
--- a/pandas/tests/indexes/period/test_tools.py
+++ b/pandas/tests/indexes/period/test_tools.py
@@ -14,7 +14,7 @@
import pandas.util.testing as tm
-class TestPeriodRepresentation(object):
+class TestPeriodRepresentation:
"""
Wish to match NumPy units
"""
@@ -54,7 +54,7 @@ def test_negone_ordinals(self):
repr(period)
-class TestPeriodIndex(object):
+class TestPeriodIndex:
def test_to_timestamp(self):
index = period_range(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
@@ -227,7 +227,7 @@ def test_searchsorted(self, freq):
pidx.searchsorted(pd.Period('2014-01-01', freq='5D'))
-class TestPeriodIndexConversion(object):
+class TestPeriodIndexConversion:
def test_tolist(self):
index = period_range(freq='A', start='1/1/2001', end='12/1/2009')
rs = index.tolist()
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 2a9246a5a9554..04d4cada13e11 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -221,7 +221,7 @@ def test_constructor_ndarray_like(self, array):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
- class ArrayLike(object):
+ class ArrayLike:
def __init__(self, array):
self.array = array
@@ -2406,7 +2406,7 @@ def test_intersect_str_dates(self):
tm.assert_index_equal(result, expected)
-class TestIndexUtils(object):
+class TestIndexUtils:
@pytest.mark.parametrize('data, names, expected', [
([[1, 2, 3]], None, Index([1, 2, 3])),
diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py
index 56e87cc32340f..451fb2ed7906d 100644
--- a/pandas/tests/indexes/test_common.py
+++ b/pandas/tests/indexes/test_common.py
@@ -17,7 +17,7 @@
import pandas.util.testing as tm
-class TestCommon(object):
+class TestCommon:
def test_droplevel(self, indices):
# GH 21115
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index 3173252e174ab..dbdf5b38a015d 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -25,7 +25,7 @@ def freq(request):
return request.param
-class TestTimedeltaIndexArithmetic(object):
+class TestTimedeltaIndexArithmetic:
# Addition and Subtraction Operations
# -------------------------------------------------------------
diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py
index 23e96dbc3d6ce..41b22cd926260 100644
--- a/pandas/tests/indexes/timedeltas/test_astype.py
+++ b/pandas/tests/indexes/timedeltas/test_astype.py
@@ -10,7 +10,7 @@
import pandas.util.testing as tm
-class TestTimedeltaIndex(object):
+class TestTimedeltaIndex:
def test_astype_object(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py
index 6eaffe4c6d999..0011b2d85f8d3 100644
--- a/pandas/tests/indexes/timedeltas/test_construction.py
+++ b/pandas/tests/indexes/timedeltas/test_construction.py
@@ -9,7 +9,7 @@
import pandas.util.testing as tm
-class TestTimedeltaIndex(object):
+class TestTimedeltaIndex:
def test_verify_integrity_deprecated(self):
# GH#23919
diff --git a/pandas/tests/indexes/timedeltas/test_formats.py b/pandas/tests/indexes/timedeltas/test_formats.py
index 09921fac80d22..55bafcdc02013 100644
--- a/pandas/tests/indexes/timedeltas/test_formats.py
+++ b/pandas/tests/indexes/timedeltas/test_formats.py
@@ -6,7 +6,7 @@
from pandas import TimedeltaIndex
-class TestTimedeltaIndexRendering(object):
+class TestTimedeltaIndexRendering:
@pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__'])
def test_representation(self, method):
idx1 = TimedeltaIndex([], freq='D')
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index 7233f53572625..0a8356eae9416 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -8,7 +8,7 @@
import pandas.util.testing as tm
-class TestGetItem(object):
+class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = timedelta_range('1 day', '31 day', freq='D', name='idx')
@@ -59,12 +59,12 @@ def test_timestamp_invalid_key(self, key):
tdi.get_loc(key)
-class TestWhere(object):
+class TestWhere:
# placeholder for symmetry with DatetimeIndex and PeriodIndex tests
pass
-class TestTake(object):
+class TestTake:
def test_take(self):
# GH 10295
idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx')
@@ -168,7 +168,7 @@ def test_take_fill_value(self):
idx.take(np.array([1, -5]))
-class TestTimedeltaIndex(object):
+class TestTimedeltaIndex:
def test_insert(self):
diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
index 9fce1c9acd488..0c1ecffec2bf7 100644
--- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py
+++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
@@ -6,7 +6,7 @@
from pandas.util.testing import assert_series_equal
-class TestSlicing(object):
+class TestSlicing:
def test_slice_keeps_name(self):
# GH4226
dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket')
diff --git a/pandas/tests/indexes/timedeltas/test_scalar_compat.py b/pandas/tests/indexes/timedeltas/test_scalar_compat.py
index 788d27eb8ab76..9ac6994dd9983 100644
--- a/pandas/tests/indexes/timedeltas/test_scalar_compat.py
+++ b/pandas/tests/indexes/timedeltas/test_scalar_compat.py
@@ -11,7 +11,7 @@
import pandas.util.testing as tm
-class TestVectorizedTimedelta(object):
+class TestVectorizedTimedelta:
def test_tdi_total_seconds(self):
# GH#10939
# test index
diff --git a/pandas/tests/indexes/timedeltas/test_setops.py b/pandas/tests/indexes/timedeltas/test_setops.py
index f7c3f764df0a0..51682f0c03202 100644
--- a/pandas/tests/indexes/timedeltas/test_setops.py
+++ b/pandas/tests/indexes/timedeltas/test_setops.py
@@ -5,7 +5,7 @@
import pandas.util.testing as tm
-class TestTimedeltaIndex(object):
+class TestTimedeltaIndex:
def test_union(self):
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 062e1c1e9f46d..79d064c57fa40 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -338,7 +338,7 @@ def test_unit_m_y_deprecated(self, unit):
assert re.match(msg, str(w[0].message))
-class TestTimeSeries(object):
+class TestTimeSeries:
def test_series_box_timedelta(self):
rng = timedelta_range('1 day 1 s', periods=5, freq='h')
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
index 1c06abad1ab29..971cbe65b5da1 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
@@ -8,7 +8,7 @@
from pandas.tseries.offsets import Day, Second
-class TestTimedeltas(object):
+class TestTimedeltas:
def test_timedelta_range(self):
diff --git a/pandas/tests/indexes/timedeltas/test_tools.py b/pandas/tests/indexes/timedeltas/test_tools.py
index 55664e6ca4323..81e51fed788e4 100644
--- a/pandas/tests/indexes/timedeltas/test_tools.py
+++ b/pandas/tests/indexes/timedeltas/test_tools.py
@@ -11,7 +11,7 @@
from pandas.util.testing import assert_series_equal
-class TestTimedeltas(object):
+class TestTimedeltas:
def test_to_timedelta(self):
def conv(v):
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 59415de57f95e..d2c68c621811b 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -29,7 +29,7 @@ def _axify(obj, key, axis):
return tuple(axes)
-class Base(object):
+class Base:
""" indexing comprehensive base class """
_objs = {'series', 'frame'}
diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py
index 938caec006f3a..d201b9644378f 100644
--- a/pandas/tests/indexing/interval/test_interval.py
+++ b/pandas/tests/indexing/interval/test_interval.py
@@ -6,7 +6,7 @@
import pandas.util.testing as tm
-class TestIntervalIndex(object):
+class TestIntervalIndex:
def setup_method(self, method):
self.s = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))
diff --git a/pandas/tests/indexing/interval/test_interval_new.py b/pandas/tests/indexing/interval/test_interval_new.py
index 4b2ec0c4d17bf..a6c42dd0ec632 100644
--- a/pandas/tests/indexing/interval/test_interval_new.py
+++ b/pandas/tests/indexing/interval/test_interval_new.py
@@ -7,7 +7,7 @@
pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316")
-class TestIntervalIndex(object):
+class TestIntervalIndex:
def setup_method(self, method):
self.s = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))
diff --git a/pandas/tests/indexing/multiindex/test_ix.py b/pandas/tests/indexing/multiindex/test_ix.py
index 4970190252e30..c719ff8b6a5c2 100644
--- a/pandas/tests/indexing/multiindex/test_ix.py
+++ b/pandas/tests/indexing/multiindex/test_ix.py
@@ -10,7 +10,7 @@
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
-class TestMultiIndexIx(object):
+class TestMultiIndexIx:
def test_frame_setitem_ix(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 073d40001a16b..8345940a15d43 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -25,7 +25,7 @@ def frame_random_data_integer_multi_index():
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
-class TestMultiIndexLoc(object):
+class TestMultiIndexLoc:
def test_loc_getitem_series(self):
# GH14730
diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py
index ccf017489e046..6dc8d67a971d3 100644
--- a/pandas/tests/indexing/multiindex/test_multiindex.py
+++ b/pandas/tests/indexing/multiindex/test_multiindex.py
@@ -10,7 +10,7 @@
from pandas.util import testing as tm
-class TestMultiIndexBasic(object):
+class TestMultiIndexBasic:
def test_multiindex_perf_warn(self):
diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py
index a295abf0ec2a9..e52e2a234600a 100644
--- a/pandas/tests/indexing/multiindex/test_partial.py
+++ b/pandas/tests/indexing/multiindex/test_partial.py
@@ -7,7 +7,7 @@
from pandas.util import testing as tm
-class TestMultiIndexPartial(object):
+class TestMultiIndexPartial:
def test_getitem_partial_int(self):
# GH 12416
diff --git a/pandas/tests/indexing/multiindex/test_set_ops.py b/pandas/tests/indexing/multiindex/test_set_ops.py
index 1f864de2dacb1..6c7d209333d62 100644
--- a/pandas/tests/indexing/multiindex/test_set_ops.py
+++ b/pandas/tests/indexing/multiindex/test_set_ops.py
@@ -4,7 +4,7 @@
from pandas.util import testing as tm
-class TestMultiIndexSetOps(object):
+class TestMultiIndexSetOps:
def test_multiindex_symmetric_difference(self):
# GH 13490
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index f8f037dbda46b..2fbd3a55508a1 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -12,7 +12,7 @@
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
-class TestMultiIndexSetItem(object):
+class TestMultiIndexSetItem:
def test_setitem_multiindex(self):
with catch_warnings(record=True):
diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py
index db7d079186708..6433a39fe4373 100644
--- a/pandas/tests/indexing/multiindex/test_slice.py
+++ b/pandas/tests/indexing/multiindex/test_slice.py
@@ -13,7 +13,7 @@
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
-class TestMultiIndexSlicers(object):
+class TestMultiIndexSlicers:
def test_per_axis_per_level_getitem(self):
diff --git a/pandas/tests/indexing/multiindex/test_sorted.py b/pandas/tests/indexing/multiindex/test_sorted.py
index f565c30fc3e2c..b1db930765093 100644
--- a/pandas/tests/indexing/multiindex/test_sorted.py
+++ b/pandas/tests/indexing/multiindex/test_sorted.py
@@ -7,7 +7,7 @@
from pandas.util import testing as tm
-class TestMultiIndexSorted(object):
+class TestMultiIndexSorted:
def test_getitem_multilevel_index_tuple_not_sorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
diff --git a/pandas/tests/indexing/test_callable.py b/pandas/tests/indexing/test_callable.py
index 6f66088a151ae..d3b1dd5d45962 100644
--- a/pandas/tests/indexing/test_callable.py
+++ b/pandas/tests/indexing/test_callable.py
@@ -5,7 +5,7 @@
import pandas.util.testing as tm
-class TestIndexingCallable(object):
+class TestIndexingCallable:
def test_frame_loc_ix_callable(self):
# GH 11485
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 83ed6007aab2b..b7f2c75b13c3e 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -15,7 +15,7 @@
from pandas.util.testing import assert_frame_equal, assert_series_equal
-class TestCategoricalIndex(object):
+class TestCategoricalIndex:
def setup_method(self, method):
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index 6070edca075c2..2141b1f62f226 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -8,7 +8,7 @@
from pandas.util import testing as tm
-class TestCaching(object):
+class TestCaching:
def test_slice_consolidate_invalidate_item_cache(self):
@@ -88,7 +88,7 @@ def test_setitem_cache_updating(self):
tm.assert_series_equal(out['A'], expected['A'])
-class TestChaining(object):
+class TestChaining:
def test_setitem_chained_setfault(self):
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index dec7708d78e83..e568e8d700cd3 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -36,7 +36,7 @@ def has_test(combo):
yield
-class CoercionBase(object):
+class CoercionBase:
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index 11fb90ebd9bb9..4c865d00b3adb 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -8,7 +8,7 @@
from pandas.util import testing as tm
-class TestDatetimeIndex(object):
+class TestDatetimeIndex:
def test_setitem_with_datetime_tz(self):
# 16889
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index f59151d3eb360..990953b58e9b6 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -13,7 +13,7 @@
ignore_ix = pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
-class TestFloatIndexers(object):
+class TestFloatIndexers:
def check(self, result, original, indexer, getitem):
"""
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index fa5955a3236c5..0d05d78d11480 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -412,7 +412,7 @@ def test_setitem_list(self):
tm.assert_frame_equal(result, df)
# ix with an object
- class TO(object):
+ class TO:
def __init__(self, value):
self.value = value
@@ -862,7 +862,7 @@ def test_no_reference_cycle(self):
assert wr() is None
-class TestSeriesNoneCoercion(object):
+class TestSeriesNoneCoercion:
EXPECTED_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
@@ -909,7 +909,7 @@ def test_coercion_with_loc_and_series(self):
tm.assert_series_equal(start_series, expected_series)
-class TestDataframeNoneCoercion(object):
+class TestDataframeNoneCoercion:
EXPECTED_SINGLE_ROW_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
diff --git a/pandas/tests/indexing/test_indexing_engines.py b/pandas/tests/indexing/test_indexing_engines.py
index 5d01196f796de..71a797741bbdb 100644
--- a/pandas/tests/indexing/test_indexing_engines.py
+++ b/pandas/tests/indexing/test_indexing_engines.py
@@ -5,7 +5,7 @@
import pandas.util.testing as tm
-class TestNumericEngine(object):
+class TestNumericEngine:
def test_is_monotonic(self, numeric_indexing_engine_type_and_dtype):
engine_type, dtype = numeric_indexing_engine_type_and_dtype
num = 1000
@@ -88,7 +88,7 @@ def test_get_pad_indexer(
tm.assert_numpy_array_equal(result, expected)
-class TestObjectEngine(object):
+class TestObjectEngine:
engine_type = libindex.ObjectEngine
dtype = np.object_
values = list('abc')
diff --git a/pandas/tests/indexing/test_indexing_slow.py b/pandas/tests/indexing/test_indexing_slow.py
index 42263c813ddab..1424612963c3c 100644
--- a/pandas/tests/indexing/test_indexing_slow.py
+++ b/pandas/tests/indexing/test_indexing_slow.py
@@ -6,7 +6,7 @@
import pandas.util.testing as tm
-class TestIndexingSlow(object):
+class TestIndexingSlow:
@pytest.mark.slow
def test_large_dataframe_indexing(self):
diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py
index fb4dfbb39ce94..bd983e8aaae1e 100644
--- a/pandas/tests/indexing/test_ix.py
+++ b/pandas/tests/indexing/test_ix.py
@@ -24,7 +24,7 @@ def test_ix_deprecation():
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
-class TestIX(object):
+class TestIX:
def test_ix_loc_setitem_consistency(self):
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index e8ce5bc4c36ef..c1d530cc890e5 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -14,7 +14,7 @@
from pandas.util import testing as tm
-class TestPartialSetting(object):
+class TestPartialSetting:
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
def test_partial_setting(self):
diff --git a/pandas/tests/indexing/test_timedelta.py b/pandas/tests/indexing/test_timedelta.py
index acd8bee3e5663..8e7a71ad3d71e 100644
--- a/pandas/tests/indexing/test_timedelta.py
+++ b/pandas/tests/indexing/test_timedelta.py
@@ -5,7 +5,7 @@
from pandas.util import testing as tm
-class TestTimedeltaIndexing(object):
+class TestTimedeltaIndexing:
def test_boolean_indexing(self):
# GH 14946
df = pd.DataFrame({'x': range(10)})
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 4899703c0c72b..70ddb89ddef1a 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -193,7 +193,7 @@ def create_mgr(descr, item_shape=None):
[mgr_items] + [np.arange(n) for n in item_shape])
-class TestBlock(object):
+class TestBlock:
def setup_method(self, method):
# self.fblock = get_float_ex() # a,c,e
@@ -295,7 +295,7 @@ def test_make_block_same_class(self):
dtype=block.values.dtype)
-class TestDatetimeBlock(object):
+class TestDatetimeBlock:
def test_try_coerce_arg(self):
block = create_block('datetime', [0])
@@ -313,7 +313,7 @@ def test_try_coerce_arg(self):
assert pd.Timestamp('2010-10-10') == pd.Timestamp(coerced)
-class TestBlockManager(object):
+class TestBlockManager:
def test_constructor_corner(self):
pass
@@ -834,7 +834,7 @@ def test_validate_bool_args(self):
bm1.replace_list([1], [2], inplace=value)
-class TestIndexing(object):
+class TestIndexing:
# Nosetests-style data-driven tests.
#
# This test applies different indexing routines to block managers and
@@ -1037,7 +1037,7 @@ def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,
# reindex_indexer(new_labels, indexer, axis)
-class TestBlockPlacement(object):
+class TestBlockPlacement:
def test_slice_len(self):
assert len(BlockPlacement(slice(0, 4))) == 4
@@ -1179,7 +1179,7 @@ def assert_add_equals(val, inc, result):
BlockPlacement(slice(2, None, -1)).add(-1)
-class DummyElement(object):
+class DummyElement:
def __init__(self, value, dtype):
self.value = value
self.dtype = np.dtype(dtype)
@@ -1204,7 +1204,7 @@ def any(self, axis=None):
return bool(self.value)
-class TestCanHoldElement(object):
+class TestCanHoldElement:
@pytest.mark.parametrize('value, dtype', [
(1, 'i8'),
(1.0, 'f8'),
diff --git a/pandas/tests/io/formats/test_console.py b/pandas/tests/io/formats/test_console.py
index 450656efa389d..809f448864c08 100644
--- a/pandas/tests/io/formats/test_console.py
+++ b/pandas/tests/io/formats/test_console.py
@@ -3,7 +3,7 @@
from pandas._config import detect_console_encoding
-class MockEncoding(object): # TODO(py27): replace with mock
+class MockEncoding: # TODO(py27): replace with mock
"""
Used to add a side effect when accessing the 'encoding' property. If the
side effect is a str in nature, the value will be returned. Otherwise, the
diff --git a/pandas/tests/io/formats/test_eng_formatting.py b/pandas/tests/io/formats/test_eng_formatting.py
index 982212b46dacc..fc9886bec766f 100644
--- a/pandas/tests/io/formats/test_eng_formatting.py
+++ b/pandas/tests/io/formats/test_eng_formatting.py
@@ -7,7 +7,7 @@
import pandas.io.formats.format as fmt
-class TestEngFormatter(object):
+class TestEngFormatter:
def test_eng_float_formatter(self):
df = DataFrame({'A': [1.41, 141., 14100, 1410000.]})
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 100b75e55600f..d75f80961c0ff 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -103,7 +103,7 @@ def has_expanded_repr(df):
return False
-class TestDataFrameFormatting(object):
+class TestDataFrameFormatting:
def setup_method(self, method):
self.warn_filters = warnings.filters
@@ -1719,7 +1719,7 @@ def test_pprint_pathological_object(self):
If the test fails, it at least won't hang.
"""
- class A(object):
+ class A:
def __getitem__(self, key):
return 3 # obviously simplified
@@ -1772,7 +1772,7 @@ def gen_series_formatting():
return test_sers
-class TestSeriesFormatting(object):
+class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
@@ -2355,7 +2355,7 @@ def _three_digit_exp():
return '{x:.4g}'.format(x=1.7e8) == '1.7e+008'
-class TestFloatArrayFormatter(object):
+class TestFloatArrayFormatter:
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
@@ -2441,7 +2441,7 @@ def test_too_long(self):
assert str(df) == ' x\n0 1.2346e+04\n1 2.0000e+06'
-class TestRepr_timedelta64(object):
+class TestRepr_timedelta64:
def test_none(self):
delta_1d = pd.to_timedelta(1, unit='D')
@@ -2507,7 +2507,7 @@ def test_all(self):
assert drepr(-delta_1d + delta_1ns) == "-1 days +00:00:00.000000001"
-class TestTimedelta64Formatter(object):
+class TestTimedelta64Formatter:
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
@@ -2553,7 +2553,7 @@ def test_zero(self):
assert result[0].strip() == "'0 days'"
-class TestDatetime64Formatter(object):
+class TestDatetime64Formatter:
def test_mixed(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
@@ -2634,7 +2634,7 @@ def format_func(x):
assert result == ['10:10', '12:12']
-class TestNaTFormatting(object):
+class TestNaTFormatting:
def test_repr(self):
assert repr(pd.NaT) == "NaT"
@@ -2643,7 +2643,7 @@ def test_str(self):
assert str(pd.NaT) == "NaT"
-class TestDatetimeIndexFormat(object):
+class TestDatetimeIndexFormat:
def test_datetime(self):
formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
@@ -2670,7 +2670,7 @@ def test_date_explicit_date_format(self):
assert formatted[1] == "UT"
-class TestDatetimeIndexUnicode(object):
+class TestDatetimeIndexUnicode:
def test_dates(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)
@@ -2685,7 +2685,7 @@ def test_mixed(self):
assert "'2014-01-01 00:00:00']" in text
-class TestStringRepTimestamp(object):
+class TestStringRepTimestamp:
def test_no_tz(self):
dt_date = datetime(2013, 1, 2)
diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py
index 5ec9114675fdb..5a36175369ae5 100644
--- a/pandas/tests/io/formats/test_printing.py
+++ b/pandas/tests/io/formats/test_printing.py
@@ -33,7 +33,7 @@ def test_repr_binary_type():
assert res == b
-class TestFormattBase(object):
+class TestFormattBase:
def test_adjoin(self):
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
@@ -121,7 +121,7 @@ def test_ambiguous_width(self):
assert adjoined == expected
-class TestTableSchemaRepr(object):
+class TestTableSchemaRepr:
@classmethod
def setup_class(cls):
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index 407c786725f13..dce3bb3b420d4 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -15,7 +15,7 @@
from pandas.io.formats.style import Styler, _get_level_lengths # noqa # isort:skip
-class TestStyler(object):
+class TestStyler:
def setup_method(self, method):
np.random.seed(24)
@@ -1219,7 +1219,7 @@ def f(a, b, styler):
@td.skip_if_no_mpl
-class TestStylerMatplotlibDep(object):
+class TestStylerMatplotlibDep:
def test_background_gradient(self):
df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index c0b41691a67ef..07a8ad23430e2 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -11,7 +11,7 @@
from pandas.util import testing as tm
-class TestToCSV(object):
+class TestToCSV:
@pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5),
reason=("Python csv library bug "
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index a2b65dab9a0a2..5a6511fbd20ee 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -13,7 +13,7 @@ def frame():
return DataFrame(tm.getSeriesData())
-class TestToLatex(object):
+class TestToLatex:
def test_to_latex_filename(self, frame):
with tm.ensure_clean('test.tex') as path:
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py
index 941ac1943375d..4cc62d3db124f 100644
--- a/pandas/tests/io/json/test_json_table_schema.py
+++ b/pandas/tests/io/json/test_json_table_schema.py
@@ -17,7 +17,7 @@
convert_pandas_type_to_json_field, set_default_names)
-class TestBuildSchema(object):
+class TestBuildSchema:
def setup_method(self, method):
self.df = DataFrame(
@@ -85,7 +85,7 @@ def test_multiindex(self):
assert result == expected
-class TestTableSchemaType(object):
+class TestTableSchemaType:
@pytest.mark.parametrize('int_type', [
np.int, np.int16, np.int32, np.int64])
@@ -172,7 +172,7 @@ def test_as_json_table_type_categorical_dtypes(self):
assert as_json_table_type(CategoricalDtype()) == 'any'
-class TestTableOrient(object):
+class TestTableOrient:
def setup_method(self, method):
self.df = DataFrame(
@@ -491,7 +491,7 @@ def test_mi_falsey_name(self):
assert result == ['level_0', 'level_1', 0, 1, 2, 3]
-class TestTableOrientReader(object):
+class TestTableOrientReader:
@pytest.mark.parametrize("index_nm", [
None,
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index 6c4bbffe2507d..a7407d843c6c9 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -85,7 +85,7 @@ def missing_metadata():
]
-class TestJSONNormalize(object):
+class TestJSONNormalize:
def test_simple_records(self):
recs = [{'a': 1, 'b': 2, 'c': 3},
@@ -295,7 +295,7 @@ def test_missing_field(self, author_missing_data):
tm.assert_frame_equal(result, expected)
-class TestNestedToRecord(object):
+class TestNestedToRecord:
def test_flat_stays_flat(self):
recs = [dict(flat1=1, flat2=2),
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 0b1b7879910e4..b222d679a6012 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -37,7 +37,7 @@
_mixed_frame = _frame.copy()
-class TestPandasContainer(object):
+class TestPandasContainer:
@pytest.fixture(scope="function", autouse=True)
def setup(self, datapath):
@@ -535,7 +535,7 @@ def test_blocks_compat_GH9037(self):
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
- class BinaryThing(object):
+ class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index 916d9ce63f4ee..cd08746c0e8ef 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -58,7 +58,7 @@ def numpy(request):
return request.param
-class TestUltraJSONTests(object):
+class TestUltraJSONTests:
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
@@ -440,11 +440,11 @@ def test_decode_from_unicode(self):
def test_encode_recursion_max(self):
# 8 is the max recursion depth
- class O2(object):
+ class O2:
member = 0
pass
- class O1(object):
+ class O1:
member = 0
pass
@@ -568,7 +568,7 @@ def test_dump_to_file(self):
assert "[1,2,3]" == f.getvalue()
def test_dump_to_file_like(self):
- class FileLike(object):
+ class FileLike:
def __init__(self):
self.bytes = ''
@@ -596,7 +596,7 @@ def test_load_file(self):
ujson.load(f, numpy=True))
def test_load_file_like(self):
- class FileLike(object):
+ class FileLike:
def read(self):
try:
@@ -627,7 +627,7 @@ def test_encode_numeric_overflow(self):
ujson.encode(12839128391289382193812939)
def test_encode_numeric_overflow_nested(self):
- class Nested(object):
+ class Nested:
x = 12839128391289382193812939
for _ in range(0, 100):
@@ -662,7 +662,7 @@ def test_decode_big_escape(self):
def test_to_dict(self):
d = {"key": 31337}
- class DictTest(object):
+ class DictTest:
def toDict(self):
return d
@@ -674,7 +674,7 @@ def toDict(self):
def test_default_handler(self):
- class _TestObject(object):
+ class _TestObject:
def __init__(self, val):
self.val = val
@@ -722,7 +722,7 @@ def my_obj_handler(_):
ujson.decode(ujson.encode(obj_list, default_handler=str)))
-class TestNumpyJSONTests(object):
+class TestNumpyJSONTests:
@pytest.mark.parametrize("bool_input", [True, False])
def test_bool(self, bool_input):
@@ -885,7 +885,7 @@ def test_array_numpy_labelled(self):
assert (np.array(["a", "b"]) == output[2]).all()
-class TestPandasJSONTests(object):
+class TestPandasJSONTests:
def test_dataframe(self, orient, numpy):
if orient == "records" and numpy:
diff --git a/pandas/tests/io/msgpack/test_except.py b/pandas/tests/io/msgpack/test_except.py
index cd894109e989f..0eeda3389a935 100644
--- a/pandas/tests/io/msgpack/test_except.py
+++ b/pandas/tests/io/msgpack/test_except.py
@@ -11,7 +11,7 @@ class DummyException(Exception):
pass
-class TestExceptions(object):
+class TestExceptions:
def test_raise_on_find_unsupported_value(self):
msg = "can\'t serialize datetime"
diff --git a/pandas/tests/io/msgpack/test_limits.py b/pandas/tests/io/msgpack/test_limits.py
index 9d2e2ef33792a..d90a9adfa5c87 100644
--- a/pandas/tests/io/msgpack/test_limits.py
+++ b/pandas/tests/io/msgpack/test_limits.py
@@ -4,7 +4,7 @@
from pandas.io.msgpack import ExtType, Packer, Unpacker, packb, unpackb
-class TestLimits(object):
+class TestLimits:
def test_integer(self):
x = -(2 ** 63)
diff --git a/pandas/tests/io/msgpack/test_obj.py b/pandas/tests/io/msgpack/test_obj.py
index 471212f1bfe32..342c00f49ebff 100644
--- a/pandas/tests/io/msgpack/test_obj.py
+++ b/pandas/tests/io/msgpack/test_obj.py
@@ -9,7 +9,7 @@ class DecodeError(Exception):
pass
-class TestObj(object):
+class TestObj:
def _arr_to_str(self, arr):
return ''.join(str(c) for c in arr)
diff --git a/pandas/tests/io/msgpack/test_pack.py b/pandas/tests/io/msgpack/test_pack.py
index 18a370084ce8b..5f15797d84939 100644
--- a/pandas/tests/io/msgpack/test_pack.py
+++ b/pandas/tests/io/msgpack/test_pack.py
@@ -8,7 +8,7 @@
from pandas.io.msgpack import Packer, Unpacker, packb, unpackb
-class TestPack(object):
+class TestPack:
def check(self, data, use_list=False):
re = unpackb(packb(data), use_list=use_list)
diff --git a/pandas/tests/io/msgpack/test_sequnpack.py b/pandas/tests/io/msgpack/test_sequnpack.py
index 4a767c52d69ce..ea1e5035c7834 100644
--- a/pandas/tests/io/msgpack/test_sequnpack.py
+++ b/pandas/tests/io/msgpack/test_sequnpack.py
@@ -6,7 +6,7 @@
from pandas.io.msgpack import BufferFull, OutOfData, Unpacker
-class TestPack(object):
+class TestPack:
def test_partial_data(self):
unpacker = Unpacker()
diff --git a/pandas/tests/io/msgpack/test_unpack.py b/pandas/tests/io/msgpack/test_unpack.py
index 356156296c067..581f831f8f187 100644
--- a/pandas/tests/io/msgpack/test_unpack.py
+++ b/pandas/tests/io/msgpack/test_unpack.py
@@ -6,7 +6,7 @@
from pandas.io.msgpack import ExtType, OutOfData, Unpacker, packb
-class TestUnpack(object):
+class TestUnpack:
def test_unpack_array_header_from_file(self):
f = BytesIO(packb([1, 2, 3, 4]))
diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py
index feb6c36b5178f..8e35b58b90c48 100644
--- a/pandas/tests/io/parser/conftest.py
+++ b/pandas/tests/io/parser/conftest.py
@@ -5,7 +5,7 @@
from pandas import read_csv, read_table
-class BaseParser(object):
+class BaseParser:
engine = None
low_memory = True
float_precision_choices = []
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index 3034d54c52c45..a8349f9ef9107 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -1792,7 +1792,7 @@ def test_file_handles_with_open(all_parsers, csv1):
def test_invalid_file_buffer_class(all_parsers):
# see gh-15337
- class InvalidBuffer(object):
+ class InvalidBuffer:
pass
parser = all_parsers
diff --git a/pandas/tests/io/parser/test_dialect.py b/pandas/tests/io/parser/test_dialect.py
index 4665fbaf3414a..98cb9343aab98 100644
--- a/pandas/tests/io/parser/test_dialect.py
+++ b/pandas/tests/io/parser/test_dialect.py
@@ -65,7 +65,7 @@ def test_dialect_str(all_parsers):
def test_invalid_dialect(all_parsers):
- class InvalidDialect(object):
+ class InvalidDialect:
pass
data = "a\n1"
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index c8cace6118ad8..7c6a23701861f 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -54,7 +54,7 @@ def tips_df(datapath):
@pytest.mark.usefixtures("s3_resource")
@td.skip_if_not_us_locale()
-class TestS3(object):
+class TestS3:
def test_parse_public_s3_bucket(self, tips_df):
pytest.importorskip('s3fs')
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index 7f827808b6aae..a920814d0ee18 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -21,7 +21,7 @@
from pandas.io.parsers import TextFileReader, read_csv
-class TestTextReader(object):
+class TestTextReader:
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py
index 21949586a37ef..390604a958ed9 100644
--- a/pandas/tests/io/parser/test_unsupported.py
+++ b/pandas/tests/io/parser/test_unsupported.py
@@ -25,7 +25,7 @@ def python_engine(request):
return request.param
-class TestUnsupportedFeatures(object):
+class TestUnsupportedFeatures:
def test_mangle_dupe_cols_false(self):
# see gh-12935
@@ -109,7 +109,7 @@ def test_python_engine(self, python_engine):
def test_python_engine_file_no_next(self, python_engine):
# see gh-16530
- class NoNextBuffer(object):
+ class NoNextBuffer:
def __init__(self, csv_data):
self.data = csv_data
@@ -126,7 +126,7 @@ def read(self):
read_csv(NoNextBuffer(data), engine=python_engine)
-class TestDeprecatedFeatures(object):
+class TestDeprecatedFeatures:
@pytest.mark.parametrize("engine", ["c", "python"])
@pytest.mark.parametrize("kwargs", [{"tupleize_cols": True},
diff --git a/pandas/tests/io/sas/test_sas.py b/pandas/tests/io/sas/test_sas.py
index 0e0abe0dae7d1..134aa810db5be 100644
--- a/pandas/tests/io/sas/test_sas.py
+++ b/pandas/tests/io/sas/test_sas.py
@@ -6,7 +6,7 @@
import pandas.util.testing as tm
-class TestSas(object):
+class TestSas:
def test_sas_buffer_format(self):
# see gh-14947
diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py
index cbd36bb0abeda..2c8d1281f2c34 100644
--- a/pandas/tests/io/sas/test_sas7bdat.py
+++ b/pandas/tests/io/sas/test_sas7bdat.py
@@ -13,7 +13,7 @@
# https://github.com/cython/cython/issues/1720
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
-class TestSAS7BDAT(object):
+class TestSAS7BDAT:
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
diff --git a/pandas/tests/io/sas/test_xport.py b/pandas/tests/io/sas/test_xport.py
index 1b086daf51c41..9024216fb60fa 100644
--- a/pandas/tests/io/sas/test_xport.py
+++ b/pandas/tests/io/sas/test_xport.py
@@ -20,7 +20,7 @@ def numeric_as_float(data):
data[v] = data[v].astype(np.float64)
-class TestXport(object):
+class TestXport:
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py
index d9168da6a6a40..bc7f1e1fae33b 100644
--- a/pandas/tests/io/test_clipboard.py
+++ b/pandas/tests/io/test_clipboard.py
@@ -120,7 +120,7 @@ def test_mock_clipboard(mock_clipboard):
@pytest.mark.skipif(not _DEPS_INSTALLED,
reason="clipboard primitives not installed")
@pytest.mark.usefixtures("mock_clipboard")
-class TestClipboard(object):
+class TestClipboard:
def check_round_trip_frame(self, data, excel=None, sep=None,
encoding=None):
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 21479c57fb65a..0ea87d9d961f2 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -16,7 +16,7 @@
import pandas.io.common as icom
-class CustomFSPath(object):
+class CustomFSPath:
"""For testing fspath on unknown objects"""
def __init__(self, path):
self.path = path
@@ -45,7 +45,7 @@ def __fspath__(self):
# https://github.com/cython/cython/issues/1720
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
-class TestCommonIOCapabilities(object):
+class TestCommonIOCapabilities:
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
@@ -300,7 +300,7 @@ def mmap_file(datapath):
return datapath('io', 'data', 'test_mmap.csv')
-class TestMMapWrapper(object):
+class TestMMapWrapper:
def test_constructor_bad_file(self, mmap_file):
non_file = StringIO('I am not a file')
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index b06da91f72e28..e7110c8d32236 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -50,7 +50,7 @@ def ignore_xlrd_time_clock_warning():
@td.skip_if_no('xlrd', '1.0.0')
-class SharedItems(object):
+class SharedItems:
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
@@ -2327,7 +2327,7 @@ def test_write_append_mode_raises(self, merge_cells, ext, engine):
ExcelWriter(f, engine=engine, mode='a')
-class TestExcelWriterEngineTests(object):
+class TestExcelWriterEngineTests:
@pytest.mark.parametrize('klass,ext', [
pytest.param(_XlsxWriter, '.xlsx', marks=pytest.mark.skipif(
@@ -2543,7 +2543,7 @@ def custom_converter(css):
@td.skip_if_no('openpyxl')
@pytest.mark.skipif(not PY36, reason='requires fspath')
-class TestFSPath(object):
+class TestFSPath:
def test_excelfile_fspath(self):
with tm.ensure_clean('foo.xlsx') as path:
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index d170e4c43feb3..805ce67e76e28 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -17,7 +17,7 @@
@pytest.mark.single
-class TestFeather(object):
+class TestFeather:
def check_error_on_write(self, df, exc):
# check that we are raising the exception
diff --git a/pandas/tests/io/test_gbq.py b/pandas/tests/io/test_gbq.py
index 605108f875cb9..87ffc94f7d046 100644
--- a/pandas/tests/io/test_gbq.py
+++ b/pandas/tests/io/test_gbq.py
@@ -103,7 +103,7 @@ def mock_read_gbq(*args, **kwargs):
@pytest.mark.single
-class TestToGBQIntegrationWithServiceAccountKeyPath(object):
+class TestToGBQIntegrationWithServiceAccountKeyPath:
@classmethod
def setup_class(cls):
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 42b13745b90b3..3820192f5524f 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -81,7 +81,7 @@ def test_same_ordering(datapath):
not td.safe_import('lxml'), reason='No bs4')),
pytest.param('lxml', marks=pytest.mark.skipif(
not td.safe_import('lxml'), reason='No lxml'))], scope="class")
-class TestReadHtml(object):
+class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
@@ -1109,7 +1109,7 @@ def seekable(self):
def test_parse_failure_rewinds(self):
# Issue #17975
- class MockFile(object):
+ class MockFile:
def __init__(self, data):
self.data = data
self.at_end = False
diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py
index 91dbe5c78acf3..369432dba7b4f 100644
--- a/pandas/tests/io/test_packers.py
+++ b/pandas/tests/io/test_packers.py
@@ -85,7 +85,7 @@ def check_arbitrary(a, b):
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
-class TestPackers(object):
+class TestPackers:
def setup_method(self, method):
self.path = '__%s__.msg' % tm.rands(10)
@@ -148,7 +148,7 @@ def test_iterator_with_string_io(self):
def test_invalid_arg(self):
# GH10369
- class A(object):
+ class A:
def __init__(self):
self.read = 0
@@ -841,7 +841,7 @@ def legacy_packer(request, datapath):
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
-class TestMsgpack(object):
+class TestMsgpack:
"""
How to add msgpack tests:
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index b8e22de8905b1..afdd83ba9bb8c 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -220,7 +220,7 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
tm.assert_frame_equal(result, df[['a', 'd']])
-class Base(object):
+class Base:
def check_error_on_write(self, df, engine, exc):
# check that we are raising the exception on writing
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 868c8ca5e0842..6acf54ab73b2d 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -289,7 +289,7 @@ def get_random_path():
return '__%s__.pickle' % tm.rands(10)
-class TestCompression(object):
+class TestCompression:
_compression_to_extension = {
None: ".none",
@@ -427,7 +427,7 @@ def test_read_infer(self, ext, get_random_path):
# test pickle compression
# ---------------------
-class TestProtocol(object):
+class TestProtocol:
@pytest.mark.parametrize('protocol', [-1, 0, 1, 2])
def test_read(self, protocol, get_random_path):
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index ed070ce549081..df18518cc701a 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -129,7 +129,7 @@ def _maybe_remove(store, key):
pass
-class Base(object):
+class Base:
@classmethod
def setup_class(cls):
diff --git a/pandas/tests/io/test_s3.py b/pandas/tests/io/test_s3.py
index d347e08d0500f..23075db2b38ce 100644
--- a/pandas/tests/io/test_s3.py
+++ b/pandas/tests/io/test_s3.py
@@ -7,7 +7,7 @@
from pandas.io.common import is_s3_url
-class TestS3URL(object):
+class TestS3URL:
def test_is_s3_url(self):
assert is_s3_url("s3://pandas/somethingelse.com")
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 637697aa2ffcb..646b424b7b6cd 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -177,7 +177,7 @@
}
-class MixInBase(object):
+class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
@@ -239,7 +239,7 @@ def _close_conn(self):
pass
-class PandasSQLTest(object):
+class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
@@ -1034,7 +1034,7 @@ def test_query_by_select_obj(self):
assert all_names == {'Iris-setosa'}
-class _EngineToConnMixin(object):
+class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@@ -1734,7 +1734,7 @@ def test_transactions(self):
"Nested transactions rollbacks don't work with Pandas")
-class _TestSQLiteAlchemy(object):
+class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
@@ -1782,7 +1782,7 @@ def test_bigint_warning(self):
assert len(w) == 0
-class _TestMySQLAlchemy(object):
+class _TestMySQLAlchemy:
"""
Test the sqlalchemy backend against an MySQL database.
@@ -1849,7 +1849,7 @@ def test_read_procedure(self):
tm.assert_frame_equal(df, res2)
-class _TestPostgreSQLAlchemy(object):
+class _TestPostgreSQLAlchemy:
"""
Test the sqlalchemy backend against an PostgreSQL database.
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index ff8763ef3f9db..c62622b931f38 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -36,7 +36,7 @@ def parsed_114(dirpath):
return parsed_114
-class TestStata(object):
+class TestStata:
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 30736b11817c0..2cd80c3bd944b 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -27,7 +27,7 @@
@td.skip_if_no_mpl
-class TestPlotBase(object):
+class TestPlotBase:
def setup_method(self, method):
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index 7143ffff9cae6..39cd48ff35f96 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -23,7 +23,7 @@ def test_timtetonum_accepts_unicode():
assert (converter.time2num("00:01") == converter.time2num("00:01"))
-class TestRegistration(object):
+class TestRegistration:
def test_register_by_default(self):
# Run in subprocess to ensure a clean state
@@ -143,7 +143,7 @@ def test_old_import_warns(self):
str(w[0].message))
-class TestDateTimeConverter(object):
+class TestDateTimeConverter:
def setup_method(self, method):
self.dtc = converter.DatetimeConverter()
@@ -283,12 +283,12 @@ def test_convert_nested(self):
assert (np.array(result) == expected).all()
-class TestPeriodConverter(object):
+class TestPeriodConverter:
def setup_method(self, method):
self.pc = converter.PeriodConverter()
- class Axis(object):
+ class Axis:
pass
self.axis = Axis()
diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py
index d792ee4ec94ad..eae940e8ee0ee 100644
--- a/pandas/tests/reductions/test_reductions.py
+++ b/pandas/tests/reductions/test_reductions.py
@@ -35,7 +35,7 @@ def get_objs():
objs = get_objs()
-class TestReductions(object):
+class TestReductions:
@pytest.mark.parametrize('opname', ['max', 'min'])
@pytest.mark.parametrize('obj', objs)
@@ -152,7 +152,7 @@ def test_same_tz_min_max_axis_1(self, op, expected_col):
tm.assert_series_equal(result, expected)
-class TestIndexReductions(object):
+class TestIndexReductions:
# Note: the name TestIndexReductions indicates these tests
# were moved from a Index-specific test file, _not_ that these tests are
# intended long-term to be Index-specific
@@ -414,7 +414,7 @@ def test_min_max_categorical(self):
assert ci.max() == 'b'
-class TestSeriesReductions(object):
+class TestSeriesReductions:
# Note: the name TestSeriesReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
@@ -864,7 +864,7 @@ def test_idxminmax_with_inf(self):
np.isnan(s.idxmax(skipna=False))
-class TestDatetime64SeriesReductions(object):
+class TestDatetime64SeriesReductions:
# Note: the name TestDatetime64SeriesReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
@@ -921,7 +921,7 @@ def test_min_max_series(self):
assert result == exp
-class TestCategoricalSeriesReductions(object):
+class TestCategoricalSeriesReductions:
# Note: the name TestCategoricalSeriesReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
@@ -984,7 +984,7 @@ def test_min_max_numeric_only(self):
assert _max == "a"
-class TestSeriesMode(object):
+class TestSeriesMode:
# Note: the name TestSeriesMode indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
diff --git a/pandas/tests/reductions/test_stat_reductions.py b/pandas/tests/reductions/test_stat_reductions.py
index e1d48ee82122c..43aff41df68a5 100644
--- a/pandas/tests/reductions/test_stat_reductions.py
+++ b/pandas/tests/reductions/test_stat_reductions.py
@@ -15,7 +15,7 @@
import pandas.util.testing as tm
-class TestSeriesStatReductions(object):
+class TestSeriesStatReductions:
# Note: the name TestSeriesStatReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 51a5c992e894b..b5cc28e07fca6 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -210,7 +210,7 @@ def test_resample_how_callables():
def fn(x, a=1):
return str(type(x))
- class FnClass(object):
+ class FnClass:
def __call__(self, x):
return str(type(x))
diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py
index a19d23fd0ca62..a6736eb89eb5b 100644
--- a/pandas/tests/resample/test_period_index.py
+++ b/pandas/tests/resample/test_period_index.py
@@ -32,7 +32,7 @@ def _series_name():
return 'pi'
-class TestPeriodIndex(object):
+class TestPeriodIndex:
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index b326d20f23f9f..6703a0e3355d1 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -14,7 +14,7 @@
a_ = np.array
-class TestJoin(object):
+class TestJoin:
def setup_method(self, method):
# aggregate multiple columns
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 59910f8e0b79f..b4a58628faa4d 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -85,7 +85,7 @@ def series_of_dtype_all_na(request):
return request.param
-class TestMerge(object):
+class TestMerge:
def setup_method(self, method):
# aggregate multiple columns
@@ -1080,7 +1080,7 @@ def _check_merge(x, y):
assert_frame_equal(result, expected, check_names=False)
-class TestMergeDtypes(object):
+class TestMergeDtypes:
@pytest.mark.parametrize('right_vals', [
['foo', 'bar'],
@@ -1280,7 +1280,7 @@ def right():
'Z': [1, 2]})
-class TestMergeCategorical(object):
+class TestMergeCategorical:
def test_identical(self, left):
# merging on the same, should preserve dtypes
@@ -1513,7 +1513,7 @@ def right_df():
return DataFrame({'b': [300, 100, 200]}, index=[3, 1, 2])
-class TestMergeOnIndexes(object):
+class TestMergeOnIndexes:
@pytest.mark.parametrize(
"how, sort, expected",
diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py
index 1d1d7d48adaab..990892f3ccda3 100644
--- a/pandas/tests/reshape/merge/test_merge_asof.py
+++ b/pandas/tests/reshape/merge/test_merge_asof.py
@@ -8,7 +8,7 @@
from pandas.util.testing import assert_frame_equal
-class TestAsOfMerge(object):
+class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath('reshape', 'merge', 'data', name)
diff --git a/pandas/tests/reshape/merge/test_merge_ordered.py b/pandas/tests/reshape/merge/test_merge_ordered.py
index 414f46cdb296c..da8ac0b470f77 100644
--- a/pandas/tests/reshape/merge/test_merge_ordered.py
+++ b/pandas/tests/reshape/merge/test_merge_ordered.py
@@ -6,7 +6,7 @@
from pandas.util.testing import assert_frame_equal
-class TestMergeOrdered(object):
+class TestMergeOrdered:
def setup_method(self, method):
self.left = DataFrame({'key': ['a', 'c', 'e'],
diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py
index ed51b875d415d..0bfc8ebbd2871 100644
--- a/pandas/tests/reshape/merge/test_multi.py
+++ b/pandas/tests/reshape/merge/test_multi.py
@@ -76,7 +76,7 @@ def idx_cols_multi():
return ['Origin', 'Destination', 'Period', 'TripPurp', 'LinkType']
-class TestMergeMulti(object):
+class TestMergeMulti:
def setup_method(self):
self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
@@ -581,7 +581,7 @@ def test_join_multi_levels2(self):
tm.assert_frame_equal(result, expected)
-class TestJoinMultiMulti(object):
+class TestJoinMultiMulti:
def test_join_multi_multi(self, left_multi, right_multi, join_type,
on_cols_multi, idx_cols_multi):
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index c065900975869..3d9f3da75306a 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -39,7 +39,7 @@ def sort_with_none(request):
return request.param
-class ConcatenateBase(object):
+class ConcatenateBase:
def setup_method(self, method):
self.frame = DataFrame(tm.getSeriesData())
@@ -1728,7 +1728,7 @@ def test_concat_iterables(self):
assert_frame_equal(
concat(deque((df1, df2)), ignore_index=True), expected)
- class CustomIterator1(object):
+ class CustomIterator1:
def __len__(self):
return 2
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index bc1d810238688..56258b0f0f789 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -8,7 +8,7 @@
import pandas.util.testing as tm
-class TestMelt(object):
+class TestMelt:
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
@@ -281,7 +281,7 @@ def test_melt_missing_columns_raises(self):
multi.melt(['A'], ['F'], col_level=0)
-class TestLreshape(object):
+class TestLreshape:
def test_pairs(self):
data = {'birthdt': ['08jan2009', '20dec2008', '30dec2008', '21dec2008',
@@ -355,7 +355,7 @@ def test_pairs(self):
lreshape(df, spec)
-class TestWideToLong(object):
+class TestWideToLong:
def test_simple(self):
np.random.seed(123)
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 1ee2ebf5de34e..5b757ac156078 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -21,7 +21,7 @@ def dropna(request):
return request.param
-class TestPivotTable(object):
+class TestPivotTable:
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
@@ -1338,7 +1338,7 @@ def test_pivot_table_aggfunc_scalar_dropna(self, dropna):
tm.assert_frame_equal(result, expected)
-class TestCrosstab(object):
+class TestCrosstab:
def setup_method(self, method):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py
index ca083bbde8428..77d80619969f8 100644
--- a/pandas/tests/reshape/test_reshape.py
+++ b/pandas/tests/reshape/test_reshape.py
@@ -14,7 +14,7 @@
from pandas.util.testing import assert_frame_equal
-class TestGetDummies(object):
+class TestGetDummies:
@pytest.fixture
def df(self):
@@ -584,7 +584,7 @@ def test_get_dummies_all_sparse(self):
tm.assert_frame_equal(result, expected)
-class TestCategoricalReshape(object):
+class TestCategoricalReshape:
def test_reshaping_multi_index_categorical(self):
@@ -611,7 +611,7 @@ def test_reshaping_multi_index_categorical(self):
tm.assert_frame_equal(result, expected)
-class TestMakeAxisDummies(object):
+class TestMakeAxisDummies:
def test_preserve_categorical_dtype(self):
# GH13854
diff --git a/pandas/tests/reshape/test_union_categoricals.py b/pandas/tests/reshape/test_union_categoricals.py
index 9b2b8bf9ed49f..75dc2ccc54a83 100644
--- a/pandas/tests/reshape/test_union_categoricals.py
+++ b/pandas/tests/reshape/test_union_categoricals.py
@@ -8,7 +8,7 @@
from pandas.util import testing as tm
-class TestUnionCategoricals(object):
+class TestUnionCategoricals:
def test_union_categorical(self):
# GH 13361
diff --git a/pandas/tests/reshape/test_util.py b/pandas/tests/reshape/test_util.py
index a8d9e7a775442..92a3bb9e29219 100644
--- a/pandas/tests/reshape/test_util.py
+++ b/pandas/tests/reshape/test_util.py
@@ -6,7 +6,7 @@
import pandas.util.testing as tm
-class TestCartesianProduct(object):
+class TestCartesianProduct:
def test_simple(self):
x, y = list('ABC'), [1, 22]
diff --git a/pandas/tests/scalar/interval/test_interval.py b/pandas/tests/scalar/interval/test_interval.py
index 110f0c57b2bba..e19ff82b9b267 100644
--- a/pandas/tests/scalar/interval/test_interval.py
+++ b/pandas/tests/scalar/interval/test_interval.py
@@ -10,7 +10,7 @@ def interval():
return Interval(0, 1)
-class TestInterval(object):
+class TestInterval:
def test_properties(self, interval):
assert interval.closed == 'right'
diff --git a/pandas/tests/scalar/interval/test_ops.py b/pandas/tests/scalar/interval/test_ops.py
index 869ff205c2f51..963fe14d46dcd 100644
--- a/pandas/tests/scalar/interval/test_ops.py
+++ b/pandas/tests/scalar/interval/test_ops.py
@@ -16,7 +16,7 @@ def start_shift(request):
return request.param
-class TestOverlaps(object):
+class TestOverlaps:
def test_overlaps_self(self, start_shift, closed):
start, shift = start_shift
diff --git a/pandas/tests/scalar/period/test_asfreq.py b/pandas/tests/scalar/period/test_asfreq.py
index f46f2da6c076d..c6f649aeba12f 100644
--- a/pandas/tests/scalar/period/test_asfreq.py
+++ b/pandas/tests/scalar/period/test_asfreq.py
@@ -7,7 +7,7 @@
from pandas import Period, offsets
-class TestFreqConversion(object):
+class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize('freq', ['A', 'Q', 'M', 'W', 'B', 'D'])
def test_asfreq_near_zero(self, freq):
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index ffc8de59e4d63..2a765086af403 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -18,7 +18,7 @@
import pandas.util.testing as tm
-class TestPeriodConstruction(object):
+class TestPeriodConstruction:
def test_construction(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
@@ -472,7 +472,7 @@ def test_period_cons_combined(self):
Period('2011-01', freq='1D1W')
-class TestPeriodMethods(object):
+class TestPeriodMethods:
def test_round_trip(self):
p = Period('2000Q1')
new_p = tm.round_trip_pickle(p)
@@ -655,7 +655,7 @@ def test_strftime(self):
assert isinstance(res, str)
-class TestPeriodProperties(object):
+class TestPeriodProperties:
"Test properties such as year, month, weekday, etc...."
@pytest.mark.parametrize('freq', ['A', 'M', 'D', 'H'])
@@ -923,7 +923,7 @@ def test_properties_secondly(self):
minute=0, second=0).days_in_month == 29
-class TestPeriodField(object):
+class TestPeriodField:
def test_get_period_field_array_raises_on_out_of_range(self):
msg = "Buffer dtype mismatch, expected 'int64_t' but got 'double'"
@@ -931,7 +931,7 @@ def test_get_period_field_array_raises_on_out_of_range(self):
libperiod.get_period_field_arr(-1, np.empty(1), 0)
-class TestComparisons(object):
+class TestComparisons:
def setup_method(self, method):
self.january1 = Period('2000-01', 'M')
@@ -1016,7 +1016,7 @@ def test_period_nat_comp(self):
assert not left >= right
-class TestArithmetic(object):
+class TestArithmetic:
def test_sub_delta(self):
left, right = Period('2011', freq='A'), Period('2007', freq='A')
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index b6ad251d598ab..bab41499cb379 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -14,7 +14,7 @@
import pandas.util.testing as tm
-class TestTimedeltaAdditionSubtraction(object):
+class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
@@ -240,7 +240,7 @@ def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
tm.assert_numpy_array_equal(res, exp)
-class TestTimedeltaMultiplicationDivision(object):
+class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index df68d56b8276c..57b3705640202 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -13,7 +13,7 @@
import pandas.util.testing as tm
-class TestTimedeltaArithmetic(object):
+class TestTimedeltaArithmetic:
def test_arithmetic_overflow(self):
with pytest.raises(OverflowError):
@@ -50,7 +50,7 @@ def test_ops_error_str(self):
assert left != right
def test_ops_notimplemented(self):
- class Other(object):
+ class Other:
pass
other = Other()
@@ -76,7 +76,7 @@ def test_unary_ops(self):
assert abs(-td) == Timedelta('10d')
-class TestTimedeltaComparison(object):
+class TestTimedeltaComparison:
def test_compare_tick(self, tick_classes):
cls = tick_classes
@@ -131,7 +131,7 @@ def test_compare_custom_object(self):
Make sure non supported operations on Timedelta returns NonImplemented
and yields to other operand (GH#20829).
"""
- class CustomClass(object):
+ class CustomClass:
def __init__(self, cmp_result=None):
self.cmp_result = cmp_result
@@ -174,7 +174,7 @@ def test_compare_unknown_type(self, val):
t < val
-class TestTimedeltas(object):
+class TestTimedeltas:
@pytest.mark.parametrize("unit, value, expected", [
('us', 9.999, 9999), ('ms', 9.999999, 9999999),
diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py
index 5aff646820862..da058234c4274 100644
--- a/pandas/tests/scalar/timestamp/test_arithmetic.py
+++ b/pandas/tests/scalar/timestamp/test_arithmetic.py
@@ -11,7 +11,7 @@
from pandas.tseries.frequencies import to_offset
-class TestTimestampArithmetic(object):
+class TestTimestampArithmetic:
def test_overflow_offset(self):
# no overflow expected
diff --git a/pandas/tests/scalar/timestamp/test_comparisons.py b/pandas/tests/scalar/timestamp/test_comparisons.py
index 2821c0a578752..319bbf88d1989 100644
--- a/pandas/tests/scalar/timestamp/test_comparisons.py
+++ b/pandas/tests/scalar/timestamp/test_comparisons.py
@@ -8,7 +8,7 @@
from pandas import Timestamp
-class TestTimestampComparison(object):
+class TestTimestampComparison:
def test_comparison_object_array(self):
# GH#15183
ts = Timestamp('2011-01-03 00:00:00-0500', tz='US/Eastern')
@@ -162,7 +162,7 @@ def test_rich_comparison_with_unsupported_type():
# Comparisons with unsupported objects should return NotImplemented
# (it previously raised TypeError, see #24011)
- class Inf(object):
+ class Inf:
def __lt__(self, o):
return False
diff --git a/pandas/tests/scalar/timestamp/test_rendering.py b/pandas/tests/scalar/timestamp/test_rendering.py
index 29b65ee4df745..a0b045cb1f86d 100644
--- a/pandas/tests/scalar/timestamp/test_rendering.py
+++ b/pandas/tests/scalar/timestamp/test_rendering.py
@@ -10,7 +10,7 @@
from pandas import Timestamp
-class TestTimestampRendering(object):
+class TestTimestampRendering:
# dateutil zone change (only matters for repr)
if LooseVersion(dateutil.__version__) >= LooseVersion('2.6.0'):
diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py
index 38dcfefaccbc4..773b4e6f21a19 100644
--- a/pandas/tests/scalar/timestamp/test_timestamp.py
+++ b/pandas/tests/scalar/timestamp/test_timestamp.py
@@ -24,7 +24,7 @@
from pandas.tseries import offsets
-class TestTimestampProperties(object):
+class TestTimestampProperties:
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
@@ -192,7 +192,7 @@ def test_resolution(self):
assert dt.resolution == Timedelta(nanoseconds=1)
-class TestTimestampConstructors(object):
+class TestTimestampConstructors:
def test_constructor(self):
base_str = '2014-07-01 09:00'
@@ -616,7 +616,7 @@ class SubDatetime(datetime):
assert result == expected
-class TestTimestamp(object):
+class TestTimestamp:
def test_tz(self):
tstr = '2014-02-01 09:00'
@@ -787,7 +787,7 @@ def test_tz_conversion_freq(self, tz_naive_fixture):
assert t2.tz_convert(tz='UTC').freq == t2.freq
-class TestTimestampNsOperations(object):
+class TestTimestampNsOperations:
def setup_method(self, method):
self.timestamp = Timestamp(datetime.utcnow())
@@ -874,7 +874,7 @@ def test_nanosecond_timestamp(self):
assert t.nanosecond == 10
-class TestTimestampToJulianDate(object):
+class TestTimestampToJulianDate:
def test_compare_1700(self):
r = Timestamp('1700-06-23').to_julian_date()
@@ -897,7 +897,7 @@ def test_compare_hour13(self):
assert r == 2451769.0416666666666666
-class TestTimestampConversion(object):
+class TestTimestampConversion:
def test_conversion(self):
# GH#9255
ts = Timestamp('2000-01-01')
diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py
index bc67a3e72f8d0..7aa007eedca93 100644
--- a/pandas/tests/scalar/timestamp/test_timezones.py
+++ b/pandas/tests/scalar/timestamp/test_timezones.py
@@ -19,7 +19,7 @@
import pandas.util.testing as tm
-class TestTimestampTZOperations(object):
+class TestTimestampTZOperations:
# --------------------------------------------------------------
# Timestamp.tz_localize
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index 8a174c4ecba61..de1e33fe1ea7e 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -17,7 +17,7 @@
from pandas.tseries.frequencies import to_offset
-class TestTimestampUnaryOps(object):
+class TestTimestampUnaryOps:
# --------------------------------------------------------------
# Timestamp.round
diff --git a/pandas/tests/series/common.py b/pandas/tests/series/common.py
index cacca38b2d608..220bf20c81dc3 100644
--- a/pandas/tests/series/common.py
+++ b/pandas/tests/series/common.py
@@ -6,7 +6,7 @@
_ts = tm.makeTimeSeries()
-class TestData(object):
+class TestData:
@cache_readonly
def ts(self):
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index e01a4c4f1842d..8b7021ede62d1 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -10,7 +10,7 @@
import pandas.util.testing as tm
-class TestSeriesAlterAxes(object):
+class TestSeriesAlterAxes:
def test_setindex(self, string_series):
# wrong type
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index a1ff3a3c9d848..325056b789162 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -21,7 +21,7 @@
assert_series_equal)
-class TestSeriesAnalytics(object):
+class TestSeriesAnalytics:
def test_describe(self):
s = Series([0, 1, 2, 3, 4], name='int_data')
@@ -1211,7 +1211,7 @@ def assert_check_nselect_boundary(vals, dtype, method):
tm.assert_series_equal(result, expected)
-class TestNLargestNSmallest(object):
+class TestNLargestNSmallest:
@pytest.mark.parametrize(
"r", [Series([3., 2, 1, 2, '5'], dtype='object'),
@@ -1331,7 +1331,7 @@ def test_duplicate_keep_all_ties(self):
assert_series_equal(result, expected)
-class TestCategoricalSeriesAnalytics(object):
+class TestCategoricalSeriesAnalytics:
def test_count(self):
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 23e39182dd779..406cb9b0b349c 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -22,7 +22,7 @@
from .common import TestData
-class SharedWithSparse(object):
+class SharedWithSparse:
"""
A collection of tests Series and SparseSeries can share.
@@ -499,7 +499,7 @@ def test_integer_series_size(self):
assert s.size == 9
-class TestCategoricalSeries(object):
+class TestCategoricalSeries:
@pytest.mark.parametrize(
"method",
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index e1bb5b66c080e..db320dca7f15e 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -14,7 +14,7 @@ def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
-class TestSeriesFlexArithmetic(object):
+class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
'ts',
[
@@ -55,7 +55,7 @@ def test_flex_method_equivalence(self, opname, ts):
tm.assert_almost_equal(result, expected)
-class TestSeriesArithmetic(object):
+class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
@@ -78,7 +78,7 @@ def test_add_series_with_period_index(self):
# ------------------------------------------------------------------
# Comparisons
-class TestSeriesFlexComparison(object):
+class TestSeriesFlexComparison:
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
@@ -106,7 +106,7 @@ def test_comparison_flex_basic(self):
getattr(left, op)(right, axis=1)
-class TestSeriesComparison(object):
+class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
diff --git a/pandas/tests/series/test_block_internals.py b/pandas/tests/series/test_block_internals.py
index e74b32181ce0f..eb91ad96a1224 100644
--- a/pandas/tests/series/test_block_internals.py
+++ b/pandas/tests/series/test_block_internals.py
@@ -6,7 +6,7 @@
# structure
-class TestSeriesBlockInternals(object):
+class TestSeriesBlockInternals:
def test_setitem_invalidates_datetime_index_freq(self):
# GH#24096 altering a datetime64tz Series inplace invalidates the
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index 3aa98db171a46..c1189db2c7ced 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -11,7 +11,7 @@
from pandas.util.testing import assert_frame_equal, assert_series_equal
-class TestSeriesCombine(object):
+class TestSeriesCombine:
def test_append(self, datetime_series, string_series, object_series):
appendedSeries = string_series.append(object_series)
@@ -277,7 +277,7 @@ def test_combine_first_dt64(self):
assert_series_equal(rs, xp)
-class TestTimeseries(object):
+class TestTimeseries:
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 460966fa8ae39..2fc8965b0174d 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -24,7 +24,7 @@
from pandas.util.testing import assert_series_equal
-class TestSeriesConstructors(object):
+class TestSeriesConstructors:
@pytest.mark.parametrize('constructor,check_index_type', [
# NOTE: some overlap with test_constructor_empty but that test does not
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index c17039fb11409..d1df75fb15381 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -17,7 +17,7 @@
import pandas.util.testing as tm
-class TestSeriesDtypes(object):
+class TestSeriesDtypes:
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range('20130101', periods=3))
diff --git a/pandas/tests/series/test_duplicates.py b/pandas/tests/series/test_duplicates.py
index a975edacc19c7..b8d6acf2314e0 100644
--- a/pandas/tests/series/test_duplicates.py
+++ b/pandas/tests/series/test_duplicates.py
@@ -75,7 +75,7 @@ def test_is_unique(data, expected):
def test_is_unique_class_ne(capsys):
# GH 20661
- class Foo(object):
+ class Foo:
def __init__(self, val):
self._value = val
diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py
index dfc15146307c9..f7dd40b71e442 100644
--- a/pandas/tests/series/test_internals.py
+++ b/pandas/tests/series/test_internals.py
@@ -11,7 +11,7 @@
from pandas.util.testing import assert_series_equal
-class TestSeriesInternals(object):
+class TestSeriesInternals:
def test_convert_objects(self):
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 9257c7d9977dd..ca1dbc30e0ff0 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -17,7 +17,7 @@
from .common import TestData
-class TestSeriesLogicalOps(object):
+class TestSeriesLogicalOps:
@pytest.mark.parametrize('bool_op', [operator.and_,
operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
@@ -356,7 +356,7 @@ def test_logical_ops_df_compat(self):
assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
-class TestSeriesComparisons(object):
+class TestSeriesComparisons:
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
@@ -566,7 +566,7 @@ def test_compare_series_interval_keyword(self):
assert_series_equal(result, expected)
-class TestSeriesFlexComparisonOps(object):
+class TestSeriesFlexComparisonOps:
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
@@ -747,7 +747,7 @@ def test_divmod(self):
assert_series_equal(result[1], expected[1])
-class TestSeriesUnaryOps(object):
+class TestSeriesUnaryOps:
# __neg__, __pos__, __inv__
def test_neg(self):
diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py
index 7e0feb418e8df..6b0edf670e03e 100644
--- a/pandas/tests/series/test_period.py
+++ b/pandas/tests/series/test_period.py
@@ -7,7 +7,7 @@
import pandas.util.testing as tm
-class TestSeriesPeriod(object):
+class TestSeriesPeriod:
def setup_method(self, method):
self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 4ee1627831824..36e5c8c9c8240 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -198,7 +198,7 @@ def test_index_repr_in_frame_with_nan(self):
assert repr(s) == exp
-class TestCategoricalRepr(object):
+class TestCategoricalRepr:
def test_categorical_repr_unicode(self):
# see gh-21002
diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py
index deb09a8a9dac3..b8f074db52980 100644
--- a/pandas/tests/series/test_subclass.py
+++ b/pandas/tests/series/test_subclass.py
@@ -6,7 +6,7 @@
import pandas.util.testing as tm
-class TestSeriesSubclassing(object):
+class TestSeriesSubclassing:
def test_indexing_sliced(self):
s = tm.SubclassedSeries([1, 2, 3, 4], index=list('abcd'))
@@ -40,7 +40,7 @@ def test_subclass_unstack(self):
tm.assert_frame_equal(res, exp)
-class TestSparseSeriesSubclassing(object):
+class TestSparseSeriesSubclassing:
def test_subclass_sparse_slice(self):
# int64
diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py
index ec644a8e93da2..f47bbe51f4670 100644
--- a/pandas/tests/series/test_timezones.py
+++ b/pandas/tests/series/test_timezones.py
@@ -17,7 +17,7 @@
import pandas.util.testing as tm
-class TestSeriesTimezones(object):
+class TestSeriesTimezones:
# -----------------------------------------------------------------
# Series.tz_localize
def test_series_tz_localize(self):
diff --git a/pandas/tests/series/test_validate.py b/pandas/tests/series/test_validate.py
index 8f7c16f2c3132..cef38d5ce3f23 100644
--- a/pandas/tests/series/test_validate.py
+++ b/pandas/tests/series/test_validate.py
@@ -1,7 +1,7 @@
import pytest
-class TestSeriesValidate(object):
+class TestSeriesValidate:
"""Tests for error handling related to data types of method arguments."""
@pytest.mark.parametrize("func", ["reset_index", "_set_name",
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index 22868030308d7..85654635a7926 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -212,7 +212,7 @@ def test_constructor_from_dense_series(self):
def test_constructor_from_unknown_type(self):
# GH 19393
- class Unknown(object):
+ class Unknown:
pass
with pytest.raises(TypeError,
match=('SparseDataFrame called with unknown type '
@@ -1283,7 +1283,7 @@ def test_default_fill_value_with_no_data(self):
tm.assert_frame_equal(expected, result)
-class TestSparseDataFrameArithmetic(object):
+class TestSparseDataFrameArithmetic:
def test_numeric_op_scalar(self):
df = pd.DataFrame({'A': [nan, nan, 0, 1, ],
@@ -1312,7 +1312,7 @@ def test_comparison_op_scalar(self):
tm.assert_frame_equal(res.to_dense(), df != 0)
-class TestSparseDataFrameAnalytics(object):
+class TestSparseDataFrameAnalytics:
def test_cumsum(self, float_frame):
expected = SparseDataFrame(float_frame.to_dense().cumsum())
diff --git a/pandas/tests/sparse/frame/test_to_csv.py b/pandas/tests/sparse/frame/test_to_csv.py
index ed19872f8a7ef..8b2c1b951fdfe 100644
--- a/pandas/tests/sparse/frame/test_to_csv.py
+++ b/pandas/tests/sparse/frame/test_to_csv.py
@@ -5,7 +5,7 @@
from pandas.util import testing as tm
-class TestSparseDataFrameToCsv(object):
+class TestSparseDataFrameToCsv:
fill_values = [np.nan, 0, None, 1]
@pytest.mark.parametrize('fill_value', fill_values)
diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py
index 35ca5e1ec58fa..88921cf932140 100644
--- a/pandas/tests/sparse/series/test_series.py
+++ b/pandas/tests/sparse/series/test_series.py
@@ -1032,7 +1032,7 @@ def test_memory_usage_deep(self, deep, fill_value):
assert sparse_usage < dense_usage
-class TestSparseHandlingMultiIndexes(object):
+class TestSparseHandlingMultiIndexes:
def setup_method(self, method):
miindex = pd.MultiIndex.from_product(
@@ -1062,7 +1062,7 @@ def test_round_trip_preserve_multiindex_names(self):
@pytest.mark.filterwarnings(
"ignore:the matrix subclass:PendingDeprecationWarning"
)
-class TestSparseSeriesScipyInteraction(object):
+class TestSparseSeriesScipyInteraction:
# Issue 8048: add SparseSeries coo methods
def setup_method(self, method):
@@ -1425,7 +1425,7 @@ def _dense_series_compare(s, f):
tm.assert_series_equal(result.to_dense(), dense_result)
-class TestSparseSeriesAnalytics(object):
+class TestSparseSeriesAnalytics:
def setup_method(self, method):
arr, index = _test_data1()
diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/sparse/test_combine_concat.py
index 51875148daadb..dff5b51d7a967 100644
--- a/pandas/tests/sparse/test_combine_concat.py
+++ b/pandas/tests/sparse/test_combine_concat.py
@@ -9,7 +9,7 @@
import pandas.util.testing as tm
-class TestSparseArrayConcat(object):
+class TestSparseArrayConcat:
@pytest.mark.parametrize('kind', ['integer', 'block'])
def test_basic(self, kind):
a = pd.SparseArray([1, 0, 0, 2], kind=kind)
@@ -35,7 +35,7 @@ def test_uses_first_kind(self, kind):
assert result.kind == kind
-class TestSparseSeriesConcat(object):
+class TestSparseSeriesConcat:
@pytest.mark.parametrize('kind', [
'integer',
@@ -176,7 +176,7 @@ def test_concat_sparse_dense(self, kind):
tm.assert_series_equal(res, exp)
-class TestSparseDataFrameConcat(object):
+class TestSparseDataFrameConcat:
def setup_method(self, method):
diff --git a/pandas/tests/sparse/test_format.py b/pandas/tests/sparse/test_format.py
index 5f44266620f86..38961a96d634b 100644
--- a/pandas/tests/sparse/test_format.py
+++ b/pandas/tests/sparse/test_format.py
@@ -10,7 +10,7 @@
use_32bit_repr = is_platform_windows() or is_platform_32bit()
-class TestSparseSeriesFormatting(object):
+class TestSparseSeriesFormatting:
@property
def dtype_format_for_platform(self):
@@ -106,7 +106,7 @@ def test_sparse_int(self):
assert result == exp
-class TestSparseDataFrameFormatting(object):
+class TestSparseDataFrameFormatting:
def test_sparse_frame(self):
# GH 13110
diff --git a/pandas/tests/sparse/test_groupby.py b/pandas/tests/sparse/test_groupby.py
index d0ff2a02c4046..9695b6a2c8955 100644
--- a/pandas/tests/sparse/test_groupby.py
+++ b/pandas/tests/sparse/test_groupby.py
@@ -6,7 +6,7 @@
import pandas.util.testing as tm
-class TestSparseGroupBy(object):
+class TestSparseGroupBy:
def setup_method(self, method):
self.dense = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
diff --git a/pandas/tests/sparse/test_indexing.py b/pandas/tests/sparse/test_indexing.py
index fb6cae3ad6deb..e388d05fe112d 100644
--- a/pandas/tests/sparse/test_indexing.py
+++ b/pandas/tests/sparse/test_indexing.py
@@ -6,7 +6,7 @@
import pandas.util.testing as tm
-class TestSparseSeriesIndexing(object):
+class TestSparseSeriesIndexing:
def setup_method(self, method):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
@@ -599,7 +599,7 @@ def test_reindex(self):
assert sparse is not res
-class TestSparseDataFrameIndexing(object):
+class TestSparseDataFrameIndexing:
def test_getitem(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
@@ -976,7 +976,7 @@ def test_reindex_fill_value(self):
tm.assert_sp_frame_equal(res, exp)
-class TestMultitype(object):
+class TestMultitype:
def setup_method(self, method):
self.cols = ['string', 'int', 'float', 'object']
diff --git a/pandas/tests/sparse/test_pivot.py b/pandas/tests/sparse/test_pivot.py
index af7de43ec0f8a..98e16259d25d1 100644
--- a/pandas/tests/sparse/test_pivot.py
+++ b/pandas/tests/sparse/test_pivot.py
@@ -4,7 +4,7 @@
import pandas.util.testing as tm
-class TestPivotTable(object):
+class TestPivotTable:
def setup_method(self, method):
self.dense = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index b64786de264cd..6e93aae984821 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -28,7 +28,7 @@
from pandas.util.testing import assert_almost_equal
-class TestMatch(object):
+class TestMatch:
def test_ints(self):
values = np.array([0, 2, 1])
@@ -64,7 +64,7 @@ def test_strings(self):
tm.assert_series_equal(result, expected)
-class TestFactorize(object):
+class TestFactorize:
def test_basic(self):
@@ -341,7 +341,7 @@ def test_factorize_na_sentinel(self, sort, na_sentinel):
tm.assert_numpy_array_equal(uniques, expected_uniques)
-class TestUnique(object):
+class TestUnique:
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
@@ -604,7 +604,7 @@ def test_do_not_mangle_na_values(self, unique_nulls_fixture,
assert a[1] is unique_nulls_fixture2
-class TestIsin(object):
+class TestIsin:
def test_invalid(self):
@@ -720,7 +720,7 @@ def test_same_object_is_in(self):
# the user however could define a custom class
# with similar behavior, then we at least should
# fall back to usual python's behavior: "a in [a] == True"
- class LikeNan(object):
+ class LikeNan:
def __eq__(self):
return False
@@ -804,7 +804,7 @@ def test_different_nans_as_float64(self):
tm.assert_numpy_array_equal(result, expected)
-class TestValueCounts(object):
+class TestValueCounts:
def test_value_counts(self):
np.random.seed(1234)
@@ -993,7 +993,7 @@ def test_value_counts_uint64(self):
tm.assert_series_equal(result, expected)
-class TestDuplicated(object):
+class TestDuplicated:
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
@@ -1160,7 +1160,7 @@ def test_unique_tuples(self, arr, unique):
tm.assert_numpy_array_equal(result, expected)
-class GroupVarTestMixin(object):
+class GroupVarTestMixin:
def test_group_var_generic_1d(self):
prng = RandomState(1234)
@@ -1275,7 +1275,7 @@ class TestGroupVarFloat32(GroupVarTestMixin):
rtol = 1e-2
-class TestHashTable(object):
+class TestHashTable:
def test_lookup_nan(self, writable):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
@@ -1469,7 +1469,7 @@ def test_unique_label_indices():
check_dtype=False)
-class TestRank(object):
+class TestRank:
@td.skip_if_no_scipy
def test_scipy_compat(self):
@@ -1548,7 +1548,7 @@ def test_arrmap():
assert (result.dtype == np.bool_)
-class TestTseriesUtil(object):
+class TestTseriesUtil:
def test_combineFunc(self):
pass
@@ -1758,7 +1758,7 @@ def test_int64_add_overflow():
b_mask=np.array([False, True]))
-class TestMode(object):
+class TestMode:
def test_no_mode(self):
exp = Series([], dtype=np.float64)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 9539519604a24..009fc015dd61c 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -27,7 +27,7 @@
import pandas.util.testing as tm
-class CheckStringMixin(object):
+class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
@@ -42,7 +42,7 @@ def test_tricky_container(self):
bytes(self.unicode_container)
-class CheckImmutable(object):
+class CheckImmutable:
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
@@ -87,9 +87,9 @@ def check_result(self, result, expected, klass=None):
assert result == expected
-class TestPandasDelegate(object):
+class TestPandasDelegate:
- class Delegator(object):
+ class Delegator:
_properties = ['foo']
_methods = ['bar']
@@ -149,7 +149,7 @@ def test_memory_usage(self):
sys.getsizeof(delegate)
-class Ops(object):
+class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
@@ -979,7 +979,7 @@ def test_numpy_transpose(self):
np.transpose(obj, axes=1)
-class TestNoNewAttributesMixin(object):
+class TestNoNewAttributesMixin:
def test_mixin(self):
class T(NoNewAttributesMixin):
@@ -1001,7 +1001,7 @@ class T(NoNewAttributesMixin):
assert not hasattr(t, "b")
-class TestToIterable(object):
+class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 18eb760e31db8..b6de8dd8dcbc9 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -22,7 +22,7 @@ def fn(x):
part1 = partial(fn)
part2 = partial(part1)
- class somecall(object):
+ class somecall:
def __call__(self):
return x # noqa
diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py
index fd0ae85ce634f..a3fa7f6314806 100644
--- a/pandas/tests/test_compat.py
+++ b/pandas/tests/test_compat.py
@@ -8,7 +8,7 @@
from pandas.compat import lmap, lrange, lzip, re_type
-class TestBuiltinIterators(object):
+class TestBuiltinIterators:
@classmethod
def check_results(cls, results, expecteds, lengths):
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index d3b6a237a97a1..27135d0ef14cc 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -47,7 +47,7 @@ def test_error_rename():
pass
-class Foo(object):
+class Foo:
@classmethod
def classmethod(cls):
raise AbstractMethodError(cls, methodtype='classmethod')
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 0c8c33b3db8cf..8d165e221b0d3 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -33,7 +33,7 @@
@pytest.mark.skipif(not expr._USE_NUMEXPR, reason='not using numexpr')
-class TestExpressions(object):
+class TestExpressions:
def setup_method(self, method):
diff --git a/pandas/tests/test_join.py b/pandas/tests/test_join.py
index 5b6656de15731..897b62c447e83 100644
--- a/pandas/tests/test_join.py
+++ b/pandas/tests/test_join.py
@@ -9,7 +9,7 @@
from pandas.util.testing import assert_almost_equal, assert_frame_equal
-class TestIndexer(object):
+class TestIndexer:
def test_outer_join_indexer(self):
typemap = [('int32', _join.outer_join_indexer_int32),
diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py
index c5dcfc89faa67..d29480bf143d8 100644
--- a/pandas/tests/test_lib.py
+++ b/pandas/tests/test_lib.py
@@ -9,7 +9,7 @@
import pandas.util.testing as tm
-class TestMisc(object):
+class TestMisc:
def test_max_len_string_array(self):
@@ -42,7 +42,7 @@ def test_fast_unique_multiple_list_gen_sort(self):
tm.assert_numpy_array_equal(np.array(out), expected)
-class TestIndexing(object):
+class TestIndexing:
def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 799571da8bdc4..189b7e93ec36f 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -23,7 +23,7 @@
'std', 'var', 'sem']
-class Base(object):
+class Base:
def setup_method(self, method):
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 5a163e7819fd1..eae63bf8de98a 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -18,7 +18,7 @@
use_bn = nanops._USE_BOTTLENECK
-class TestnanopsDataFrame(object):
+class TestnanopsDataFrame:
def setup_method(self, method):
np.random.seed(11235)
@@ -713,7 +713,7 @@ def test__bn_ok_dtype(self):
assert not nanops._bn_ok_dtype(self.arr_obj.dtype, 'test')
-class TestEnsureNumeric(object):
+class TestEnsureNumeric:
def test_numeric_values(self):
# Test integer
@@ -761,7 +761,7 @@ def test_non_convertable_values(self):
nanops._ensure_numeric([])
-class TestNanvarFixedValues(object):
+class TestNanvarFixedValues:
# xref GH10242
@@ -874,7 +874,7 @@ def prng(self):
return np.random.RandomState(1234)
-class TestNanskewFixedValues(object):
+class TestNanskewFixedValues:
# xref GH 11974
@@ -924,7 +924,7 @@ def prng(self):
return np.random.RandomState(1234)
-class TestNankurtFixedValues(object):
+class TestNankurtFixedValues:
# xref GH 11974
@@ -974,7 +974,7 @@ def prng(self):
return np.random.RandomState(1234)
-class TestDatetime64NaNOps(object):
+class TestDatetime64NaNOps:
@pytest.mark.parametrize('tz', [None, 'UTC'])
@pytest.mark.xfail(reason="disabled")
# Enabling mean changes the behavior of DataFrame.mean
diff --git a/pandas/tests/test_register_accessor.py b/pandas/tests/test_register_accessor.py
index acc18ed7ad049..e79ec56c819c1 100644
--- a/pandas/tests/test_register_accessor.py
+++ b/pandas/tests/test_register_accessor.py
@@ -20,7 +20,7 @@ def ensure_removed(obj, attr):
obj._accessors.discard(attr)
-class MyAccessor(object):
+class MyAccessor:
def __init__(self, obj):
self.obj = obj
@@ -81,7 +81,7 @@ def test_raises_attribute_error():
with ensure_removed(pd.Series, 'bad'):
@pd.api.extensions.register_series_accessor("bad")
- class Bad(object):
+ class Bad:
def __init__(self, data):
raise AttributeError("whoops")
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index b5e7a5f6abf4c..3c298816ed81a 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -15,7 +15,7 @@
from pandas.util.testing import assert_frame_equal, assert_series_equal
-class TestSorting(object):
+class TestSorting:
@pytest.mark.slow
def test_int64_overflow(self):
@@ -188,7 +188,7 @@ def test_nargsort_datetimearray_warning(self):
nargsort(data)
-class TestMerge(object):
+class TestMerge:
@pytest.mark.slow
def test_int64_overflow_issues(self):
@@ -340,7 +340,7 @@ def testit(label_list, shape):
testit(label_list, shape)
-class TestSafeSort(object):
+class TestSafeSort:
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 22200b72b852d..d3f98ea819498 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -159,7 +159,7 @@ def any_allowed_skipna_inferred_dtype(request):
return inferred_dtype, values
-class TestStringMethods(object):
+class TestStringMethods:
def test_api(self):
diff --git a/pandas/tests/test_take.py b/pandas/tests/test_take.py
index b2e695bdea17d..229ccc1dfcbad 100644
--- a/pandas/tests/test_take.py
+++ b/pandas/tests/test_take.py
@@ -65,7 +65,7 @@ def dtype_fill_out_dtype(request):
return request.param
-class TestTake(object):
+class TestTake:
# Standard incompatible fill error.
fill_error = re.compile("Incompatible type for fill_value")
@@ -422,7 +422,7 @@ def test_take_axis_1(self):
tm.assert_numpy_array_equal(result, expected)
-class TestExtensionTake(object):
+class TestExtensionTake:
# The take method found in pd.api.extensions
def test_bounds_check_large(self):
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 8e2925f52c04d..937b3218eb5c6 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -47,7 +47,7 @@ def win_types_special(request):
return request.param
-class Base(object):
+class Base:
_nan_locs = np.arange(20, 40)
_inf_locs = np.array([])
@@ -846,7 +846,7 @@ def test_numpy_compat(self, method):
#
# further note that we are only checking rolling for fully dtype
# compliance (though both expanding and ewm inherit)
-class Dtype(object):
+class Dtype:
window = 2
funcs = {
@@ -1900,7 +1900,7 @@ def _check_ew(self, name=None, preserve_nan=False):
assert result2.dtype == np.float_
-class TestPairwise(object):
+class TestPairwise:
# GH 7738
df1s = [DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]),
@@ -3274,7 +3274,7 @@ def test_rolling_min_max_numeric_types(self):
assert result.dtypes[0] == np.dtype("f8")
-class TestGrouperGrouping(object):
+class TestGrouperGrouping:
def setup_method(self, method):
self.series = Series(np.arange(10))
@@ -3443,7 +3443,7 @@ def test_expanding_apply(self, raw):
tm.assert_frame_equal(result, expected)
-class TestRollingTS(object):
+class TestRollingTS:
# rolling time-series friendly
# xref GH13327
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index ea13be8601463..c46cc1de7aa97 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -31,7 +31,7 @@
from .common import assert_offset_equal, assert_onOffset
-class WeekDay(object):
+class WeekDay:
# TODO: Remove: This is not used outside of tests
MON = 0
TUE = 1
@@ -58,7 +58,7 @@ def test_to_M8():
#####
-class Base(object):
+class Base:
_offset = None
d = Timestamp(datetime(2008, 1, 2))
@@ -1868,7 +1868,7 @@ def test_pickle_compat_0_14_1(self, datapath):
assert cday == cday0_14_1
-class CustomBusinessMonthBase(object):
+class CustomBusinessMonthBase:
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
@@ -2785,7 +2785,7 @@ def test_Easter():
datetime(2008, 3, 23))
-class TestOffsetNames(object):
+class TestOffsetNames:
def test_get_offset_name(self):
assert BDay().freqstr == 'B'
@@ -2825,7 +2825,7 @@ def test_get_offset_legacy():
get_offset(name)
-class TestOffsetAliases(object):
+class TestOffsetAliases:
def setup_method(self, method):
_offset_map.clear()
@@ -2884,7 +2884,7 @@ def test_freq_offsets():
assert (off.freqstr == 'B-30Min')
-class TestReprNames(object):
+class TestReprNames:
def test_str_for_named_is_name(self):
# look at all the amazing combinations!
@@ -2909,7 +2909,7 @@ def get_utc_offset_hours(ts):
return (o.days * 24 * 3600 + o.seconds) / 3600.0
-class TestDST(object):
+class TestDST:
"""
test DateOffset additions over Daylight Savings Time
"""
diff --git a/pandas/tests/util/test_assert_almost_equal.py b/pandas/tests/util/test_assert_almost_equal.py
index 8c6b401f91114..6a61d6f7c2549 100644
--- a/pandas/tests/util/test_assert_almost_equal.py
+++ b/pandas/tests/util/test_assert_almost_equal.py
@@ -132,7 +132,7 @@ def test_assert_almost_equal_dict_like_object(val):
dict_val = 1
real_dict = dict(a=val)
- class DictLikeObj(object):
+ class DictLikeObj:
def keys(self):
return "a",
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 0b27670685293..ce5da63e9665b 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -253,7 +253,7 @@ def infer_freq(index, warn=True):
return inferer.get_freq()
-class _FrequencyInferer(object):
+class _FrequencyInferer:
"""
Not sure if I can avoid the state machine here
"""
diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index 8e8a90872b2f6..cafe546109b74 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -122,7 +122,7 @@ def after_nearest_workday(dt):
return next_workday(nearest_workday(dt))
-class Holiday(object):
+class Holiday:
"""
Class that defines a holiday with start/end dates and rules
for observance.
@@ -325,7 +325,7 @@ def __new__(cls, clsname, bases, attrs):
@add_metaclass(HolidayCalendarMetaClass)
-class AbstractHolidayCalendar(object):
+class AbstractHolidayCalendar:
"""
Abstract interface to create holidays following certain rules.
"""
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 39684d7bc9eb1..d11946f99dd36 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -410,7 +410,7 @@ def _from_name(cls, suffix=None):
return cls()
-class _CustomMixin(object):
+class _CustomMixin:
"""
Mixin for classes that define and validate calendar, holidays,
and weekdays attributes.
@@ -428,7 +428,7 @@ def __init__(self, weekmask, holidays, calendar):
object.__setattr__(self, "calendar", calendar)
-class BusinessMixin(object):
+class BusinessMixin:
"""
Mixin to business types to provide related functions.
"""
@@ -1414,7 +1414,7 @@ def _from_name(cls, suffix=None):
return cls(weekday=weekday)
-class _WeekOfMonthMixin(object):
+class _WeekOfMonthMixin:
"""
Mixin for methods common to WeekOfMonth and LastWeekOfMonth.
"""
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 679bf086c65b9..ac23fa5d7b0ad 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -218,7 +218,7 @@ def wrapper(*args, **kwargs):
# module http://matplotlib.org/users/license.html
-class Substitution(object):
+class Substitution:
"""
A decorator to take a function's docstring and perform string
substitution on it.
@@ -279,7 +279,7 @@ def from_params(cls, params):
return result
-class Appender(object):
+class Appender:
"""
A function decorator that will append an addendum to the docstring
of the target function.
diff --git a/pandas/util/_depr_module.py b/pandas/util/_depr_module.py
index 2c8feec798c66..714ea1ce8086f 100644
--- a/pandas/util/_depr_module.py
+++ b/pandas/util/_depr_module.py
@@ -7,7 +7,7 @@
import warnings
-class _DeprecatedModule(object):
+class _DeprecatedModule:
""" Class for mocking deprecated modules.
Parameters
diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py
index d6d4792c19ea8..244775e0c1bf0 100644
--- a/pandas/util/_doctools.py
+++ b/pandas/util/_doctools.py
@@ -3,7 +3,7 @@
import pandas as pd
-class TablePlotter(object):
+class TablePlotter:
"""
Layout some DataFrames in vertical/horizontal layout for explanation.
Used in merging.rst
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 9659cb33686d0..6d7d1639ec0d5 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -2263,7 +2263,7 @@ def assert_raises_regex(_exception, _regexp, _callable=None,
return manager
-class _AssertRaisesContextmanager(object):
+class _AssertRaisesContextmanager:
"""
Context manager behind `assert_raises_regex`.
"""
@@ -2458,7 +2458,7 @@ class for all warnings. To check that no warning is returned,
)
-class RNGContext(object):
+class RNGContext:
"""
Context manager to set the numpy random number generator speed. Returns
to the original value upon exiting the context manager.
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index 120f8d79819ff..14172a790887d 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -10,7 +10,7 @@
validate_one = validate_docstrings.validate_one
-class GoodDocStrings(object):
+class GoodDocStrings:
"""
Collection of good doc strings.
@@ -253,7 +253,7 @@ def say_hello():
return None
-class BadGenericDocStrings(object):
+class BadGenericDocStrings:
"""Everything here has a bad docstring
"""
@@ -445,7 +445,7 @@ def method_wo_docstrings(self):
pass
-class BadSummaries(object):
+class BadSummaries:
def wrong_line(self):
"""Exists on the wrong line"""
@@ -484,7 +484,7 @@ def two_paragraph_multi_line(self):
"""
-class BadParameters(object):
+class BadParameters:
"""
Everything here has a problem with its Parameters section.
"""
@@ -611,7 +611,7 @@ def list_incorrect_parameter_type(self, kind):
pass
-class BadReturns(object):
+class BadReturns:
def return_not_documented(self):
"""
@@ -694,7 +694,7 @@ def no_period_multi(self):
return "Hello", "World!"
-class BadSeeAlso(object):
+class BadSeeAlso:
def desc_no_period(self):
"""
@@ -732,7 +732,7 @@ def prefix_pandas(self):
pass
-class BadExamples(object):
+class BadExamples:
def unused_import(self):
"""
@@ -770,7 +770,7 @@ def missing_whitespace_after_comma(self):
pass
-class TestValidator(object):
+class TestValidator:
def _import_path(self, klass=None, func=None):
"""
@@ -951,7 +951,7 @@ def test_validate_all_ignore_deprecated(self, monkeypatch):
assert len(result) == 0
-class TestApiItems(object):
+class TestApiItems:
@property
def api_doc(self):
return io.StringIO(textwrap.dedent('''
@@ -1027,7 +1027,7 @@ def test_item_subsection(self, idx, subsection):
assert result[idx][3] == subsection
-class TestDocstringClass(object):
+class TestDocstringClass:
@pytest.mark.parametrize('name, expected_obj',
[('pandas.isnull', pd.isnull),
('pandas.DataFrame', pd.DataFrame),
@@ -1053,7 +1053,7 @@ def test_raises_for_invalid_attribute_name(self, invalid_name):
validate_docstrings.Docstring(invalid_name)
-class TestMainFunction(object):
+class TestMainFunction:
def test_exit_status_for_validate_one(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings, 'validate_one', lambda func_name: {
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 1173b34b2e430..19df1b92448ef 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -223,7 +223,7 @@ def get_api_items(api_doc_fd):
previous_line = line
-class Docstring(object):
+class Docstring:
def __init__(self, name):
self.name = name
obj = self._load_obj(name)
| - [x] xref #25725
Bit of a tedious whopper to review (sorry), but this PR removes all instances of inheriting from the base ``object`` from the code base, as dropping Python2 support means that this particular idiom is no longer needed.
Additionally, as part of the above, I've removed the check in code_checks.sh that classes must inherit, as that is no longer a requirement after dropping Python2. I don't think a different check is possible to do now, but I can change instead of delete the check if anyone thinks that would make sense.
| https://api.github.com/repos/pandas-dev/pandas/pulls/26128 | 2019-04-17T23:48:09Z | 2019-04-18T21:52:15Z | 2019-04-18T21:52:15Z | 2019-04-19T17:13:24Z |
DOC: bunch of docstring formatting fixes (sphinx warnings) | diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 99f0d77e9bd34..a81941da4a05a 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -512,8 +512,8 @@ class NaTType(_NaT):
- 'raise' will raise an AmbiguousTimeError for an ambiguous time
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta,
- default 'raise'
+ nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
+default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
@@ -550,8 +550,8 @@ class NaTType(_NaT):
- 'raise' will raise an AmbiguousTimeError for an ambiguous time
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta,
- default 'raise'
+ nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
+default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
@@ -584,8 +584,8 @@ class NaTType(_NaT):
- 'raise' will raise an AmbiguousTimeError for an ambiguous time
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta,
- default 'raise'
+ nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
+default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
@@ -648,8 +648,8 @@ class NaTType(_NaT):
- 'NaT' will return NaT for an ambiguous time
- 'raise' will raise an AmbiguousTimeError for an ambiguous time
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta,
- default 'raise'
+ nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
+default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index c666178b11512..eb676d2265f9f 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -455,8 +455,8 @@ class Timestamp(_Timestamp):
- 'raise' will raise an AmbiguousTimeError for an ambiguous time
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta,
- default 'raise'
+ nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
+default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
@@ -497,8 +497,8 @@ class Timestamp(_Timestamp):
- 'raise' will raise an AmbiguousTimeError for an ambiguous time
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta,
- default 'raise'
+ nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
+default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
@@ -533,8 +533,8 @@ class Timestamp(_Timestamp):
- 'raise' will raise an AmbiguousTimeError for an ambiguous time
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta,
- default 'raise'
+ nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
+default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
@@ -786,8 +786,8 @@ class Timestamp(_Timestamp):
- 'NaT' will return NaT for an ambiguous time
- 'raise' will raise an AmbiguousTimeError for an ambiguous time
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta,
- default 'raise'
+ nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
+default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index 50a35cea2ba91..26a64c13f6de1 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -53,8 +53,8 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None,
- bool if True, treat all vals as DST. If False, treat them as non-DST
- 'NaT' will return NaT where there are ambiguous times
- nonexistent : {None, "NaT", "shift_forward", "shift_backward", "raise",
- timedelta-like}
+ nonexistent : {None, "NaT", "shift_forward", "shift_backward", "raise", \
+timedelta-like}
How to handle non-existent times when converting wall times to UTC
.. versionadded:: 0.24.0
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 6225dfcbe5c14..15a675a92d279 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -211,8 +211,8 @@ class TimelikeOps(object):
.. versionadded:: 0.24.0
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta,
- default 'raise'
+ nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
+default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 272c6ef4f9267..09a3d53efdd07 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -895,8 +895,8 @@ def tz_localize(self, tz, ambiguous='raise', nonexistent='raise',
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
- nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta,
- default 'raise'
+ nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
+default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index bee8234907aba..48922ee870b8e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6311,7 +6311,7 @@ def _gotitem(self,
@Substitution(see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
- versionadded='.. versionadded:: 0.20.0',
+ versionadded='\n.. versionadded:: 0.20.0\n',
**_shared_doc_kwargs)
@Appender(_shared_docs['aggregate'])
def aggregate(self, func, axis=0, *args, **kwargs):
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index d17f2d0c3b0a9..3519b5c078ee2 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1534,7 +1534,8 @@ def nunique(self, dropna=True):
ham 1 1 2
spam 1 2 1
- # check for rows with the same id but conflicting values
+ Check for rows with the same id but conflicting values:
+
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b382c963f6006..34088e86cf80a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3514,7 +3514,7 @@ def _gotitem(self, key, ndim, subset=None):
@Substitution(see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
- versionadded='.. versionadded:: 0.20.0',
+ versionadded='\n.. versionadded:: 0.20.0\n',
**_shared_doc_kwargs)
@Appender(generic._shared_docs['aggregate'])
def aggregate(self, func, axis=0, *args, **kwargs):
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 416647831880d..eb65ca7a92584 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -2127,8 +2127,7 @@ class EWM(_Rolling):
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`.
halflife : float, optional
Specify decay in terms of half-life,
- :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for }
- halflife > 0`.
+ :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{for} halflife > 0`.
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`.
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index a21c910979f9d..ac55583274ab1 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -727,7 +727,7 @@ def parser_f(filepath_or_buffer: FilePathOrBuffer,
summary="""Read general delimited file into DataFrame.
.. deprecated:: 0.24.0
-Use :func:`pandas.read_csv` instead, passing ``sep='\\t'`` if necessary.""",
+ Use :func:`pandas.read_csv` instead, passing ``sep='\\t'`` if necessary.""",
_default_sep=r"'\\t' (tab-stop)")
)(read_table)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index d91b2785c4fe5..789b6ca7e97bd 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -188,7 +188,6 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of Precision.
parse_dates : list or dict, default None
- The behavior is as follows:
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index c6ae933bbbf10..e36e9ffdac633 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -2088,15 +2088,15 @@ def plot_series(data, kind='line', ax=None, # Series unique
-----
The return type depends on the `return_type` parameter:
- * 'axes' : object of class matplotlib.axes.Axes
- * 'dict' : dict of matplotlib.lines.Line2D objects
- * 'both' : a namedtuple with structure (ax, lines)
+ * 'axes' : object of class matplotlib.axes.Axes
+ * 'dict' : dict of matplotlib.lines.Line2D objects
+ * 'both' : a namedtuple with structure (ax, lines)
- For data grouped with ``by``:
+ For data grouped with ``by``, return a Series of the above or a numpy
+ array:
- * :class:`~pandas.Series`
- * :class:`~numpy.array` (for ``return_type = None``)
- Return Series or numpy.array.
+ * :class:`~pandas.Series`
+ * :class:`~numpy.array` (for ``return_type = None``)
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
| WIP towards getting sphinx doc build warning free | https://api.github.com/repos/pandas-dev/pandas/pulls/26117 | 2019-04-17T08:03:47Z | 2019-04-17T12:45:50Z | 2019-04-17T12:45:50Z | 2019-04-17T12:51:00Z |
DOC: fixup code-block directives from GH26076 | diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst
index 3671a5e24bdaa..9d497f2fc658d 100644
--- a/doc/source/whatsnew/v0.10.0.rst
+++ b/doc/source/whatsnew/v0.10.0.rst
@@ -430,7 +430,7 @@ Updated PyTables Support
- added mixed-dtype support!
- .. ipython:: python
+ .. code-block:: ipython
In [64]: df['string'] = 'string'
diff --git a/doc/source/whatsnew/v0.12.0.rst b/doc/source/whatsnew/v0.12.0.rst
index 5dbfb2c728f06..e99e5147d73b5 100644
--- a/doc/source/whatsnew/v0.12.0.rst
+++ b/doc/source/whatsnew/v0.12.0.rst
@@ -320,7 +320,7 @@ Other Enhancements
Let's say that we had an option ``'a.b'`` and another option ``'b.c'``.
We can set them at the same time:
- .. ipython:: python
+ .. code-block:: ipython
In [31]: pd.get_option('a.b')
Out[31]: 2
| Forgot to change some ipython directives to code-block directives in #26076 (should have checked the travis doc build log there ..) | https://api.github.com/repos/pandas-dev/pandas/pulls/26116 | 2019-04-17T07:25:55Z | 2019-04-17T08:06:44Z | 2019-04-17T08:06:44Z | 2019-04-17T08:06:48Z |
Updated PeriodIndex docstrings #10094 | diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 70312f2e61445..b0b87d98ce518 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -439,19 +439,16 @@ def asfreq(self, freq=None, how='E'):
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
- <class 'pandas.core.indexes.period.PeriodIndex'>
- [2010, ..., 2015]
- Length: 6, Freq: A-DEC
+ PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
+ dtype='period[A-DEC]', freq='A-DEC')
>>> pidx.asfreq('M')
- <class 'pandas.core.indexes.period.PeriodIndex'>
- [2010-12, ..., 2015-12]
- Length: 6, Freq: M
+ PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
+ '2015-12'], dtype='period[M]', freq='M')
>>> pidx.asfreq('M', how='S')
- <class 'pandas.core.indexes.period.PeriodIndex'>
- [2010-01, ..., 2015-01]
- Length: 6, Freq: M
+ PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
+ '2015-01'], dtype='period[M]', freq='M')
"""
how = libperiod._validate_end_alias(how)
| - [x] closes #10094
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26115 | 2019-04-17T00:31:11Z | 2019-04-17T20:43:59Z | 2019-04-17T20:43:59Z | 2019-04-18T23:17:23Z |
CLN: replace usage internally of .iteritems with .items | diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 5008b77d9fb28..e2f6764c76eef 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -115,15 +115,15 @@ def setup(self):
)
self.df4 = DataFrame(np.random.randn(N * 1000, 10))
- def time_iteritems(self):
+ def time_items(self):
# (monitor no-copying behaviour)
if hasattr(self.df, "_item_cache"):
self.df._item_cache.clear()
- for name, col in self.df.iteritems():
+ for name, col in self.df.items():
pass
- def time_iteritems_cached(self):
- for name, col in self.df.iteritems():
+ def time_items_cached(self):
+ for name, col in self.df.items():
pass
def time_iteritems_indexing(self):
diff --git a/doc/source/development/contributing_docstring.rst b/doc/source/development/contributing_docstring.rst
index 62216f168af3c..34bc5f44eb0c0 100644
--- a/doc/source/development/contributing_docstring.rst
+++ b/doc/source/development/contributing_docstring.rst
@@ -522,7 +522,7 @@ examples:
* ``loc`` and ``iloc``, as they do the same, but in one case providing indices
and in the other positions
* ``max`` and ``min``, as they do the opposite
-* ``iterrows``, ``itertuples`` and ``iteritems``, as it is easy that a user
+* ``iterrows``, ``itertuples`` and ``items``, as it is easy that a user
looking for the method to iterate over columns ends up in the method to
iterate over rows, and vice-versa
* ``fillna`` and ``dropna``, as both methods are used to handle missing values
diff --git a/doc/source/getting_started/basics.rst b/doc/source/getting_started/basics.rst
index 682d6c1ef8301..bc3b7b4c70fd1 100644
--- a/doc/source/getting_started/basics.rst
+++ b/doc/source/getting_started/basics.rst
@@ -1475,7 +1475,7 @@ Thus, for example, iterating over a DataFrame gives you the column names:
print(col)
-Pandas objects also have the dict-like :meth:`~DataFrame.iteritems` method to
+Pandas objects also have the dict-like :meth:`~DataFrame.items` method to
iterate over the (key, value) pairs.
To iterate over the rows of a DataFrame, you can use the following methods:
@@ -1524,10 +1524,10 @@ To iterate over the rows of a DataFrame, you can use the following methods:
df
-iteritems
-~~~~~~~~~
+items
+~~~~~
-Consistent with the dict-like interface, :meth:`~DataFrame.iteritems` iterates
+Consistent with the dict-like interface, :meth:`~DataFrame.items` iterates
through key-value pairs:
* **Series**: (index, scalar value) pairs
@@ -1537,7 +1537,7 @@ For example:
.. ipython:: python
- for label, ser in df.iteritems():
+ for label, ser in df.items():
print(label)
print(ser)
diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst
index 1a316c2f25ec6..c0b58fd2d99f5 100644
--- a/doc/source/reference/frame.rst
+++ b/doc/source/reference/frame.rst
@@ -67,8 +67,8 @@ Indexing, iteration
DataFrame.insert
DataFrame.__iter__
DataFrame.items
- DataFrame.keys
DataFrame.iteritems
+ DataFrame.keys
DataFrame.iterrows
DataFrame.itertuples
DataFrame.lookup
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index e8e2f64e22cb5..8d2a764c33a43 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -76,8 +76,8 @@ Indexing, iteration
Series.loc
Series.iloc
Series.__iter__
- Series.iteritems
Series.items
+ Series.iteritems
Series.keys
Series.pop
Series.item
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index daca08d69346d..a8360d8aeb037 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -889,6 +889,7 @@ Other deprecations
- :meth:`DataFrame.get_dtype_counts` is deprecated. (:issue:`18262`)
- :meth:`Categorical.ravel` will return a :class:`Categorical` instead of a ``np.ndarray`` (:issue:`27199`)
+
.. _whatsnew_0250.prior_deprecations:
Removal of prior version deprecations/changes
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ce1b99b315936..55a9eb6a0810a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -771,15 +771,15 @@ def style(self):
return Styler(self)
- def iteritems(self):
- r"""
+ _shared_docs[
+ "items"
+ ] = r"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
- Yields
- ------
+ %s
label : object
The column names for the DataFrame being iterated over.
content : Series
@@ -802,7 +802,7 @@ def iteritems(self):
panda bear 1864
polar bear 22000
koala marsupial 80000
- >>> for label, content in df.iteritems():
+ >>> for label, content in df.items():
... print('label:', label)
... print('content:', content, sep='\n')
...
@@ -819,6 +819,9 @@ def iteritems(self):
koala 80000
Name: population, dtype: int64
"""
+
+ @Appender(_shared_docs["items"] % "Yields\n ------")
+ def items(self):
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
@@ -826,6 +829,10 @@ def iteritems(self):
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
+ @Appender(_shared_docs["items"] % "Returns\n -------")
+ def iteritems(self):
+ return self.items()
+
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
@@ -843,7 +850,7 @@ def iterrows(self):
See Also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
- iteritems : Iterate over (column name, Series) pairs.
+ items : Iterate over (column name, Series) pairs.
Notes
-----
@@ -901,7 +908,7 @@ def itertuples(self, index=True, name="Pandas"):
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
- DataFrame.iteritems : Iterate over (column name, Series) pairs.
+ DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
@@ -958,8 +965,6 @@ def itertuples(self, index=True, name="Pandas"):
# fallback to regular tuples
return zip(*arrays)
- items = iteritems
-
def __len__(self):
"""
Returns length of info axis, but here we use the index.
@@ -2634,7 +2639,7 @@ def memory_usage(self, index=True, deep=False):
5216
"""
result = Series(
- [c.memory_usage(index=False, deep=deep) for col, c in self.iteritems()],
+ [c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
@@ -4955,7 +4960,7 @@ def f(vals):
if not diff.empty:
raise KeyError(diff)
- vals = (col.values for name, col in self.iteritems() if name in subset)
+ vals = (col.values for name, col in self.items() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
@@ -7343,7 +7348,7 @@ def round(self, decimals=0, *args, **kwargs):
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
- for col, vals in df.iteritems():
+ for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
@@ -7363,7 +7368,7 @@ def _series_round(s, decimals):
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
- new_cols = [_series_round(v, decimals) for _, v in self.iteritems()]
+ new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a " "Series")
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 5db06d32880cc..4e05dfca43e78 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -494,7 +494,7 @@ def _get_space_character_free_column_resolvers(self):
"""
from pandas.core.computation.common import _remove_spaces_column_name
- return {_remove_spaces_column_name(k): v for k, v in self.iteritems()}
+ return {_remove_spaces_column_name(k): v for k, v in self.items()}
@property
def _info_axis(self):
@@ -1936,15 +1936,22 @@ def keys(self):
"""
return self._info_axis
- def iteritems(self):
- """
- Iterate over (label, values) on info axis
+ def items(self):
+ """Iterate over (label, values) on info axis
- This is index for Series, columns for DataFrame and so on.
+ This is index for Series and columns for DataFrame.
+
+ Returns
+ -------
+ Generator
"""
for h in self._info_axis:
yield h, self[h]
+ @Appender(items.__doc__)
+ def iteritems(self):
+ return self.items()
+
def __len__(self):
"""Returns length of info axis"""
return len(self._info_axis)
@@ -5912,7 +5919,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs):
"key in a dtype mappings argument."
)
results = []
- for col_name, col in self.iteritems():
+ for col_name, col in self.items():
if col_name in dtype:
results.append(
col.astype(
@@ -10328,7 +10335,7 @@ def describe_1d(data):
else:
data = self.select_dtypes(include=include, exclude=exclude)
- ldesc = [describe_1d(s) for _, s in data.iteritems()]
+ ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
names = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index ff0bffacd37ad..670a4666a3440 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -601,7 +601,7 @@ def from_frame(cls, df, sortorder=None, names=None):
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
- column_names, columns = zip(*df.iteritems())
+ column_names, columns = zip(*df.items())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 188f2edd96590..23bf89b2bc1ac 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -272,7 +272,7 @@ def _compute_grand_margin(data, values, aggfunc, margins_name="All"):
if values:
grand_margin = {}
- for k, v in data[values].iteritems():
+ for k, v in data[values].items():
try:
if isinstance(aggfunc, str):
grand_margin[k] = getattr(v, aggfunc)()
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 5d932d7ded9b8..540a06caec220 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -478,7 +478,7 @@ def _unstack_extension_series(series, level, fill_value):
out = []
values = extract_array(series, extract_numpy=False)
- for col, indices in result.iteritems():
+ for col, indices in result.items():
out.append(
Series(
values.take(indices.values, allow_fill=True, fill_value=fill_value),
@@ -544,7 +544,7 @@ def factorize(index):
if is_extension_array_dtype(dtype):
arr = dtype.construct_array_type()
new_values = arr._concat_same_type(
- [col._values for _, col in frame.iteritems()]
+ [col._values for _, col in frame.items()]
)
new_values = _reorder_for_extension_array_stack(new_values, N, K)
else:
@@ -695,7 +695,7 @@ def _convert_level_number(level_num, columns):
subset = this[this.columns[loc]]
value_slice = dtype.construct_array_type()._concat_same_type(
- [x._values for _, x in subset.iteritems()]
+ [x._values for _, x in subset.items()]
)
N, K = this.shape
idx = np.arange(N * K).reshape(K, N).T.ravel()
@@ -909,7 +909,7 @@ def check_len(item, name):
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
- for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix, prefix_sep):
+ for (col, pre, sep) in zip(data_to_encode.items(), prefix, prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(
col[1],
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b3a7f38aef8ef..a4f1f1d2a6296 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1692,13 +1692,12 @@ def to_string(
# ----------------------------------------------------------------------
- def iteritems(self):
+ def items(self):
"""
Lazily iterate over (index, value) tuples.
This method returns an iterable tuple (index, value). This is
- convenient if you want to create a lazy iterator. Note that the
- methods Series.items and Series.iteritems are the same methods.
+ convenient if you want to create a lazy iterator.
Returns
-------
@@ -1708,12 +1707,12 @@ def iteritems(self):
See Also
--------
- DataFrame.iteritems : Equivalent to Series.iteritems for DataFrame.
+ DataFrame.items : Equivalent to Series.items for DataFrame.
Examples
--------
>>> s = pd.Series(['A', 'B', 'C'])
- >>> for index, value in s.iteritems():
+ >>> for index, value in s.items():
... print("Index : {}, Value : {}".format(index, value))
Index : 0, Value : A
Index : 1, Value : B
@@ -1721,7 +1720,9 @@ def iteritems(self):
"""
return zip(iter(self.index), iter(self))
- items = iteritems
+ @Appender(items.__doc__)
+ def iteritems(self):
+ return self.items()
# ----------------------------------------------------------------------
# Misc public methods
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index f195e4b5f4e37..ecb5dc93031ab 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -695,7 +695,7 @@ def _reindex_index(
need_mask = mask.any()
new_series = {}
- for col, series in self.iteritems():
+ for col, series in self.items():
if mask.all():
continue
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 70700653c4795..7c293ca4e50b0 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -998,7 +998,7 @@ def str_extractall(arr, pat, flags=0):
index_list = []
is_mi = arr.index.nlevels > 1
- for subject_key, subject in arr.iteritems():
+ for subject_key, subject in arr.items():
if isinstance(subject, str):
if not is_mi:
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index f07133baed435..f5ab81ad9089e 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -113,7 +113,7 @@ def hash_pandas_object(
h = Series(h, index=obj.index, dtype="uint64", copy=False)
elif isinstance(obj, ABCDataFrame):
- hashes = (hash_array(series.values) for _, series in obj.iteritems())
+ hashes = (hash_array(series.values) for _, series in obj.items())
num_items = len(obj.columns)
if index:
index_hash_generator = (
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index e7aa5d22995c6..98349fe1e4792 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -538,7 +538,7 @@ def _update_ctx(self, attrs):
matter.
"""
for row_label, v in attrs.iterrows():
- for col_label, col in v.iteritems():
+ for col_label, col in v.items():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 1f0728ee96469..f3c966bb1a476 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -1105,7 +1105,7 @@ def _process_converter(self, f, filt=None):
needs_new_obj = False
new_obj = dict()
- for i, (col, c) in enumerate(self.obj.iteritems()):
+ for i, (col, c) in enumerate(self.obj.items()):
if filt(col, c):
new_data, result = f(col, c)
if result:
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py
index 045127c63af5c..1e7cd54d9f4a0 100644
--- a/pandas/io/json/_table_schema.py
+++ b/pandas/io/json/_table_schema.py
@@ -249,7 +249,7 @@ def build_table_schema(data, index=True, primary_key=None, version=True):
fields.append(convert_pandas_type_to_json_field(data.index))
if data.ndim > 1:
- for column, s in data.iteritems():
+ for column, s in data.items():
fields.append(convert_pandas_type_to_json_field(s))
else:
fields.append(convert_pandas_type_to_json_field(data))
diff --git a/pandas/io/msgpack/_packer.pyx b/pandas/io/msgpack/_packer.pyx
index a0d2b013c8e9d..0ed188074f3d9 100644
--- a/pandas/io/msgpack/_packer.pyx
+++ b/pandas/io/msgpack/_packer.pyx
@@ -194,7 +194,7 @@ cdef class Packer:
raise ValueError("dict is too large")
ret = msgpack_pack_map(&self.pk, L)
if ret == 0:
- for k, v in d.iteritems():
+ for k, v in d.items():
ret = self._pack(k, nest_limit - 1)
if ret != 0: break
ret = self._pack(v, nest_limit - 1)
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 211571c7dbaa1..6fe34e4e9705a 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -108,7 +108,7 @@ def _parse_date_columns(data_frame, parse_dates):
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
- for col_name, df_col in data_frame.iteritems():
+ for col_name, df_col in data_frame.items():
if is_datetime64tz_dtype(df_col) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 7087d2ee963cb..29cb2a5dc0f0e 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -2302,7 +2302,7 @@ def _check_column_names(self, data):
def _set_formats_and_types(self, data, dtypes):
self.typlist = []
self.fmtlist = []
- for col, dtype in dtypes.iteritems():
+ for col, dtype in dtypes.items():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
@@ -3168,7 +3168,7 @@ def _convert_strls(self, data):
def _set_formats_and_types(self, data, dtypes):
self.typlist = []
self.fmtlist = []
- for col, dtype in dtypes.iteritems():
+ for col, dtype in dtypes.items():
force_strl = col in self._convert_strl
fmt = _dtype_to_default_stata_fmt(
dtype, data[col], dta_version=117, force_strl=force_strl
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index d25715e6d167b..519465802085b 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -258,7 +258,7 @@ def _iter_data(self, data=None, keep_index=False, fillna=None):
# else:
# columns = data.columns
- for col, values in data.iteritems():
+ for col, values in data.items():
if keep_index is True:
yield col, values
else:
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 76a210e129eb3..3c0eca297f243 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -319,7 +319,7 @@ def test_sequence_like_with_categorical(self):
for row, s in df.iterrows():
str(s)
- for c, col in df.iteritems():
+ for c, col in df.items():
str(s)
def test_len(self, float_frame):
@@ -430,7 +430,7 @@ def test_repr_with_mi_nat(self, float_string_frame):
expected = " X\nNaT a 1\n2013-01-01 b 2"
assert result == expected
- def test_iteritems_names(self, float_string_frame):
+ def test_items_names(self, float_string_frame):
for k, v in float_string_frame.items():
assert v.name == k
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index c2d38b2938fca..3c102f49c6cbf 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -2712,7 +2712,7 @@ def _check_get(df, cond, check_dtypes=True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
rs2 = df.where(cond.values, other1)
- for k, v in rs.iteritems():
+ for k, v in rs.items():
exp = Series(np.where(cond[k], df[k], other1[k]), index=v.index)
assert_series_equal(v, exp, check_names=False)
assert_frame_equal(rs, rs2)
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 67482ddf657fb..bffdf17a49750 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -281,7 +281,7 @@ def test_binary_ops_align(self):
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
- [opa(df.loc[idx[:, :, i], :], v) for i, v in x.iteritems()]
+ [opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
assert_frame_equal(result, expected)
@@ -289,7 +289,7 @@ def test_binary_ops_align(self):
result = getattr(df, op)(x, level="second", axis=0)
expected = (
- pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.iteritems()])
+ pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index e06047b52ac15..ad3957138ceee 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -842,7 +842,7 @@ def test_float_index_non_scalar_assignment(self):
def test_float_index_at_iat(self):
s = Series([1, 2, 3], index=[0.1, 0.2, 0.3])
- for el, item in s.iteritems():
+ for el, item in s.items():
assert s.at[el] == item
for i in range(len(s)):
assert s.iat[i] == i + 1
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index e6ccee684b76b..38b4897e55c84 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -198,7 +198,7 @@ def test_series_set_tz_timestamp(self, tz_naive_fixture):
def test_mixed_index_at_iat_loc_iloc_series(self):
# GH 19860
s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
- for el, item in s.iteritems():
+ for el, item in s.items():
assert s.at[el] == s.loc[el] == item
for i in range(len(s)):
assert s.iat[i] == s.iloc[i] == i + 1
@@ -214,7 +214,7 @@ def test_mixed_index_at_iat_loc_iloc_dataframe(self):
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], columns=["a", "b", "c", 1, 2]
)
for rowIdx, row in df.iterrows():
- for el, item in row.iteritems():
+ for el, item in row.items():
assert df.at[rowIdx, el] == df.loc[rowIdx, el] == item
for row in range(2):
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 9752b4c62aff7..f64defc848e17 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -380,7 +380,7 @@ def test_thousands_macau_stats(self, datapath):
dfs = self.read_html(macau_data, index_col=0, attrs={"class": "style1"})
df = dfs[all_non_nan_table_index]
- assert not any(s.isna().any() for _, s in df.iteritems())
+ assert not any(s.isna().any() for _, s in df.items())
@pytest.mark.slow
def test_thousands_macau_index_col(self, datapath):
@@ -389,7 +389,7 @@ def test_thousands_macau_index_col(self, datapath):
dfs = self.read_html(macau_data, index_col=0, header=0)
df = dfs[all_non_nan_table_index]
- assert not any(s.isna().any() for _, s in df.iteritems())
+ assert not any(s.isna().any() for _, s in df.items())
def test_empty_tables(self):
"""
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 2097264ba5e78..f91a1d1f2035b 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -336,10 +336,10 @@ def test_values(self):
tm.assert_almost_equal(self.ts.values, self.ts, check_dtype=False)
def test_iteritems(self):
- for idx, val in self.series.items():
+ for idx, val in self.series.iteritems():
assert val == self.series[idx]
- for idx, val in self.ts.items():
+ for idx, val in self.ts.iteritems():
assert val == self.ts[idx]
# assert is lazy (genrators don't define reverse, lists do)
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index 0238314122462..8f79210c40d31 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -259,5 +259,5 @@ def test_to_dict(self, mapping, datetime_series):
Series(datetime_series.to_dict(mapping), name="ts"), datetime_series
)
from_method = Series(datetime_series.to_dict(collections.Counter))
- from_constructor = Series(collections.Counter(datetime_series.iteritems()))
+ from_constructor = Series(collections.Counter(datetime_series.items()))
tm.assert_series_equal(from_method, from_constructor)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 279d6dd84d92b..d75016824d6cf 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -1107,13 +1107,13 @@ def test_iterable_object_and_category(self, typ, method, dtype, rdtype, obj):
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
- # test items / iteritems yields the correct boxed scalars
+ # test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
- _, result = list(s.iteritems())[0]
+ _, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
| - [x] xref #25725
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Now that Python2 is being removed, ``.iteritems`` should be deprecated, and ``.items`` should be used instead.
EDIT: This PR ended up not deprecating .iteritems, but only change its usage internally with using .items | https://api.github.com/repos/pandas-dev/pandas/pulls/26114 | 2019-04-16T23:09:29Z | 2019-07-10T19:16:55Z | 2019-07-10T19:16:55Z | 2019-07-10T23:01:51Z |
BUG: GroupBy.size with all-null data raises ValueError | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index da712f84eb1b5..5f6483ede5544 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -381,6 +381,7 @@ Groupby/Resample/Rolling
- Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.nunique` in which the names of column levels were lost (:issue:`23222`)
- Bug in :func:`pandas.core.groupby.GroupBy.agg` when applying a aggregation function to timezone aware data (:issue:`23683`)
- Bug in :func:`pandas.core.groupby.GroupBy.first` and :func:`pandas.core.groupby.GroupBy.last` where timezone information would be dropped (:issue:`21603`)
+- Bug in :func:`pandas.core.groupby.GroupBy.size` when grouping only NA values (:issue:`23050`)
- Bug in :func:`Series.groupby` where using ``groupby`` with a :class:`MultiIndex` Series with a list of labels equal to the length of the series caused incorrect grouping (:issue:`25704`)
- Ensured that ordering of outputs in ``groupby`` aggregation functions is consistent across all versions of Python (:issue:`25692`)
- Ensured that result group order is correct when grouping on an ordered ``Categorical`` and specifying ``observed=True`` (:issue:`25871`, :issue:`25167`)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index ec22548de6da3..8a6ec285cc79e 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -246,7 +246,7 @@ def size(self):
if ngroup:
out = np.bincount(ids[ids != -1], minlength=ngroup)
else:
- out = ids
+ out = []
return Series(out,
index=self.result_index,
dtype='int64')
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 187fea5403aea..7ca483e12b332 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -1090,6 +1090,15 @@ def test_size(df):
tm.assert_series_equal(df.groupby('A').size(), out)
+def test_size_groupby_all_null():
+ # GH23050
+ # Assert no 'Value Error : Length of passed values is 2, index implies 0'
+ df = DataFrame({'A': [None, None]}) # all-null groups
+ result = df.groupby('A').size()
+ expected = Series(dtype='int64', index=Index([], name='A'))
+ tm.assert_series_equal(result, expected)
+
+
# quantile
# --------------------------------
@pytest.mark.parametrize("interpolation", [
| - [x] closes #23050
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26112 | 2019-04-16T21:53:06Z | 2019-04-17T12:51:13Z | 2019-04-17T12:51:12Z | 2019-04-17T12:51:16Z |
CLN: remove compat.lfilter | diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 7e201920f4331..332b435d33403 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -5,7 +5,7 @@
Cross-compatible functions for different versions of Python.
Key items to import for compatible code:
-* lists: lrange(), lmap(), lzip(), lfilter()
+* lists: lrange(), lmap(), lzip()
* add_metaclass(metaclass) - class decorator that recreates class with with the
given metaclass instead (and avoids intermediary class creation)
@@ -35,10 +35,6 @@ def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
-def lfilter(*args, **kwargs):
- return list(filter(*args, **kwargs))
-
-
# ----------------------------------------------------------------------------
# functions largely based / taken from the six module
diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py
index 654de06082363..fd0ae85ce634f 100644
--- a/pandas/tests/test_compat.py
+++ b/pandas/tests/test_compat.py
@@ -5,7 +5,7 @@
import builtins
import re
-from pandas.compat import lfilter, lmap, lrange, lzip, re_type
+from pandas.compat import lmap, lrange, lzip, re_type
class TestBuiltinIterators(object):
@@ -35,14 +35,6 @@ def test_lmap(self):
lengths = 10,
self.check_results(results, expecteds, lengths)
- def test_lfilter(self):
- func = lambda x: x
- lst = list(builtins.range(10))
- results = lfilter(lambda x: x, lst),
- lengths = 9,
- expecteds = list(builtins.filter(func, lst)),
- self.check_results(results, expecteds, lengths)
-
def test_lzip(self):
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
results = lzip(*lst),
| - [x] xref #25725
Removes ``compat.lfilter``. | https://api.github.com/repos/pandas-dev/pandas/pulls/26110 | 2019-04-16T15:27:46Z | 2019-04-16T17:17:17Z | 2019-04-16T17:17:16Z | 2019-05-05T06:59:30Z |
Improve documentation for assert_frame|series_equal #26101 | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 9659cb33686d0..0278432117631 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -990,6 +990,12 @@ def assert_series_equal(left, right, check_dtype=True,
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
+
+ When comparing two numbers, if the first number has magnitude less
+ than 1e-5, we compare the two numbers directly and check whether
+ they are equivalent within the specified precision. Otherwise, we
+ compare the **ratio** of the second number to the first number and
+ check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
@@ -1131,6 +1137,12 @@ def assert_frame_equal(left, right, check_dtype=True,
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
+
+ When comparing two numbers, if the first number has magnitude less
+ than 1e-5, we compare the two numbers directly and check whether
+ they are equivalent within the specified precision. Otherwise, we
+ compare the **ratio** of the second number to the first number and
+ check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical, i.e.
| - [x] closes #26101
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26106 | 2019-04-16T10:58:30Z | 2019-04-22T15:31:40Z | 2019-04-22T15:31:40Z | 2019-04-22T15:31:48Z |
ENH: Add parameter to download BigQuery results with the BigQuery Storage API | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 1953132c826ba..f2393161e7eac 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -364,6 +364,7 @@ I/O
- Improved the explanation for the failure when value labels are repeated in Stata dta files and suggested work-arounds (:issue:`25772`)
- Improved :meth:`pandas.read_stata` and :class:`pandas.io.stata.StataReader` to read incorrectly formatted 118 format files saved by Stata (:issue:`25960`)
- Fixed bug in loading objects from S3 that contain ``#`` characters in the URL (:issue:`25945`)
+- Adds ``use_bqstorage_api`` parameter to :func:`read_gbq` to speed up downloads of large data frames. This feature requires version 0.10.0 of the ``pandas-gbq`` library as well as the ``google-cloud-bigquery-storage`` and ``fastavro`` libraries. (:issue:`26104`)
Plotting
^^^^^^^^
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index a6cec7ea8fb16..871bc4a8221c2 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -1,7 +1,5 @@
""" Google BigQuery support """
-import warnings
-
def _try_import():
# since pandas is a dependency of pandas-gbq
@@ -26,7 +24,7 @@ def _try_import():
def read_gbq(query, project_id=None, index_col=None, col_order=None,
reauth=False, auth_local_webserver=False, dialect=None,
location=None, configuration=None, credentials=None,
- private_key=None, verbose=None):
+ use_bqstorage_api=None, private_key=None, verbose=None):
"""
Load data from Google BigQuery.
@@ -103,6 +101,21 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None,
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
+ use_bqstorage_api : bool, default False
+ Use the `BigQuery Storage API
+ <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
+ download query results quickly, but at an increased cost. To use this
+ API, first `enable it in the Cloud Console
+ <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
+ You must also have the `bigquery.readsessions.create
+ <https://cloud.google.com/bigquery/docs/access-control#roles>`__
+ permission on the project you are billing queries to.
+
+ This feature requires version 0.10.0 or later of the ``pandas-gbq``
+ package. It also requires the ``google-cloud-bigquery-storage`` and
+ ``fastavro`` packages.
+
+ .. versionadded:: 0.25.0
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
@@ -131,22 +144,27 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None,
"""
pandas_gbq = _try_import()
- if dialect is None:
- dialect = "legacy"
- warnings.warn(
- 'The default value for dialect is changing to "standard" in a '
- 'future version of pandas-gbq. Pass in dialect="legacy" to '
- "disable this warning.",
- FutureWarning,
- stacklevel=2,
- )
+ kwargs = {}
+
+ # START: new kwargs. Don't populate unless explicitly set.
+ if use_bqstorage_api is not None:
+ kwargs["use_bqstorage_api"] = use_bqstorage_api
+ # END: new kwargs
+
+ # START: deprecated kwargs. Don't populate unless explicitly set.
+ if verbose is not None:
+ kwargs["verbose"] = verbose
+
+ if private_key is not None:
+ kwargs["private_key"] = private_key
+ # END: deprecated kwargs
return pandas_gbq.read_gbq(
query, project_id=project_id, index_col=index_col,
col_order=col_order, reauth=reauth,
auth_local_webserver=auth_local_webserver, dialect=dialect,
location=location, configuration=configuration,
- credentials=credentials, verbose=verbose, private_key=private_key)
+ credentials=credentials, **kwargs)
def to_gbq(dataframe, destination_table, project_id=None, chunksize=None,
diff --git a/pandas/tests/io/test_gbq.py b/pandas/tests/io/test_gbq.py
index 605108f875cb9..9ac1f0b520c76 100644
--- a/pandas/tests/io/test_gbq.py
+++ b/pandas/tests/io/test_gbq.py
@@ -8,7 +8,6 @@
import pandas as pd
from pandas import DataFrame
-import pandas.util.testing as tm
api_exceptions = pytest.importorskip("google.api_core.exceptions")
bigquery = pytest.importorskip("google.cloud.bigquery")
@@ -90,16 +89,59 @@ def make_mixed_dataframe_v2(test_size):
index=range(test_size))
-def test_read_gbq_without_dialect_warns_future_change(monkeypatch):
- # Default dialect is changing to standard SQL. See:
- # https://github.com/pydata/pandas-gbq/issues/195
+def test_read_gbq_with_deprecated_kwargs(monkeypatch):
+ captured_kwargs = {}
- def mock_read_gbq(*args, **kwargs):
+ def mock_read_gbq(sql, **kwargs):
+ captured_kwargs.update(kwargs)
return DataFrame([[1.0]])
- monkeypatch.setattr(pandas_gbq, 'read_gbq', mock_read_gbq)
- with tm.assert_produces_warning(FutureWarning):
- pd.read_gbq("SELECT 1")
+ monkeypatch.setattr("pandas_gbq.read_gbq", mock_read_gbq)
+ private_key = object()
+ pd.read_gbq("SELECT 1", verbose=True, private_key=private_key)
+
+ assert captured_kwargs["verbose"]
+ assert captured_kwargs["private_key"] is private_key
+
+
+def test_read_gbq_without_deprecated_kwargs(monkeypatch):
+ captured_kwargs = {}
+
+ def mock_read_gbq(sql, **kwargs):
+ captured_kwargs.update(kwargs)
+ return DataFrame([[1.0]])
+
+ monkeypatch.setattr("pandas_gbq.read_gbq", mock_read_gbq)
+ pd.read_gbq("SELECT 1")
+
+ assert "verbose" not in captured_kwargs
+ assert "private_key" not in captured_kwargs
+
+
+def test_read_gbq_with_new_kwargs(monkeypatch):
+ captured_kwargs = {}
+
+ def mock_read_gbq(sql, **kwargs):
+ captured_kwargs.update(kwargs)
+ return DataFrame([[1.0]])
+
+ monkeypatch.setattr("pandas_gbq.read_gbq", mock_read_gbq)
+ pd.read_gbq("SELECT 1", use_bqstorage_api=True)
+
+ assert captured_kwargs["use_bqstorage_api"]
+
+
+def test_read_gbq_without_new_kwargs(monkeypatch):
+ captured_kwargs = {}
+
+ def mock_read_gbq(sql, **kwargs):
+ captured_kwargs.update(kwargs)
+ return DataFrame([[1.0]])
+
+ monkeypatch.setattr("pandas_gbq.read_gbq", mock_read_gbq)
+ pd.read_gbq("SELECT 1")
+
+ assert "use_bqstorage_api" not in captured_kwargs
@pytest.mark.single
| pandas-gbq 0.10.0 adds a new `use_bqstorage_api` parameter to speed up downloads of large dataframes.
- [ ] ~closes #xxxx~
- [x] tests added / passed
```
$ pytest pandas/tests/io/test_gbq.py
=============================== test session starts ================================
platform darwin -- Python 3.6.4, pytest-4.4.1, py-1.5.3, pluggy-0.9.0
hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/Users/swast/src/pandas/pandas/.hypothesis/examples')
rootdir: /Users/swast/src/pandas/pandas, inifile: setup.cfg
plugins: xdist-1.22.2, forked-0.2, cov-2.5.1, hypothesis-3.70.3
collected 1 item
pandas/tests/io/test_gbq.py . [100%]
============================= 1 passed in 9.84 seconds =============================
```
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26104 | 2019-04-15T21:20:55Z | 2019-04-20T18:33:34Z | 2019-04-20T18:33:34Z | 2019-04-22T15:54:34Z |
CI/DOC: fix clipboard on travis | diff --git a/.travis.yml b/.travis.yml
index 022e11b7db950..ce8817133a477 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -86,6 +86,14 @@ install:
- ci/submit_cython_cache.sh
- echo "install done"
+before_script:
+ # display server (for clipboard functionality) needs to be started here,
+ # does not work if done in install:setup_env.sh (GH-26103)
+ - export DISPLAY=":99.0"
+ - echo "sh -e /etc/init.d/xvfb start"
+ - sh -e /etc/init.d/xvfb start
+ - sleep 3
+
script:
- echo "script start"
- source activate pandas-dev
diff --git a/ci/setup_env.sh b/ci/setup_env.sh
index 414a5c8705ee9..e2667558a63d7 100755
--- a/ci/setup_env.sh
+++ b/ci/setup_env.sh
@@ -118,16 +118,10 @@ echo "conda list"
conda list
# Install DB for Linux
-export DISPLAY=":99."
if [ ${TRAVIS_OS_NAME} == "linux" ]; then
echo "installing dbs"
mysql -e 'create database pandas_nosetest;'
psql -c 'create database pandas_nosetest;' -U postgres
-
- echo
- echo "sh -e /etc/init.d/xvfb start"
- sh -e /etc/init.d/xvfb start
- sleep 3
else
echo "not using dbs on non-linux"
fi
| - [ ] closes #26075
| https://api.github.com/repos/pandas-dev/pandas/pulls/26103 | 2019-04-15T18:20:37Z | 2019-05-16T13:11:18Z | 2019-05-16T13:11:17Z | 2019-05-17T06:32:24Z |
CLN: remove compat.itervalues | diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index e92c053a282bc..7e201920f4331 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -6,8 +6,6 @@
Key items to import for compatible code:
* lists: lrange(), lmap(), lzip(), lfilter()
-* iterable method compatibility: itervalues
- * Uses the original method if available, otherwise uses items, keys, values.
* add_metaclass(metaclass) - class decorator that recreates class with with the
given metaclass instead (and avoids intermediary class creation)
@@ -41,9 +39,6 @@ def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
-def itervalues(obj, **kw):
- return iter(obj.values(**kw))
-
# ----------------------------------------------------------------------------
# functions largely based / taken from the six module
diff --git a/pandas/core/base.py b/pandas/core/base.py
index e86d751182b2a..1d3eb880f32e3 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -9,7 +9,6 @@
import numpy as np
import pandas._libs.lib as lib
-import pandas.compat as compat
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
@@ -362,7 +361,7 @@ def nested_renaming_depr(level=4):
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
- if any(is_aggregator(x) for x in compat.itervalues(arg)):
+ if any(is_aggregator(x) for x in arg.values()):
new_arg = OrderedDict()
for k, v in arg.items():
if not isinstance(v, (tuple, list, dict)):
@@ -493,13 +492,12 @@ def _agg(arg, func):
def is_any_series():
# return a boolean if we have *any* nested series
- return any(isinstance(r, ABCSeries)
- for r in compat.itervalues(result))
+ return any(isinstance(r, ABCSeries) for r in result.values())
def is_any_frame():
# return a boolean if we have *any* nested series
return any(isinstance(r, ABCDataFrame)
- for r in compat.itervalues(result))
+ for r in result.values())
if isinstance(result, list):
return concat(result, keys=keys, axis=1, sort=True), True
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 7e765a38cedcd..f8488b7a153e3 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -14,8 +14,6 @@
ABCDatetimeArray, ABCDatetimeIndex, ABCIndexClass, ABCPeriodIndex,
ABCRangeIndex, ABCSparseDataFrame, ABCTimedeltaIndex)
-from pandas import compat
-
def get_dtype_kinds(l):
"""
@@ -69,7 +67,7 @@ def _get_series_result_type(result, objs=None):
if isinstance(result, dict):
# concat Series with axis 1
if all(isinstance(c, (SparseSeries, SparseDataFrame))
- for c in compat.itervalues(result)):
+ for c in result.values()):
return SparseDataFrame
else:
return DataFrame
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index b6fc31bb6f015..0f0abeb0a318f 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -15,7 +15,6 @@
import numpy as np
from pandas._libs import Timestamp, lib
-import pandas.compat as compat
from pandas.compat import lzip
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution
@@ -850,7 +849,7 @@ def _aggregate_multiple_funcs(self, arg, _level):
obj._selection = name
results[name] = obj.aggregate(func)
- if any(isinstance(x, DataFrame) for x in compat.itervalues(results)):
+ if any(isinstance(x, DataFrame) for x in results.values()):
# let higher level handle
if _level:
return results
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 6c08cacb551df..4ba6e04495fbb 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -9,7 +9,6 @@
from pandas._config import get_option
from pandas._libs import iNaT, lib, tslibs
-import pandas.compat as compat
from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask
from pandas.core.dtypes.common import (
@@ -68,7 +67,7 @@ def check(self, obj):
def __call__(self, f):
@functools.wraps(f)
def _f(*args, **kwargs):
- obj_iter = itertools.chain(args, compat.itervalues(kwargs))
+ obj_iter = itertools.chain(args, kwargs.values())
if any(self.check(obj) for obj in obj_iter):
msg = 'reduction operation {name!r} not allowed for this dtype'
raise TypeError(msg.format(name=f.__name__.replace('nan', '')))
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index a7edddc1da8db..645a3dc31f637 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -6,7 +6,6 @@
import numpy as np
-import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, deprecate_kwarg
from pandas.util._validators import validate_axis_style_args
@@ -1180,7 +1179,7 @@ def _construct_return_type(self, result, axes=None):
# need to assume they are the same
if ndim is None:
if isinstance(result, dict):
- ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
+ ndim = getattr(list(result.values())[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py
index 26bf6a8cf410d..fa4e35b08bf6e 100644
--- a/pandas/io/json/normalize.py
+++ b/pandas/io/json/normalize.py
@@ -8,7 +8,7 @@
from pandas._libs.writers import convert_json_to_lines
-from pandas import DataFrame, compat
+from pandas import DataFrame
def _convert_to_line_delimits(s):
@@ -198,8 +198,7 @@ def _pull_field(js, spec):
data = [data]
if record_path is None:
- if any([isinstance(x, dict)
- for x in compat.itervalues(y)] for y in data):
+ if any([isinstance(x, dict) for x in y.values()] for y in data):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 1e5b6529911bb..a21c910979f9d 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1154,7 +1154,7 @@ def read(self, nrows=None):
if index is None:
if col_dict:
# Any column is actually fine:
- new_rows = len(next(compat.itervalues(col_dict)))
+ new_rows = len(next(iter(col_dict.values())))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py
index c24adcbf29477..91dbe5c78acf3 100644
--- a/pandas/tests/io/test_packers.py
+++ b/pandas/tests/io/test_packers.py
@@ -14,7 +14,7 @@
import pandas
from pandas import (
Categorical, DataFrame, Index, Interval, MultiIndex, NaT, Period, Series,
- Timestamp, bdate_range, compat, date_range, period_range)
+ Timestamp, bdate_range, date_range, period_range)
import pandas.util.testing as tm
from pandas.util.testing import (
assert_categorical_equal, assert_frame_equal, assert_index_equal,
@@ -818,12 +818,12 @@ def setup_method(self, method):
def test_utf(self):
# GH10581
for encoding in self.utf_encodings:
- for frame in compat.itervalues(self.frame):
+ for frame in self.frame.values():
result = self.encode_decode(frame, encoding=encoding)
assert_frame_equal(result, frame)
def test_default_encoding(self):
- for frame in compat.itervalues(self.frame):
+ for frame in self.frame.values():
result = frame.to_msgpack()
expected = frame.to_msgpack(encoding='utf8')
assert result == expected
diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py
index 3777b585ea92e..654de06082363 100644
--- a/pandas/tests/test_compat.py
+++ b/pandas/tests/test_compat.py
@@ -5,7 +5,7 @@
import builtins
import re
-from pandas.compat import itervalues, lfilter, lmap, lrange, lzip, re_type
+from pandas.compat import lfilter, lmap, lrange, lzip, re_type
class TestBuiltinIterators(object):
@@ -50,9 +50,6 @@ def test_lzip(self):
lengths = 10,
self.check_results(results, expecteds, lengths)
- def test_dict_iterators(self):
- assert next(itervalues({1: 2})) == 2
-
def test_re_type():
assert isinstance(re.compile(''), re_type)
| - [x] xref #25725
Removes ``compat.itervalues``. | https://api.github.com/repos/pandas-dev/pandas/pulls/26099 | 2019-04-15T12:47:06Z | 2019-04-15T17:09:20Z | 2019-04-15T17:09:20Z | 2019-04-15T22:22:29Z |
BLD: add __pycache__ to .gitignore | diff --git a/.gitignore b/.gitignore
index f4f64aac23905..56828fa1d9331 100644
--- a/.gitignore
+++ b/.gitignore
@@ -65,6 +65,7 @@ coverage_html_report
*.pytest_cache
# hypothesis test database
.hypothesis/
+__pycache__
# OS generated files #
######################
| https://api.github.com/repos/pandas-dev/pandas/pulls/26098 | 2019-04-15T12:28:31Z | 2019-04-15T13:35:36Z | 2019-04-15T13:35:35Z | 2019-04-15T13:35:36Z | |
BUG: _convert_and_box_cache raise ValueError: Tz-aware datetime.datetime cannot be converted to datetime64 unless utc=True | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 2030bb4d974c3..02210ed4897f5 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -705,6 +705,8 @@ Datetimelike
- Bug in :func:`date_range` with unnecessary ``OverflowError`` being raised for very large or very small dates (:issue:`26651`)
- Bug where adding :class:`Timestamp` to a ``np.timedelta64`` object would raise instead of returning a :class:`Timestamp` (:issue:`24775`)
- Bug where comparing a zero-dimensional numpy array containing a ``np.datetime64`` object to a :class:`Timestamp` would incorrect raise ``TypeError`` (:issue:`26916`)
+- Bug in :func:`to_datetime` which would raise ``ValueError: Tz-aware datetime.datetime cannot be converted to datetime64 unless utc=True`` when called with ``cache=True``, with ``arg`` including datetime strings with different offset (:issue:`26097`)
+-
Timedelta
^^^^^^^^^
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 2d1c22f5623a1..f14f32c67d4e1 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1,3 +1,7 @@
-# flake8: noqa
-from pandas.core.indexes.api import *
-from pandas.core.indexes.multi import _sparsify
+from pandas.core.indexes.api import ( # noqa:F401
+ CategoricalIndex, DatetimeIndex, Float64Index, Index, Int64Index,
+ IntervalIndex, InvalidIndexError, MultiIndex, NaT, NumericIndex,
+ PeriodIndex, RangeIndex, TimedeltaIndex, UInt64Index, _all_indexes_same,
+ _get_combined_index, _get_consensus_names, _get_objs_combined_axis,
+ _new_Index, _union_indexes, ensure_index, ensure_index_from_sequences)
+from pandas.core.indexes.multi import _sparsify # noqa:F401
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 5893ff0e0dd8f..d543ae91ad344 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -1,6 +1,7 @@
from collections import abc
from datetime import datetime, time
from functools import partial
+from typing import Optional, TypeVar, Union
import numpy as np
@@ -14,12 +15,25 @@
from pandas.core.dtypes.common import (
ensure_object, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_float, is_integer, is_integer_dtype,
- is_list_like, is_numeric_dtype, is_object_dtype, is_scalar)
-from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
+ is_list_like, is_numeric_dtype, is_scalar)
+from pandas.core.dtypes.generic import (
+ ABCDataFrame, ABCDatetimeIndex, ABCIndex, ABCIndexClass, ABCSeries)
from pandas.core.dtypes.missing import notna
+from pandas._typing import ArrayLike
from pandas.core import algorithms
+# ---------------------------------------------------------------------
+# types used in annotations
+
+Scalar = Union[int, float, str]
+DatetimeScalar = TypeVar('DatetimeScalar', Scalar, datetime)
+DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, list, tuple,
+ ArrayLike, ABCSeries]
+
+
+# ---------------------------------------------------------------------
+
def _guess_datetime_format_for_array(arr, **kwargs):
# Try to guess the format based on the first non-NaN element
@@ -60,7 +74,43 @@ def _maybe_cache(arg, format, cache, convert_listlike):
return cache_array
-def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
+def _box_as_indexlike(
+ dt_array: ArrayLike,
+ utc: Optional[bool] = None,
+ name: Optional[str] = None
+) -> Union[ABCIndex, ABCDatetimeIndex]:
+ """
+ Properly boxes the ndarray of datetimes to DatetimeIndex
+ if it is possible or to generic Index instead
+
+ Parameters
+ ----------
+ dt_array: 1-d array
+ array of datetimes to be boxed
+ tz : object
+ None or 'utc'
+ name : string, default None
+ Name for a resulting index
+
+ Returns
+ -------
+ result : datetime of converted dates
+ - DatetimeIndex if convertible to sole datetime64 type
+ - general Index otherwise
+ """
+ from pandas import DatetimeIndex, Index
+ if is_datetime64_dtype(dt_array):
+ tz = 'utc' if utc else None
+ return DatetimeIndex(dt_array, tz=tz, name=name)
+ return Index(dt_array, name=name)
+
+
+def _convert_and_box_cache(
+ arg: DatetimeScalarOrArrayConvertible,
+ cache_array: ABCSeries,
+ box: bool,
+ name: Optional[str] = None
+) -> Union[ABCIndex, np.ndarray]:
"""
Convert array of dates with a cache and box the result
@@ -71,26 +121,19 @@ def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
Cache of converted, unique dates
box : boolean
True boxes result as an Index-like, False returns an ndarray
- errors : string
- 'ignore' plus box=True will convert result to Index
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : datetime of converted dates
- Returns:
-
- Index-like if box=True
- ndarray if box=False
"""
- from pandas import Series, DatetimeIndex, Index
+ from pandas import Series
result = Series(arg).map(cache_array)
if box:
- if errors == 'ignore':
- return Index(result, name=name)
- else:
- return DatetimeIndex(result, name=name)
+ return _box_as_indexlike(result, utc=None, name=name)
return result.values
@@ -118,7 +161,6 @@ def _return_parsed_timezone_results(result, timezones, box, tz, name):
- Index-like if box=True
- ndarray of Timestamps if box=False
-
"""
if tz is not None:
raise ValueError("Cannot pass a tz argument when "
@@ -324,13 +366,8 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None,
return np.array(result, dtype=object)
if box:
- # Ensure we return an Index in all cases where box=True
- if is_datetime64_dtype(result):
- return DatetimeIndex(result, tz=tz, name=name)
- elif is_object_dtype(result):
- # e.g. an Index of datetime objects
- from pandas import Index
- return Index(result, name=name)
+ utc = tz == 'utc'
+ return _box_as_indexlike(result, utc=utc, name=name)
return result
@@ -611,7 +648,7 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
elif isinstance(arg, ABCIndexClass):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
- result = _convert_and_box_cache(arg, cache_array, box, errors,
+ result = _convert_and_box_cache(arg, cache_array, box,
name=arg.name)
else:
convert_listlike = partial(convert_listlike, name=arg.name)
@@ -619,7 +656,7 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
elif is_list_like(arg):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
- result = _convert_and_box_cache(arg, cache_array, box, errors)
+ result = _convert_and_box_cache(arg, cache_array, box)
else:
result = convert_listlike(arg, box, format)
else:
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index a971a1088860a..f401a7f7c9e9b 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -504,6 +504,17 @@ def test_to_datetime_tz(self, cache):
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, cache=cache)
+ @pytest.mark.parametrize('cache', [True, False])
+ def test_to_datetime_different_offsets(self, cache):
+ # inspired by asv timeseries.ToDatetimeNONISO8601 benchmark
+ # see GH-26097 for more
+ ts_string_1 = 'March 1, 2018 12:00:00+0400'
+ ts_string_2 = 'March 1, 2018 12:00:00+0500'
+ arr = [ts_string_1] * 5 + [ts_string_2] * 5
+ expected = pd.Index([parse(x) for x in arr])
+ result = pd.to_datetime(arr, cache=cache)
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_tz_pytz(self, cache):
# see gh-8260
| - [x] closes #N/A
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Bug was found there: https://github.com/pandas-dev/pandas/pull/26043#issuecomment-483214458
Reproduction:
```python
import pandas as pd
N = 10
half = int(N / 2)
ts_string_1 = 'March 1, 2018 12:00:00+0400'
ts_string_2 = 'March 1, 2018 12:00:00+0500'
diff_offset = [ts_string_1] * half + [ts_string_2] * half
pd.to_datetime(diff_offset, cache=True)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/26097 | 2019-04-15T12:22:57Z | 2019-07-03T13:17:59Z | 2019-07-03T13:17:59Z | 2019-07-03T13:18:04Z |
CLN: Remove accidentally added pycache | diff --git a/pandas/tests/io/json/__pycache__/tmp2c7r4efu b/pandas/tests/io/json/__pycache__/tmp2c7r4efu
deleted file mode 100644
index d1258abbcdf40..0000000000000
Binary files a/pandas/tests/io/json/__pycache__/tmp2c7r4efu and /dev/null differ
| Removes this accidentally committed file https://github.com/pandas-dev/pandas/pull/26079/files#diff-0e2f9eb4477870d5461d93c7d8add1ec
| https://api.github.com/repos/pandas-dev/pandas/pulls/26095 | 2019-04-15T05:40:43Z | 2019-04-15T12:25:29Z | 2019-04-15T12:25:29Z | 2019-04-15T17:04:05Z |
STY: Turn on flake8 for pandas/compat/__init__.py | diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 54a7afd90a09a..e7ca0d4c846c0 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -14,14 +14,11 @@
Other items:
* platform checker
"""
-# pylint disable=W0611
-# flake8: noqa
-import re
-from distutils.version import LooseVersion
-import sys
import platform
+import re
import struct
+import sys
PY36 = sys.version_info >= (3, 6)
PY37 = sys.version_info >= (3, 7)
@@ -110,6 +107,7 @@ def raise_with_traceback(exc, traceback=Ellipsis):
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
+
# In Python 3.7, the private re._pattern_type is removed.
# Python 3.5+ have typing.re.Pattern
if PY36:
| With all of the great cleanup in this file, let's ensure that it remains nice and tidy. | https://api.github.com/repos/pandas-dev/pandas/pulls/26093 | 2019-04-15T01:21:58Z | 2019-04-15T02:50:04Z | 2019-04-15T02:50:04Z | 2019-04-15T02:50:19Z |
CLN: pylint references | diff --git a/pandas/__init__.py b/pandas/__init__.py
index d3e6d97960fb3..bd367bbe27d5e 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -1,5 +1,3 @@
-# pylint: disable-msg=W0614,W0401,W0611,W0622
-
# flake8: noqa
__docformat__ = 'restructuredtext'
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index e7ca0d4c846c0..7a27b22b96103 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -14,7 +14,6 @@
Other items:
* platform checker
"""
-
import platform
import re
import struct
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 09ca0c82395e2..4b46e14d950a1 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -1,5 +1,3 @@
-
-# pylint: disable=W0614,W0401,W0611
# flake8: noqa
import numpy as np
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index cd49946652566..53adf99148052 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1,5 +1,3 @@
-# pylint: disable=E1101,W0232
-
from shutil import get_terminal_size
import textwrap
from warnings import warn
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index fdc99e957e257..43fc162496ee4 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1,5 +1,3 @@
-# pylint: disable=E1101
-# pylint: disable=W0212,W0703,W0622
"""
DataFrame
---------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 885c499c58dfa..0368045cd3ab5 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1,4 +1,3 @@
-# pylint: disable=W0231,E1101
import collections
from datetime import timedelta
import functools
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index ff6d80dbadded..a0dd19b7ecd4b 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -1,4 +1,3 @@
-# pylint: disable=E1101
from datetime import datetime, time, timedelta
import operator
import warnings
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index f9347117bec23..34413f441a5d6 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1,4 +1,3 @@
-# pylint: disable=E1101,E1103,W0232
from collections import OrderedDict
import datetime
from sys import getsizeof
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 133cf1c0755cf..e2c222f11d85d 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -1,4 +1,3 @@
-# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
import warnings
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 79bdfebd9f90b..0b7958a4e7b67 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1,4 +1,3 @@
-# pylint: disable=W0223
import textwrap
import warnings
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 542b1075313bf..97066384f3e4b 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1,7 +1,6 @@
"""
Contains data structures designed for manipulating panel (3-dimensional) data
"""
-# pylint: disable=E1103,W0231,W0212,W0621
from collections import OrderedDict
import warnings
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index 99224f6fb7c5b..ece5144eba3a1 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -1,5 +1,3 @@
-# pylint: disable=E1101,E1103
-# pylint: disable=W0703,W0622,W0613,W0201
import re
import numpy as np
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 580810f3b6829..3aaae3b59a0d4 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -1,4 +1,3 @@
-# pylint: disable=E1103
import numpy as np
from pandas.compat import lrange
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 33b8384f1dd86..b88e3d8ee828c 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -1,5 +1,3 @@
-# pylint: disable=E1101,E1103
-# pylint: disable=W0703,W0622,W0613,W0201
from functools import partial
import itertools
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 8a22765d85aec..061fc1e7b60c4 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -51,10 +51,6 @@
import pandas.io.formats.format as fmt
import pandas.plotting._core as gfx
-# pylint: disable=E1101,E1103
-# pylint: disable=W0703,W0622,W0613,W0201
-
-
__all__ = ['Series']
_shared_doc_kwargs = dict(
diff --git a/pandas/core/sparse/api.py b/pandas/core/sparse/api.py
index 33e8b921905ba..6a00fa570b2ac 100644
--- a/pandas/core/sparse/api.py
+++ b/pandas/core/sparse/api.py
@@ -1,4 +1,3 @@
-# pylint: disable=W0611
# flake8: noqa
from pandas.core.arrays.sparse import SparseArray, SparseDtype
from pandas.core.sparse.frame import SparseDataFrame
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index 08729442e701f..e7f507055709d 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -30,9 +30,6 @@
from pandas.core.series import Series
from pandas.core.sparse.series import SparseSeries
-# pylint: disable=E1101,E1103,W0231,E0202
-
-
_shared_doc_kwargs = dict(klass='SparseDataFrame')
diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py
index c5b07c7b6c881..11231ce90b6b9 100644
--- a/pandas/core/sparse/series.py
+++ b/pandas/core/sparse/series.py
@@ -2,9 +2,6 @@
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
-
-# pylint: disable=E1101,E1103,W0231
-
from collections import abc
import warnings
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index ce724a32a7a56..c7524a10e577c 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -35,9 +35,6 @@
from pandas.io.common import _expand_user, _stringify_path
from pandas.io.formats.printing import adjoin, justify, pprint_thing
-# pylint: disable=W0141
-
-
common_docstring = """
Parameters
----------
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 28cc768ba4e21..84b812535025e 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -1,4 +1,3 @@
-# pylint: disable-msg=E1101,W0613,W0603
from io import StringIO
from itertools import islice
import os
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 2dedeaf0a4cda..4aba8f3f0ac77 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1,4 +1,3 @@
-# pylint: disable-msg=E1101,W0613,W0603
"""
High level interface to PyTables for reading and writing pandas data structures
to disk
diff --git a/pandas/plotting/_compat.py b/pandas/plotting/_compat.py
index 67f3d983480f8..4077bef8f36f5 100644
--- a/pandas/plotting/_compat.py
+++ b/pandas/plotting/_compat.py
@@ -1,5 +1,4 @@
# being a bit too dynamic
-# pylint: disable=E1101
from distutils.version import LooseVersion
import operator
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index af23c13063aa3..3bd0562b178a5 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -1,5 +1,4 @@
# being a bit too dynamic
-# pylint: disable=E1101
from collections import namedtuple
import re
from typing import List, Optional, Type
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py
index 1f60febaef1e4..cefc4d8aca4f2 100644
--- a/pandas/plotting/_misc.py
+++ b/pandas/plotting/_misc.py
@@ -1,5 +1,4 @@
# being a bit too dynamic
-# pylint: disable=E1101
import numpy as np
from pandas.compat import lmap, lrange
diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py
index 43365bd91c724..0947908803559 100644
--- a/pandas/plotting/_style.py
+++ b/pandas/plotting/_style.py
@@ -1,5 +1,4 @@
# being a bit too dynamic
-# pylint: disable=E1101
from contextlib import contextmanager
import warnings
diff --git a/pandas/plotting/_tools.py b/pandas/plotting/_tools.py
index 01a26edb309bc..39f3554f509a7 100644
--- a/pandas/plotting/_tools.py
+++ b/pandas/plotting/_tools.py
@@ -1,5 +1,4 @@
# being a bit too dynamic
-# pylint: disable=E1101
from math import ceil
import warnings
diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py
index 1b3f285e64059..8743de0b18912 100644
--- a/pandas/tests/extension/test_external_block.py
+++ b/pandas/tests/extension/test_external_block.py
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable=W0102
-
import numpy as np
import pytest
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 600575b5255d2..5926dd73aee36 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
-
-# pylint: disable-msg=W0612,E1101
from copy import deepcopy
import pydoc
diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py
index 9ae7252104746..f075682ac19f1 100644
--- a/pandas/tests/generic/test_frame.py
+++ b/pandas/tests/generic/test_frame.py
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable-msg=E1101,W0612
-
from copy import deepcopy
from distutils.version import LooseVersion
from operator import methodcaller
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index d3b63e428b374..fdcf0e2172708 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable-msg=E1101,W0612
-
from copy import copy, deepcopy
import numpy as np
diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py
index dd311dc214d30..9c30814ee6cab 100644
--- a/pandas/tests/generic/test_series.py
+++ b/pandas/tests/generic/test_series.py
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable-msg=E1101,W0612
-
from distutils.version import LooseVersion
from operator import methodcaller
diff --git a/pandas/tests/indexing/test_callable.py b/pandas/tests/indexing/test_callable.py
index d8f65c211a115..6f66088a151ae 100644
--- a/pandas/tests/indexing/test_callable.py
+++ b/pandas/tests/indexing/test_callable.py
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable-msg=W0612,E1101
-
import numpy as np
import pandas as pd
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 3cdae198cad31..5cb2b138f18e3 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable-msg=W0612,E1101
-
""" test fancy indexing & misc """
from datetime import datetime
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 6c672db4070fb..4899703c0c72b 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable=W0102
from collections import OrderedDict
from datetime import date, datetime
from distutils.version import LooseVersion
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 9ca5bf95e5e08..23e1968524203 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable-msg=W0612,E1101
from collections import OrderedDict
from datetime import timedelta
from io import StringIO
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index b4e942fc086a9..114c0bba0c31f 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -1,5 +1,3 @@
-# pylint: disable=E1101,E1103,W0232
-
"""
manage legacy pickle tests
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 52fead0166dc5..109cc6dd4b886 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable=E1101
-
from collections import OrderedDict
import datetime as dt
from datetime import datetime
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index ff4088f5c4376..7157ecccace00 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -1,5 +1,3 @@
-# pylint: disable=E1101
-
from collections import OrderedDict
from datetime import datetime
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 6aa844dc119c1..959b6febcf1c9 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -1,5 +1,3 @@
-# pylint: disable=E1101
-
from textwrap import dedent
import numpy as np
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 63ee899944e92..69b0f71c4c335 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -1,5 +1,3 @@
-# pylint: disable=E1103
-
import numpy as np
from numpy.random import randn
import pytest
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index 8d8f8a723c97a..59910f8e0b79f 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -1,5 +1,3 @@
-# pylint: disable=E1103
-
from collections import OrderedDict
from datetime import date, datetime
import random
diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py
index 7e8b5b1120bc6..ed51b875d415d 100644
--- a/pandas/tests/reshape/merge/test_multi.py
+++ b/pandas/tests/reshape/merge/test_multi.py
@@ -1,5 +1,3 @@
-# pylint: disable=E1103
-
from collections import OrderedDict
import numpy as np
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index 203e565818f34..bc1d810238688 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable-msg=W0612,E1101
-
import numpy as np
from numpy import nan
import pytest
diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py
index c6e025324bf4a..ca083bbde8428 100644
--- a/pandas/tests/reshape/test_reshape.py
+++ b/pandas/tests/reshape/test_reshape.py
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable-msg=W0612,E1101
-
from collections import OrderedDict
import numpy as np
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py
index edc21282ccafa..63e493c5d7b53 100644
--- a/pandas/tests/series/indexing/test_alter_index.py
+++ b/pandas/tests/series/indexing/test_alter_index.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime
import numpy as np
diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py
index a94b0c863b536..66c357e5292d4 100644
--- a/pandas/tests/series/indexing/test_boolean.py
+++ b/pandas/tests/series/indexing/test_boolean.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
import numpy as np
import pytest
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index 2d2eb6db7c51d..02da8b6cc5eb2 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime, timedelta
import numpy as np
diff --git a/pandas/tests/series/indexing/test_iloc.py b/pandas/tests/series/indexing/test_iloc.py
index 95c940b7518f4..f5352bce291f6 100644
--- a/pandas/tests/series/indexing/test_iloc.py
+++ b/pandas/tests/series/indexing/test_iloc.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
import numpy as np
from pandas.compat import lrange
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 98a83400ea7ab..37e3831fb3f29 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
""" test get/set & misc """
from datetime import timedelta
diff --git a/pandas/tests/series/indexing/test_loc.py b/pandas/tests/series/indexing/test_loc.py
index 8c1709ff016b3..a455c0c502420 100644
--- a/pandas/tests/series/indexing/test_loc.py
+++ b/pandas/tests/series/indexing/test_loc.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
import numpy as np
import pytest
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index 2f1e89b1146ae..46920b6c6c163 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
import numpy as np
import pytest
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index 264297faec1ce..e01a4c4f1842d 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime
import numpy as np
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index 9e44cc32f8f45..b01ded138be7f 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from itertools import product
import operator
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 81642f65f05a4..bc27d5dcad442 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -1,5 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
from collections import OrderedDict
import pydoc
import warnings
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index 162a27db34cb1..217f4697d6140 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from collections import Counter, OrderedDict, defaultdict
from itertools import chain
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index 45e3dffde60f7..43190db63f2c6 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime
import numpy as np
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 121081e041ef4..460966fa8ae39 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from collections import OrderedDict
from datetime import datetime, timedelta
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index a05bd2965fb6e..2d2d6306f5360 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
import calendar
from datetime import date, datetime, time
import locale
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index d044a220965f9..c17039fb11409 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime, timedelta
from importlib import reload
import string
diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py
index 26b868872ee0d..dfc15146307c9 100644
--- a/pandas/tests/series/test_internals.py
+++ b/pandas/tests/series/test_internals.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime
import numpy as np
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index 20cc05f9a5cee..bbfb5db61c9f9 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
import collections
from datetime import datetime
from io import StringIO
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 7b1df6917e77c..94b643900ee0f 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime, timedelta
import numpy as np
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index cb8d5239aa684..9257c7d9977dd 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime, timedelta
import operator
diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py
index 4f462e11e9bb9..1d39cbbe51d23 100644
--- a/pandas/tests/series/test_quantile.py
+++ b/pandas/tests/series/test_quantile.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
import numpy as np
import pytest
diff --git a/pandas/tests/series/test_replace.py b/pandas/tests/series/test_replace.py
index f71877dabf3bd..e37e0b92a7ba4 100644
--- a/pandas/tests/series/test_replace.py
+++ b/pandas/tests/series/test_replace.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
import numpy as np
import pytest
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 9ed7e7e0e75ec..4ee1627831824 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime, timedelta
import numpy as np
diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py
index 68a162ee4c287..deb09a8a9dac3 100644
--- a/pandas/tests/series/test_subclass.py
+++ b/pandas/tests/series/test_subclass.py
@@ -1,5 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index 3a82339375699..dbd1d4bcb8eb0 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -1,6 +1,4 @@
# coding=utf-8
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime, time, timedelta
from io import StringIO
from itertools import product
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index 45df08ccfeb48..f2cc826918789 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -1,5 +1,3 @@
-# pylint: disable-msg=E1101,W0612
-
import operator
import numpy as np
diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py
index 703ae3bde71d1..2c721e1009952 100644
--- a/pandas/tests/sparse/series/test_series.py
+++ b/pandas/tests/sparse/series/test_series.py
@@ -1,5 +1,3 @@
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime
import operator
diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/sparse/test_combine_concat.py
index 97d5aaca82778..51875148daadb 100644
--- a/pandas/tests/sparse/test_combine_concat.py
+++ b/pandas/tests/sparse/test_combine_concat.py
@@ -1,4 +1,3 @@
-# pylint: disable-msg=E1101,W0612
import itertools
import numpy as np
diff --git a/pandas/tests/sparse/test_indexing.py b/pandas/tests/sparse/test_indexing.py
index 6d8c6f13cd32b..fb6cae3ad6deb 100644
--- a/pandas/tests/sparse/test_indexing.py
+++ b/pandas/tests/sparse/test_indexing.py
@@ -1,5 +1,3 @@
-# pylint: disable-msg=E1101,W0612
-
import numpy as np
import pytest
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py
index 53d62a492794e..0c8c33b3db8cf 100644
--- a/pandas/tests/test_expressions.py
+++ b/pandas/tests/test_expressions.py
@@ -14,9 +14,6 @@
from pandas.io.formats.printing import pprint_thing
-# pylint: disable-msg=W0612,E1101
-
-
_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')
_frame2 = DataFrame(randn(100, 4), columns=list('ABCD'), dtype='float64')
_mixed = DataFrame({'A': _frame['A'].copy(),
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 992c743849fbb..799571da8bdc4 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable-msg=W0612,E1101,W0141
import datetime
from io import StringIO
import itertools
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 69c76d7a090c4..22200b72b852d 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
-# pylint: disable-msg=E1101,W0612
-
from datetime import datetime, timedelta
import re
| Removing leftover `pylint` references since we exclusively use `flake8` for style checking. | https://api.github.com/repos/pandas-dev/pandas/pulls/26091 | 2019-04-15T00:31:30Z | 2019-04-15T04:07:48Z | 2019-04-15T04:07:48Z | 2019-04-15T05:19:58Z |
Cleanup of GroupBy Code | diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index b6fc31bb6f015..1c5a69e164c86 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -69,27 +69,6 @@ def _cython_agg_general(self, how, alt=None, numeric_only=True,
how, alt=alt, numeric_only=numeric_only, min_count=min_count)
return self._wrap_agged_blocks(new_items, new_blocks)
- def _wrap_agged_blocks(self, items, blocks):
- obj = self._obj_with_exclusions
-
- new_axes = list(obj._data.axes)
-
- # more kludge
- if self.axis == 0:
- new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
- else:
- new_axes[self.axis] = self.grouper.result_index
-
- # Make sure block manager integrity check passes.
- assert new_axes[0].equals(items)
- new_axes[0] = items
-
- mgr = BlockManager(blocks, new_axes)
-
- new_obj = type(obj)(mgr)
-
- return self._post_process_cython_aggregate(new_obj)
-
_block_agg_axis = 0
def _cython_agg_blocks(self, how, alt=None, numeric_only=True,
@@ -166,19 +145,6 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True,
return new_items, new_blocks
- def _get_data_to_aggregate(self):
- obj = self._obj_with_exclusions
- if self.axis == 0:
- return obj.swapaxes(0, 1)._data, 1
- else:
- return obj._data, self.axis
-
- def _post_process_cython_aggregate(self, obj):
- # undoing kludge from below
- if self.axis == 0:
- obj = obj.swapaxes(0, 1)
- return obj
-
def aggregate(self, arg, *args, **kwargs):
_level = kwargs.pop('_level', None)
| Removing some dead code I came across on review of the module which appears in NDFrameGroupBy but is overridden by its only public child class DataFrameGroupBy
FWIW with the removal of PanelGroupBy the NDFrameGroupBy is an unnecessary layer of inheritance and could probably be merged in with DataFrameGroupBy, though I figure that could be a separate dedicated PR to merge those if so desired | https://api.github.com/repos/pandas-dev/pandas/pulls/26090 | 2019-04-14T23:54:15Z | 2019-04-16T11:56:03Z | 2019-04-16T11:56:03Z | 2020-01-16T00:34:12Z |
Fix Up Typing in GroupBy | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 1953132c826ba..20d4f46348be6 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -218,6 +218,7 @@ Other API Changes
- :meth:`Timestamp.strptime` will now rise a ``NotImplementedError`` (:issue:`25016`)
- Comparing :class:`Timestamp` with unsupported objects now returns :py:obj:`NotImplemented` instead of raising ``TypeError``. This implies that unsupported rich comparisons are delegated to the other object, and are now consistent with Python 3 behavior for ``datetime`` objects (:issue:`24011`)
- Bug in :meth:`DatetimeIndex.snap` which didn't preserving the ``name`` of the input :class:`Index` (:issue:`25575`)
+- The ``arg`` argument in :meth:`pandas.core.groupby.DataFrameGroupBy.agg` has been renamed to ``func`` (:issue:`26089`)
.. _whatsnew_0250.deprecations:
diff --git a/mypy.ini b/mypy.ini
index abec13b76cc21..80c34260acdd1 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -59,15 +59,6 @@ ignore_errors=True
[mypy-pandas.core.config_init]
ignore_errors=True
-[mypy-pandas.core.groupby.generic]
-ignore_errors=True
-
-[mypy-pandas.core.groupby.groupby]
-ignore_errors=True
-
-[mypy-pandas.core.groupby.ops]
-ignore_errors=True
-
[mypy-pandas.core.indexes.base]
ignore_errors=True
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 3519b5c078ee2..01784513704b4 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -144,10 +144,10 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True,
return new_items, new_blocks
- def aggregate(self, arg, *args, **kwargs):
+ def aggregate(self, func, *args, **kwargs):
_level = kwargs.pop('_level', None)
- result, how = self._aggregate(arg, _level=_level, *args, **kwargs)
+ result, how = self._aggregate(func, _level=_level, *args, **kwargs)
if how is None:
return result
@@ -155,14 +155,14 @@ def aggregate(self, arg, *args, **kwargs):
# grouper specific aggregations
if self.grouper.nkeys > 1:
- return self._python_agg_general(arg, *args, **kwargs)
+ return self._python_agg_general(func, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs(
- [arg], _level=_level, _axis=self.axis)
+ [func], _level=_level, _axis=self.axis)
result.columns = Index(
result.columns.levels[0],
@@ -174,7 +174,7 @@ def aggregate(self, arg, *args, **kwargs):
# to SparseDataFrame, so we do it here.
result = SparseDataFrame(result._data)
except Exception:
- result = self._aggregate_generic(arg, *args, **kwargs)
+ result = self._aggregate_generic(func, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index b1936a8f5121f..bd8a8852964e3 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -12,14 +12,15 @@ class providing the base-class of operations.
import datetime
from functools import partial, wraps
import types
-from typing import Optional, Tuple, Type
+from typing import FrozenSet, Optional, Tuple, Type
import warnings
import numpy as np
from pandas._config.config import option_context
-from pandas._libs import Timestamp, groupby as libgroupby
+from pandas._libs import Timestamp
+import pandas._libs.groupby as libgroupby
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
@@ -325,7 +326,7 @@ def _group_selection_context(groupby):
class _GroupBy(PandasObject, SelectionMixin):
_group_selection = None
- _apply_whitelist = frozenset()
+ _apply_whitelist = frozenset() # type: FrozenSet[str]
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 8a6ec285cc79e..82b9d6c1269f9 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -10,7 +10,9 @@
import numpy as np
-from pandas._libs import NaT, groupby as libgroupby, iNaT, lib, reduction
+from pandas._libs import NaT, iNaT, lib
+import pandas._libs.groupby as libgroupby
+import pandas._libs.reduction as reduction
from pandas.compat import lzip
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
| More typing cleanups
Original failures:
```sh
pandas/core/groupby/ops.py:13: error: Module 'pandas._libs' has no attribute 'groupby'
pandas/core/groupby/ops.py:13: error: Module 'pandas._libs' has no attribute 'reduction'
pandas/core/groupby/groupby.py:22: error: Module 'pandas._libs' has no attribute 'groupby'
pandas/core/groupby/groupby.py:329: error: Need type annotation for '_apply_whitelist'
pandas/core/groupby/generic.py:220: error: Incompatible types in assignment (expression has type "Callable[[Arg(Any, 'arg'), VarArg(Any), KwArg(Any)], Any]", base class "SelectionMixin" defined the type as "Callable[[Arg(Any, 'func'), VarArg(Any), KwArg(Any)], Any]")
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/26089 | 2019-04-14T23:49:31Z | 2019-04-17T22:00:11Z | 2019-04-17T22:00:11Z | 2020-01-16T00:34:12Z |
CLN: Misc Python 2 references | diff --git a/LICENSES/SIX b/LICENSES/SIX
deleted file mode 100644
index 6fd669af222d3..0000000000000
--- a/LICENSES/SIX
+++ /dev/null
@@ -1,21 +0,0 @@
-six license (substantial portions used in the python 3 compatibility module)
-===========================================================================
-Copyright (c) 2010-2013 Benjamin Peterson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-#
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-#
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index e5b50ae3c19de..624cad2bfa95a 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -2224,10 +2224,7 @@ cdef class _Period(object):
def __unicode__(self):
"""
- Return a string representation for a particular DataFrame
-
- Invoked by unicode(df) in py2 only. Yields a Unicode String in both
- py2/py3.
+ Return a unicode string representation for a particular DataFrame
"""
base, mult = get_freq_code(self.freq)
formatted = period_format(self.ordinal, base)
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index ff9e92aa05bb6..ddd5d0d224264 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -7,19 +7,7 @@ import calendar
import re
from datetime import date as datetime_date
-
-# Python 2 vs Python 3
-try:
- from thread import allocate_lock as _thread_allocate_lock
-except:
- try:
- from _thread import allocate_lock as _thread_allocate_lock
- except:
- try:
- from dummy_thread import allocate_lock as _thread_allocate_lock
- except:
- from _dummy_thread import allocate_lock as _thread_allocate_lock
-
+from _thread import allocate_lock as _thread_allocate_lock
import pytz
diff --git a/pandas/core/base.py b/pandas/core/base.py
index d4294e59cc845..6842a632223ed 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -699,12 +699,7 @@ def item(self):
"""
Return the first element of the underlying data as a python scalar.
"""
- try:
- return self.values.item()
- except IndexError:
- # copy numpy's message here because Py26 raises an IndexError
- raise ValueError('can only convert an array of size 1 to a '
- 'Python scalar')
+ return self.values.item()
@property
def data(self):
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 78754b13e1705..a2509226bcd8e 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -636,7 +636,6 @@ def astype_nansafe(arr, dtype, copy=True, skipna=False):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, str):
- # in Py3 that's str, in Py2 that's unicode
return lib.astype_unicode(arr.ravel(),
skipna=skipna).reshape(arr.shape)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index fdc99e957e257..98b0555d3d771 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -615,10 +615,7 @@ def _info_repr(self):
def __unicode__(self):
"""
- Return a string representation for a particular DataFrame.
-
- Invoked by unicode(df) in py2 only. Yields a Unicode String in both
- py2/py3.
+ Return a unicode string representation for a particular DataFrame.
"""
buf = StringIO("")
if self._info_repr():
@@ -904,16 +901,10 @@ def itertuples(self, index=True, name="Pandas"):
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
- # Python 3 supports at most 255 arguments to constructor, and
- # things get slow with this many fields in Python 2
+ # Python 3 supports at most 255 arguments to constructor
if name is not None and len(self.columns) + index < 256:
- # `rename` is unsupported in Python 2.6
- try:
- itertuple = collections.namedtuple(name, fields, rename=True)
- return map(itertuple._make, zip(*arrays))
-
- except Exception:
- pass
+ itertuple = collections.namedtuple(name, fields, rename=True)
+ return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 885c499c58dfa..f785988d558da 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2581,11 +2581,8 @@ def to_pickle(self, path, compression='infer',
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
- values for this parameter depend on the version of Python. For
- Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a
- valid value. For Python >= 3.4, 4 is a valid value. A negative
- value for the protocol parameter is equivalent to setting its value
- to HIGHEST_PROTOCOL.
+ values are 0, 1, 2, 3, 4. A negative value for the protocol
+ parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html
.. versionadded:: 0.21.0
@@ -2838,7 +2835,7 @@ def to_latex(self, buf=None, columns=None, col_space=None, header=True,
characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
- defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
+ defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
@@ -2967,7 +2964,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
- defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
+ defaults to 'utf-8'.
compression : str, default 'infer'
Compression mode among the following possible values: {'infer',
'gzip', 'bz2', 'zip', 'xz', None}. If 'infer' and `path_or_buf`
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 98647a6895574..b89dc4c769c26 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -921,10 +921,7 @@ def __deepcopy__(self, memo=None):
def __unicode__(self):
"""
- Return a string representation for this object.
-
- Invoked by unicode(df) in py2 only. Yields a Unicode String in both
- py2/py3.
+ Return a unicode string representation for this object.
"""
klass = self.__class__.__name__
data = self._format_data()
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
index 982645ebd5124..cad094e59b022 100644
--- a/pandas/core/indexes/frozen.py
+++ b/pandas/core/indexes/frozen.py
@@ -151,10 +151,7 @@ def values(self):
def __unicode__(self):
"""
- Return a string representation for this object.
-
- Invoked by unicode(df) in py2 only. Yields a Unicode String in both
- py2/py3.
+ Return a unicode string representation for this object.
"""
prepr = pprint_thing(self, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index da6a917c93ba4..5832d106dae34 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -126,7 +126,7 @@ def ensure_int(value, field):
@classmethod
def from_range(cls, data, name=None, dtype=None, **kwargs):
- """ Create RangeIndex from a range (py3), or xrange (py2) object. """
+ """ Create RangeIndex from a range object. """
if not isinstance(data, range):
raise TypeError(
'{0}(...) must be called with object coercible to a '
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 542b1075313bf..4181f6c8ca3ab 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -345,10 +345,7 @@ def _compare_constructor(self, other, func):
def __unicode__(self):
"""
- Return a string representation for a particular Panel.
-
- Invoked by unicode(df) in py2 only.
- Yields a Unicode String in both py2/py3.
+ Return a unicode string representation for a particular Panel.
"""
class_name = str(self.__class__)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 8a22765d85aec..65050db8c4380 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1374,10 +1374,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False):
def __unicode__(self):
"""
- Return a string representation for a particular DataFrame.
-
- Invoked by unicode(df) in py2 only. Yields a Unicode String in both
- py2/py3.
+ Return a unicode string representation for a particular DataFrame.
"""
buf = StringIO("")
width, height = get_terminal_size()
diff --git a/pandas/io/clipboard/clipboards.py b/pandas/io/clipboard/clipboards.py
index b7bed084d72f1..3c8abe74912fd 100644
--- a/pandas/io/clipboard/clipboards.py
+++ b/pandas/io/clipboard/clipboards.py
@@ -33,11 +33,7 @@ def copy_gtk(text):
def paste_gtk():
clipboardContents = gtk.Clipboard().wait_for_text()
- # for python 2, returns None if the clipboard is blank.
- if clipboardContents is None:
- return ''
- else:
- return clipboardContents
+ return clipboardContents
return copy_gtk, paste_gtk
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 0e6366705c98a..bee66fcbfaa82 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -175,7 +175,7 @@ def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
Returns
-------
- result - unicode object on py2, str on py3. Always Unicode.
+ result - unicode str
"""
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 5d73b377838b6..56ba842fa26a2 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1326,12 +1326,6 @@ def _validate_usecols_arg(usecols):
usecols = set(usecols)
- if usecols_dtype == "unicode":
- # see gh-13253
- #
- # Python 2.x compatibility
- usecols = {col.encode("utf-8") for col in usecols}
-
return usecols, usecols_dtype
return usecols, None
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 45574262354f5..4b93f0e12e32a 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -27,12 +27,7 @@
class JSONDtype(ExtensionDtype):
type = abc.Mapping
name = 'json'
-
- try:
- na_value = UserDict()
- except AttributeError:
- # source compatibility with Py2.
- na_value = {}
+ na_value = UserDict()
@classmethod
def construct_array_type(cls):
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index a8dcabbb824d5..e9f96390821a6 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -113,10 +113,7 @@ def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
result = op(s, other)
expected = s.combine(other, op)
- if op_name == '__rdiv__':
- # combine is not giving the correct result for this case
- pytest.skip("skipping reverse div in python 2")
- elif op_name in ('__rtruediv__', '__truediv__', '__div__'):
+ if op_name in ('__rtruediv__', '__truediv__', '__div__'):
expected = expected.astype(float)
if op_name == '__rtruediv__':
# TODO reverse operators result in object dtype
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 116a12c1c520b..2a9246a5a9554 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -6,7 +6,6 @@
import math
import operator
import re
-import sys
import numpy as np
import pytest
@@ -414,9 +413,6 @@ def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, utc,
index = index.tz_localize(tz_naive_fixture)
dtype = index.dtype
- # TODO(GH-24559): Remove the sys.modules and warnings
- # not sure what this is from. It's Py2 only.
- modules = [sys.modules['pandas.core.indexes.base']]
if (tz_naive_fixture and attr == "asi8" and
str(tz_naive_fixture) not in ('UTC', 'tzutc()', 'UTC+00:00')):
ex_warn = FutureWarning
@@ -425,8 +421,7 @@ def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, utc,
# stacklevel is checked elsewhere. We don't do it here since
# Index will have an frame, throwing off the expected.
- with tm.assert_produces_warning(ex_warn, check_stacklevel=False,
- clear=modules):
+ with tm.assert_produces_warning(ex_warn, check_stacklevel=False):
result = klass(arg, tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index fbd71dfa8262b..c0b41691a67ef 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -49,8 +49,7 @@ def test_to_csv_defualt_encoding(self):
df = DataFrame({'col': ["AAAAA", "ÄÄÄÄÄ", "ßßßßß", "聞聞聞聞聞"]})
with tm.ensure_clean('test.csv') as path:
- # the default to_csv encoding in Python 2 is ascii, and that in
- # Python 3 is uft-8.
+ # the default to_csv encoding is uft-8.
df.to_csv(path)
tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py
index 22157a742bacc..0b5fe1623699c 100644
--- a/pandas/tests/io/parser/test_c_parser_only.py
+++ b/pandas/tests/io/parser/test_c_parser_only.py
@@ -416,7 +416,7 @@ def test_read_nrows_large(c_parser_only):
def test_float_precision_round_trip_with_text(c_parser_only):
- # see gh-15140 - This should not segfault on Python 2.7+
+ # see gh-15140
parser = c_parser_only
df = parser.read_csv(StringIO("a"), header=None,
float_precision="round_trip")
diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index a3fb35f9f01f2..c0e19f07c148d 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -100,11 +100,6 @@ def test_series_compression_defaults_to_infer(
def test_compression_warning(compression_only):
# Assert that passing a file object to to_csv while explicitly specifying a
# compression protocol triggers a RuntimeWarning, as per GH21227.
- # Note that pytest has an issue that causes assert_produces_warning to fail
- # in Python 2 if the warning has occurred in previous tests
- # (see https://git.io/fNEBm & https://git.io/fNEBC). Hence, should this
- # test fail in just Python 2 builds, it likely indicates that other tests
- # are producing RuntimeWarnings, thereby triggering the pytest bug.
df = pd.DataFrame(100 * [[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
columns=['X', 'Y', 'Z'])
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index b4e942fc086a9..872267e86d95f 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -67,15 +67,7 @@ def compare_element(result, expected, typ, version=None):
def compare(data, vf, version):
- # py3 compat when reading py2 pickle
- try:
- data = pd.read_pickle(vf)
- except (ValueError) as e:
- if 'unsupported pickle protocol:' in str(e):
- # trying to read a py3 pickle in py2
- return
- else:
- raise
+ data = pd.read_pickle(vf)
m = globals()
for typ, dv in data.items():
diff --git a/scripts/merge-pr.py b/scripts/merge-pr.py
index b9233abe56149..5c665faac5976 100755
--- a/scripts/merge-pr.py
+++ b/scripts/merge-pr.py
@@ -30,8 +30,6 @@
import sys
import textwrap
-from six.moves import input
-
PANDAS_HOME = '.'
PROJECT_NAME = 'pandas'
print("PANDAS_HOME = " + PANDAS_HOME)
| - [x] xref https://github.com/pandas-dev/pandas/issues/25725#issuecomment-482460520
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
The notable change here is removing the SIX license since I think we've removed any borrowed compatibility code. | https://api.github.com/repos/pandas-dev/pandas/pulls/26085 | 2019-04-14T18:42:26Z | 2019-04-15T12:12:58Z | 2019-04-15T12:12:58Z | 2019-04-15T17:03:33Z |
BUG: positional getitem indexing with list on Series with duplicate integer index fails | diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 3cdae198cad31..03916ead17dc3 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -243,6 +243,14 @@ def test_dups_fancy_indexing2(self):
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
+ @pytest.mark.parametrize('case', [lambda s: s, lambda s: s.loc])
+ def test_duplicate_int_indexing(self, case):
+ # GH 17347
+ s = pd.Series(range(3), index=[1, 1, 3])
+ expected = s[1]
+ result = case(s)[[1]]
+ tm.assert_series_equal(result, expected)
+
def test_indexing_mixed_frame_bug(self):
# GH3492
| Create test case for issue GH17347
- [x] closes #17347
- [x] tests added / passed | https://api.github.com/repos/pandas-dev/pandas/pulls/26083 | 2019-04-14T10:46:53Z | 2019-04-16T11:59:05Z | 2019-04-16T11:59:04Z | 2019-12-06T15:06:35Z |
CLN: collect ordered fixtures in pandas.conftest | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 462ee10f11975..4088697fa6f5f 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -126,6 +126,12 @@ def observed(request):
return request.param
+@pytest.fixture(params=[True, False, None])
+def ordered_fixture(request):
+ """Boolean 'ordered' parameter for Categorical."""
+ return request.param
+
+
_all_arithmetic_operators = ['__add__', '__radd__',
'__sub__', '__rsub__',
'__mul__', '__rmul__',
diff --git a/pandas/tests/arrays/categorical/conftest.py b/pandas/tests/arrays/categorical/conftest.py
index 274389d484995..640f5dfd63887 100644
--- a/pandas/tests/arrays/categorical/conftest.py
+++ b/pandas/tests/arrays/categorical/conftest.py
@@ -5,9 +5,3 @@
def allow_fill(request):
"""Boolean 'allow_fill' parameter for Categorical.take"""
return request.param
-
-
-@pytest.fixture(params=[True, False])
-def ordered(request):
- """Boolean 'ordered' parameter for Categorical."""
- return request.param
diff --git a/pandas/tests/arrays/categorical/test_algos.py b/pandas/tests/arrays/categorical/test_algos.py
index 50f643756c5dc..09643c06aa56e 100644
--- a/pandas/tests/arrays/categorical/test_algos.py
+++ b/pandas/tests/arrays/categorical/test_algos.py
@@ -96,20 +96,20 @@ def test_take_empty(self, allow_fill):
with pytest.raises(IndexError):
cat.take([0], allow_fill=allow_fill)
- def test_positional_take(self, ordered):
+ def test_positional_take(self, ordered_fixture):
cat = pd.Categorical(['a', 'a', 'b', 'b'], categories=['b', 'a'],
- ordered=ordered)
+ ordered=ordered_fixture)
result = cat.take([0, 1, 2], allow_fill=False)
expected = pd.Categorical(['a', 'a', 'b'], categories=cat.categories,
- ordered=ordered)
+ ordered=ordered_fixture)
tm.assert_categorical_equal(result, expected)
- def test_positional_take_unobserved(self, ordered):
+ def test_positional_take_unobserved(self, ordered_fixture):
cat = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'],
- ordered=ordered)
+ ordered=ordered_fixture)
result = cat.take([1, 0], allow_fill=False)
expected = pd.Categorical(['b', 'a'], categories=cat.categories,
- ordered=ordered)
+ ordered=ordered_fixture)
tm.assert_categorical_equal(result, expected)
def test_take_allow_fill(self):
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 4366f610871ff..951a87ab7e962 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -20,11 +20,6 @@
import pandas.util.testing as tm
-@pytest.fixture(params=[True, False, None])
-def ordered(request):
- return request.param
-
-
class Base(object):
def setup_method(self, method):
@@ -659,10 +654,10 @@ class TestCategoricalDtypeParametrized(object):
['a', 'b', 10, 2, 1.3, True],
[True, False],
pd.date_range('2017', periods=4)])
- def test_basic(self, categories, ordered):
- c1 = CategoricalDtype(categories, ordered=ordered)
+ def test_basic(self, categories, ordered_fixture):
+ c1 = CategoricalDtype(categories, ordered=ordered_fixture)
tm.assert_index_equal(c1.categories, pd.Index(categories))
- assert c1.ordered is ordered
+ assert c1.ordered is ordered_fixture
def test_order_matters(self):
categories = ['a', 'b']
@@ -683,7 +678,7 @@ def test_categories(self):
tm.assert_index_equal(result.categories, pd.Index(['a', 'b', 'c']))
assert result.ordered is None
- def test_equal_but_different(self, ordered):
+ def test_equal_but_different(self, ordered_fixture):
c1 = CategoricalDtype([1, 2, 3])
c2 = CategoricalDtype([1., 2., 3.])
assert c1 is not c2
@@ -748,8 +743,9 @@ def test_categorical_equality(self, ordered1, ordered2):
@pytest.mark.parametrize('categories', [list('abc'), None])
@pytest.mark.parametrize('other', ['category', 'not a category'])
- def test_categorical_equality_strings(self, categories, ordered, other):
- c1 = CategoricalDtype(categories, ordered)
+ def test_categorical_equality_strings(self, categories, ordered_fixture,
+ other):
+ c1 = CategoricalDtype(categories, ordered_fixture)
result = c1 == other
expected = other == 'category'
assert result is expected
@@ -793,12 +789,12 @@ def test_from_categorical_dtype_both(self):
c1, categories=[1, 2], ordered=False)
assert result == CategoricalDtype([1, 2], ordered=False)
- def test_str_vs_repr(self, ordered):
- c1 = CategoricalDtype(['a', 'b'], ordered=ordered)
+ def test_str_vs_repr(self, ordered_fixture):
+ c1 = CategoricalDtype(['a', 'b'], ordered=ordered_fixture)
assert str(c1) == 'category'
# Py2 will have unicode prefixes
pat = r"CategoricalDtype\(categories=\[.*\], ordered={ordered}\)"
- assert re.match(pat.format(ordered=ordered), repr(c1))
+ assert re.match(pat.format(ordered=ordered_fixture), repr(c1))
def test_categorical_categories(self):
# GH17884
@@ -810,8 +806,8 @@ def test_categorical_categories(self):
@pytest.mark.parametrize('new_categories', [
list('abc'), list('cba'), list('wxyz'), None])
@pytest.mark.parametrize('new_ordered', [True, False, None])
- def test_update_dtype(self, ordered, new_categories, new_ordered):
- dtype = CategoricalDtype(list('abc'), ordered)
+ def test_update_dtype(self, ordered_fixture, new_categories, new_ordered):
+ dtype = CategoricalDtype(list('abc'), ordered_fixture)
new_dtype = CategoricalDtype(new_categories, new_ordered)
expected_categories = new_dtype.categories
@@ -826,8 +822,8 @@ def test_update_dtype(self, ordered, new_categories, new_ordered):
tm.assert_index_equal(result.categories, expected_categories)
assert result.ordered is expected_ordered
- def test_update_dtype_string(self, ordered):
- dtype = CategoricalDtype(list('abc'), ordered)
+ def test_update_dtype_string(self, ordered_fixture):
+ dtype = CategoricalDtype(list('abc'), ordered_fixture)
expected_categories = dtype.categories
expected_ordered = dtype.ordered
result = dtype.update_dtype('category')
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index b01ded138be7f..a1ff3a3c9d848 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1406,14 +1406,14 @@ def test_value_counts_with_nan(self):
pytest.param("datetime64[D]",
marks=pytest.mark.xfail(reason="GH#7996"))]
)
- @pytest.mark.parametrize("is_ordered", [True, False])
- def test_drop_duplicates_categorical_non_bool(self, dtype, is_ordered):
+ def test_drop_duplicates_categorical_non_bool(self, dtype,
+ ordered_fixture):
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
# Test case 1
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
tc1 = Series(Categorical(input1, categories=cat_array,
- ordered=is_ordered))
+ ordered=ordered_fixture))
expected = Series([False, False, False, True])
tm.assert_series_equal(tc1.duplicated(), expected)
@@ -1440,7 +1440,7 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, is_ordered):
# Test case 2
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
tc2 = Series(Categorical(
- input2, categories=cat_array, ordered=is_ordered)
+ input2, categories=cat_array, ordered=ordered_fixture)
)
expected = Series([False, False, False, False, True, True, False])
@@ -1465,10 +1465,10 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, is_ordered):
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
- @pytest.mark.parametrize("is_ordered", [True, False])
- def test_drop_duplicates_categorical_bool(self, is_ordered):
+ def test_drop_duplicates_categorical_bool(self, ordered_fixture):
tc = Series(Categorical([True, False, True, False],
- categories=[True, False], ordered=is_ordered))
+ categories=[True, False],
+ ordered=ordered_fixture))
expected = Series([False, False, True, True])
tm.assert_series_equal(tc.duplicated(), expected)
| Currently, ``Categorical .ordered``, ``CategoricaDtype.ordered`` and similar is tested using different fixtures + parametrization in some places. This PR collects the ordered fixtures/parametrizations and places a common ``ordered`` fixture in pandas.conftest. | https://api.github.com/repos/pandas-dev/pandas/pulls/26082 | 2019-04-14T09:49:58Z | 2019-04-16T11:47:54Z | 2019-04-16T11:47:54Z | 2019-05-08T04:47:02Z |
CLN: remove compat.iterkeys | diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 815f39ef6ebe6..e92c053a282bc 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -6,7 +6,7 @@
Key items to import for compatible code:
* lists: lrange(), lmap(), lzip(), lfilter()
-* iterable method compatibility: iterkeys, itervalues
+* iterable method compatibility: itervalues
* Uses the original method if available, otherwise uses items, keys, values.
* add_metaclass(metaclass) - class decorator that recreates class with with the
given metaclass instead (and avoids intermediary class creation)
@@ -41,10 +41,6 @@ def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
-def iterkeys(obj, **kw):
- return iter(obj.keys(**kw))
-
-
def itervalues(obj, **kw):
return iter(obj.values(**kw))
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 40f090f661c2f..984be28d11935 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -401,7 +401,7 @@ def nested_renaming_depr(level=4):
else:
# deprecation of renaming keys
# GH 15931
- keys = list(compat.iterkeys(arg))
+ keys = list(arg.keys())
if (isinstance(obj, ABCDataFrame) and
len(obj.columns.intersection(keys)) != len(keys)):
nested_renaming_depr()
@@ -437,7 +437,7 @@ def _agg(arg, func):
return result
# set the final keys
- keys = list(compat.iterkeys(arg))
+ keys = list(arg.keys())
result = OrderedDict()
# nested renamer
@@ -449,7 +449,7 @@ def _agg(arg, func):
result, results = OrderedDict(), result
for r in results:
result.update(r)
- keys = list(compat.iterkeys(result))
+ keys = list(result.keys())
else:
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 5b186afdfdb3e..f582c3c7de284 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -30,8 +30,8 @@
ensure_object, is_categorical_dtype, is_datetime64_dtype)
from pandas import (
- Categorical, DatetimeIndex, NaT, Timestamp, compat, concat, isna,
- to_datetime, to_timedelta)
+ Categorical, DatetimeIndex, NaT, Timestamp, concat, isna, to_datetime,
+ to_timedelta)
from pandas.core.base import StringMixin
from pandas.core.frame import DataFrame
from pandas.core.series import Series
@@ -1700,7 +1700,7 @@ def _do_convert_categoricals(self, data, value_label_dict, lbllist,
"""
Converts categorical columns to Categorical type.
"""
- value_labels = list(compat.iterkeys(value_label_dict))
+ value_labels = list(value_label_dict.keys())
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 6c465a9317c0d..ff8763ef3f9db 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -11,8 +11,6 @@
import numpy as np
import pytest
-from pandas.compat import iterkeys
-
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
@@ -755,8 +753,7 @@ def test_missing_value_generator(self):
def test_missing_value_conversion(self, file):
columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
smv = StataMissingValue(101)
- keys = [key for key in iterkeys(smv.MISSING_VALUES)]
- keys.sort()
+ keys = sorted(smv.MISSING_VALUES.keys())
data = []
for i in range(27):
row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py
index 148b3fba45375..3777b585ea92e 100644
--- a/pandas/tests/test_compat.py
+++ b/pandas/tests/test_compat.py
@@ -5,8 +5,7 @@
import builtins
import re
-from pandas.compat import (
- iterkeys, itervalues, lfilter, lmap, lrange, lzip, re_type)
+from pandas.compat import itervalues, lfilter, lmap, lrange, lzip, re_type
class TestBuiltinIterators(object):
@@ -53,7 +52,6 @@ def test_lzip(self):
def test_dict_iterators(self):
assert next(itervalues({1: 2})) == 2
- assert next(iterkeys({1: 2})) == 1
def test_re_type():
| - [x] xref #25725
Removes ``compat.iterkeys``. | https://api.github.com/repos/pandas-dev/pandas/pulls/26081 | 2019-04-14T09:33:37Z | 2019-04-15T12:13:34Z | 2019-04-15T12:13:34Z | 2019-04-15T22:22:41Z |
ERR: better error message on too large excel sheet | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 276812a564e03..1a0df4789f4c7 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -466,6 +466,7 @@ I/O
- Fixed memory leak in :meth:`DataFrame.to_json` when dealing with numeric data (:issue:`24889`)
- Bug in :func:`read_json` where date strings with ``Z`` were not converted to a UTC timezone (:issue:`26168`)
- Added ``cache_dates=True`` parameter to :meth:`read_csv`, which allows to cache unique dates when they are parsed (:issue:`25990`)
+- :meth:`DataFrame.to_excel` now raises a ``ValueError`` when the caller's dimensions exceed the limitations of Excel (:issue:`26051`)
Plotting
^^^^^^^^
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index fd6e3304ec4ef..4db00e34b39e2 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -341,6 +341,9 @@ class ExcelFormatter:
This is only called for body cells.
"""
+ max_rows = 2**20
+ max_cols = 2**14
+
def __init__(self, df, na_rep='', float_format=None, cols=None,
header=True, index=True, index_label=None, merge_cells=False,
inf_rep='inf', style_converter=None):
@@ -648,6 +651,13 @@ def write(self, writer, sheet_name='Sheet1', startrow=0,
from pandas.io.excel import ExcelWriter
from pandas.io.common import _stringify_path
+ num_rows, num_cols = self.df.shape
+ if num_rows > self.max_rows or num_cols > self.max_cols:
+ raise ValueError("This sheet is too large! Your sheet size is: " +
+ "{}, {} ".format(num_rows, num_cols) +
+ "Max sheet size is: {}, {}".
+ format(self.max_rows, self.max_cols))
+
if isinstance(writer, ExcelWriter):
need_save = False
else:
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 100de227aa97c..7fe8e1d18838f 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -1185,6 +1185,24 @@ class and any subclasses, on account of the `autouse=True`
class TestExcelWriter(_WriterBase):
# Base class for test cases to run with different Excel writers.
+ def test_excel_sheet_size(self):
+
+ # GH 26080
+ breaking_row_count = 2**20 + 1
+ breaking_col_count = 2**14 + 1
+ # purposely using two arrays to prevent memory issues while testing
+ row_arr = np.zeros(shape=(breaking_row_count, 1))
+ col_arr = np.zeros(shape=(1, breaking_col_count))
+ row_df = pd.DataFrame(row_arr)
+ col_df = pd.DataFrame(col_arr)
+
+ msg = "sheet is too large"
+ with pytest.raises(ValueError, match=msg):
+ row_df.to_excel(self.path)
+
+ with pytest.raises(ValueError, match=msg):
+ col_df.to_excel(self.path)
+
def test_excel_sheet_by_name_raise(self, *_):
import xlrd
| - [ ] closes #26051
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/26080 | 2019-04-14T04:21:39Z | 2019-06-01T14:04:15Z | 2019-06-01T14:04:15Z | 2019-06-01T14:04:21Z |
CLN: remove compat.iteritems | diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 54a7afd90a09a..549359259bbd4 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -6,7 +6,7 @@
Key items to import for compatible code:
* lists: lrange(), lmap(), lzip(), lfilter()
-* iterable method compatibility: iteritems, iterkeys, itervalues
+* iterable method compatibility: iterkeys, itervalues
* Uses the original method if available, otherwise uses items, keys, values.
* add_metaclass(metaclass) - class decorator that recreates class with with the
given metaclass instead (and avoids intermediary class creation)
@@ -45,10 +45,6 @@ def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
-def iteritems(obj, **kw):
- return iter(obj.items(**kw))
-
-
def iterkeys(obj, **kw):
return iter(obj.keys(**kw))
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index cd49946652566..995ed59ddabb1 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -9,7 +9,6 @@
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib
-import pandas.compat as compat
from pandas.compat import lzip
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
@@ -1317,7 +1316,7 @@ def __setstate__(self, state):
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
- for k, v in compat.iteritems(state):
+ for k, v in state.items():
setattr(self, k, v)
@property
diff --git a/pandas/core/base.py b/pandas/core/base.py
index d4294e59cc845..40f090f661c2f 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -364,7 +364,7 @@ def nested_renaming_depr(level=4):
# be list-likes
if any(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = OrderedDict()
- for k, v in compat.iteritems(arg):
+ for k, v in arg.items():
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
@@ -432,7 +432,7 @@ def _agg(arg, func):
return an OrderedDict
"""
result = OrderedDict()
- for fname, agg_how in compat.iteritems(arg):
+ for fname, agg_how in arg.items():
result[fname] = func(fname, agg_how)
return result
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 3cb23e9ee921d..e62a2119df820 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -14,7 +14,7 @@
import numpy as np
from pandas._libs import lib, tslibs
-from pandas.compat import PY36, iteritems
+from pandas.compat import PY36
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
@@ -362,7 +362,7 @@ def dict_compat(d):
dict
"""
- return {maybe_box_datetimelike(key): value for key, value in iteritems(d)}
+ return {maybe_box_datetimelike(key): value for key, value in d.items()}
def standardize_mapping(into):
diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py
index 71b57ec4ecd1e..a7524161dd80e 100644
--- a/pandas/core/computation/align.py
+++ b/pandas/core/computation/align.py
@@ -9,7 +9,6 @@
from pandas.errors import PerformanceWarning
import pandas as pd
-from pandas import compat
import pandas.core.common as com
from pandas.core.computation.common import _result_type_many
@@ -30,7 +29,7 @@ def _align_core_single_unary_op(term):
def _zip_axes_from_type(typ, new_axes):
axes = {ax_name: new_axes[ax_ind]
- for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES)}
+ for ax_ind, ax_name in typ._AXIS_NAMES.items()}
return axes
@@ -84,7 +83,7 @@ def _align_core(terms):
if not axes[ax].is_(itm):
axes[ax] = axes[ax].join(itm, how='outer')
- for i, ndim in compat.iteritems(ndims):
+ for i, ndim in ndims.items():
for axis, items in zip(range(ndim), axes):
ti = terms[i].value
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index e61dbd07dac5d..245cd9c403080 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -10,7 +10,7 @@
import numpy as np
-from pandas.compat import iteritems, lmap
+from pandas.compat import lmap
import pandas as pd
from pandas.core import common as com
@@ -300,7 +300,7 @@ def f(self, node, *args, **kwargs):
def add_ops(op_classes):
"""Decorator to add default implementation of ops."""
def f(cls):
- for op_attr_name, op_class in iteritems(op_classes):
+ for op_attr_name, op_class in op_classes.items():
ops = getattr(cls, '{name}_ops'.format(name=op_attr_name))
ops_map = getattr(cls, '{name}_op_nodes_map'.format(
name=op_attr_name))
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index fdc99e957e257..501bc7811a385 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -33,7 +33,6 @@
from pandas.util._validators import (validate_bool_kwarg,
validate_axis_style_args)
-from pandas import compat
from pandas.compat import PY36, lmap, lzip, raise_with_traceback
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.cast import (
@@ -1275,9 +1274,9 @@ def to_dict(self, orient='dict', into=dict):
into_c = com.standardize_mapping(into)
if orient.lower().startswith('d'):
return into_c(
- (k, v.to_dict(into)) for k, v in compat.iteritems(self))
+ (k, v.to_dict(into)) for k, v in self.items())
elif orient.lower().startswith('l'):
- return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
+ return into_c((k, v.tolist()) for k, v in self.items())
elif orient.lower().startswith('sp'):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
@@ -1287,14 +1286,14 @@ def to_dict(self, orient='dict', into=dict):
])))
elif orient.lower().startswith('s'):
return into_c((k, com.maybe_box_datetimelike(v))
- for k, v in compat.iteritems(self))
+ for k, v in self.items())
elif orient.lower().startswith('r'):
columns = self.columns.tolist()
rows = (dict(zip(columns, row))
for row in self.itertuples(index=False, name=None))
return [
into_c((k, com.maybe_box_datetimelike(v))
- for k, v in compat.iteritems(row))
+ for k, v in row.items())
for row in rows]
elif orient.lower().startswith('i'):
if not self.index.is_unique:
@@ -1480,7 +1479,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None,
else:
arrays = []
arr_columns = []
- for k, v in compat.iteritems(data):
+ for k, v in data.items():
if k in columns:
arr_columns.append(k)
arrays.append(v)
@@ -2430,7 +2429,7 @@ def _sizeof_fmt(num, size_qualifier):
counts = self.get_dtype_counts()
dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k
- in sorted(compat.iteritems(counts))]
+ in sorted(counts.items())]
lines.append('dtypes: {types}'.format(types=', '.join(dtypes)))
if memory_usage is None:
@@ -8051,8 +8050,8 @@ def isin(self, values):
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
- for index, s in compat.iteritems(data):
- for col, v in compat.iteritems(s):
+ for index, s in data.items():
+ for col, v in s.items():
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 885c499c58dfa..e17e3fd5d3e92 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -15,7 +15,6 @@
from pandas._config import config
from pandas._libs import Timestamp, iNaT, properties
-import pandas.compat as compat
from pandas.compat import lrange, lzip, set_function_name, to_str
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
@@ -6154,7 +6153,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
'by column')
result = self if inplace else self.copy()
- for k, v in compat.iteritems(value):
+ for k, v in value.items():
if k not in result:
continue
obj = result[k]
@@ -6512,7 +6511,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
to_replace = regex
regex = True
- items = list(compat.iteritems(to_replace))
+ items = list(to_replace.items())
keys, values = lzip(*items) or ([], [])
are_mappings = [is_dict_like(v) for v in values]
@@ -6551,7 +6550,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
- for c, src in compat.iteritems(to_replace):
+ for c, src in to_replace.items():
if c in value and c in self:
# object conversion is handled in
# series.replace which is called recursivelly
@@ -6563,7 +6562,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
# {'A': NA} -> 0
elif not is_list_like(value):
- keys = [(k, src) for k, src in compat.iteritems(to_replace)
+ keys = [(k, src) for k, src in to_replace.items()
if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
@@ -6610,7 +6609,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
- for k, v in compat.iteritems(value):
+ for k, v in value.items():
if k in self:
new_data = new_data.replace(to_replace=to_replace,
value=v, filter=[k],
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 92cb4db2ac868..b1936a8f5121f 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -20,7 +20,6 @@ class providing the base-class of operations.
from pandas._config.config import option_context
from pandas._libs import Timestamp, groupby as libgroupby
-import pandas.compat as compat
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
@@ -876,7 +875,7 @@ def _python_agg_general(self, func, *args, **kwargs):
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
- for name, result in compat.iteritems(output):
+ for name, result in output.items():
# since we are masking, make sure that we have a float object
values = result
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 98647a6895574..4848170eaea43 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -11,7 +11,6 @@
from pandas._libs.lib import is_datetime_array
from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp
from pandas._libs.tslibs.timezones import tz_compare
-import pandas.compat as compat
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
@@ -535,7 +534,7 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs):
# we actually set this value too.
result._index_data = values
result.name = name
- for k, v in compat.iteritems(kwargs):
+ for k, v in kwargs.items():
setattr(result, k, v)
return result._reset_identity()
@@ -1754,7 +1753,7 @@ def __setstate__(self, state):
if isinstance(state, dict):
self._data = state.pop('data')
- for k, v in compat.iteritems(state):
+ for k, v in state.items():
setattr(self, k, v)
elif isinstance(state, tuple):
@@ -4486,7 +4485,7 @@ def groupby(self, values):
result = values._reverse_indexer()
# map to the label
- result = {k: self.take(v) for k, v in compat.iteritems(result)}
+ result = {k: self.take(v) for k, v in result.items()}
return result
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 4493136e3e61e..930b2a4a5161f 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -238,7 +238,7 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs):
values = cls._create_categorical(values, dtype=dtype)
result._data = values
result.name = name
- for k, v in compat.iteritems(kwargs):
+ for k, v in kwargs.items():
setattr(result, k, v)
result._reset_identity()
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index da6a917c93ba4..52899ea311e9b 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -156,7 +156,7 @@ def _simple_new(cls, start, stop=None, step=None, name=None,
result._stop = stop or 0
result._step = step or 1
result.name = name
- for k, v in compat.iteritems(kwargs):
+ for k, v in kwargs.items():
setattr(result, k, v)
result._reset_identity()
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index cfc42d26c5471..6c08cacb551df 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -103,7 +103,7 @@ def __call__(self, alt):
@functools.wraps(alt)
def f(values, axis=None, skipna=True, **kwds):
if len(self.kwargs) > 0:
- for k, v in compat.iteritems(self.kwargs):
+ for k, v in self.kwargs.items():
if k not in kwds:
kwds[k] = v
try:
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 542b1075313bf..3fb14c5d2ad9a 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -200,13 +200,13 @@ def _init_dict(self, data, axes, dtype=None):
if haxis is not None:
haxis = ensure_index(haxis)
data = OrderedDict((k, v)
- for k, v in compat.iteritems(data)
+ for k, v in data.items()
if k in haxis)
else:
keys = com.dict_keys_to_ordered_list(data)
haxis = Index(keys)
- for k, v in compat.iteritems(data):
+ for k, v in data.items():
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
@@ -266,8 +266,8 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None):
orient = orient.lower()
if orient == 'minor':
new_data = defaultdict(OrderedDict)
- for col, df in compat.iteritems(data):
- for item, s in compat.iteritems(df):
+ for col, df in data.items():
+ for item, s in df.items():
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
@@ -1500,7 +1500,7 @@ def _homogenize_dict(self, frames, intersect=True, dtype=None):
result = OrderedDict()
adj_frames = OrderedDict()
- for k, v in compat.iteritems(frames):
+ for k, v in frames.items():
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
@@ -1512,7 +1512,7 @@ def _homogenize_dict(self, frames, intersect=True, dtype=None):
reindex_dict = {self._AXIS_SLICEMAP[a]: axes_dict[a] for a in axes}
reindex_dict['copy'] = False
- for key, frame in compat.iteritems(adj_frames):
+ for key, frame in adj_frames.items():
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index 99224f6fb7c5b..65b28a7ecc849 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -10,7 +10,6 @@
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import notna
-from pandas import compat
from pandas.core.arrays import Categorical
from pandas.core.frame import _shared_docs
from pandas.core.indexes.base import Index
@@ -173,7 +172,7 @@ def lreshape(data, groups, dropna=True, label=None):
for c in pivot_cols:
mask &= notna(mdata[c])
if not mask.all():
- mdata = {k: v[mask] for k, v in compat.iteritems(mdata)}
+ mdata = {k: v[mask] for k, v in mdata.items()}
return data._constructor(mdata, columns=id_cols + pivot_cols)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 8a22765d85aec..716ccb0201fea 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -12,7 +12,6 @@
from pandas._config import get_option
from pandas._libs import iNaT, index as libindex, lib, tslibs
-import pandas.compat as compat
from pandas.compat import PY36
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, deprecate
@@ -291,7 +290,7 @@ def _init_dict(self, data, index=None, dtype=None):
# Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
# raises KeyError), so we iterate the entire dict, and align
if data:
- keys, values = zip(*compat.iteritems(data))
+ keys, values = zip(*data.items())
values = list(values)
elif index is not None:
# fastpath for Series(data=None). Just use broadcasting a scalar
@@ -1523,7 +1522,7 @@ def to_dict(self, into=dict):
"""
# GH16122
into_c = com.standardize_mapping(into)
- return into_c(compat.iteritems(self))
+ return into_c(self.items())
def to_frame(self, name=None):
"""
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index 08729442e701f..0ae371d8c8c77 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -7,7 +7,6 @@
import numpy as np
from pandas._libs.sparse import BlockIndex, get_blocks
-import pandas.compat as compat
from pandas.compat import lmap
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
@@ -145,7 +144,7 @@ def _init_dict(self, data, index, columns, dtype=None):
# pre-filter out columns if we passed it
if columns is not None:
columns = ensure_index(columns)
- data = {k: v for k, v in compat.iteritems(data) if k in columns}
+ data = {k: v for k, v in data.items() if k in columns}
else:
keys = com.dict_keys_to_ordered_list(data)
columns = Index(keys)
@@ -158,7 +157,7 @@ def sp_maker(x):
fill_value=self._default_fill_value,
copy=True, dtype=dtype)
sdict = {}
- for k, v in compat.iteritems(data):
+ for k, v in data.items():
if isinstance(v, Series):
# Force alignment, no copy necessary
if not v.index.equals(index):
@@ -322,7 +321,7 @@ def _unpickle_sparse_frame_compat(self, state):
index = idx
series_dict = DataFrame()
- for col, (sp_index, sp_values) in compat.iteritems(series):
+ for col, (sp_index, sp_values) in series.items():
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
@@ -338,7 +337,7 @@ def to_dense(self):
-------
df : DataFrame
"""
- data = {k: v.to_dense() for k, v in compat.iteritems(self)}
+ data = {k: v.to_dense() for k, v in self.items()}
return DataFrame(data, index=self.index, columns=self.columns)
def _apply_columns(self, func):
@@ -347,7 +346,7 @@ def _apply_columns(self, func):
"""
new_data = {col: func(series)
- for col, series in compat.iteritems(self)}
+ for col, series in self.items()}
return self._constructor(
data=new_data, index=self.index, columns=self.columns,
@@ -380,7 +379,7 @@ def density(self):
represented in the frame
"""
tot_nonsparse = sum(ser.sp_index.npoints
- for _, ser in compat.iteritems(self))
+ for _, ser in self.items())
tot = len(self.index) * len(self.columns)
return tot_nonsparse / float(tot)
@@ -599,7 +598,7 @@ def _combine_match_index(self, other, func, level=None):
this, other = self.align(other, join='outer', axis=0, level=level,
copy=False)
- for col, series in compat.iteritems(this):
+ for col, series in this.items():
new_data[col] = func(series.values, other.values)
fill_value = self._get_op_result_fill_value(other, func)
@@ -723,7 +722,7 @@ def _reindex_columns(self, columns, method, copy, level, fill_value=None,
raise NotImplementedError("'method' argument is not supported")
# TODO: fill value handling
- sdict = {k: v for k, v in compat.iteritems(self) if k in columns}
+ sdict = {k: v for k, v in self.items() if k in columns}
return self._constructor(
sdict, index=self.index, columns=columns,
default_fill_value=self._default_fill_value).__finalize__(self)
@@ -739,7 +738,7 @@ def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,
fill_value = np.nan
reindexers = {self._get_axis_number(a): val
- for (a, val) in compat.iteritems(reindexers)}
+ for (a, val) in reindexers.items()}
index, row_indexer = reindexers.get(0, (None, None))
columns, col_indexer = reindexers.get(1, (None, None))
@@ -917,7 +916,7 @@ def apply(self, func, axis=0, broadcast=None, reduce=None,
if isinstance(func, np.ufunc):
new_series = {}
- for k, v in compat.iteritems(self):
+ for k, v in self.items():
applied = func(v)
applied.fill_value = func(v.fill_value)
new_series[k] = applied
@@ -969,7 +968,7 @@ def stack_sparse_frame(frame):
"""
Only makes sense when fill_value is NaN
"""
- lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
+ lengths = [s.sp_index.npoints for _, s in frame.items()]
nobs = sum(lengths)
# this is pretty fast
@@ -980,7 +979,7 @@ def stack_sparse_frame(frame):
# TODO: Figure out whether this can be reached.
# I think this currently can't be reached because you can't build a
# SparseDataFrame with a non-np.NaN fill value (fails earlier).
- for _, series in compat.iteritems(frame):
+ for _, series in frame.items():
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
@@ -1021,7 +1020,7 @@ def homogenize(series_dict):
need_reindex = False
- for _, series in compat.iteritems(series_dict):
+ for _, series in series_dict.items():
if not np.isnan(series.fill_value):
raise TypeError('this method is only valid with NaN fill values')
@@ -1033,7 +1032,7 @@ def homogenize(series_dict):
if need_reindex:
output = {}
- for name, series in compat.iteritems(series_dict):
+ for name, series in series_dict.items():
if not series.sp_index.equals(index):
series = series.sparse_reindex(index)
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 1d588632b69f8..e449bf223ba94 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -9,7 +9,7 @@
import os
import re
-from pandas.compat import iteritems, lmap, lrange, raise_with_traceback
+from pandas.compat import lmap, lrange, raise_with_traceback
from pandas.errors import AbstractMethodError, EmptyDataError
from pandas.core.dtypes.common import is_list_like
@@ -617,7 +617,7 @@ def _build_xpath_expr(attrs):
if 'class_' in attrs:
attrs['class'] = attrs.pop('class_')
- s = ["@{key}={val!r}".format(key=k, val=v) for k, v in iteritems(attrs)]
+ s = ["@{key}={val!r}".format(key=k, val=v) for k, v in attrs.items()]
return '[{expr}]'.format(expr=' and '.join(s))
@@ -769,7 +769,7 @@ def _expand_elements(body):
not_max = lens[lens != lens_max]
empty = ['']
- for ind, length in iteritems(not_max):
+ for ind, length in not_max.items():
body[ind] += empty * (lens_max - length)
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 28cc768ba4e21..8a9533991fada 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -12,7 +12,7 @@
from pandas.core.dtypes.common import is_period_dtype
-from pandas import DataFrame, MultiIndex, Series, compat, isna, to_datetime
+from pandas import DataFrame, MultiIndex, Series, isna, to_datetime
from pandas.core.reshape.concat import concat
from pandas.io.common import (
@@ -822,8 +822,8 @@ def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
- decoded = {str(k): v for k, v in compat.iteritems(
- loads(json, precise_float=self.precise_float))}
+ decoded = {str(k): v for k, v in loads(
+ json, precise_float=self.precise_float).items()}
self.check_keys_split(decoded)
self.obj = Series(dtype=None, **decoded)
else:
@@ -837,7 +837,7 @@ def _parse_numpy(self):
if orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
- decoded = {str(k): v for k, v in compat.iteritems(decoded)}
+ decoded = {str(k): v for k, v in decoded.items()}
self.check_keys_split(decoded)
self.obj = Series(**decoded)
elif orient == "columns" or orient == "index":
@@ -875,7 +875,7 @@ def _parse_numpy(self):
elif orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
- decoded = {str(k): v for k, v in compat.iteritems(decoded)}
+ decoded = {str(k): v for k, v in decoded.items()}
self.check_keys_split(decoded)
self.obj = DataFrame(**decoded)
elif orient == "values":
@@ -895,8 +895,8 @@ def _parse_no_numpy(self):
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
elif orient == "split":
- decoded = {str(k): v for k, v in compat.iteritems(
- loads(json, precise_float=self.precise_float))}
+ decoded = {str(k): v for k, v in loads(
+ json, precise_float=self.precise_float).items()}
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
elif orient == "index":
diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py
index a836faec2b04f..26bf6a8cf410d 100644
--- a/pandas/io/json/normalize.py
+++ b/pandas/io/json/normalize.py
@@ -273,7 +273,7 @@ def _recursive_extract(data, path, seen_meta, level=0):
columns=lambda x: "{p}{c}".format(p=record_prefix, c=x))
# Data types, a problem
- for k, v in compat.iteritems(meta_vals):
+ for k, v in meta_vals.items():
if meta_prefix is not None:
k = meta_prefix + k
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index cff0f0e4b34d0..ac9b132b191b6 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -462,7 +462,7 @@ def encode(obj):
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, ss)
- # for name, ss in compat.iteritems(obj)])
+ # for name, ss in obj.items()])
# return d
else:
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 5d73b377838b6..5ad6eb009b6ee 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -912,7 +912,7 @@ def _get_options_with_defaults(self, engine):
options = {}
- for argname, default in compat.iteritems(_parser_defaults):
+ for argname, default in _parser_defaults.items():
value = kwds.get(argname, default)
# see gh-12935
@@ -922,7 +922,7 @@ def _get_options_with_defaults(self, engine):
else:
options[argname] = value
- for argname, default in compat.iteritems(_c_parser_defaults):
+ for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
@@ -941,7 +941,7 @@ def _get_options_with_defaults(self, engine):
options[argname] = value
if engine == 'python-fwf':
- for argname, default in compat.iteritems(_fwf_defaults):
+ for argname, default in _fwf_defaults.items():
options[argname] = kwds.get(argname, default)
return options
@@ -1657,7 +1657,7 @@ def _agg_index(self, index, try_parse_dates=True):
def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
converters=None, dtypes=None):
result = {}
- for c, values in compat.iteritems(dct):
+ for c, values in dct.items():
conv_f = None if converters is None else converters.get(c, None)
if isinstance(dtypes, dict):
cast_type = dtypes.get(c, None)
@@ -2471,7 +2471,7 @@ def _convert_data(self, data):
def _clean_mapping(mapping):
"converts col numbers to names"
clean = {}
- for col, v in compat.iteritems(mapping):
+ for col, v in mapping.items():
if isinstance(col, int) and col not in self.orig_names:
col = self.orig_names[col]
clean[col] = v
@@ -3258,7 +3258,7 @@ def _isindex(colspec):
elif isinstance(parse_spec, dict):
# dict of new name to column list
- for new_name, colspec in compat.iteritems(parse_spec):
+ for new_name, colspec in parse_spec.items():
if new_name in data_dict:
raise ValueError(
'Date column {name} already in dict'.format(name=new_name))
@@ -3316,7 +3316,7 @@ def _clean_na_values(na_values, keep_default_na=True):
# into array-likes for further use. This is also
# where we append the default NaN values, provided
# that `keep_default_na=True`.
- for k, v in compat.iteritems(old_na_values):
+ for k, v in old_na_values.items():
if not is_list_like(v):
v = [v]
@@ -3386,7 +3386,7 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None):
dtype = defaultdict(lambda: np.object)
# Convert column indexes to column names.
- for k, v in compat.iteritems(_dtype):
+ for k, v in _dtype.items():
col = columns[k] if is_integer(k) else k
dtype[col] = v
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 2dedeaf0a4cda..2cfc1bc5eac2e 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -29,8 +29,8 @@
from pandas import (
DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, PeriodIndex,
- Series, SparseDataFrame, SparseSeries, TimedeltaIndex, compat, concat,
- isna, to_datetime)
+ Series, SparseDataFrame, SparseSeries, TimedeltaIndex, concat, isna,
+ to_datetime)
from pandas.core.arrays.categorical import Categorical
from pandas.core.arrays.sparse import BlockIndex, IntIndex
from pandas.core.base import StringMixin
@@ -2448,7 +2448,7 @@ class GenericFixed(Fixed):
""" a generified fixed version """
_index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'}
- _reverse_index_map = {v: k for k, v in compat.iteritems(_index_type_map)}
+ _reverse_index_map = {v: k for k, v in _index_type_map.items()}
attributes = []
# indexer helpders
@@ -2912,7 +2912,7 @@ def read(self, **kwargs):
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """
super(SparseFrameFixed, self).write(obj, **kwargs)
- for name, ss in compat.iteritems(obj):
+ for name, ss in obj.items():
key = 'sparse_series_{name}'.format(name=name)
if key not in self.group._v_children:
node = self._handle.create_group(self.group, key)
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index af23c13063aa3..06560f5d702d6 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -9,7 +9,6 @@
from pandas._config import get_option
-import pandas.compat as compat
from pandas.compat import lrange
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, cache_readonly
@@ -1627,7 +1626,7 @@ def _validate_color_args(self):
if isinstance(self.color, dict):
valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
- for key, values in compat.iteritems(self.color):
+ for key, values in self.color.items():
if key not in valid_keys:
raise ValueError("color dict contains invalid "
"key '{0}' "
diff --git a/pandas/tests/frame/common.py b/pandas/tests/frame/common.py
index 5e9a73719f67b..0485ddb0e6f43 100644
--- a/pandas/tests/frame/common.py
+++ b/pandas/tests/frame/common.py
@@ -3,7 +3,6 @@
from pandas.util._decorators import cache_readonly
import pandas as pd
-from pandas import compat
import pandas.util.testing as tm
_seriesd = tm.getSeriesData()
@@ -11,8 +10,7 @@
_frame = pd.DataFrame(_seriesd)
_frame2 = pd.DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
-_intframe = pd.DataFrame({k: v.astype(int)
- for k, v in compat.iteritems(_seriesd)})
+_intframe = pd.DataFrame({k: v.astype(int) for k, v in _seriesd.items()})
_tsframe = pd.DataFrame(_tsd)
@@ -33,7 +31,7 @@ def frame2(self):
@cache_readonly
def intframe(self):
# force these all to int64 to avoid platform testing issues
- return pd.DataFrame({c: s for c, s in compat.iteritems(_intframe)},
+ return pd.DataFrame({c: s for c, s in _intframe.items()},
dtype=np.int64)
@cache_readonly
diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py
index fbe03325a3ad9..27c0e070c10c2 100644
--- a/pandas/tests/frame/conftest.py
+++ b/pandas/tests/frame/conftest.py
@@ -1,7 +1,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, NaT, compat, date_range
+from pandas import DataFrame, NaT, date_range
import pandas.util.testing as tm
@@ -51,10 +51,9 @@ def int_frame():
Columns are ['A', 'B', 'C', 'D']
"""
- df = DataFrame({k: v.astype(int)
- for k, v in compat.iteritems(tm.getSeriesData())})
+ df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()})
# force these all to int64 to avoid platform testing issues
- return DataFrame({c: s for c, s in compat.iteritems(df)}, dtype=np.int64)
+ return DataFrame({c: s for c, s in df.items()}, dtype=np.int64)
@pytest.fixture
@@ -101,8 +100,7 @@ def mixed_int_frame():
Columns are ['A', 'B', 'C', 'D'].
"""
- df = DataFrame({k: v.astype(int)
- for k, v in compat.iteritems(tm.getSeriesData())})
+ df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()})
df.A = df.A.astype('int32')
df.B = np.ones(len(df.B), dtype='uint64')
df.C = df.C.astype('uint8')
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 600575b5255d2..4d715d19dccc0 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -193,7 +193,7 @@ def test_nonzero(self, float_frame, float_string_frame):
def test_iteritems(self):
df = self.klass([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
- for k, v in compat.iteritems(df):
+ for k, v in df.items():
assert isinstance(v, self.klass._constructor_sliced)
def test_items(self):
@@ -343,8 +343,8 @@ def test_to_numpy_copy(self):
def test_transpose(self, float_frame):
frame = float_frame
dft = frame.T
- for idx, series in compat.iteritems(dft):
- for col, value in compat.iteritems(series):
+ for idx, series in dft.items():
+ for col, value in series.items():
if np.isnan(value):
assert np.isnan(frame[col][idx])
else:
@@ -355,7 +355,7 @@ def test_transpose(self, float_frame):
mixed = self.klass(data, index=index)
mixed_T = mixed.T
- for col, s in compat.iteritems(mixed_T):
+ for col, s in mixed_T.items():
assert s.dtype == np.object_
def test_swapaxes(self):
@@ -398,12 +398,12 @@ def test_repr_with_mi_nat(self, float_string_frame):
assert result == expected
def test_iteritems_names(self, float_string_frame):
- for k, v in compat.iteritems(float_string_frame):
+ for k, v in float_string_frame.items():
assert v.name == k
def test_series_put_names(self, float_string_frame):
series = float_string_frame._series
- for k, v in compat.iteritems(series):
+ for k, v in series.items():
assert v.name == k
def test_empty_nonzero(self):
@@ -459,7 +459,7 @@ def test_deepcopy(self, float_frame):
cp = deepcopy(float_frame)
series = cp['A']
series[:] = 10
- for idx, value in compat.iteritems(series):
+ for idx, value in series.items():
assert float_frame['A'][idx] != value
def test_transpose_get_view(self, float_frame):
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index af6d4391dca74..b2f531bfea249 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -12,8 +12,7 @@
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
-from pandas import (
- DataFrame, MultiIndex, Series, Timestamp, compat, date_range, notna)
+from pandas import DataFrame, MultiIndex, Series, Timestamp, date_range, notna
from pandas.conftest import _get_cython_table_params
from pandas.core.apply import frame_apply
import pandas.util.testing as tm
@@ -334,13 +333,13 @@ def test_apply_differently_indexed(self):
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame({i: v.describe()
- for i, v in compat.iteritems(df)},
+ for i, v in df.items()},
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame({i: v.describe()
- for i, v in compat.iteritems(df.T)},
+ for i, v in df.T.items()},
columns=df.index).T
assert_frame_equal(result1, expected1)
diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py
index 4df297bcc436e..f06d3d38e0a6d 100644
--- a/pandas/tests/frame/test_axis_select_reindex.py
+++ b/pandas/tests/frame/test_axis_select_reindex.py
@@ -10,8 +10,7 @@
import pandas as pd
from pandas import (
- Categorical, DataFrame, Index, MultiIndex, Series, compat, date_range,
- isna)
+ Categorical, DataFrame, Index, MultiIndex, Series, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
@@ -212,7 +211,7 @@ def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
- for idx, val in compat.iteritems(newFrame[col]):
+ for idx, val in newFrame[col].items():
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
@@ -221,7 +220,7 @@ def test_reindex(self):
else:
assert np.isnan(val)
- for col, series in compat.iteritems(newFrame):
+ for col, series in newFrame.items():
assert tm.equalContents(series.index, newFrame.index)
emptyFrame = self.frame.reindex(Index([]))
assert len(emptyFrame.index) == 0
@@ -230,7 +229,7 @@ def test_reindex(self):
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
- for idx, val in compat.iteritems(nonContigFrame[col]):
+ for idx, val in nonContigFrame[col].items():
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
@@ -239,7 +238,7 @@ def test_reindex(self):
else:
assert np.isnan(val)
- for col, series in compat.iteritems(nonContigFrame):
+ for col, series in nonContigFrame.items():
assert tm.equalContents(series.index, nonContigFrame.index)
# corner cases
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index e8736e514425f..d071e13599e5d 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -17,7 +17,7 @@
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, RangeIndex, Series, Timedelta,
- Timestamp, compat, date_range, isna)
+ Timestamp, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@@ -462,11 +462,11 @@ def test_constructor_subclass_dict(self):
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
- refdf = DataFrame({col: dict(compat.iteritems(val))
- for col, val in compat.iteritems(data)})
+ refdf = DataFrame({col: dict(val.items())
+ for col, val in data.items()})
tm.assert_frame_equal(refdf, df)
- data = tm.TestSubDict(compat.iteritems(data))
+ data = tm.TestSubDict(data.items())
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
@@ -474,7 +474,7 @@ def test_constructor_subclass_dict(self):
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
- for k, v in compat.iteritems(self.frame):
+ for k, v in self.frame.items():
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
@@ -526,7 +526,7 @@ def test_constructor_dict_of_tuples(self):
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
- expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
+ expected = DataFrame({k: list(v) for k, v in data.items()})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
@@ -2099,13 +2099,13 @@ def test_from_records_sequencelike(self):
tuples = []
columns = []
dtypes = []
- for dtype, b in compat.iteritems(blocks):
+ for dtype, b in blocks.items():
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1])
for c in b.columns])
for i in range(len(df.index)):
tup = []
- for _, b in compat.iteritems(blocks):
+ for _, b in blocks.items():
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
@@ -2172,11 +2172,11 @@ def test_from_records_dictlike(self):
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
- for dtype, b in compat.iteritems(blocks):
+ for dtype, b in blocks.items():
columns.extend(b.columns)
- asdict = {x: y for x, y in compat.iteritems(df)}
- asdict2 = {x: y.values for x, y in compat.iteritems(df)}
+ asdict = {x: y for x, y in df.items()}
+ asdict2 = {x: y.values for x, y in df.items()}
# dict of series & dict of ndarrays (have dtype info)
results = []
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py
index decd9ec304b37..9aad010a899d2 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/test_convert_to.py
@@ -8,8 +8,7 @@
import pytz
from pandas import (
- CategoricalDtype, DataFrame, MultiIndex, Series, Timestamp, compat,
- date_range)
+ CategoricalDtype, DataFrame, MultiIndex, Series, Timestamp, date_range)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@@ -374,20 +373,20 @@ def test_to_dict(self, mapping):
# GH16122
recons_data = DataFrame(test_data).to_dict(into=mapping)
- for k, v in compat.iteritems(test_data):
- for k2, v2 in compat.iteritems(v):
+ for k, v in test_data.items():
+ for k2, v2 in v.items():
assert (v2 == recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("l", mapping)
- for k, v in compat.iteritems(test_data):
- for k2, v2 in compat.iteritems(v):
+ for k, v in test_data.items():
+ for k2, v2 in v.items():
assert (v2 == recons_data[k][int(k2) - 1])
recons_data = DataFrame(test_data).to_dict("s", mapping)
- for k, v in compat.iteritems(test_data):
- for k2, v2 in compat.iteritems(v):
+ for k, v in test_data.items():
+ for k2, v2 in v.items():
assert (v2 == recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("sp", mapping)
@@ -407,8 +406,8 @@ def test_to_dict(self, mapping):
# GH10844
recons_data = DataFrame(test_data).to_dict("i")
- for k, v in compat.iteritems(test_data):
- for k2, v2 in compat.iteritems(v):
+ for k, v in test_data.items():
+ for k2, v2 in v.items():
assert (v2 == recons_data[k2][k])
df = DataFrame(test_data)
@@ -416,8 +415,8 @@ def test_to_dict(self, mapping):
recons_data = df.to_dict("i")
comp_data = test_data.copy()
comp_data['duped'] = comp_data[df.columns[0]]
- for k, v in compat.iteritems(comp_data):
- for k2, v2 in compat.iteritems(v):
+ for k, v in comp_data.items():
+ for k2, v2 in v.items():
assert (v2 == recons_data[k2][k])
@pytest.mark.parametrize('mapping', [list, defaultdict, []])
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index fa8a6ab3c29bd..9a10595a9f7ea 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -11,7 +11,7 @@
import pandas as pd
from pandas import (
Categorical, DataFrame, Series, Timedelta, Timestamp,
- _np_version_under1p14, compat, concat, date_range, option_context)
+ _np_version_under1p14, concat, date_range, option_context)
from pandas.core.arrays import integer_array
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@@ -388,8 +388,7 @@ def test_select_dtypes_typecodes(self):
def test_dtypes_gh8722(self):
self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
result = self.mixed_frame.dtypes
- expected = Series({k: v.dtype
- for k, v in compat.iteritems(self.mixed_frame)},
+ expected = Series({k: v.dtype for k, v in self.mixed_frame.items()},
index=result.index)
assert_series_equal(result, expected)
@@ -431,7 +430,7 @@ def test_astype(self):
# mixed casting
def _check_cast(df, v):
assert (list({s.dtype.name for
- _, s in compat.iteritems(df)})[0] == v)
+ _, s in df.items()})[0] == v)
mn = self.all_mixed._get_numeric_data().copy()
mn['little_float'] = np.array(12345., dtype='float16')
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 9149b305f5d0d..f58fe85cad258 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -15,7 +15,7 @@
import pandas as pd
from pandas import (
Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series,
- Timestamp, compat, date_range, isna, notna)
+ Timestamp, date_range, isna, notna)
import pandas.core.common as com
from pandas.core.indexing import IndexingError
from pandas.tests.frame.common import TestData
@@ -34,11 +34,11 @@ def test_getitem(self):
assert len(sl.index) == 20
# Column access
- for _, series in compat.iteritems(sl):
+ for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
- for key, _ in compat.iteritems(self.frame._series):
+ for key, _ in self.frame._series.items():
assert self.frame[key] is not None
assert 'random' not in self.frame
@@ -2438,7 +2438,7 @@ def test_at_time_between_time_datetimeindex(self):
def test_xs(self):
idx = self.frame.index[5]
xs = self.frame.xs(idx)
- for item, value in compat.iteritems(xs):
+ for item, value in xs.items():
if np.isnan(value):
assert np.isnan(self.frame[item][idx])
else:
@@ -2595,7 +2595,7 @@ def is_ok(s):
s.dtype != 'uint8')
return DataFrame(dict((c, s + 1) if is_ok(s) else (c, s)
- for c, s in compat.iteritems(df)))
+ for c, s in df.items()))
def _check_get(df, cond, check_dtypes=True):
other1 = _safe_add(df)
@@ -2713,7 +2713,7 @@ def _check_set(df, cond, check_dtypes=True):
# dtypes (and confirm upcasts)x
if check_dtypes:
- for k, v in compat.iteritems(df.dtypes):
+ for k, v in df.dtypes.items():
if issubclass(v.type, np.integer) and not cond[k].all():
v = np.dtype('float64')
assert dfi[k].dtype == v
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index fc991cd17cae8..edce25566e361 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -7,7 +7,7 @@
import pytest
import pandas as pd
-from pandas import DataFrame, MultiIndex, Series, compat
+from pandas import DataFrame, MultiIndex, Series
import pandas.core.common as com
from pandas.tests.frame.common import _check_mixed_float
import pandas.util.testing as tm
@@ -383,7 +383,7 @@ def test_combineSeries(self, float_frame, mixed_float_frame,
added = float_frame + series
- for key, s in compat.iteritems(added):
+ for key, s in added.items():
assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
@@ -391,7 +391,7 @@ def test_combineSeries(self, float_frame, mixed_float_frame,
larger_series = Series(larger_series)
larger_added = float_frame + larger_series
- for key, s in compat.iteritems(float_frame):
+ for key, s in float_frame.items():
assert_series_equal(larger_added[key], s + series[key])
assert 'E' in larger_added
assert np.isnan(larger_added['E']).all()
@@ -424,7 +424,7 @@ def test_combineSeries(self, float_frame, mixed_float_frame,
# and require explicit broadcasting
added = datetime_frame.add(ts, axis='index')
- for key, col in compat.iteritems(datetime_frame):
+ for key, col in datetime_frame.items():
result = col + ts
assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
@@ -465,7 +465,7 @@ def test_combineFunc(self, float_frame, mixed_float_frame):
# vs mix
result = mixed_float_frame * 2
- for c, s in compat.iteritems(result):
+ for c, s in result.items():
tm.assert_numpy_array_equal(
s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py
index f44739e83267f..20479f9a4fcbf 100644
--- a/pandas/tests/frame/test_replace.py
+++ b/pandas/tests/frame/test_replace.py
@@ -10,7 +10,7 @@
from pandas.compat import lrange
import pandas as pd
-from pandas import DataFrame, Index, Series, Timestamp, compat, date_range
+from pandas import DataFrame, Index, Series, Timestamp, date_range
from pandas.tests.frame.common import TestData
from pandas.util.testing import assert_frame_equal, assert_series_equal
@@ -809,8 +809,7 @@ def test_replace_input_formats_listlike(self):
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(to_rep, values)
- expected = {k: v.replace(to_rep[k], values[k])
- for k, v in compat.iteritems(df)}
+ expected = {k: v.replace(to_rep[k], values[k]) for k, v in df.items()}
assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
@@ -823,8 +822,7 @@ def test_replace_input_formats_listlike(self):
df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(np.nan, values)
- expected = {k: v.replace(np.nan, values[k])
- for k, v in compat.iteritems(df)}
+ expected = {k: v.replace(np.nan, values[k]) for k, v in df.items()}
assert_frame_equal(filled, DataFrame(expected))
# list to list
@@ -847,8 +845,7 @@ def test_replace_input_formats_scalar(self):
# dict to scalar
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
filled = df.replace(to_rep, 0)
- expected = {k: v.replace(to_rep[k], 0)
- for k, v in compat.iteritems(df)}
+ expected = {k: v.replace(to_rep[k], 0) for k, v in df.items()}
assert_frame_equal(filled, DataFrame(expected))
msg = "value argument must be scalar, dict, or Series"
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 0d1575a35a0bc..187fea5403aea 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -10,7 +10,7 @@
import pandas as pd
from pandas import (
- DataFrame, Index, MultiIndex, Series, Timestamp, compat, date_range, isna)
+ DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@@ -392,7 +392,7 @@ def test_groupby_non_arithmetic_agg_int_like_precision(i):
"args": [1]},
"count": {"expected": 2}}
- for method, data in compat.iteritems(grp_exp):
+ for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 885def32db046..31b602e38c4ad 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -12,7 +12,7 @@
import pandas as pd
from pandas import (
- DataFrame, Index, MultiIndex, Panel, Series, Timestamp, compat, date_range,
+ DataFrame, Index, MultiIndex, Panel, Series, Timestamp, date_range,
read_csv)
import pandas.core.common as com
import pandas.util.testing as tm
@@ -403,7 +403,7 @@ def test_frame_groupby(tsframe):
groups = grouped.groups
indices = grouped.indices
- for k, v in compat.iteritems(groups):
+ for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
@@ -524,7 +524,7 @@ def test_groupby_multiple_columns(df, op):
for n2, gp2 in gp1.groupby('B'):
expected[n1][n2] = op(gp2.loc[:, ['C', 'D']])
expected = {k: DataFrame(v)
- for k, v in compat.iteritems(expected)}
+ for k, v in expected.items()}
expected = Panel.fromDict(expected).swapaxes(0, 1)
expected.major_axis.name, expected.minor_axis.name = 'A', 'B'
@@ -1275,7 +1275,7 @@ def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = lmap(tuple, df[keys].values)
tups = com.asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
- for k, v in compat.iteritems(expected):
+ for k, v in expected.items():
assert (result[k] == v)
_check_groupby(df, result, ['a', 'b'], 'd')
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index 8382111ec9901..867cb8365476e 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -9,7 +9,7 @@
import pandas as pd
from pandas import (
- CategoricalIndex, DataFrame, Index, MultiIndex, Series, Timestamp, compat,
+ CategoricalIndex, DataFrame, Index, MultiIndex, Series, Timestamp,
date_range)
from pandas.core.groupby.grouper import Grouping
import pandas.util.testing as tm
@@ -671,14 +671,14 @@ def test_groups(self, df):
groups = grouped.groups
assert groups is grouped.groups # caching works
- for k, v in compat.iteritems(grouped.groups):
+ for k, v in grouped.groups.items():
assert (df.loc[v]['A'] == k).all()
grouped = df.groupby(['A', 'B'])
groups = grouped.groups
assert groups is grouped.groups # caching works
- for k, v in compat.iteritems(grouped.groups):
+ for k, v in grouped.groups.items():
assert (df.loc[v]['A'] == k[0]).all()
assert (df.loc[v]['B'] == k[1]).all()
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index be266798973d1..3f0656615545c 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -4,7 +4,6 @@
import pytest
from pandas._libs.tslib import iNaT
-import pandas.compat as compat
from pandas.core.dtypes.dtypes import CategoricalDtype
@@ -235,7 +234,7 @@ def test_copy_name(self):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
- for name, index in compat.iteritems(self.indices):
+ for name, index in self.indices.items():
if isinstance(index, MultiIndex):
continue
@@ -262,7 +261,7 @@ def test_copy_name(self):
def test_ensure_copied_data(self):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
- for name, index in compat.iteritems(self.indices):
+ for name, index in self.indices.items():
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
@@ -298,7 +297,7 @@ def test_ensure_copied_data(self):
check_same='same')
def test_memory_usage(self):
- for name, index in compat.iteritems(self.indices):
+ for name, index in self.indices.items():
result = index.memory_usage()
if len(index):
index.get_loc(index[0])
@@ -428,7 +427,7 @@ def test_where(self, klass):
@pytest.mark.parametrize("method", ["intersection", "union",
"difference", "symmetric_difference"])
def test_set_ops_error_cases(self, case, method):
- for name, idx in compat.iteritems(self.indices):
+ for name, idx in self.indices.items():
# non-iterable input
msg = "Input must be Index or array-like"
@@ -436,7 +435,7 @@ def test_set_ops_error_cases(self, case, method):
getattr(idx, method)(case)
def test_intersection_base(self):
- for name, idx in compat.iteritems(self.indices):
+ for name, idx in self.indices.items():
first = idx[:5]
second = idx[:3]
intersect = first.intersection(second)
@@ -466,7 +465,7 @@ def test_intersection_base(self):
first.intersection([1, 2, 3])
def test_union_base(self):
- for name, idx in compat.iteritems(self.indices):
+ for name, idx in self.indices.items():
first = idx[3:]
second = idx[:5]
everything = idx
@@ -494,7 +493,7 @@ def test_union_base(self):
@pytest.mark.parametrize("sort", [None, False])
def test_difference_base(self, sort):
- for name, idx in compat.iteritems(self.indices):
+ for name, idx in self.indices.items():
first = idx[2:]
second = idx[:4]
answer = idx[4:]
@@ -529,7 +528,7 @@ def test_difference_base(self, sort):
first.difference([1, 2, 3], sort)
def test_symmetric_difference(self):
- for name, idx in compat.iteritems(self.indices):
+ for name, idx in self.indices.items():
first = idx[1:]
second = idx[:-1]
if isinstance(idx, CategoricalIndex):
@@ -560,7 +559,7 @@ def test_symmetric_difference(self):
def test_insert_base(self):
- for name, idx in compat.iteritems(self.indices):
+ for name, idx in self.indices.items():
result = idx[1:4]
if not len(idx):
@@ -571,7 +570,7 @@ def test_insert_base(self):
def test_delete_base(self):
- for name, idx in compat.iteritems(self.indices):
+ for name, idx in self.indices.items():
if not len(idx):
continue
@@ -596,7 +595,7 @@ def test_delete_base(self):
def test_equals(self):
- for name, idx in compat.iteritems(self.indices):
+ for name, idx in self.indices.items():
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
@@ -682,7 +681,7 @@ def test_numpy_ufuncs(self):
# test ufuncs of numpy, see:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html
- for name, idx in compat.iteritems(self.indices):
+ for name, idx in self.indices.items():
for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,
np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin,
np.arccos, np.arctan, np.sinh, np.cosh, np.tanh,
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index c3b00133228d8..8bdf4d84427ba 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -4,8 +4,6 @@
import pytest
import pytz
-import pandas.compat as compat
-
import pandas as pd
from pandas import DatetimeIndex, Index, Timestamp, date_range, notna
import pandas.util.testing as tm
@@ -413,7 +411,7 @@ def test_delete(self):
-1: expected_4,
4: expected_4,
1: expected_1}
- for n, expected in compat.iteritems(cases):
+ for n, expected in cases.items():
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
@@ -460,7 +458,7 @@ def test_delete_slice(self):
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
- for n, expected in compat.iteritems(cases):
+ for n, expected in cases.items():
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index fa08315e13600..ed1028b45f5db 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -22,8 +22,8 @@
import pandas as pd
from pandas import (
- DataFrame, DatetimeIndex, Index, NaT, Series, Timestamp, compat,
- date_range, isna, to_datetime)
+ DataFrame, DatetimeIndex, Index, NaT, Series, Timestamp, date_range, isna,
+ to_datetime)
from pandas.core.arrays import DatetimeArray
from pandas.core.tools import datetimes as tools
from pandas.util import testing as tm
@@ -1701,7 +1701,7 @@ def test_parsers_dayfirst_yearfirst(self, cache):
(True, True,
datetime(2020, 12, 21))]}
- for date_str, values in compat.iteritems(cases):
+ for date_str, values in cases.items():
for dayfirst, yearfirst, expected in values:
# odd comparisons across version
@@ -1739,7 +1739,7 @@ def test_parsers_timestring(self, cache):
cases = {'10:15': (parse('10:15'), datetime(1, 1, 1, 10, 15)),
'9:05': (parse('9:05'), datetime(1, 1, 1, 9, 5))}
- for date_str, (exp_now, exp_def) in compat.iteritems(cases):
+ for date_str, (exp_now, exp_def) in cases.items():
result1, _, _ = parsing.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index a6264e4dad4f0..7233f53572625 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -4,7 +4,7 @@
import pytest
import pandas as pd
-from pandas import Index, Timedelta, TimedeltaIndex, compat, timedelta_range
+from pandas import Index, Timedelta, TimedeltaIndex, timedelta_range
import pandas.util.testing as tm
@@ -240,7 +240,7 @@ def test_delete(self):
-1: expected_4,
4: expected_4,
1: expected_1}
- for n, expected in compat.iteritems(cases):
+ for n, expected in cases.items():
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
@@ -267,7 +267,7 @@ def test_delete_slice(self):
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
- for n, expected in compat.iteritems(cases):
+ for n, expected in cases.items():
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
diff --git a/pandas/tests/io/json/__pycache__/tmp2c7r4efu b/pandas/tests/io/json/__pycache__/tmp2c7r4efu
new file mode 100644
index 0000000000000..d1258abbcdf40
Binary files /dev/null and b/pandas/tests/io/json/__pycache__/tmp2c7r4efu differ
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 9ca5bf95e5e08..42c3b7bf556da 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -13,8 +13,7 @@
import pandas.util._test_decorators as td
import pandas as pd
-from pandas import (
- DataFrame, DatetimeIndex, Series, Timestamp, compat, read_json)
+from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_index_equal,
@@ -26,7 +25,7 @@
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = DataFrame({k: v.astype(np.int64)
- for k, v in compat.iteritems(_seriesd)})
+ for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index 8bf315b73366e..916d9ce63f4ee 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -40,7 +40,7 @@ def _clean_dict(d):
cleaned_dict : dict
"""
- return {str(k): v for k, v in compat.iteritems(d)}
+ return {str(k): v for k, v in d.items()}
@pytest.fixture(params=[
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index c5bac4724d70b..7f827808b6aae 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -13,7 +13,6 @@
import pandas._libs.parsers as parser
from pandas._libs.parsers import TextReader
-import pandas.compat as compat
from pandas import DataFrame
import pandas.util.testing as tm
@@ -347,6 +346,6 @@ def test_empty_csv_input(self):
def assert_array_dicts_equal(left, right):
- for k, v in compat.iteritems(left):
+ for k, v in left.items():
assert tm.assert_numpy_array_equal(np.asarray(v),
np.asarray(right[k]))
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 1377888a58d07..b06da91f72e28 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -11,7 +11,7 @@
from numpy import nan
import pytest
-from pandas.compat import PY36, iteritems
+from pandas.compat import PY36
import pandas.util._test_decorators as td
import pandas as pd
@@ -798,7 +798,7 @@ def tdf(col_sheet_name):
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as ew:
- for sheetname, df in iteritems(dfs):
+ for sheetname, df in dfs.items():
df.to_excel(ew, sheetname)
dfs_returned = read_excel(pth, sheet_name=sheets, index_col=0)
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 52fead0166dc5..407095e379a04 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -13,7 +13,6 @@
import numpy as np
import pytest
-import pandas.compat as compat
from pandas.compat import iterkeys
from pandas.core.dtypes.common import is_categorical_dtype
@@ -685,7 +684,7 @@ def test_variable_labels(self):
sr_117 = rdr.variable_labels()
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
- for k, v in compat.iteritems(sr_115):
+ for k, v in sr_115.items():
assert k in sr_117
assert v == sr_117[k]
assert k in keys
diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py
index 1eef226749383..30736b11817c0 100644
--- a/pandas/tests/plotting/common.py
+++ b/pandas/tests/plotting/common.py
@@ -7,7 +7,6 @@
import numpy as np
from numpy import random
-from pandas.compat import iteritems
from pandas.util._decorators import cache_readonly
import pandas.util._test_decorators as td
@@ -416,7 +415,7 @@ def _check_box_return_type(self, returned, return_type, expected_keys=None,
assert isinstance(returned, Series)
assert sorted(returned.keys()) == sorted(expected_keys)
- for key, value in iteritems(returned):
+ for key, value in returned.items():
assert isinstance(value, types[return_type])
# check returned dict has correct mapping
if return_type == 'axes':
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 63ee899944e92..cce1b6f13c942 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -5,7 +5,6 @@
import pytest
from pandas._libs import join as libjoin
-import pandas.compat as compat
from pandas.compat import lrange
import pandas as pd
@@ -783,6 +782,6 @@ def _join_by_hand(a, b, how='left'):
result_columns = a.columns.append(b.columns)
- for col, s in compat.iteritems(b_re):
+ for col, s in b_re.items():
a_re[col] = s
return a_re.reindex(columns=result_columns)
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index 5080db9354a1f..c065900975869 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -11,8 +11,6 @@
from numpy.random import randn
import pytest
-from pandas.compat import iteritems
-
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
@@ -102,13 +100,13 @@ def _check_expected_dtype(self, obj, label):
def test_dtypes(self):
# to confirm test case covers intended dtypes
- for typ, vals in iteritems(self.data):
+ for typ, vals in self.data.items():
self._check_expected_dtype(pd.Index(vals), typ)
self._check_expected_dtype(pd.Series(vals), typ)
def test_concatlike_same_dtypes(self):
# GH 13660
- for typ1, vals1 in iteritems(self.data):
+ for typ1, vals1 in self.data.items():
vals2 = vals1
vals3 = vals1
@@ -214,8 +212,8 @@ def test_concatlike_same_dtypes(self):
def test_concatlike_dtypes_coercion(self):
# GH 13660
- for typ1, vals1 in iteritems(self.data):
- for typ2, vals2 in iteritems(self.data):
+ for typ1, vals1 in self.data.items():
+ for typ2, vals2 in self.data.items():
vals3 = vals2
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index a7c2768c5b319..ffc8de59e4d63 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -10,7 +10,6 @@
from pandas._libs.tslibs.parsing import DateParseError
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz
-from pandas.compat import iteritems
from pandas.compat.numpy import np_datetime64_compat
import pandas as pd
@@ -708,7 +707,7 @@ def test_period_deprecated_freq(self):
"N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"]}
msg = INVALID_FREQ_ERR_MSG
- for exp, freqs in iteritems(cases):
+ for exp, freqs in cases.items():
for freq in freqs:
with pytest.raises(ValueError, match=msg):
Period('2016-03-01 09:00', freq=freq)
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py
index edc21282ccafa..579ef7955ddb1 100644
--- a/pandas/tests/series/indexing/test_alter_index.py
+++ b/pandas/tests/series/indexing/test_alter_index.py
@@ -7,7 +7,6 @@
from numpy import nan
import pytest
-import pandas.compat as compat
from pandas.compat import lrange
import pandas as pd
@@ -171,13 +170,13 @@ def test_reindex(test_data):
subIndex = test_data.series.index[10:20]
subSeries = test_data.series.reindex(subIndex)
- for idx, val in compat.iteritems(subSeries):
+ for idx, val in subSeries.items():
assert val == test_data.series[idx]
subIndex2 = test_data.ts.index[10:20]
subTS = test_data.ts.reindex(subIndex2)
- for idx, val in compat.iteritems(subTS):
+ for idx, val in subTS.items():
assert val == test_data.ts[idx]
stuffSeries = test_data.ts.reindex(subIndex)
@@ -186,7 +185,7 @@ def test_reindex(test_data):
# This is extremely important for the Cython code to not screw up
nonContigIndex = test_data.ts.index[::2]
subNonContig = test_data.ts.reindex(nonContigIndex)
- for idx, val in compat.iteritems(subNonContig):
+ for idx, val in subNonContig.items():
assert val == test_data.ts[idx]
# return a copy the same index here
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 81642f65f05a4..279d09ea335ff 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -7,7 +7,6 @@
import numpy as np
import pytest
-import pandas.compat as compat
from pandas.compat import lzip
import pandas as pd
@@ -145,7 +144,7 @@ def test_constructor_dict(self):
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = self.series_klass(data)
- expected = self.series_klass(dict(compat.iteritems(data)))
+ expected = self.series_klass(dict(data.items()))
self._assert_series_equal(series, expected)
def test_constructor_ordereddict(self):
@@ -315,10 +314,10 @@ def test_values(self):
tm.assert_almost_equal(self.ts.values, self.ts, check_dtype=False)
def test_iteritems(self):
- for idx, val in compat.iteritems(self.series):
+ for idx, val in self.series.items():
assert val == self.series[idx]
- for idx, val in compat.iteritems(self.ts):
+ for idx, val in self.ts.items():
assert val == self.ts[idx]
# assert is lazy (genrators don't define reverse, lists do)
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index 162a27db34cb1..27dee79603af5 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -7,7 +7,6 @@
import numpy as np
import pytest
-import pandas.compat as compat
from pandas.compat import lrange
import pandas as pd
@@ -432,13 +431,13 @@ def test_map(self, datetime_series):
merged = target.map(source)
- for k, v in compat.iteritems(merged):
+ for k, v in merged.items():
assert v == source[target[k]]
# input could be a dict
merged = target.map(source.to_dict())
- for k, v in compat.iteritems(merged):
+ for k, v in merged.items():
assert v == source[target[k]]
# function
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index 45e3dffde60f7..9ef771ac6e5d6 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -8,7 +8,7 @@
import pytest
import pandas as pd
-from pandas import DataFrame, DatetimeIndex, Series, compat, date_range
+from pandas import DataFrame, DatetimeIndex, Series, date_range
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
@@ -17,7 +17,7 @@ class TestSeriesCombine(object):
def test_append(self, datetime_series, string_series, object_series):
appendedSeries = string_series.append(object_series)
- for idx, value in compat.iteritems(appendedSeries):
+ for idx, value in appendedSeries.items():
if idx in string_series.index:
assert value == string_series[idx]
elif idx in object_series.index:
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index d15320cee644f..b687720f8d0a2 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -7,7 +7,6 @@
from pandas._libs.algos import Infinity, NegInfinity
from pandas._libs.tslib import iNaT
-import pandas.compat as compat
import pandas.util._test_decorators as td
from pandas import NaT, Series, Timestamp, date_range
@@ -376,7 +375,7 @@ def test_rank_descending(self):
def test_rank_int(self):
s = self.s.dropna().astype('i8')
- for method, res in compat.iteritems(self.results):
+ for method, res in self.results.items():
result = s.rank(method=method)
expected = Series(res).dropna()
expected.index = result.index
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index 45df08ccfeb48..31f5f6cb2d7be 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -78,7 +78,7 @@ def test_copy(self, float_frame):
def test_constructor(self, float_frame, float_frame_int_kind,
float_frame_fill0):
- for col, series in compat.iteritems(float_frame):
+ for col, series in float_frame.items():
assert isinstance(series, SparseSeries)
assert isinstance(float_frame_int_kind['A'].sp_index, IntIndex)
@@ -96,11 +96,11 @@ def test_constructor(self, float_frame, float_frame_int_kind,
# construct no data
sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
- for col, series in compat.iteritems(sdf):
+ for col, series in sdf.items():
assert isinstance(series, SparseSeries)
# construct from nested dict
- data = {c: s.to_dict() for c, s in compat.iteritems(float_frame)}
+ data = {c: s.to_dict() for c, s in float_frame.items()}
sdf = SparseDataFrame(data)
tm.assert_sp_frame_equal(sdf, float_frame)
diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py
index 703ae3bde71d1..7d702b8cd2b5a 100644
--- a/pandas/tests/sparse/series/test_series.py
+++ b/pandas/tests/sparse/series/test_series.py
@@ -14,7 +14,7 @@
import pandas as pd
from pandas import (
- DataFrame, Series, SparseDtype, SparseSeries, bdate_range, compat, isna)
+ DataFrame, Series, SparseDtype, SparseSeries, bdate_range, isna)
from pandas.core.reshape.util import cartesian_product
import pandas.core.sparse.frame as spf
from pandas.tests.series.test_api import SharedWithSparse
@@ -431,7 +431,7 @@ def _check_all(self, check_func):
def test_getitem(self):
def _check_getitem(sp, dense):
- for idx, val in compat.iteritems(dense):
+ for idx, val in dense.items():
tm.assert_almost_equal(val, sp[idx])
for i in range(len(dense)):
@@ -850,7 +850,7 @@ def _check_matches(indices, expected):
# homogenized is only valid with NaN fill values
homogenized = spf.homogenize(data)
- for k, v in compat.iteritems(homogenized):
+ for k, v in homogenized.items():
assert (v.sp_index.equals(expected))
indices1 = [BlockIndex(10, [2], [7]), BlockIndex(10, [1, 6], [3, 4]),
diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py
index 0fa9fcd8aae9c..148b3fba45375 100644
--- a/pandas/tests/test_compat.py
+++ b/pandas/tests/test_compat.py
@@ -6,7 +6,7 @@
import re
from pandas.compat import (
- iteritems, iterkeys, itervalues, lfilter, lmap, lrange, lzip, re_type)
+ iterkeys, itervalues, lfilter, lmap, lrange, lzip, re_type)
class TestBuiltinIterators(object):
@@ -54,7 +54,6 @@ def test_lzip(self):
def test_dict_iterators(self):
assert next(itervalues({1: 2})) == 2
assert next(iterkeys({1: 2})) == 1
- assert next(iteritems({1: 2})) == (1, 2)
def test_re_type():
diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py
index 04a50cf6facd5..b5e7a5f6abf4c 100644
--- a/pandas/tests/test_sorting.py
+++ b/pandas/tests/test_sorting.py
@@ -6,8 +6,7 @@
from numpy import nan
import pytest
-from pandas import (
- DataFrame, MultiIndex, Series, compat, concat, merge, to_datetime)
+from pandas import DataFrame, MultiIndex, Series, concat, merge, to_datetime
from pandas.core import common as com
from pandas.core.sorting import (
decons_group_index, get_group_index, is_int64_overflow_possible,
@@ -51,7 +50,7 @@ def test_int64_overflow(self):
expected = df.groupby(tups).sum()['values']
- for k, v in compat.iteritems(expected):
+ for k, v in expected.items():
assert left[k] == right[k[::-1]]
assert left[k] == v
assert len(left) == len(right)
diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py
index 0aa29e451b1ba..7de1e8117289e 100644
--- a/pandas/tests/tseries/frequencies/test_freq_code.py
+++ b/pandas/tests/tseries/frequencies/test_freq_code.py
@@ -3,12 +3,11 @@
from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs.frequencies import (
FreqGroup, _period_code_map, get_freq, get_freq_code)
-import pandas.compat as compat
import pandas.tseries.offsets as offsets
-@pytest.fixture(params=list(compat.iteritems(_period_code_map)))
+@pytest.fixture(params=list(_period_code_map.items()))
def period_code_item(request):
return request.param
diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py
index c2ef939d1915e..fb65ec1eb9961 100644
--- a/pandas/tests/tseries/frequencies/test_inference.py
+++ b/pandas/tests/tseries/frequencies/test_inference.py
@@ -5,7 +5,6 @@
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG
-import pandas.compat as compat
from pandas.compat import is_platform_windows
from pandas import (
@@ -218,14 +217,14 @@ def test_infer_freq_index(freq, expected):
@pytest.mark.parametrize(
"expected,dates",
- list(compat.iteritems(
+ list(
{"AS-JAN": ["2009-01-01", "2010-01-01", "2011-01-01", "2012-01-01"],
"Q-OCT": ["2009-01-31", "2009-04-30", "2009-07-31", "2009-10-31"],
"M": ["2010-11-30", "2010-12-31", "2011-01-31", "2011-02-28"],
"W-SAT": ["2010-12-25", "2011-01-01", "2011-01-08", "2011-01-15"],
"D": ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"],
"H": ["2011-12-31 22:00", "2011-12-31 23:00",
- "2012-01-01 00:00", "2012-01-01 01:00"]}))
+ "2012-01-01 00:00", "2012-01-01 01:00"]}.items())
)
def test_infer_freq_tz(tz_naive_fixture, expected, dates):
# see gh-7310
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 0c58e515979c2..ea13be8601463 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -709,7 +709,7 @@ def test_onOffset(self):
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
@@ -924,7 +924,7 @@ def test_roll_date_object(self):
@pytest.mark.parametrize('case', normalize_cases)
def test_normalize(self, case):
offset, cases = case
- for dt, expected in compat.iteritems(cases):
+ for dt, expected in cases.items():
assert offset.apply(dt) == expected
on_offset_cases = []
@@ -964,7 +964,7 @@ def test_normalize(self, case):
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, cases = case
- for dt, expected in compat.iteritems(cases):
+ for dt, expected in cases.items():
assert offset.onOffset(dt) == expected
opening_time_cases = []
@@ -1130,7 +1130,7 @@ def test_onOffset(self, case):
def test_opening_time(self, case):
_offsets, cases = case
for offset in _offsets:
- for dt, (exp_next, exp_prev) in compat.iteritems(cases):
+ for dt, (exp_next, exp_prev) in cases.items():
assert offset._next_opening_time(dt) == exp_next
assert offset._prev_opening_time(dt) == exp_prev
@@ -1290,7 +1290,7 @@ def test_opening_time(self, case):
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
apply_large_n_cases = []
@@ -1346,7 +1346,7 @@ def test_apply(self, case):
@pytest.mark.parametrize('case', apply_large_n_cases)
def test_apply_large_n(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_nanoseconds(self):
@@ -1369,7 +1369,7 @@ def test_apply_nanoseconds(self):
'2014-07-03 17:00') - Nano(5), }))
for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_datetimeindex(self):
@@ -1561,7 +1561,7 @@ def test_roll_date_object(self):
@pytest.mark.parametrize('norm_cases', normalize_cases)
def test_normalize(self, norm_cases):
offset, cases = norm_cases
- for dt, expected in compat.iteritems(cases):
+ for dt, expected in cases.items():
assert offset.apply(dt) == expected
def test_onOffset(self):
@@ -1577,7 +1577,7 @@ def test_onOffset(self):
datetime(2014, 7, 6, 12): False}))
for offset, cases in tests:
- for dt, expected in compat.iteritems(cases):
+ for dt, expected in cases.items():
assert offset.onOffset(dt) == expected
apply_cases = []
@@ -1622,7 +1622,7 @@ def test_onOffset(self):
@pytest.mark.parametrize('apply_case', apply_cases)
def test_apply(self, apply_case):
offset, cases = apply_case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
nano_cases = []
@@ -1647,7 +1647,7 @@ def test_apply(self, apply_case):
@pytest.mark.parametrize('nano_case', nano_cases)
def test_apply_nanoseconds(self, nano_case):
offset, cases = nano_case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@@ -1778,7 +1778,7 @@ def test_onOffset(self, case):
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
@@ -1977,7 +1977,7 @@ def test_onOffset(self, case):
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
@@ -2094,7 +2094,7 @@ def test_onOffset(self, case):
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
@@ -2193,7 +2193,7 @@ def test_isAnchored(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('weekday', range(7))
@@ -2518,7 +2518,7 @@ def test_offset_whole_year(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('case', offset_cases)
@@ -2709,7 +2709,7 @@ def test_offset_whole_year(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('case', offset_cases)
@@ -2831,7 +2831,7 @@ def setup_method(self, method):
_offset_map.clear()
def test_alias_equality(self):
- for k, v in compat.iteritems(_offset_map):
+ for k, v in _offset_map.items():
if v is None:
continue
assert k == v.copy()
diff --git a/pandas/tests/tseries/offsets/test_yqm_offsets.py b/pandas/tests/tseries/offsets/test_yqm_offsets.py
index 9ee03d2e886f3..6d121eb9eb8df 100644
--- a/pandas/tests/tseries/offsets/test_yqm_offsets.py
+++ b/pandas/tests/tseries/offsets/test_yqm_offsets.py
@@ -7,7 +7,7 @@
import pytest
import pandas as pd
-from pandas import Timestamp, compat
+from pandas import Timestamp
from pandas.tseries.offsets import (
BMonthBegin, BMonthEnd, BQuarterBegin, BQuarterEnd, BYearBegin, BYearEnd,
@@ -105,7 +105,7 @@ class TestMonthBegin(Base):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@@ -164,7 +164,7 @@ def test_normalize(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [(MonthEnd(), datetime(2007, 12, 31), True),
@@ -224,7 +224,7 @@ def test_offsets_compare_equal(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [(BMonthBegin(), datetime(2007, 12, 31), False),
@@ -289,7 +289,7 @@ def test_offsets_compare_equal(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [(BMonthEnd(), datetime(2007, 12, 31), True),
@@ -379,7 +379,7 @@ def test_offset_corner_case(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@@ -458,7 +458,7 @@ def test_offset_corner_case(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
@@ -590,7 +590,7 @@ def test_offset_corner_case(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@@ -668,7 +668,7 @@ def test_offset_corner_case(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
@@ -787,7 +787,7 @@ def test_misspecified(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [(YearBegin(), datetime(2007, 1, 3), False),
@@ -838,7 +838,7 @@ def test_misspecified(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [(YearEnd(), datetime(2007, 12, 31), True),
@@ -884,7 +884,7 @@ class TestYearEndDiffMonth(Base):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [(YearEnd(month=3), datetime(2007, 3, 31), True),
@@ -943,7 +943,7 @@ def test_misspecified(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@@ -980,7 +980,7 @@ class TestBYearEnd(Base):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [(BYearEnd(), datetime(2007, 12, 31), True),
@@ -1016,7 +1016,7 @@ def test_bad_month_fail(self):
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
- for base, expected in compat.iteritems(cases):
+ for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_roll(self):
diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py
index 4aee0a2e5350e..d6d4792c19ea8 100644
--- a/pandas/util/_doctools.py
+++ b/pandas/util/_doctools.py
@@ -1,7 +1,5 @@
import numpy as np
-import pandas.compat as compat
-
import pandas as pd
@@ -152,7 +150,7 @@ def _make_table(self, ax, df, title, height=None):
height = 1.0 / (len(df) + 1)
props = tb.properties()
- for (r, c), cell in compat.iteritems(props['celld']):
+ for (r, c), cell in props['celld'].items():
if c == -1:
cell.set_visible(False)
elif r < col_nlevels and c < idx_nlevels:
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 95b27161a5858..9659cb33686d0 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -22,7 +22,6 @@
can_set_locale, get_locales, set_locale)
from pandas._libs import testing as _testing
-import pandas.compat as compat
from pandas.compat import lmap, lrange, lzip, raise_with_traceback
from pandas.core.dtypes.common import (
@@ -1499,7 +1498,7 @@ def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
if check_fill_value:
assert_attr_equal('default_fill_value', left, right, obj=obj)
- for col, series in compat.iteritems(left):
+ for col, series in left.items():
assert (col in right)
# trade-off?
| - [x] xref #25725
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Removes ``compat.iteritems``. | https://api.github.com/repos/pandas-dev/pandas/pulls/26079 | 2019-04-13T21:07:10Z | 2019-04-15T04:05:36Z | 2019-04-15T04:05:36Z | 2019-04-15T22:22:53Z |
BUG: pd.to_datetime() throws if caching is on with Null-like arguments | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index c441244b4415d..856fbd1237a03 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -266,7 +266,7 @@ Datetimelike
^^^^^^^^^^^^
- Bug in :func:`to_datetime` which would raise an (incorrect) ``ValueError`` when called with a date far into the future and the ``format`` argument specified instead of raising ``OutOfBoundsDatetime`` (:issue:`23830`)
--
+- Bug in :func:`to_datetime` which would raise ``InvalidIndexError: Reindexing only valid with uniquely valued Index objects`` when called with ``cache=True``, with ``arg`` including at least two different elements from the set {None, numpy.nan, pandas.NaT} (:issue:`22305`)
-
-
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 66d563a7c6f85..1ad39e7ad357a 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -52,9 +52,10 @@ def _maybe_cache(arg, format, cache, convert_listlike):
if cache:
# Perform a quicker unique check
from pandas import Index
- if not Index(arg).is_unique:
- unique_dates = algorithms.unique(arg)
- cache_dates = convert_listlike(unique_dates, True, format)
+ unique_dates = Index(arg).unique()
+ if len(unique_dates) < len(arg):
+ cache_dates = convert_listlike(unique_dates.to_numpy(),
+ True, format)
cache_array = Series(cache_dates, index=unique_dates)
return cache_array
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index eaf689cfa1c21..a592ef941484e 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -1630,6 +1630,15 @@ def test_parsers(self, date_str, expected, cache):
yearfirst=yearfirst)
assert result7 == expected
+ @pytest.mark.parametrize('cache', [True, False])
+ def test_na_values_with_cache(self, cache, unique_nulls_fixture,
+ unique_nulls_fixture2):
+ # GH22305
+ expected = Index([NaT, NaT], dtype='datetime64[ns]')
+ result = to_datetime([unique_nulls_fixture, unique_nulls_fixture2],
+ cache=cache)
+ tm.assert_index_equal(result, expected)
+
def test_parsers_nat(self):
# Test that each of several string-accepting methods return pd.NaT
result1, _, _ = parsing.parse_time_string('NaT')
| - [x] closes #22305
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
The problem was in contradictory behavior between `pd.Index([pd.NaT, None]).is_unique` that returns `False` and `algorithms.unique([pd.NaT, None])` that returns `array([NaT, None], dtype=object)` equivalent to the object being unique.
We are only using `Index(arg).unique()` now.
The test written by @realead was added to verify changes. | https://api.github.com/repos/pandas-dev/pandas/pulls/26078 | 2019-04-13T18:18:49Z | 2019-04-17T12:48:32Z | 2019-04-17T12:48:31Z | 2022-02-23T14:11:33Z |
DOC: fix errors/warnings in running code blocks | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 1b05c53df4516..98cba6969587b 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -3565,13 +3565,6 @@ HDFStore will by default not drop rows that are all missing. This behavior can b
os.remove('file.h5')
-.. ipython:: python
- :suppress:
-
- os.remove('file.h5')
-
-
-
.. _io.hdf5-fixed:
Fixed Format
diff --git a/doc/source/user_guide/timedeltas.rst b/doc/source/user_guide/timedeltas.rst
index 37cf6afcb96a3..40a8fd3101409 100644
--- a/doc/source/user_guide/timedeltas.rst
+++ b/doc/source/user_guide/timedeltas.rst
@@ -191,13 +191,12 @@ Operands can also appear in a reversed order (a singular object operated with a
df.min().idxmax()
df.min(axis=1).idxmin()
-You can fillna on timedeltas. Integers will be interpreted as seconds. You can
-pass a timedelta to get a particular value.
+You can fillna on timedeltas, passing a timedelta to get a particular value.
.. ipython:: python
- y.fillna(0)
- y.fillna(10)
+ y.fillna(pd.Timedelta(0))
+ y.fillna(pd.Timedelta(10, unit='s'))
y.fillna(pd.Timedelta('-1 days, 00:00:05'))
You can also negate, multiply and use ``abs`` on ``Timedeltas``:
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 590fde2aaccf8..f559b0d073320 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -322,13 +322,16 @@ which can be specified. These are computed from the starting point specified by
1349720105400, 1349720105500], unit='ms')
Constructing a :class:`Timestamp` or :class:`DatetimeIndex` with an epoch timestamp
-with the ``tz`` argument specified will localize the epoch timestamps to UTC
-first then convert the result to the specified time zone.
+with the ``tz`` argument specified will currently localize the epoch timestamps to UTC
+first then convert the result to the specified time zone. However, this behavior
+is :ref:`deprecated <whatsnew_0240.deprecations.integer_tz>`, and if you have
+epochs in wall time in another timezone, it is recommended to read the epochs
+as timezone-naive timestamps and then localize to the appropriate timezone:
.. ipython:: python
- pd.Timestamp(1262347200000000000, tz='US/Pacific')
- pd.DatetimeIndex([1262347200000000000], tz='US/Pacific')
+ pd.Timestamp(1262347200000000000).tz_localize('US/Pacific')
+ pd.DatetimeIndex([1262347200000000000]).tz_localize('US/Pacific')
.. note::
diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst
index 2d6550bb6888d..3671a5e24bdaa 100644
--- a/doc/source/whatsnew/v0.10.0.rst
+++ b/doc/source/whatsnew/v0.10.0.rst
@@ -295,79 +295,171 @@ Updated PyTables Support
:ref:`Docs <io.hdf5>` for PyTables ``Table`` format & several enhancements to the api. Here is a taste of what to expect.
-.. ipython:: python
- :suppress:
- :okexcept:
+.. code-block:: ipython
- import os
+ In [41]: store = pd.HDFStore('store.h5')
- os.remove('store.h5')
+ In [42]: df = pd.DataFrame(np.random.randn(8, 3),
+ ....: index=pd.date_range('1/1/2000', periods=8),
+ ....: columns=['A', 'B', 'C'])
-.. ipython:: python
+ In [43]: df
+ Out[43]:
+ A B C
+ 2000-01-01 -2.036047 0.000830 -0.955697
+ 2000-01-02 -0.898872 -0.725411 0.059904
+ 2000-01-03 -0.449644 1.082900 -1.221265
+ 2000-01-04 0.361078 1.330704 0.855932
+ 2000-01-05 -1.216718 1.488887 0.018993
+ 2000-01-06 -0.877046 0.045976 0.437274
+ 2000-01-07 -0.567182 -0.888657 -0.556383
+ 2000-01-08 0.655457 1.117949 -2.782376
- store = pd.HDFStore('store.h5')
- df = pd.DataFrame(np.random.randn(8, 3),
- index=pd.date_range('1/1/2000', periods=8),
- columns=['A', 'B', 'C'])
- df
+ [8 rows x 3 columns]
- # appending data frames
- df1 = df[0:4]
- df2 = df[4:]
- store.append('df', df1)
- store.append('df', df2)
- store
+ # appending data frames
+ In [44]: df1 = df[0:4]
- # selecting the entire store
- store.select('df')
+ In [45]: df2 = df[4:]
-.. ipython:: python
- :okwarning:
+ In [46]: store.append('df', df1)
- wp = pd.Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
- major_axis=pd.date_range('1/1/2000', periods=5),
- minor_axis=['A', 'B', 'C', 'D'])
- wp
+ In [47]: store.append('df', df2)
- # storing a panel
- store.append('wp', wp)
+ In [48]: store
+ Out[48]:
+ <class 'pandas.io.pytables.HDFStore'>
+ File path: store.h5
+ /df frame_table (typ->appendable,nrows->8,ncols->3,indexers->[index])
- # selecting via A QUERY
- store.select('wp', "major_axis>20000102 and minor_axis=['A','B']")
+ # selecting the entire store
+ In [49]: store.select('df')
+ Out[49]:
+ A B C
+ 2000-01-01 -2.036047 0.000830 -0.955697
+ 2000-01-02 -0.898872 -0.725411 0.059904
+ 2000-01-03 -0.449644 1.082900 -1.221265
+ 2000-01-04 0.361078 1.330704 0.855932
+ 2000-01-05 -1.216718 1.488887 0.018993
+ 2000-01-06 -0.877046 0.045976 0.437274
+ 2000-01-07 -0.567182 -0.888657 -0.556383
+ 2000-01-08 0.655457 1.117949 -2.782376
- # removing data from tables
- store.remove('wp', "major_axis>20000103")
- store.select('wp')
+ [8 rows x 3 columns]
+
+.. code-block:: ipython
+
+ In [50]: wp = pd.Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
+ ....: major_axis=pd.date_range('1/1/2000', periods=5),
+ ....: minor_axis=['A', 'B', 'C', 'D'])
+
+ In [51]: wp
+ Out[51]:
+ <class 'pandas.core.panel.Panel'>
+ Dimensions: 2 (items) x 5 (major_axis) x 4 (minor_axis)
+ Items axis: Item1 to Item2
+ Major_axis axis: 2000-01-01 00:00:00 to 2000-01-05 00:00:00
+ Minor_axis axis: A to D
+
+ # storing a panel
+ In [52]: store.append('wp', wp)
+
+ # selecting via A QUERY
+ In [53]: store.select('wp', [pd.Term('major_axis>20000102'),
+ ....: pd.Term('minor_axis', '=', ['A', 'B'])])
+ ....:
+ Out[53]:
+ <class 'pandas.core.panel.Panel'>
+ Dimensions: 2 (items) x 3 (major_axis) x 2 (minor_axis)
+ Items axis: Item1 to Item2
+ Major_axis axis: 2000-01-03 00:00:00 to 2000-01-05 00:00:00
+ Minor_axis axis: A to B
+
+ # removing data from tables
+ In [54]: store.remove('wp', pd.Term('major_axis>20000103'))
+ Out[54]: 8
+
+ In [55]: store.select('wp')
+ Out[55]:
+ <class 'pandas.core.panel.Panel'>
+ Dimensions: 2 (items) x 3 (major_axis) x 4 (minor_axis)
+ Items axis: Item1 to Item2
+ Major_axis axis: 2000-01-01 00:00:00 to 2000-01-03 00:00:00
+ Minor_axis axis: A to D
+
+ # deleting a store
+ In [56]: del store['df']
+
+ In [57]: store
+ Out[57]:
+ <class 'pandas.io.pytables.HDFStore'>
+ File path: store.h5
+ /wp wide_table (typ->appendable,nrows->12,ncols->2,indexers->[major_axis,minor_axis])
- # deleting a store
- del store['df']
- store
**Enhancements**
- added ability to hierarchical keys
- .. ipython:: python
+ .. code-block:: ipython
+
+ In [58]: store.put('foo/bar/bah', df)
+
+ In [59]: store.append('food/orange', df)
- store.put('foo/bar/bah', df)
- store.append('food/orange', df)
- store.append('food/apple', df)
- store
+ In [60]: store.append('food/apple', df)
- # remove all nodes under this level
- store.remove('food')
- store
+ In [61]: store
+ Out[61]:
+ <class 'pandas.io.pytables.HDFStore'>
+ File path: store.h5
+ /foo/bar/bah frame (shape->[8,3])
+ /food/apple frame_table (typ->appendable,nrows->8,ncols->3,indexers->[index])
+ /food/orange frame_table (typ->appendable,nrows->8,ncols->3,indexers->[index])
+ /wp wide_table (typ->appendable,nrows->12,ncols->2,indexers->[major_axis,minor_axis])
+
+ # remove all nodes under this level
+ In [62]: store.remove('food')
+
+ In [63]: store
+ Out[63]:
+ <class 'pandas.io.pytables.HDFStore'>
+ File path: store.h5
+ /foo/bar/bah frame (shape->[8,3])
+ /wp wide_table (typ->appendable,nrows->12,ncols->2,indexers->[major_axis,minor_axis])
- added mixed-dtype support!
.. ipython:: python
- df['string'] = 'string'
- df['int'] = 1
- store.append('df', df)
- df1 = store.select('df')
- df1
- df1.get_dtype_counts()
+ In [64]: df['string'] = 'string'
+
+ In [65]: df['int'] = 1
+
+ In [66]: store.append('df', df)
+
+ In [67]: df1 = store.select('df')
+
+ In [68]: df1
+ Out[68]:
+ A B C string int
+ 2000-01-01 -2.036047 0.000830 -0.955697 string 1
+ 2000-01-02 -0.898872 -0.725411 0.059904 string 1
+ 2000-01-03 -0.449644 1.082900 -1.221265 string 1
+ 2000-01-04 0.361078 1.330704 0.855932 string 1
+ 2000-01-05 -1.216718 1.488887 0.018993 string 1
+ 2000-01-06 -0.877046 0.045976 0.437274 string 1
+ 2000-01-07 -0.567182 -0.888657 -0.556383 string 1
+ 2000-01-08 0.655457 1.117949 -2.782376 string 1
+
+ [8 rows x 5 columns]
+
+ In [69]: df1.get_dtype_counts()
+ Out[69]:
+ float64 3
+ int64 1
+ object 1
+ dtype: int64
- performance improvements on table writing
- support for arbitrarily indexed dimensions
@@ -392,13 +484,6 @@ Updated PyTables Support
- minor change to select and remove: require a table ONLY if where is also
provided (and not None)
-.. ipython:: python
- :suppress:
-
- store.close()
- import os
- os.remove('store.h5')
-
**Compatibility**
0.10 of ``HDFStore`` is backwards compatible for reading tables created in a prior version of pandas,
diff --git a/doc/source/whatsnew/v0.11.0.rst b/doc/source/whatsnew/v0.11.0.rst
index baa464fe842d3..26b97268038e9 100644
--- a/doc/source/whatsnew/v0.11.0.rst
+++ b/doc/source/whatsnew/v0.11.0.rst
@@ -278,6 +278,7 @@ Enhancements
- ``Squeeze`` to possibly remove length 1 dimensions from an object.
.. ipython:: python
+ :okwarning:
p = pd.Panel(np.random.randn(3, 4, 4), items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20010102', periods=4),
diff --git a/doc/source/whatsnew/v0.12.0.rst b/doc/source/whatsnew/v0.12.0.rst
index b2dd8229c91f3..5dbfb2c728f06 100644
--- a/doc/source/whatsnew/v0.12.0.rst
+++ b/doc/source/whatsnew/v0.12.0.rst
@@ -317,22 +317,24 @@ Other Enhancements
- ``pd.set_option()`` now allows N option, value pairs (:issue:`3667`).
- Let's say that we had an option ``'a.b'`` and another option ``'b.c'``.
- We can set them at the same time:
+ Let's say that we had an option ``'a.b'`` and another option ``'b.c'``.
+ We can set them at the same time:
- .. ipython:: python
- :suppress:
+ .. ipython:: python
- pd.core.config.register_option('a.b', 2, 'ay dot bee')
- pd.core.config.register_option('b.c', 3, 'bee dot cee')
+ In [31]: pd.get_option('a.b')
+ Out[31]: 2
- .. ipython:: python
+ In [32]: pd.get_option('b.c')
+ Out[32]: 3
+
+ In [33]: pd.set_option('a.b', 1, 'b.c', 4)
+
+ In [34]: pd.get_option('a.b')
+ Out[34]: 1
- pd.get_option('a.b')
- pd.get_option('b.c')
- pd.set_option('a.b', 1, 'b.c', 4)
- pd.get_option('a.b')
- pd.get_option('b.c')
+ In [35]: pd.get_option('b.c')
+ Out[35]: 4
- The ``filter`` method for group objects returns a subset of the original
object. Suppose we want to take only elements that belong to groups with a
diff --git a/doc/source/whatsnew/v0.13.0.rst b/doc/source/whatsnew/v0.13.0.rst
index 0f799c069f494..13a2f879211b3 100644
--- a/doc/source/whatsnew/v0.13.0.rst
+++ b/doc/source/whatsnew/v0.13.0.rst
@@ -272,6 +272,7 @@ This is like an ``append`` operation.
A Panel setting operation on an arbitrary axis aligns the input to the Panel
.. ipython:: python
+ :okwarning:
p = pd.Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
@@ -543,7 +544,7 @@ Enhancements
.. ipython:: python
- td.fillna(0)
+ td.fillna(pd.Timedelta(0))
td.fillna(datetime.timedelta(days=1, seconds=5))
You can do numeric reduction operations on timedeltas.
diff --git a/doc/source/whatsnew/v0.15.0.rst b/doc/source/whatsnew/v0.15.0.rst
index 7b9a8ba082411..f9e47b45f498d 100644
--- a/doc/source/whatsnew/v0.15.0.rst
+++ b/doc/source/whatsnew/v0.15.0.rst
@@ -702,6 +702,7 @@ Other notable API changes:
This can also be seen in multi-axis indexing with a ``Panel``.
.. ipython:: python
+ :okwarning:
p = pd.Panel(np.arange(2 * 3 * 4).reshape(2, 3, 4),
items=['ItemA', 'ItemB'],
diff --git a/doc/source/whatsnew/v0.15.2.rst b/doc/source/whatsnew/v0.15.2.rst
index dabdcd1ab76c3..9f0449d6a1754 100644
--- a/doc/source/whatsnew/v0.15.2.rst
+++ b/doc/source/whatsnew/v0.15.2.rst
@@ -161,6 +161,7 @@ Other enhancements:
- ``Panel`` now supports the ``all`` and ``any`` aggregation functions. (:issue:`8302`):
.. ipython:: python
+ :okwarning:
p = pd.Panel(np.random.rand(2, 5, 4) > 0.1)
p.all()
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index eb0b522e08a32..05d6a03639a2d 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -567,7 +567,7 @@ missing indicator, ``np.nan``. (:issue:`20377`)
.. ipython:: python
:suppress:
- from pandas.io import StringIO
+ from io import StringIO
*Previous Behavior*:
| Cleaning up errors / warnings in the doc build, related to the ipython code blocks:
- put a section about hdf5 and Panel in verbatim code blocks + added some `:okwarning:` for Panel (will need to be properly cleaned up when Panel is removed)
- fixed cases that raised deprecation warning for `TimedeltaIndex.fillna()` with integers
- reworded case that raised deprecation warning about passing integers + timezone to DatetimeIndex
- `pd.core.config` doesn't exist anymore -> removed that call + made the actual example verbatim | https://api.github.com/repos/pandas-dev/pandas/pulls/26076 | 2019-04-13T13:33:53Z | 2019-04-16T07:26:01Z | 2019-04-16T07:26:01Z | 2019-04-16T07:26:05Z |
#26065 Fix Type Annotations in pandas.core.arrays | diff --git a/mypy.ini b/mypy.ini
index 2069c736a2eb4..596d71c77317e 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -11,24 +11,6 @@ ignore_errors=True
[mypy-pandas.core.api]
ignore_errors=True
-[mypy-pandas.core.arrays.array_]
-ignore_errors=True
-
-[mypy-pandas.core.arrays.datetimelike]
-ignore_errors=True
-
-[mypy-pandas.core.arrays.integer]
-ignore_errors=True
-
-[mypy-pandas.core.arrays.interval]
-ignore_errors=True
-
-[mypy-pandas.core.arrays.period]
-ignore_errors=True
-
-[mypy-pandas.core.arrays.timedeltas]
-ignore_errors=True
-
[mypy-pandas.core.base]
ignore_errors=True
diff --git a/pandas/_typing.py b/pandas/_typing.py
index dc15a44b65db9..3959e38e6f08c 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -1,11 +1,16 @@
from pathlib import Path
-from typing import IO, AnyStr, Union
+from typing import IO, AnyStr, Type, Union
import numpy as np
+from pandas._libs import Timestamp
+from pandas._libs.tslibs.period import Period
+from pandas._libs.tslibs.timedeltas import Timedelta
+
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCExtensionArray
ArrayLike = Union[ABCExtensionArray, np.ndarray]
+DatetimeLikeScalar = Type[Union[Period, Timestamp, Timedelta]]
Dtype = Union[str, np.dtype, ExtensionDtype]
FilePathOrBuffer = Union[str, Path, IO[AnyStr]]
diff --git a/pandas/core/arrays/array_.py b/pandas/core/arrays/array_.py
index f549557440ebe..1b002ad12d526 100644
--- a/pandas/core/arrays/array_.py
+++ b/pandas/core/arrays/array_.py
@@ -1,4 +1,4 @@
-from typing import Optional, Sequence, Union
+from typing import Optional, Sequence, Union, cast
import numpy as np
@@ -229,7 +229,7 @@ def array(data: Sequence[object],
dtype = registry.find(dtype) or dtype
if is_extension_array_dtype(dtype):
- cls = dtype.construct_array_type()
+ cls = cast(ExtensionDtype, dtype).construct_array_type()
return cls._from_sequence(data, dtype=dtype, copy=copy)
if dtype is None:
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 38cad11b24da8..c32f8642dc2ed 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -1,6 +1,6 @@
from datetime import datetime, timedelta
import operator
-from typing import Any, Sequence, Tuple, Type, Union
+from typing import Any, Sequence, Union, cast
import warnings
import numpy as np
@@ -27,6 +27,7 @@
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import isna
+from pandas._typing import DatetimeLikeScalar
from pandas.core import missing, nanops
from pandas.core.algorithms import (
checked_add_with_arr, take, unique1d, value_counts)
@@ -39,6 +40,7 @@
class AttributesMixin:
+ _data = None # type: np.ndarray
@property
def _attributes(self):
@@ -56,7 +58,7 @@ def _get_attributes_dict(self):
return {k: getattr(self, k, None) for k in self._attributes}
@property
- def _scalar_type(self) -> Union[Type, Tuple[Type]]:
+ def _scalar_type(self) -> DatetimeLikeScalar:
"""The scalar associated with this datelike
* PeriodArray : Period
@@ -477,14 +479,16 @@ def __setitem__(
if lib.is_scalar(key):
raise ValueError("setting an array element with a sequence.")
- if (not is_slice
- and len(key) != len(value)
- and not com.is_bool_indexer(key)):
- msg = ("shape mismatch: value array of length '{}' does not "
- "match indexing result of length '{}'.")
- raise ValueError(msg.format(len(key), len(value)))
- if not is_slice and len(key) == 0:
- return
+ if not is_slice:
+ key = cast(Sequence, key)
+ if (len(key) != len(value)
+ and not com.is_bool_indexer(key)):
+ msg = ("shape mismatch: value array of length '{}' does "
+ "not match indexing result of length '{}'.")
+ raise ValueError(msg.format(
+ len(key), len(value)))
+ elif not len(key):
+ return
value = type(self)._from_sequence(value, dtype=self.dtype)
self._check_compatible_with(value)
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 29c146cb55a23..3f0a3590e24a3 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -1,5 +1,6 @@
import copy
import sys
+from typing import Type
import warnings
import numpy as np
@@ -31,9 +32,9 @@ class _IntegerDtype(ExtensionDtype):
The attributes name & type are set when these subclasses are created.
"""
- name = None
+ name = None # type: str
base = None
- type = None
+ type = None # type: Type
na_value = np.nan
def __repr__(self):
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 5de265eb83561..c73ac0ab5a543 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -939,8 +939,9 @@ def mid(self):
points) and is either monotonic increasing or monotonic decreasing,
else False
"""
-
- @property
+ # https://github.com/python/mypy/issues/1362
+ # Mypy does not support decorated properties
+ @property # type: ignore
@Appender(_interval_shared_docs['is_non_overlapping_monotonic']
% _shared_docs_kwargs)
def is_non_overlapping_monotonic(self):
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 32f3d215b006f..8a6640e11ad74 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -1,6 +1,6 @@
from datetime import timedelta
import operator
-from typing import Any, Callable, Optional, Sequence, Union
+from typing import Any, Callable, List, Optional, Sequence, Union
import numpy as np
@@ -23,7 +23,7 @@
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algos
-from pandas.core.arrays import ExtensionArray, datetimelike as dtl
+from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
from pandas.tseries import frequencies
@@ -94,7 +94,7 @@ class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
Parameters
----------
- values : Union[PeriodArray, Series[period], ndarary[int], PeriodIndex]
+ values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
The data to store. These should be arrays that can be directly
converted to ordinals without inference or copy (PeriodArray,
ndarray[int64]), or a box around such an array (Series[period],
@@ -135,7 +135,7 @@ class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
_scalar_type = Period
# Names others delegate to us
- _other_ops = []
+ _other_ops = [] # type: List[str]
_bool_ops = ['is_leap_year']
_object_ops = ['start_time', 'end_time', 'freq']
_field_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
@@ -276,7 +276,8 @@ def _check_compatible_with(self, other):
def dtype(self):
return self._dtype
- @property
+ # read-only property overwriting read/write
+ @property # type: ignore
def freq(self):
"""
Return the frequency object for this PeriodArray.
@@ -538,7 +539,8 @@ def _sub_period(self, other):
@Appender(dtl.DatetimeLikeArrayMixin._addsub_int_array.__doc__)
def _addsub_int_array(
self,
- other: Union[ExtensionArray, np.ndarray, ABCIndexClass],
+ other: Union[ABCPeriodArray, ABCSeries,
+ ABCPeriodIndex, np.ndarray],
op: Callable[[Any], Any]
) -> ABCPeriodArray:
assert op in [operator.add, operator.sub]
@@ -778,7 +780,8 @@ def period_array(
data = np.asarray(data)
if freq:
- dtype = PeriodDtype(freq)
+ # typed Optional here because the else block below assigns None
+ dtype = PeriodDtype(freq) # type: Optional[PeriodDtype]
else:
dtype = None
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index e09e546a423fc..58d9e4085a612 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -1,5 +1,6 @@
from datetime import timedelta
import textwrap
+from typing import List
import warnings
import numpy as np
@@ -130,8 +131,8 @@ class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps):
_scalar_type = Timedelta
__array_priority__ = 1000
# define my properties & methods for delegation
- _other_ops = []
- _bool_ops = []
+ _other_ops = [] # type: List[str]
+ _bool_ops = [] # type: List[str]
_object_ops = ['freq']
_field_ops = ['days', 'seconds', 'microseconds', 'nanoseconds']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
| - [X] closes #26065
- [X] tests passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/26071 | 2019-04-12T22:53:35Z | 2019-04-30T13:05:48Z | 2019-04-30T13:05:48Z | 2019-04-30T19:32:00Z |
DOC: better document Dtypes docstrings + avoid sphinx warnings | diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst
index 4cf8db895f0ac..fb9a95b6736d5 100644
--- a/doc/source/reference/arrays.rst
+++ b/doc/source/reference/arrays.rst
@@ -146,6 +146,11 @@ If the data are tz-aware, then every value in the array must have the same timez
:toctree: api/
arrays.DatetimeArray
+
+.. autosummary::
+ :toctree: api/
+ :template: autosummary/class_without_autosummary.rst
+
DatetimeTZDtype
.. _api.arrays.timedelta:
@@ -260,6 +265,11 @@ Every period in a ``PeriodArray`` must have the same ``freq``.
:toctree: api/
arrays.PeriodArray
+
+.. autosummary::
+ :toctree: api/
+ :template: autosummary/class_without_autosummary.rst
+
PeriodDtype
.. _api.arrays.interval:
@@ -296,6 +306,11 @@ A collection of intervals may be stored in an :class:`arrays.IntervalArray`.
:toctree: api/
arrays.IntervalArray
+
+.. autosummary::
+ :toctree: api/
+ :template: autosummary/class_without_autosummary.rst
+
IntervalDtype
.. _api.arrays.integer_na:
@@ -310,6 +325,11 @@ Pandas provides this through :class:`arrays.IntegerArray`.
:toctree: api/
arrays.IntegerArray
+
+.. autosummary::
+ :toctree: api/
+ :template: autosummary/class_without_autosummary.rst
+
Int8Dtype
Int16Dtype
Int32Dtype
@@ -396,8 +416,27 @@ be stored efficiently as a :class:`SparseArray`.
:toctree: api/
SparseArray
+
+.. autosummary::
+ :toctree: api/
+ :template: autosummary/class_without_autosummary.rst
+
SparseDtype
The ``Series.sparse`` accessor may be used to access sparse-specific attributes
and methods if the :class:`Series` contains sparse values. See
:ref:`api.series.sparse` for more.
+
+
+
+.. Dtype attributes which are manually listed in their docstrings: including
+.. it here to make sure a docstring page is built for them
+
+..
+ .. autosummary::
+ :toctree: api/
+
+ DatetimeTZDtype.unit
+ DatetimeTZDtype.tz
+ PeriodDtype.freq
+ IntervalDtype.subtype
\ No newline at end of file
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index bbacfa3077054..29c146cb55a23 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -682,6 +682,17 @@ def integer_arithmetic_method(self, other):
module = sys.modules[__name__]
+_dtype_docstring = """
+An ExtensionDtype for {dtype} integer data.
+
+Attributes
+----------
+None
+
+Methods
+-------
+None
+"""
# create the Dtype
_dtypes = {}
@@ -695,7 +706,8 @@ def integer_arithmetic_method(self, other):
classname = "{}Dtype".format(name)
numpy_dtype = getattr(np, dtype)
attributes_dict = {'type': numpy_dtype,
- 'name': name}
+ 'name': name,
+ '__doc__': _dtype_docstring.format(dtype=dtype)}
dtype_type = register_extension_dtype(
type(classname, (_IntegerDtype, ), attributes_dict)
)
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index 2eb33d6d2c50f..7e001b6ab9e73 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -72,6 +72,14 @@ class SparseDtype(ExtensionDtype):
=========== ==========
The default value may be overridden by specifying a `fill_value`.
+
+ Attributes
+ ----------
+ None
+
+ Methods
+ -------
+ None
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 7a5723b973eb0..417683ad54420 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -169,7 +169,7 @@ class CategoricalDtypeType(type):
@register_extension_dtype
class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
"""
- Type for categorical data with the categories and orderedness
+ Type for categorical data with the categories and orderedness.
.. versionchanged:: 0.21.0
@@ -334,6 +334,9 @@ def _finalize(self, categories, ordered, fastpath=False):
self._ordered = ordered
def __setstate__(self, state):
+ # for pickle compat. __get_state__ is defined in the
+ # PandasExtensionDtype superclass and uses the public properties to
+ # pickle -> need to set the settable private ones here (see GH26067)
self._categories = state.pop('categories', None)
self._ordered = state.pop('ordered', False)
@@ -570,13 +573,40 @@ def _is_boolean(self):
@register_extension_dtype
class DatetimeTZDtype(PandasExtensionDtype, ExtensionDtype):
-
"""
- A np.dtype duck-typed class, suitable for holding a custom datetime with tz
- dtype.
+ An ExtensionDtype for timezone-aware datetime data.
+
+ **This is not an actual numpy dtype**, but a duck type.
+
+ Parameters
+ ----------
+ unit : str, default "ns"
+ The precision of the datetime data. Currently limited
+ to ``"ns"``.
+ tz : str, int, or datetime.tzinfo
+ The timezone.
+
+ Attributes
+ ----------
+ unit
+ tz
+
+ Methods
+ -------
+ None
- THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of
- np.datetime64[ns]
+ Raises
+ ------
+ pytz.UnknownTimeZoneError
+ When the requested timezone cannot be found.
+
+ Examples
+ --------
+ >>> pd.DatetimeTZDtype(tz='UTC')
+ datetime64[ns, UTC]
+
+ >>> pd.DatetimeTZDtype(tz='dateutil/US/Central')
+ datetime64[ns, tzfile('/usr/share/zoneinfo/US/Central')]
"""
type = Timestamp # type: Type[Timestamp]
kind = 'M' # type: str_type
@@ -589,30 +619,6 @@ class DatetimeTZDtype(PandasExtensionDtype, ExtensionDtype):
_cache = {} # type: Dict[str_type, PandasExtensionDtype]
def __init__(self, unit="ns", tz=None):
- """
- An ExtensionDtype for timezone-aware datetime data.
-
- Parameters
- ----------
- unit : str, default "ns"
- The precision of the datetime data. Currently limited
- to ``"ns"``.
- tz : str, int, or datetime.tzinfo
- The timezone.
-
- Raises
- ------
- pytz.UnknownTimeZoneError
- When the requested timezone cannot be found.
-
- Examples
- --------
- >>> pd.core.dtypes.dtypes.DatetimeTZDtype(tz='UTC')
- datetime64[ns, UTC]
-
- >>> pd.core.dtypes.dtypes.DatetimeTZDtype(tz='dateutil/US/Central')
- datetime64[ns, tzfile('/usr/share/zoneinfo/US/Central')]
- """
if isinstance(unit, DatetimeTZDtype):
unit, tz = unit.unit, unit.tz
@@ -718,7 +724,9 @@ def __eq__(self, other):
str(self.tz) == str(other.tz))
def __setstate__(self, state):
- # for pickle compat.
+ # for pickle compat. __get_state__ is defined in the
+ # PandasExtensionDtype superclass and uses the public properties to
+ # pickle -> need to set the settable private ones here (see GH26067)
self._tz = state['tz']
self._unit = state['unit']
@@ -726,9 +734,30 @@ def __setstate__(self, state):
@register_extension_dtype
class PeriodDtype(ExtensionDtype, PandasExtensionDtype):
"""
- A Period duck-typed class, suitable for holding a period with freq dtype.
+ An ExtensionDtype for Period data.
+
+ **This is not an actual numpy dtype**, but a duck type.
+
+ Parameters
+ ----------
+ freq : str or DateOffset
+ The frequency of this PeriodDtype
+
+ Attributes
+ ----------
+ freq
- THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.int64.
+ Methods
+ -------
+ None
+
+ Examples
+ --------
+ >>> pd.PeriodDtype(freq='D')
+ period[D]
+
+ >>> pd.PeriodDtype(freq=pd.offsets.MonthEnd())
+ period[M]
"""
type = Period # type: Type[Period]
kind = 'O' # type: str_type
@@ -751,7 +780,9 @@ def __new__(cls, freq=None):
elif freq is None:
# empty constructor for pickle compat
- return object.__new__(cls)
+ u = object.__new__(cls)
+ u._freq = None
+ return u
if not isinstance(freq, ABCDateOffset):
freq = cls._parse_dtype_strict(freq)
@@ -760,10 +791,15 @@ def __new__(cls, freq=None):
return cls._cache[freq.freqstr]
except KeyError:
u = object.__new__(cls)
- u.freq = freq
+ u._freq = freq
cls._cache[freq.freqstr] = u
return u
+ @property
+ def freq(self):
+ """The frequency object of this PeriodDtype."""
+ return self._freq
+
@classmethod
def _parse_dtype_strict(cls, freq):
if isinstance(freq, str):
@@ -817,6 +853,12 @@ def __eq__(self, other):
return isinstance(other, PeriodDtype) and self.freq == other.freq
+ def __setstate__(self, state):
+ # for pickle compat. __get_state__ is defined in the
+ # PandasExtensionDtype superclass and uses the public properties to
+ # pickle -> need to set the settable private ones here (see GH26067)
+ self._freq = state['freq']
+
@classmethod
def is_dtype(cls, dtype):
"""
@@ -849,9 +891,27 @@ def construct_array_type(cls):
@register_extension_dtype
class IntervalDtype(PandasExtensionDtype, ExtensionDtype):
"""
- A Interval duck-typed class, suitable for holding an interval
+ An ExtensionDtype for Interval data.
- THIS IS NOT A REAL NUMPY DTYPE
+ **This is not an actual numpy dtype**, but a duck type.
+
+ Parameters
+ ----------
+ subtype : str, np.dtype
+ The dtype of the Interval bounds.
+
+ Attributes
+ ----------
+ subtype
+
+ Methods
+ -------
+ None
+
+ Examples
+ --------
+ >>> pd.IntervalDtype(subtype='int64')
+ interval[int64]
"""
name = 'interval'
kind = None # type: Optional[str_type]
@@ -863,11 +923,6 @@ class IntervalDtype(PandasExtensionDtype, ExtensionDtype):
_cache = {} # type: Dict[str_type, PandasExtensionDtype]
def __new__(cls, subtype=None):
- """
- Parameters
- ----------
- subtype : the dtype of the Interval
- """
from pandas.core.dtypes.common import (
is_categorical_dtype, is_string_dtype, pandas_dtype)
@@ -877,7 +932,7 @@ def __new__(cls, subtype=None):
# we are called as an empty constructor
# generally for pickle compat
u = object.__new__(cls)
- u.subtype = None
+ u._subtype = None
return u
elif (isinstance(subtype, str) and
subtype.lower() == 'interval'):
@@ -903,10 +958,15 @@ def __new__(cls, subtype=None):
return cls._cache[str(subtype)]
except KeyError:
u = object.__new__(cls)
- u.subtype = subtype
+ u._subtype = subtype
cls._cache[str(subtype)] = u
return u
+ @property
+ def subtype(self):
+ """The dtype of the Interval bounds."""
+ return self._subtype
+
@classmethod
def construct_array_type(cls):
"""
@@ -963,6 +1023,12 @@ def __eq__(self, other):
from pandas.core.dtypes.common import is_dtype_equal
return is_dtype_equal(self.subtype, other.subtype)
+ def __setstate__(self, state):
+ # for pickle compat. __get_state__ is defined in the
+ # PandasExtensionDtype superclass and uses the public properties to
+ # pickle -> need to set the settable private ones here (see GH26067)
+ self._subtype = state['subtype']
+
@classmethod
def is_dtype(cls, dtype):
"""
| - Use ``class_without_autosummary`` template for the dtypes (similar to what `CategoricalDtype` already did) to avoid a bunch of sphinx warnings
- Therefore, explicitly list some attributes in the docstrings (for now, I went with only documenting the dtype-specific "metadata")
- And while at it, I also gave the docstrings an update (documenting the parameters, etc)
- And for doing the above, turned some metadata attributes of the dtypes into properties, so they 1) can be documented (have a docstring) b) cannot be set by the user (xref https://github.com/pandas-dev/pandas/issues/26096) | https://api.github.com/repos/pandas-dev/pandas/pulls/26067 | 2019-04-12T18:44:44Z | 2019-04-20T16:50:39Z | 2019-04-20T16:50:39Z | 2019-04-22T07:41:41Z |
Add project_urls to setup | diff --git a/setup.py b/setup.py
index 7f7e58088d2ee..d121a54ded2a1 100755
--- a/setup.py
+++ b/setup.py
@@ -203,6 +203,11 @@ def build_extensions(self):
EMAIL = "pydata@googlegroups.com"
URL = "http://pandas.pydata.org"
DOWNLOAD_URL = ''
+PROJECT_URLS = {
+ 'Bug Tracker': 'https://github.com/pandas-dev/pandas/issues',
+ 'Documentation': 'http://pandas.pydata.org/pandas-docs/stable/',
+ 'Source Code': 'https://github.com/pandas-dev/pandas'
+}
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
@@ -772,6 +777,7 @@ def srcpath(name=None, suffix='.pyx', subdir='src'):
cmdclass=cmdclass,
url=URL,
download_url=DOWNLOAD_URL,
+ project_urls=PROJECT_URLS,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms='any',
| These are used by PyPI and other services.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26066 | 2019-04-12T18:33:16Z | 2019-04-12T21:58:10Z | 2019-04-12T21:58:10Z | 2019-04-12T21:58:15Z |
BUG: prevent overflowing diffs raising error in cut (#26045) | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 1953132c826ba..68cfaf539c3f1 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -399,6 +399,7 @@ Reshaping
- Bug in :func:`concat` where the resulting ``freq`` of two :class:`DatetimeIndex` with the same ``freq`` would be dropped (:issue:`3232`).
- Bug in :func:`merge` where merging with equivalent Categorical dtypes was raising an error (:issue:`22501`)
- Bug in :class:`DataFrame` constructor when passing non-empty tuples would cause a segmentation fault (:issue:`25691`)
+- Bug in :func:`pandas.cut` where large bins could incorrectly raise an error due to an integer overflow (:issue:`26045`)
Sparse
^^^^^^
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index f99fd9004bb31..8c29bdc2a974c 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -230,7 +230,9 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
- if (np.diff(bins) < 0).any():
+
+ # GH 26045: cast to float64 to avoid an overflow
+ if (np.diff(bins.astype('float64')) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py
index 6833460fa515b..f71730fb4a313 100644
--- a/pandas/tests/reshape/test_cut.py
+++ b/pandas/tests/reshape/test_cut.py
@@ -112,6 +112,35 @@ def test_bins_not_monotonic():
cut(data, [0.1, 1.5, 1, 10])
+@pytest.mark.parametrize("x, bins, expected", [
+ (date_range("2017-12-31", periods=3),
+ [Timestamp.min, Timestamp('2018-01-01'), Timestamp.max],
+ IntervalIndex.from_tuples([
+ (Timestamp.min, Timestamp('2018-01-01')),
+ (Timestamp('2018-01-01'), Timestamp.max)])),
+
+ ([-1, 0, 1],
+ np.array([np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max],
+ dtype="int64"),
+ IntervalIndex.from_tuples([
+ (np.iinfo(np.int64).min, 0),
+ (0, np.iinfo(np.int64).max)])),
+
+ ([np.timedelta64(-1), np.timedelta64(0), np.timedelta64(1)],
+ np.array([
+ np.timedelta64(-np.iinfo(np.int64).max),
+ np.timedelta64(0),
+ np.timedelta64(np.iinfo(np.int64).max)]),
+ IntervalIndex.from_tuples([
+ (np.timedelta64(-np.iinfo(np.int64).max), np.timedelta64(0)),
+ (np.timedelta64(0), np.timedelta64(np.iinfo(np.int64).max))])),
+])
+def test_bins_monotonic_not_overflowing(x, bins, expected):
+ # GH 26045
+ result = cut(x, bins)
+ tm.assert_index_equal(result.categories, expected)
+
+
def test_wrong_num_labels():
msg = "Bin labels must be one fewer than the number of bin edges"
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
| - [X] closes #26045
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I added a relevant test with the example reported in the issue in `\tests\reshape\test_cut.py`.
I also tested with `grep -rn pandas -e 'np.diff(bins'` if we could have a similar situation elsewhere but it does not seem so.
~I will merge upstream & add changelog entry.~
Edit: whatsnew entry added & no need to merge master | https://api.github.com/repos/pandas-dev/pandas/pulls/26063 | 2019-04-12T13:30:10Z | 2019-04-19T01:03:55Z | 2019-04-19T01:03:54Z | 2019-04-19T01:04:06Z |
Docstring fixes for PR06 errors | diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 0b35a031bc53f..c8bb0878b564d 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1175,7 +1175,7 @@ def __arrow_array__(self, type=None):
Parameters
----------
- na_tuple : boolean, default True
+ na_tuple : bool, default True
Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA
value itself if False, ``nan``.
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 0245b9f74d944..153bf386d4f33 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2336,7 +2336,8 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None):
----------
periods : int, default 1
Number of periods to shift.
- freq : frequency string
+ freq : str, optional
+ Frequency string
axis : axis to shift, default 0
fill_value : optional
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 708bea7d132a2..366fd78562568 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1585,7 +1585,7 @@ def to_frame(self, index=True, name=None):
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
- name : list / sequence of strings, optional
+ name : list / sequence of str, optional
The passed names should substitute index level names.
Returns
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index c1e12887b0150..7db11156cdfff 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1139,11 +1139,11 @@ def append(
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
- min_itemsize : dict of columns that specify minimum string sizes
- nan_rep : string to use as string nan representation
+ min_itemsize : dict of columns that specify minimum str sizes
+ nan_rep : str to use as str nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
- encoding : default None, provide an encoding for strings
+ encoding : default None, provide an encoding for str
dropna : bool, default False
Do not write an ALL nan row to the store settable
by the option 'io.hdf.dropna_table'.
diff --git a/pandas/io/spss.py b/pandas/io/spss.py
index cdbe14e9fe927..9605faeb36590 100644
--- a/pandas/io/spss.py
+++ b/pandas/io/spss.py
@@ -20,7 +20,7 @@ def read_spss(
Parameters
----------
- path : string or Path
+ path : str or Path
File path.
usecols : list-like, optional
Return a subset of the columns. If None, return all columns.
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 58fed0d18dd4a..b56eae96810b4 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -284,7 +284,7 @@ def read_sql_query(
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
- index_col : str or list of strings, optional, default: None
+ index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
@@ -364,7 +364,7 @@ def read_sql(
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the SQLAlchemy connectable. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_
- index_col : str or list of strings, optional, default: None
+ index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
| This contains some small changes that resolve some of the PR06 errors from running `./scripts/validate_docstrings.py --errors=PR06`
- [x] xref #28724
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31838 | 2020-02-09T22:30:02Z | 2020-02-10T16:20:40Z | 2020-02-10T16:20:40Z | 2020-02-11T00:24:47Z |
REF: make _setitem_with_indexer iloc-only | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index b2e5d04247e81..c7dcccab00d95 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -630,7 +630,10 @@ def __setitem__(self, key, value):
else:
key = com.apply_if_callable(key, self.obj)
indexer = self._get_setitem_indexer(key)
- self._setitem_with_indexer(indexer, value)
+ self._has_valid_setitem_indexer(key)
+
+ iloc = self if self.name == "iloc" else self.obj.iloc
+ iloc._setitem_with_indexer(indexer, value)
def _validate_key(self, key, axis: int):
"""
@@ -698,681 +701,725 @@ def _convert_tuple(self, key, is_setter: bool = False):
keyidx.append(idx)
return tuple(keyidx)
- def _setitem_with_indexer(self, indexer, value):
- self._has_valid_setitem_indexer(indexer)
-
- # also has the side effect of consolidating in-place
- from pandas import Series
+ def _handle_lowerdim_multi_index_axis0(self, tup: Tuple):
+ # we have an axis0 multi-index, handle or raise
+ axis = self.axis or 0
+ try:
+ # fast path for series or for tup devoid of slices
+ return self._get_label(tup, axis=axis)
+ except TypeError:
+ # slices are unhashable
+ pass
+ except KeyError as ek:
+ # raise KeyError if number of indexers match
+ # else IndexingError will be raised
+ if len(tup) <= self.obj.index.nlevels and len(tup) > self.ndim:
+ raise ek
- info_axis = self.obj._info_axis_number
+ return None
- # maybe partial set
- take_split_path = self.obj._is_mixed_type
+ def _getitem_lowerdim(self, tup: Tuple):
- # if there is only one block/type, still have to take split path
- # unless the block is one-dimensional or it can hold the value
- if not take_split_path and self.obj._data.blocks:
- (blk,) = self.obj._data.blocks
- if 1 < blk.ndim: # in case of dict, keys are indices
- val = list(value.values()) if isinstance(value, dict) else value
- take_split_path = not blk._can_hold_element(val)
+ # we can directly get the axis result since the axis is specified
+ if self.axis is not None:
+ axis = self.obj._get_axis_number(self.axis)
+ return self._getitem_axis(tup, axis=axis)
- # if we have any multi-indexes that have non-trivial slices
- # (not null slices) then we must take the split path, xref
- # GH 10360, GH 27841
- if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
- for i, ax in zip(indexer, self.obj.axes):
- if isinstance(ax, ABCMultiIndex) and not (
- is_integer(i) or com.is_null_slice(i)
- ):
- take_split_path = True
- break
+ # we may have a nested tuples indexer here
+ if self._is_nested_tuple_indexer(tup):
+ return self._getitem_nested_tuple(tup)
- if isinstance(indexer, tuple):
- nindexer = []
- for i, idx in enumerate(indexer):
- if isinstance(idx, dict):
+ # we maybe be using a tuple to represent multiple dimensions here
+ ax0 = self.obj._get_axis(0)
+ # ...but iloc should handle the tuple as simple integer-location
+ # instead of checking it as multiindex representation (GH 13797)
+ if isinstance(ax0, ABCMultiIndex) and self.name != "iloc":
+ result = self._handle_lowerdim_multi_index_axis0(tup)
+ if result is not None:
+ return result
- # reindex the axis to the new value
- # and set inplace
- key, _ = convert_missing_indexer(idx)
+ if len(tup) > self.ndim:
+ raise IndexingError("Too many indexers. handle elsewhere")
- # if this is the items axes, then take the main missing
- # path first
- # this correctly sets the dtype and avoids cache issues
- # essentially this separates out the block that is needed
- # to possibly be modified
- if self.ndim > 1 and i == self.obj._info_axis_number:
+ for i, key in enumerate(tup):
+ if is_label_like(key) or isinstance(key, tuple):
+ section = self._getitem_axis(key, axis=i)
- # add the new item, and set the value
- # must have all defined axes if we have a scalar
- # or a list-like on the non-info axes if we have a
- # list-like
- len_non_info_axes = (
- len(_ax) for _i, _ax in enumerate(self.obj.axes) if _i != i
- )
- if any(not l for l in len_non_info_axes):
- if not is_list_like_indexer(value):
- raise ValueError(
- "cannot set a frame with no "
- "defined index and a scalar"
- )
- self.obj[key] = value
- return
+ # we have yielded a scalar ?
+ if not is_list_like_indexer(section):
+ return section
- # add a new item with the dtype setup
- self.obj[key] = _infer_fill_value(value)
+ elif section.ndim == self.ndim:
+ # we're in the middle of slicing through a MultiIndex
+ # revise the key wrt to `section` by inserting an _NS
+ new_key = tup[:i] + (_NS,) + tup[i + 1 :]
- new_indexer = convert_from_missing_indexer_tuple(
- indexer, self.obj.axes
- )
- self._setitem_with_indexer(new_indexer, value)
+ else:
+ new_key = tup[:i] + tup[i + 1 :]
- return
+ # unfortunately need an odious kludge here because of
+ # DataFrame transposing convention
+ if (
+ isinstance(section, ABCDataFrame)
+ and i > 0
+ and len(new_key) == 2
+ ):
+ a, b = new_key
+ new_key = b, a
- # reindex the axis
- # make sure to clear the cache because we are
- # just replacing the block manager here
- # so the object is the same
- index = self.obj._get_axis(i)
- labels = index.insert(len(index), key)
- self.obj._data = self.obj.reindex(labels, axis=i)._data
- self.obj._maybe_update_cacher(clear=True)
- self.obj._is_copy = None
+ if len(new_key) == 1:
+ new_key = new_key[0]
- nindexer.append(labels.get_loc(key))
+ # Slices should return views, but calling iloc/loc with a null
+ # slice returns a new object.
+ if com.is_null_slice(new_key):
+ return section
+ # This is an elided recursive call to iloc/loc/etc'
+ return getattr(section, self.name)[new_key]
- else:
- nindexer.append(idx)
+ raise IndexingError("not applicable")
- indexer = tuple(nindexer)
- else:
+ def _getitem_nested_tuple(self, tup: Tuple):
+ # we have a nested tuple so have at least 1 multi-index level
+ # we should be able to match up the dimensionality here
- indexer, missing = convert_missing_indexer(indexer)
+ # we have too many indexers for our dim, but have at least 1
+ # multi-index dimension, try to see if we have something like
+ # a tuple passed to a series with a multi-index
+ if len(tup) > self.ndim:
+ result = self._handle_lowerdim_multi_index_axis0(tup)
+ if result is not None:
+ return result
- if missing:
- self._setitem_with_indexer_missing(indexer, value)
- return
+ # this is a series with a multi-index specified a tuple of
+ # selectors
+ axis = self.axis or 0
+ return self._getitem_axis(tup, axis=axis)
- # set
- item_labels = self.obj._get_axis(info_axis)
+ # handle the multi-axis by taking sections and reducing
+ # this is iterative
+ obj = self.obj
+ axis = 0
+ for i, key in enumerate(tup):
- # align and set the values
- if take_split_path:
- # Above we only set take_split_path to True for 2D cases
- assert self.ndim == 2
- assert info_axis == 1
+ if com.is_null_slice(key):
+ axis += 1
+ continue
- if not isinstance(indexer, tuple):
- indexer = _tuplify(self.ndim, indexer)
+ current_ndim = obj.ndim
+ obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
+ axis += 1
- if isinstance(value, ABCSeries):
- value = self._align_series(indexer, value)
+ # if we have a scalar, we are done
+ if is_scalar(obj) or not hasattr(obj, "ndim"):
+ break
- info_idx = indexer[info_axis]
- if is_integer(info_idx):
- info_idx = [info_idx]
- labels = item_labels[info_idx]
+ # has the dim of the obj changed?
+ # GH 7199
+ if obj.ndim < current_ndim:
+ axis -= 1
- # if we have a partial multiindex, then need to adjust the plane
- # indexer here
- if len(labels) == 1 and isinstance(
- self.obj[labels[0]].axes[0], ABCMultiIndex
- ):
- item = labels[0]
- obj = self.obj[item]
- index = obj.index
- idx = indexer[:info_axis][0]
+ return obj
- plane_indexer = tuple([idx]) + indexer[info_axis + 1 :]
- lplane_indexer = length_of_indexer(plane_indexer[0], index)
+ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
+ raise AbstractMethodError(self)
- # require that we are setting the right number of values that
- # we are indexing
- if (
- is_list_like_indexer(value)
- and np.iterable(value)
- and lplane_indexer != len(value)
- ):
+ def __getitem__(self, key):
+ if type(key) is tuple:
+ key = tuple(com.apply_if_callable(x, self.obj) for x in key)
+ if self._is_scalar_access(key):
+ try:
+ return self.obj._get_value(*key, takeable=self._takeable)
+ except (KeyError, IndexError, AttributeError):
+ # AttributeError for IntervalTree get_value
+ pass
+ return self._getitem_tuple(key)
+ else:
+ # we by definition only have the 0th axis
+ axis = self.axis or 0
- if len(obj[idx]) != len(value):
- raise ValueError(
- "cannot set using a multi-index "
- "selection indexer with a different "
- "length than the value"
- )
+ maybe_callable = com.apply_if_callable(key, self.obj)
+ return self._getitem_axis(maybe_callable, axis=axis)
- # make sure we have an ndarray
- value = getattr(value, "values", value).ravel()
+ def _is_scalar_access(self, key: Tuple):
+ raise NotImplementedError()
- # we can directly set the series here
- obj._consolidate_inplace()
- obj = obj.copy()
- obj._data = obj._data.setitem(indexer=tuple([idx]), value=value)
- self.obj[item] = obj
- return
+ def _getitem_tuple(self, tup: Tuple):
+ raise AbstractMethodError(self)
- # non-mi
- else:
- plane_indexer = indexer[:info_axis] + indexer[info_axis + 1 :]
- plane_axis = self.obj.axes[:info_axis][0]
- lplane_indexer = length_of_indexer(plane_indexer[0], plane_axis)
+ def _getitem_axis(self, key, axis: int):
+ raise NotImplementedError()
- def setter(item, v):
- s = self.obj[item]
- pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
+ def _has_valid_setitem_indexer(self, indexer) -> bool:
+ raise AbstractMethodError(self)
- # perform the equivalent of a setitem on the info axis
- # as we have a null slice or a slice with full bounds
- # which means essentially reassign to the columns of a
- # multi-dim object
- # GH6149 (null slice), GH10408 (full bounds)
- if isinstance(pi, tuple) and all(
- com.is_null_slice(idx) or com.is_full_slice(idx, len(self.obj))
- for idx in pi
- ):
- s = v
- else:
- # set the item, possibly having a dtype change
- s._consolidate_inplace()
- s = s.copy()
- s._data = s._data.setitem(indexer=pi, value=v)
- s._maybe_update_cacher(clear=True)
-
- # reset the sliced object if unique
- self.obj[item] = s
-
- # we need an iterable, with a ndim of at least 1
- # eg. don't pass through np.array(0)
- if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
-
- # we have an equal len Frame
- if isinstance(value, ABCDataFrame):
- sub_indexer = list(indexer)
- multiindex_indexer = isinstance(labels, ABCMultiIndex)
-
- for item in labels:
- if item in value:
- sub_indexer[info_axis] = item
- v = self._align_series(
- tuple(sub_indexer), value[item], multiindex_indexer
- )
- else:
- v = np.nan
+ def _getbool_axis(self, key, axis: int):
+ # caller is responsible for ensuring non-None axis
+ labels = self.obj._get_axis(axis)
+ key = check_bool_indexer(labels, key)
+ inds = key.nonzero()[0]
+ return self.obj._take_with_is_copy(inds, axis=axis)
- setter(item, v)
- # we have an equal len ndarray/convertible to our labels
- # hasattr first, to avoid coercing to ndarray without reason.
- # But we may be relying on the ndarray coercion to check ndim.
- # Why not just convert to an ndarray earlier on if needed?
- elif np.ndim(value) == 2:
+@Appender(IndexingMixin.loc.__doc__)
+class _LocIndexer(_LocationIndexer):
+ _takeable: bool = False
+ _valid_types = (
+ "labels (MUST BE IN THE INDEX), slices of labels (BOTH "
+ "endpoints included! Can be slices of integers if the "
+ "index is integers), listlike of labels, boolean"
+ )
- # note that this coerces the dtype if we are mixed
- # GH 7551
- value = np.array(value, dtype=object)
- if len(labels) != value.shape[1]:
- raise ValueError(
- "Must have equal len keys and value "
- "when setting with an ndarray"
- )
+ # -------------------------------------------------------------------
+ # Key Checks
- for i, item in enumerate(labels):
+ @Appender(_LocationIndexer._validate_key.__doc__)
+ def _validate_key(self, key, axis: int):
- # setting with a list, recoerces
- setter(item, value[:, i].tolist())
+ # valid for a collection of labels (we check their presence later)
+ # slice of labels (where start-end in labels)
+ # slice of integers (only if in the labels)
+ # boolean
- # we have an equal len list/ndarray
- elif _can_do_equal_len(
- labels, value, plane_indexer, lplane_indexer, self.obj
- ):
- setter(labels[0], value)
+ if isinstance(key, slice):
+ return
- # per label values
- else:
+ if com.is_bool_indexer(key):
+ return
- if len(labels) != len(value):
- raise ValueError(
- "Must have equal len keys and value "
- "when setting with an iterable"
- )
+ if not is_list_like_indexer(key):
+ labels = self.obj._get_axis(axis)
+ labels._convert_scalar_indexer(key, kind="loc")
- for item, v in zip(labels, value):
- setter(item, v)
- else:
+ def _has_valid_setitem_indexer(self, indexer) -> bool:
+ return True
- # scalar
- for item in labels:
- setter(item, value)
+ def _is_scalar_access(self, key: Tuple) -> bool:
+ """
+ Returns
+ -------
+ bool
+ """
+ # this is a shortcut accessor to both .loc and .iloc
+ # that provide the equivalent access of .at and .iat
+ # a) avoid getting things via sections and (to minimize dtype changes)
+ # b) provide a performant path
+ if len(key) != self.ndim:
+ return False
- else:
- if isinstance(indexer, tuple):
- indexer = maybe_convert_ix(*indexer)
+ for i, k in enumerate(key):
+ if not is_scalar(k):
+ return False
- # if we are setting on the info axis ONLY
- # set using those methods to avoid block-splitting
- # logic here
- if (
- len(indexer) > info_axis
- and is_integer(indexer[info_axis])
- and all(
- com.is_null_slice(idx)
- for i, idx in enumerate(indexer)
- if i != info_axis
- )
- and item_labels.is_unique
- ):
- self.obj[item_labels[indexer[info_axis]]] = value
- return
+ ax = self.obj.axes[i]
+ if isinstance(ax, ABCMultiIndex):
+ return False
- if isinstance(value, (ABCSeries, dict)):
- # TODO(EA): ExtensionBlock.setitem this causes issues with
- # setting for extensionarrays that store dicts. Need to decide
- # if it's worth supporting that.
- value = self._align_series(indexer, Series(value))
+ if isinstance(k, str) and ax._supports_partial_string_indexing:
+ # partial string indexing, df.loc['2000', 'A']
+ # should not be considered scalar
+ return False
- elif isinstance(value, ABCDataFrame):
- value = self._align_frame(indexer, value)
+ if not ax.is_unique:
+ return False
- # check for chained assignment
- self.obj._check_is_chained_assignment_possible()
+ return True
- # actually do the set
- self.obj._consolidate_inplace()
- self.obj._data = self.obj._data.setitem(indexer=indexer, value=value)
- self.obj._maybe_update_cacher(clear=True)
+ # -------------------------------------------------------------------
+ # MultiIndex Handling
- def _setitem_with_indexer_missing(self, indexer, value):
- """
- Insert new row(s) or column(s) into the Series or DataFrame.
+ def _multi_take_opportunity(self, tup: Tuple) -> bool:
"""
- from pandas import Series
-
- # reindex the axis to the new value
- # and set inplace
- if self.ndim == 1:
- index = self.obj.index
- new_index = index.insert(len(index), indexer)
-
- # we have a coerced indexer, e.g. a float
- # that matches in an Int64Index, so
- # we will not create a duplicate index, rather
- # index to that element
- # e.g. 0.0 -> 0
- # GH#12246
- if index.is_unique:
- new_indexer = index.get_indexer([new_index[-1]])
- if (new_indexer != -1).any():
- return self._setitem_with_indexer(new_indexer, value)
-
- # this preserves dtype of the value
- new_values = Series([value])._values
- if len(self.obj._values):
- # GH#22717 handle casting compatibility that np.concatenate
- # does incorrectly
- new_values = concat_compat([self.obj._values, new_values])
- self.obj._data = self.obj._constructor(
- new_values, index=new_index, name=self.obj.name
- )._data
- self.obj._maybe_update_cacher(clear=True)
-
- elif self.ndim == 2:
+ Check whether there is the possibility to use ``_multi_take``.
- if not len(self.obj.columns):
- # no columns and scalar
- raise ValueError("cannot set a frame with no defined columns")
+ Currently the limit is that all axes being indexed, must be indexed with
+ list-likes.
- if isinstance(value, ABCSeries):
- # append a Series
- value = value.reindex(index=self.obj.columns, copy=True)
- value.name = indexer
+ Parameters
+ ----------
+ tup : tuple
+ Tuple of indexers, one per axis.
- else:
- # a list-list
- if is_list_like_indexer(value):
- # must have conforming columns
- if len(value) != len(self.obj.columns):
- raise ValueError("cannot set a row with mismatched columns")
+ Returns
+ -------
+ bool
+ Whether the current indexing,
+ can be passed through `_multi_take`.
+ """
+ if not all(is_list_like_indexer(x) for x in tup):
+ return False
- value = Series(value, index=self.obj.columns, name=indexer)
+ # just too complicated
+ if any(com.is_bool_indexer(x) for x in tup):
+ return False
- self.obj._data = self.obj.append(value)._data
- self.obj._maybe_update_cacher(clear=True)
+ return True
- def _align_series(self, indexer, ser: ABCSeries, multiindex_indexer: bool = False):
+ def _multi_take(self, tup: Tuple):
"""
+ Create the indexers for the passed tuple of keys, and
+ executes the take operation. This allows the take operation to be
+ executed all at once, rather than once for each dimension.
+ Improving efficiency.
+
Parameters
----------
- indexer : tuple, slice, scalar
- Indexer used to get the locations that will be set to `ser`.
- ser : pd.Series
- Values to assign to the locations specified by `indexer`.
- multiindex_indexer : boolean, optional
- Defaults to False. Should be set to True if `indexer` was from
- a `pd.MultiIndex`, to avoid unnecessary broadcasting.
+ tup : tuple
+ Tuple of indexers, one per axis.
Returns
-------
- `np.array` of `ser` broadcast to the appropriate shape for assignment
- to the locations selected by `indexer`
+ values: same type as the object being indexed
"""
- if isinstance(indexer, (slice, np.ndarray, list, Index)):
- indexer = tuple([indexer])
+ # GH 836
+ d = {
+ axis: self._get_listlike_indexer(key, axis)
+ for (key, axis) in zip(tup, self.obj._AXIS_ORDERS)
+ }
+ return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True)
- if isinstance(indexer, tuple):
-
- # flatten np.ndarray indexers
- def ravel(i):
- return i.ravel() if isinstance(i, np.ndarray) else i
-
- indexer = tuple(map(ravel, indexer))
-
- aligners = [not com.is_null_slice(idx) for idx in indexer]
- sum_aligners = sum(aligners)
- single_aligner = sum_aligners == 1
- is_frame = self.ndim == 2
- obj = self.obj
+ # -------------------------------------------------------------------
- # are we a single alignable value on a non-primary
- # dim (e.g. panel: 1,2, or frame: 0) ?
- # hence need to align to a single axis dimension
- # rather that find all valid dims
+ def _get_partial_string_timestamp_match_key(self, key, labels):
+ """
+ Translate any partial string timestamp matches in key, returning the
+ new key.
- # frame
- if is_frame:
- single_aligner = single_aligner and aligners[0]
+ (GH 10331)
+ """
+ if isinstance(labels, ABCMultiIndex):
+ if (
+ isinstance(key, str)
+ and labels.levels[0]._supports_partial_string_indexing
+ ):
+ # Convert key '2016-01-01' to
+ # ('2016-01-01'[, slice(None, None, None)]+)
+ key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))
- # we have a frame, with multiple indexers on both axes; and a
- # series, so need to broadcast (see GH5206)
- if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer):
- ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
+ if isinstance(key, tuple):
+ # Convert (..., '2016-01-01', ...) in tuple to
+ # (..., slice('2016-01-01', '2016-01-01', None), ...)
+ new_key = []
+ for i, component in enumerate(key):
+ if (
+ isinstance(component, str)
+ and labels.levels[i]._supports_partial_string_indexing
+ ):
+ new_key.append(slice(component, component, None))
+ else:
+ new_key.append(component)
+ key = tuple(new_key)
- # single indexer
- if len(indexer) > 1 and not multiindex_indexer:
- len_indexer = len(indexer[1])
- ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T
+ return key
- return ser
+ def _getitem_iterable(self, key, axis: int):
+ """
+ Index current object with an an iterable collection of keys.
- for i, idx in enumerate(indexer):
- ax = obj.axes[i]
+ Parameters
+ ----------
+ key : iterable
+ Targeted labels.
+ axis: int
+ Dimension on which the indexing is being made.
- # multiple aligners (or null slices)
- if is_sequence(idx) or isinstance(idx, slice):
- if single_aligner and com.is_null_slice(idx):
- continue
- new_ix = ax[idx]
- if not is_list_like_indexer(new_ix):
- new_ix = Index([new_ix])
- else:
- new_ix = Index(new_ix)
- if ser.index.equals(new_ix) or not len(new_ix):
- return ser._values.copy()
+ Raises
+ ------
+ KeyError
+ If no key was found. Will change in the future to raise if not all
+ keys were found.
- return ser.reindex(new_ix)._values
+ Returns
+ -------
+ scalar, DataFrame, or Series: indexed value(s).
+ """
+ # we assume that not com.is_bool_indexer(key), as that is
+ # handled before we get here.
+ self._validate_key(key, axis)
- # 2 dims
- elif single_aligner:
+ # A collection of keys
+ keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
+ return self.obj._reindex_with_indexers(
+ {axis: [keyarr, indexer]}, copy=True, allow_dups=True
+ )
- # reindex along index
- ax = self.obj.axes[1]
- if ser.index.equals(ax) or not len(ax):
- return ser._values.copy()
- return ser.reindex(ax)._values
+ def _getitem_tuple(self, tup: Tuple):
+ try:
+ return self._getitem_lowerdim(tup)
+ except IndexingError:
+ pass
- elif is_scalar(indexer):
- ax = self.obj._get_axis(1)
+ # no multi-index, so validate all of the indexers
+ self._has_valid_tuple(tup)
- if ser.index.equals(ax):
- return ser._values.copy()
+ # ugly hack for GH #836
+ if self._multi_take_opportunity(tup):
+ return self._multi_take(tup)
- return ser.reindex(ax)._values
+ # no shortcut needed
+ retval = self.obj
+ for i, key in enumerate(tup):
+ if com.is_null_slice(key):
+ continue
- raise ValueError("Incompatible indexer with Series")
+ retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
- def _align_frame(self, indexer, df: ABCDataFrame):
- is_frame = self.ndim == 2
+ return retval
- if isinstance(indexer, tuple):
+ def _getitem_axis(self, key, axis: int):
+ key = item_from_zerodim(key)
+ if is_iterator(key):
+ key = list(key)
- idx, cols = None, None
- sindexers = []
- for i, ix in enumerate(indexer):
- ax = self.obj.axes[i]
- if is_sequence(ix) or isinstance(ix, slice):
- if isinstance(ix, np.ndarray):
- ix = ix.ravel()
- if idx is None:
- idx = ax[ix]
- elif cols is None:
- cols = ax[ix]
- else:
- break
- else:
- sindexers.append(i)
+ labels = self.obj._get_axis(axis)
+ key = self._get_partial_string_timestamp_match_key(key, labels)
- if idx is not None and cols is not None:
+ if isinstance(key, slice):
+ self._validate_key(key, axis)
+ return self._get_slice_axis(key, axis=axis)
+ elif com.is_bool_indexer(key):
+ return self._getbool_axis(key, axis=axis)
+ elif is_list_like_indexer(key):
- if df.index.equals(idx) and df.columns.equals(cols):
- val = df.copy()._values
- else:
- val = df.reindex(idx, columns=cols)._values
- return val
+ # convert various list-like indexers
+ # to a list of keys
+ # we will use the *values* of the object
+ # and NOT the index if its a PandasObject
+ if isinstance(labels, ABCMultiIndex):
- elif (isinstance(indexer, slice) or is_list_like_indexer(indexer)) and is_frame:
- ax = self.obj.index[indexer]
- if df.index.equals(ax):
- val = df.copy()._values
- else:
+ if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
+ # Series, or 0,1 ndim ndarray
+ # GH 14730
+ key = list(key)
+ elif isinstance(key, ABCDataFrame):
+ # GH 15438
+ raise NotImplementedError(
+ "Indexing a MultiIndex with a "
+ "DataFrame key is not "
+ "implemented"
+ )
+ elif hasattr(key, "ndim") and key.ndim > 1:
+ raise NotImplementedError(
+ "Indexing a MultiIndex with a "
+ "multidimensional key is not "
+ "implemented"
+ )
- # we have a multi-index and are trying to align
- # with a particular, level GH3738
if (
- isinstance(ax, ABCMultiIndex)
- and isinstance(df.index, ABCMultiIndex)
- and ax.nlevels != df.index.nlevels
+ not isinstance(key, tuple)
+ and len(key)
+ and not isinstance(key[0], tuple)
):
- raise TypeError(
- "cannot align on a multi-index with out "
- "specifying the join levels"
- )
+ key = tuple([key])
- val = df.reindex(index=ax)._values
- return val
+ # an iterable multi-selection
+ if not (isinstance(key, tuple) and isinstance(labels, ABCMultiIndex)):
- raise ValueError("Incompatible indexer with DataFrame")
+ if hasattr(key, "ndim") and key.ndim > 1:
+ raise ValueError("Cannot index with multidimensional key")
- def _handle_lowerdim_multi_index_axis0(self, tup: Tuple):
- # we have an axis0 multi-index, handle or raise
- axis = self.axis or 0
- try:
- # fast path for series or for tup devoid of slices
- return self._get_label(tup, axis=axis)
- except TypeError:
- # slices are unhashable
- pass
- except KeyError as ek:
- # raise KeyError if number of indexers match
- # else IndexingError will be raised
- if len(tup) <= self.obj.index.nlevels and len(tup) > self.ndim:
- raise ek
+ return self._getitem_iterable(key, axis=axis)
- return None
+ # nested tuple slicing
+ if is_nested_tuple(key, labels):
+ locs = labels.get_locs(key)
+ indexer = [slice(None)] * self.ndim
+ indexer[axis] = locs
+ return self.obj.iloc[tuple(indexer)]
- def _getitem_lowerdim(self, tup: Tuple):
+ # fall thru to straight lookup
+ self._validate_key(key, axis)
+ return self._get_label(key, axis=axis)
- # we can directly get the axis result since the axis is specified
- if self.axis is not None:
- axis = self.obj._get_axis_number(self.axis)
- return self._getitem_axis(tup, axis=axis)
+ def _get_slice_axis(self, slice_obj: slice, axis: int):
+ """
+ This is pretty simple as we just have to deal with labels.
+ """
+ # caller is responsible for ensuring non-None axis
+ obj = self.obj
+ if not need_slice(slice_obj):
+ return obj.copy(deep=False)
- # we may have a nested tuples indexer here
- if self._is_nested_tuple_indexer(tup):
- return self._getitem_nested_tuple(tup)
+ labels = obj._get_axis(axis)
+ indexer = labels.slice_indexer(
+ slice_obj.start, slice_obj.stop, slice_obj.step, kind="loc"
+ )
- # we maybe be using a tuple to represent multiple dimensions here
- ax0 = self.obj._get_axis(0)
- # ...but iloc should handle the tuple as simple integer-location
- # instead of checking it as multiindex representation (GH 13797)
- if isinstance(ax0, ABCMultiIndex) and self.name != "iloc":
- result = self._handle_lowerdim_multi_index_axis0(tup)
- if result is not None:
- return result
+ if isinstance(indexer, slice):
+ return self.obj._slice(indexer, axis=axis, kind="iloc")
+ else:
+ # DatetimeIndex overrides Index.slice_indexer and may
+ # return a DatetimeIndex instead of a slice object.
+ return self.obj.take(indexer, axis=axis)
- if len(tup) > self.ndim:
- raise IndexingError("Too many indexers. handle elsewhere")
+ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
+ """
+ Convert indexing key into something we can use to do actual fancy
+ indexing on a ndarray.
- for i, key in enumerate(tup):
- if is_label_like(key) or isinstance(key, tuple):
- section = self._getitem_axis(key, axis=i)
+ Examples
+ ix[:5] -> slice(0, 5)
+ ix[[1,2,3]] -> [1,2,3]
+ ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
- # we have yielded a scalar ?
- if not is_list_like_indexer(section):
- return section
+ Going by Zen of Python?
+ 'In the face of ambiguity, refuse the temptation to guess.'
+ raise AmbiguousIndexError with integer labels?
+ - No, prefer label-based indexing
+ """
+ labels = self.obj._get_axis(axis)
- elif section.ndim == self.ndim:
- # we're in the middle of slicing through a MultiIndex
- # revise the key wrt to `section` by inserting an _NS
- new_key = tup[:i] + (_NS,) + tup[i + 1 :]
+ if isinstance(key, slice):
+ return labels._convert_slice_indexer(key, kind="loc")
- else:
- new_key = tup[:i] + tup[i + 1 :]
+ if is_scalar(key):
+ # try to find out correct indexer, if not type correct raise
+ try:
+ key = labels._convert_scalar_indexer(key, kind="loc")
+ except TypeError:
+ # but we will allow setting
+ if not is_setter:
+ raise
- # unfortunately need an odious kludge here because of
- # DataFrame transposing convention
- if (
- isinstance(section, ABCDataFrame)
- and i > 0
- and len(new_key) == 2
- ):
- a, b = new_key
- new_key = b, a
+ # see if we are positional in nature
+ is_int_index = labels.is_integer()
+ is_int_positional = is_integer(key) and not is_int_index
- if len(new_key) == 1:
- new_key = new_key[0]
+ if is_scalar(key) or isinstance(labels, ABCMultiIndex):
+ # Otherwise get_loc will raise InvalidIndexError
- # Slices should return views, but calling iloc/loc with a null
- # slice returns a new object.
- if com.is_null_slice(new_key):
- return section
- # This is an elided recursive call to iloc/loc/etc'
- return getattr(section, self.name)[new_key]
+ # if we are a label return me
+ try:
+ return labels.get_loc(key)
+ except LookupError:
+ if isinstance(key, tuple) and isinstance(labels, ABCMultiIndex):
+ if len(key) == labels.nlevels:
+ return {"key": key}
+ raise
+ except TypeError:
+ pass
+ except ValueError:
+ if not is_int_positional:
+ raise
- raise IndexingError("not applicable")
+ # a positional
+ if is_int_positional:
- def _getitem_nested_tuple(self, tup: Tuple):
- # we have a nested tuple so have at least 1 multi-index level
- # we should be able to match up the dimensionality here
+ # if we are setting and its not a valid location
+ # its an insert which fails by definition
- # we have too many indexers for our dim, but have at least 1
- # multi-index dimension, try to see if we have something like
- # a tuple passed to a series with a multi-index
- if len(tup) > self.ndim:
- result = self._handle_lowerdim_multi_index_axis0(tup)
- if result is not None:
- return result
+ # always valid
+ return {"key": key}
- # this is a series with a multi-index specified a tuple of
- # selectors
- axis = self.axis or 0
- return self._getitem_axis(tup, axis=axis)
+ if is_nested_tuple(key, labels):
+ return labels.get_locs(key)
- # handle the multi-axis by taking sections and reducing
- # this is iterative
- obj = self.obj
- axis = 0
- for i, key in enumerate(tup):
+ elif is_list_like_indexer(key):
- if com.is_null_slice(key):
- axis += 1
- continue
+ if com.is_bool_indexer(key):
+ key = check_bool_indexer(labels, key)
+ (inds,) = key.nonzero()
+ return inds
+ else:
+ # When setting, missing keys are not allowed, even with .loc:
+ return self._get_listlike_indexer(key, axis, raise_missing=True)[1]
+ else:
+ try:
+ return labels.get_loc(key)
+ except LookupError:
+ # allow a not found key only if we are a setter
+ if not is_list_like_indexer(key):
+ return {"key": key}
+ raise
- current_ndim = obj.ndim
- obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
- axis += 1
+ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
+ """
+ Transform a list-like of keys into a new index and an indexer.
- # if we have a scalar, we are done
- if is_scalar(obj) or not hasattr(obj, "ndim"):
- break
+ Parameters
+ ----------
+ key : list-like
+ Targeted labels.
+ axis: int
+ Dimension on which the indexing is being made.
+ raise_missing: bool, default False
+ Whether to raise a KeyError if some labels were not found.
+ Will be removed in the future, and then this method will always behave as
+ if ``raise_missing=True``.
- # has the dim of the obj changed?
- # GH 7199
- if obj.ndim < current_ndim:
- axis -= 1
+ Raises
+ ------
+ KeyError
+ If at least one key was requested but none was found, and
+ raise_missing=True.
- return obj
+ Returns
+ -------
+ keyarr: Index
+ New index (coinciding with 'key' if the axis is unique).
+ values : array-like
+ Indexer for the return object, -1 denotes keys not found.
+ """
+ ax = self.obj._get_axis(axis)
- def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
- raise AbstractMethodError(self)
+ # Have the index compute an indexer or return None
+ # if it cannot handle:
+ indexer, keyarr = ax._convert_listlike_indexer(key)
+ # We only act on all found values:
+ if indexer is not None and (indexer != -1).all():
+ self._validate_read_indexer(key, indexer, axis, raise_missing=raise_missing)
+ return ax[indexer], indexer
- def __getitem__(self, key):
- if type(key) is tuple:
- key = tuple(com.apply_if_callable(x, self.obj) for x in key)
- if self._is_scalar_access(key):
- try:
- return self.obj._get_value(*key, takeable=self._takeable)
- except (KeyError, IndexError, AttributeError):
- # AttributeError for IntervalTree get_value
- pass
- return self._getitem_tuple(key)
+ if ax.is_unique and not getattr(ax, "is_overlapping", False):
+ indexer = ax.get_indexer_for(key)
+ keyarr = ax.reindex(keyarr)[0]
else:
- # we by definition only have the 0th axis
- axis = self.axis or 0
-
- maybe_callable = com.apply_if_callable(key, self.obj)
- return self._getitem_axis(maybe_callable, axis=axis)
+ keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
- def _is_scalar_access(self, key: Tuple):
- raise NotImplementedError()
+ self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing)
+ return keyarr, indexer
- def _getitem_tuple(self, tup: Tuple):
- raise AbstractMethodError(self)
+ def _validate_read_indexer(
+ self, key, indexer, axis: int, raise_missing: bool = False
+ ):
+ """
+ Check that indexer can be used to return a result.
- def _getitem_axis(self, key, axis: int):
- raise NotImplementedError()
+ e.g. at least one element was found,
+ unless the list of keys was actually empty.
- def _has_valid_setitem_indexer(self, indexer) -> bool:
- raise AbstractMethodError(self)
+ Parameters
+ ----------
+ key : list-like
+ Targeted labels (only used to show correct error message).
+ indexer: array-like of booleans
+ Indices corresponding to the key,
+ (with -1 indicating not found).
+ axis: int
+ Dimension on which the indexing is being made.
+ raise_missing: bool
+ Whether to raise a KeyError if some labels are not found. Will be
+ removed in the future, and then this method will always behave as
+ if raise_missing=True.
- def _getbool_axis(self, key, axis: int):
- # caller is responsible for ensuring non-None axis
- labels = self.obj._get_axis(axis)
- key = check_bool_indexer(labels, key)
- inds = key.nonzero()[0]
- return self.obj._take_with_is_copy(inds, axis=axis)
+ Raises
+ ------
+ KeyError
+ If at least one key was requested but none was found, and
+ raise_missing=True.
+ """
+ ax = self.obj._get_axis(axis)
+ if len(key) == 0:
+ return
-@Appender(IndexingMixin.loc.__doc__)
-class _LocIndexer(_LocationIndexer):
- _takeable: bool = False
- _valid_types = (
- "labels (MUST BE IN THE INDEX), slices of labels (BOTH "
- "endpoints included! Can be slices of integers if the "
- "index is integers), listlike of labels, boolean"
- )
+ # Count missing values:
+ missing = (indexer < 0).sum()
- # -------------------------------------------------------------------
- # Key Checks
+ if missing:
+ if missing == len(indexer):
+ axis_name = self.obj._get_axis_name(axis)
+ raise KeyError(f"None of [{key}] are in the [{axis_name}]")
- @Appender(_LocationIndexer._validate_key.__doc__)
- def _validate_key(self, key, axis: int):
+ # We (temporarily) allow for some missing keys with .loc, except in
+ # some cases (e.g. setting) in which "raise_missing" will be False
+ if not (self.name == "loc" and not raise_missing):
+ not_found = list(set(key) - set(ax))
+ raise KeyError(f"{not_found} not in index")
- # valid for a collection of labels (we check their presence later)
- # slice of labels (where start-end in labels)
- # slice of integers (only if in the labels)
- # boolean
+ # we skip the warning on Categorical/Interval
+ # as this check is actually done (check for
+ # non-missing values), but a bit later in the
+ # code, so we want to avoid warning & then
+ # just raising
+ if not (ax.is_categorical() or ax.is_interval()):
+ raise KeyError(
+ "Passing list-likes to .loc or [] with any missing labels "
+ "is no longer supported, see "
+ "https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
+ )
- if isinstance(key, slice):
- return
+@Appender(IndexingMixin.iloc.__doc__)
+class _iLocIndexer(_LocationIndexer):
+ _valid_types = (
+ "integer, integer slice (START point is INCLUDED, END "
+ "point is EXCLUDED), listlike of integers, boolean array"
+ )
+ _takeable = True
+
+ # -------------------------------------------------------------------
+ # Key Checks
+
+ def _validate_key(self, key, axis: int):
if com.is_bool_indexer(key):
+ if hasattr(key, "index") and isinstance(key.index, Index):
+ if key.index.inferred_type == "integer":
+ raise NotImplementedError(
+ "iLocation based boolean "
+ "indexing on an integer type "
+ "is not available"
+ )
+ raise ValueError(
+ "iLocation based boolean indexing cannot use "
+ "an indexable as a mask"
+ )
return
- if not is_list_like_indexer(key):
- labels = self.obj._get_axis(axis)
- labels._convert_scalar_indexer(key, kind="loc")
+ if isinstance(key, slice):
+ return
+ elif is_integer(key):
+ self._validate_integer(key, axis)
+ elif isinstance(key, tuple):
+ # a tuple should already have been caught by this point
+ # so don't treat a tuple as a valid indexer
+ raise IndexingError("Too many indexers")
+ elif is_list_like_indexer(key):
+ arr = np.array(key)
+ len_axis = len(self.obj._get_axis(axis))
+
+ # check that the key has a numeric dtype
+ if not is_numeric_dtype(arr.dtype):
+ raise IndexError(f".iloc requires numeric indexers, got {arr}")
+
+ # check that the key does not exceed the maximum size of the index
+ if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
+ raise IndexError("positional indexers are out-of-bounds")
+ else:
+ raise ValueError(f"Can only index by location with a [{self._valid_types}]")
+
+ def _has_valid_setitem_indexer(self, indexer):
+ self._has_valid_positional_setitem_indexer(indexer)
+
+ def _has_valid_positional_setitem_indexer(self, indexer) -> bool:
+ """
+ Validate that a positional indexer cannot enlarge its target
+ will raise if needed, does not modify the indexer externally.
+
+ Returns
+ -------
+ bool
+ """
+ if isinstance(indexer, dict):
+ raise IndexError(f"{self.name} cannot enlarge its target object")
+ else:
+ if not isinstance(indexer, tuple):
+ indexer = _tuplify(self.ndim, indexer)
+ for ax, i in zip(self.obj.axes, indexer):
+ if isinstance(i, slice):
+ # should check the stop slice?
+ pass
+ elif is_list_like_indexer(i):
+ # should check the elements?
+ pass
+ elif is_integer(i):
+ if i >= len(ax):
+ raise IndexError(
+ f"{self.name} cannot enlarge its target object"
+ )
+ elif isinstance(i, dict):
+ raise IndexError(f"{self.name} cannot enlarge its target object")
- def _has_valid_setitem_indexer(self, indexer) -> bool:
return True
def _is_scalar_access(self, key: Tuple) -> bool:
@@ -1389,670 +1436,627 @@ def _is_scalar_access(self, key: Tuple) -> bool:
return False
for i, k in enumerate(key):
- if not is_scalar(k):
+ if not is_integer(k):
return False
ax = self.obj.axes[i]
- if isinstance(ax, ABCMultiIndex):
- return False
-
- if isinstance(k, str) and ax._supports_partial_string_indexing:
- # partial string indexing, df.loc['2000', 'A']
- # should not be considered scalar
- return False
-
if not ax.is_unique:
return False
return True
- # -------------------------------------------------------------------
- # MultiIndex Handling
-
- def _multi_take_opportunity(self, tup: Tuple) -> bool:
+ def _validate_integer(self, key: int, axis: int) -> None:
"""
- Check whether there is the possibility to use ``_multi_take``.
-
- Currently the limit is that all axes being indexed, must be indexed with
- list-likes.
+ Check that 'key' is a valid position in the desired axis.
Parameters
----------
- tup : tuple
- Tuple of indexers, one per axis.
+ key : int
+ Requested position.
+ axis : int
+ Desired axis.
- Returns
- -------
- bool
- Whether the current indexing,
- can be passed through `_multi_take`.
+ Raises
+ ------
+ IndexError
+ If 'key' is not a valid position in axis 'axis'.
"""
- if not all(is_list_like_indexer(x) for x in tup):
- return False
-
- # just too complicated
- if any(com.is_bool_indexer(x) for x in tup):
- return False
-
- return True
+ len_axis = len(self.obj._get_axis(axis))
+ if key >= len_axis or key < -len_axis:
+ raise IndexError("single positional indexer is out-of-bounds")
- def _multi_take(self, tup: Tuple):
- """
- Create the indexers for the passed tuple of keys, and
- executes the take operation. This allows the take operation to be
- executed all at once, rather than once for each dimension.
- Improving efficiency.
+ # -------------------------------------------------------------------
- Parameters
- ----------
- tup : tuple
- Tuple of indexers, one per axis.
+ def _getitem_tuple(self, tup: Tuple):
- Returns
- -------
- values: same type as the object being indexed
- """
- # GH 836
- d = {
- axis: self._get_listlike_indexer(key, axis)
- for (key, axis) in zip(tup, self.obj._AXIS_ORDERS)
- }
- return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True)
+ self._has_valid_tuple(tup)
+ try:
+ return self._getitem_lowerdim(tup)
+ except IndexingError:
+ pass
- # -------------------------------------------------------------------
+ retval = self.obj
+ axis = 0
+ for i, key in enumerate(tup):
+ if com.is_null_slice(key):
+ axis += 1
+ continue
- def _get_partial_string_timestamp_match_key(self, key, labels):
- """
- Translate any partial string timestamp matches in key, returning the
- new key.
+ retval = getattr(retval, self.name)._getitem_axis(key, axis=axis)
- (GH 10331)
- """
- if isinstance(labels, ABCMultiIndex):
- if (
- isinstance(key, str)
- and labels.levels[0]._supports_partial_string_indexing
- ):
- # Convert key '2016-01-01' to
- # ('2016-01-01'[, slice(None, None, None)]+)
- key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))
+ # if the dim was reduced, then pass a lower-dim the next time
+ if retval.ndim < self.ndim:
+ # TODO: this is never reached in tests; can we confirm that
+ # it is impossible?
+ axis -= 1
- if isinstance(key, tuple):
- # Convert (..., '2016-01-01', ...) in tuple to
- # (..., slice('2016-01-01', '2016-01-01', None), ...)
- new_key = []
- for i, component in enumerate(key):
- if (
- isinstance(component, str)
- and labels.levels[i]._supports_partial_string_indexing
- ):
- new_key.append(slice(component, component, None))
- else:
- new_key.append(component)
- key = tuple(new_key)
+ # try to get for the next axis
+ axis += 1
- return key
+ return retval
- def _getitem_iterable(self, key, axis: int):
+ def _get_list_axis(self, key, axis: int):
"""
- Index current object with an an iterable collection of keys.
+ Return Series values by list or array of integers.
Parameters
----------
- key : iterable
- Targeted labels.
- axis: int
- Dimension on which the indexing is being made.
-
- Raises
- ------
- KeyError
- If no key was found. Will change in the future to raise if not all
- keys were found.
+ key : list-like positional indexer
+ axis : int
Returns
-------
- scalar, DataFrame, or Series: indexed value(s).
+ Series object
+
+ Notes
+ -----
+ `axis` can only be zero.
"""
- # we assume that not com.is_bool_indexer(key), as that is
- # handled before we get here.
- self._validate_key(key, axis)
-
- # A collection of keys
- keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
- return self.obj._reindex_with_indexers(
- {axis: [keyarr, indexer]}, copy=True, allow_dups=True
- )
-
- def _getitem_tuple(self, tup: Tuple):
- try:
- return self._getitem_lowerdim(tup)
- except IndexingError:
- pass
-
- # no multi-index, so validate all of the indexers
- self._has_valid_tuple(tup)
-
- # ugly hack for GH #836
- if self._multi_take_opportunity(tup):
- return self._multi_take(tup)
-
- # no shortcut needed
- retval = self.obj
- for i, key in enumerate(tup):
- if com.is_null_slice(key):
- continue
-
- retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
-
- return retval
+ try:
+ return self.obj._take_with_is_copy(key, axis=axis)
+ except IndexError:
+ # re-raise with different error message
+ raise IndexError("positional indexers are out-of-bounds")
def _getitem_axis(self, key, axis: int):
- key = item_from_zerodim(key)
- if is_iterator(key):
- key = list(key)
-
- labels = self.obj._get_axis(axis)
- key = self._get_partial_string_timestamp_match_key(key, labels)
-
if isinstance(key, slice):
- self._validate_key(key, axis)
return self._get_slice_axis(key, axis=axis)
- elif com.is_bool_indexer(key):
- return self._getbool_axis(key, axis=axis)
- elif is_list_like_indexer(key):
-
- # convert various list-like indexers
- # to a list of keys
- # we will use the *values* of the object
- # and NOT the index if its a PandasObject
- if isinstance(labels, ABCMultiIndex):
-
- if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
- # Series, or 0,1 ndim ndarray
- # GH 14730
- key = list(key)
- elif isinstance(key, ABCDataFrame):
- # GH 15438
- raise NotImplementedError(
- "Indexing a MultiIndex with a "
- "DataFrame key is not "
- "implemented"
- )
- elif hasattr(key, "ndim") and key.ndim > 1:
- raise NotImplementedError(
- "Indexing a MultiIndex with a "
- "multidimensional key is not "
- "implemented"
- )
- if (
- not isinstance(key, tuple)
- and len(key)
- and not isinstance(key[0], tuple)
- ):
- key = tuple([key])
+ if isinstance(key, list):
+ key = np.asarray(key)
- # an iterable multi-selection
- if not (isinstance(key, tuple) and isinstance(labels, ABCMultiIndex)):
+ if com.is_bool_indexer(key):
+ self._validate_key(key, axis)
+ return self._getbool_axis(key, axis=axis)
- if hasattr(key, "ndim") and key.ndim > 1:
- raise ValueError("Cannot index with multidimensional key")
+ # a list of integers
+ elif is_list_like_indexer(key):
+ return self._get_list_axis(key, axis=axis)
- return self._getitem_iterable(key, axis=axis)
+ # a single integer
+ else:
+ key = item_from_zerodim(key)
+ if not is_integer(key):
+ raise TypeError("Cannot index by location index with a non-integer key")
- # nested tuple slicing
- if is_nested_tuple(key, labels):
- locs = labels.get_locs(key)
- indexer = [slice(None)] * self.ndim
- indexer[axis] = locs
- return self.obj.iloc[tuple(indexer)]
+ # validate the location
+ self._validate_integer(key, axis)
- # fall thru to straight lookup
- self._validate_key(key, axis)
- return self._get_label(key, axis=axis)
+ return self.obj._ixs(key, axis=axis)
def _get_slice_axis(self, slice_obj: slice, axis: int):
- """
- This is pretty simple as we just have to deal with labels.
- """
# caller is responsible for ensuring non-None axis
obj = self.obj
+
if not need_slice(slice_obj):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
- indexer = labels.slice_indexer(
- slice_obj.start, slice_obj.stop, slice_obj.step, kind="loc"
- )
-
- if isinstance(indexer, slice):
- return self.obj._slice(indexer, axis=axis, kind="iloc")
- else:
- # DatetimeIndex overrides Index.slice_indexer and may
- # return a DatetimeIndex instead of a slice object.
- return self.obj.take(indexer, axis=axis)
+ labels._validate_positional_slice(slice_obj)
+ return self.obj._slice(slice_obj, axis=axis, kind="iloc")
def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
"""
- Convert indexing key into something we can use to do actual fancy
- indexing on a ndarray.
-
- Examples
- ix[:5] -> slice(0, 5)
- ix[[1,2,3]] -> [1,2,3]
- ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
-
- Going by Zen of Python?
- 'In the face of ambiguity, refuse the temptation to guess.'
- raise AmbiguousIndexError with integer labels?
- - No, prefer label-based indexing
+ Much simpler as we only have to deal with our valid types.
"""
labels = self.obj._get_axis(axis)
+ # make need to convert a float key
if isinstance(key, slice):
- return labels._convert_slice_indexer(key, kind="loc")
-
- if is_scalar(key):
- # try to find out correct indexer, if not type correct raise
- try:
- key = labels._convert_scalar_indexer(key, kind="loc")
- except TypeError:
- # but we will allow setting
- if not is_setter:
- raise
-
- # see if we are positional in nature
- is_int_index = labels.is_integer()
- is_int_positional = is_integer(key) and not is_int_index
+ labels._validate_positional_slice(key)
+ return key
- if is_scalar(key) or isinstance(labels, ABCMultiIndex):
- # Otherwise get_loc will raise InvalidIndexError
+ elif is_float(key):
+ labels._validate_indexer("positional", key, "iloc")
+ return key
- # if we are a label return me
- try:
- return labels.get_loc(key)
- except LookupError:
- if isinstance(key, tuple) and isinstance(labels, ABCMultiIndex):
- if len(key) == labels.nlevels:
- return {"key": key}
- raise
- except TypeError:
- pass
- except ValueError:
- if not is_int_positional:
- raise
+ self._validate_key(key, axis)
+ return key
- # a positional
- if is_int_positional:
+ # -------------------------------------------------------------------
- # if we are setting and its not a valid location
- # its an insert which fails by definition
+ def _setitem_with_indexer(self, indexer, value):
- # always valid
- return {"key": key}
+ # also has the side effect of consolidating in-place
+ from pandas import Series
- if is_nested_tuple(key, labels):
- return labels.get_locs(key)
+ info_axis = self.obj._info_axis_number
- elif is_list_like_indexer(key):
+ # maybe partial set
+ take_split_path = self.obj._is_mixed_type
- if com.is_bool_indexer(key):
- key = check_bool_indexer(labels, key)
- (inds,) = key.nonzero()
- return inds
- else:
- # When setting, missing keys are not allowed, even with .loc:
- return self._get_listlike_indexer(key, axis, raise_missing=True)[1]
- else:
- try:
- return labels.get_loc(key)
- except LookupError:
- # allow a not found key only if we are a setter
- if not is_list_like_indexer(key):
- return {"key": key}
- raise
+ # if there is only one block/type, still have to take split path
+ # unless the block is one-dimensional or it can hold the value
+ if not take_split_path and self.obj._data.blocks:
+ (blk,) = self.obj._data.blocks
+ if 1 < blk.ndim: # in case of dict, keys are indices
+ val = list(value.values()) if isinstance(value, dict) else value
+ take_split_path = not blk._can_hold_element(val)
- def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
- """
- Transform a list-like of keys into a new index and an indexer.
+ # if we have any multi-indexes that have non-trivial slices
+ # (not null slices) then we must take the split path, xref
+ # GH 10360, GH 27841
+ if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
+ for i, ax in zip(indexer, self.obj.axes):
+ if isinstance(ax, ABCMultiIndex) and not (
+ is_integer(i) or com.is_null_slice(i)
+ ):
+ take_split_path = True
+ break
- Parameters
- ----------
- key : list-like
- Targeted labels.
- axis: int
- Dimension on which the indexing is being made.
- raise_missing: bool, default False
- Whether to raise a KeyError if some labels were not found.
- Will be removed in the future, and then this method will always behave as
- if ``raise_missing=True``.
+ if isinstance(indexer, tuple):
+ nindexer = []
+ for i, idx in enumerate(indexer):
+ if isinstance(idx, dict):
- Raises
- ------
- KeyError
- If at least one key was requested but none was found, and
- raise_missing=True.
+ # reindex the axis to the new value
+ # and set inplace
+ key, _ = convert_missing_indexer(idx)
- Returns
- -------
- keyarr: Index
- New index (coinciding with 'key' if the axis is unique).
- values : array-like
- Indexer for the return object, -1 denotes keys not found.
- """
- ax = self.obj._get_axis(axis)
+ # if this is the items axes, then take the main missing
+ # path first
+ # this correctly sets the dtype and avoids cache issues
+ # essentially this separates out the block that is needed
+ # to possibly be modified
+ if self.ndim > 1 and i == self.obj._info_axis_number:
- # Have the index compute an indexer or return None
- # if it cannot handle:
- indexer, keyarr = ax._convert_listlike_indexer(key)
- # We only act on all found values:
- if indexer is not None and (indexer != -1).all():
- self._validate_read_indexer(key, indexer, axis, raise_missing=raise_missing)
- return ax[indexer], indexer
+ # add the new item, and set the value
+ # must have all defined axes if we have a scalar
+ # or a list-like on the non-info axes if we have a
+ # list-like
+ len_non_info_axes = (
+ len(_ax) for _i, _ax in enumerate(self.obj.axes) if _i != i
+ )
+ if any(not l for l in len_non_info_axes):
+ if not is_list_like_indexer(value):
+ raise ValueError(
+ "cannot set a frame with no "
+ "defined index and a scalar"
+ )
+ self.obj[key] = value
+ return
- if ax.is_unique and not getattr(ax, "is_overlapping", False):
- indexer = ax.get_indexer_for(key)
- keyarr = ax.reindex(keyarr)[0]
+ # add a new item with the dtype setup
+ self.obj[key] = _infer_fill_value(value)
+
+ new_indexer = convert_from_missing_indexer_tuple(
+ indexer, self.obj.axes
+ )
+ self._setitem_with_indexer(new_indexer, value)
+
+ return
+
+ # reindex the axis
+ # make sure to clear the cache because we are
+ # just replacing the block manager here
+ # so the object is the same
+ index = self.obj._get_axis(i)
+ labels = index.insert(len(index), key)
+ self.obj._data = self.obj.reindex(labels, axis=i)._data
+ self.obj._maybe_update_cacher(clear=True)
+ self.obj._is_copy = None
+
+ nindexer.append(labels.get_loc(key))
+
+ else:
+ nindexer.append(idx)
+
+ indexer = tuple(nindexer)
else:
- keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
- self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing)
- return keyarr, indexer
+ indexer, missing = convert_missing_indexer(indexer)
- def _validate_read_indexer(
- self, key, indexer, axis: int, raise_missing: bool = False
- ):
- """
- Check that indexer can be used to return a result.
+ if missing:
+ self._setitem_with_indexer_missing(indexer, value)
+ return
- e.g. at least one element was found,
- unless the list of keys was actually empty.
+ # set
+ item_labels = self.obj._get_axis(info_axis)
- Parameters
- ----------
- key : list-like
- Targeted labels (only used to show correct error message).
- indexer: array-like of booleans
- Indices corresponding to the key,
- (with -1 indicating not found).
- axis: int
- Dimension on which the indexing is being made.
- raise_missing: bool
- Whether to raise a KeyError if some labels are not found. Will be
- removed in the future, and then this method will always behave as
- if raise_missing=True.
+ # align and set the values
+ if take_split_path:
+ # Above we only set take_split_path to True for 2D cases
+ assert self.ndim == 2
+ assert info_axis == 1
- Raises
- ------
- KeyError
- If at least one key was requested but none was found, and
- raise_missing=True.
- """
- ax = self.obj._get_axis(axis)
+ if not isinstance(indexer, tuple):
+ indexer = _tuplify(self.ndim, indexer)
- if len(key) == 0:
- return
+ if isinstance(value, ABCSeries):
+ value = self._align_series(indexer, value)
- # Count missing values:
- missing = (indexer < 0).sum()
+ info_idx = indexer[info_axis]
+ if is_integer(info_idx):
+ info_idx = [info_idx]
+ labels = item_labels[info_idx]
- if missing:
- if missing == len(indexer):
- axis_name = self.obj._get_axis_name(axis)
- raise KeyError(f"None of [{key}] are in the [{axis_name}]")
+ # if we have a partial multiindex, then need to adjust the plane
+ # indexer here
+ if len(labels) == 1 and isinstance(
+ self.obj[labels[0]].axes[0], ABCMultiIndex
+ ):
+ item = labels[0]
+ obj = self.obj[item]
+ index = obj.index
+ idx = indexer[:info_axis][0]
- # We (temporarily) allow for some missing keys with .loc, except in
- # some cases (e.g. setting) in which "raise_missing" will be False
- if not (self.name == "loc" and not raise_missing):
- not_found = list(set(key) - set(ax))
- raise KeyError(f"{not_found} not in index")
+ plane_indexer = tuple([idx]) + indexer[info_axis + 1 :]
+ lplane_indexer = length_of_indexer(plane_indexer[0], index)
- # we skip the warning on Categorical/Interval
- # as this check is actually done (check for
- # non-missing values), but a bit later in the
- # code, so we want to avoid warning & then
- # just raising
- if not (ax.is_categorical() or ax.is_interval()):
- raise KeyError(
- "Passing list-likes to .loc or [] with any missing labels "
- "is no longer supported, see "
- "https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
- )
+ # require that we are setting the right number of values that
+ # we are indexing
+ if (
+ is_list_like_indexer(value)
+ and np.iterable(value)
+ and lplane_indexer != len(value)
+ ):
+ if len(obj[idx]) != len(value):
+ raise ValueError(
+ "cannot set using a multi-index "
+ "selection indexer with a different "
+ "length than the value"
+ )
-@Appender(IndexingMixin.iloc.__doc__)
-class _iLocIndexer(_LocationIndexer):
- _valid_types = (
- "integer, integer slice (START point is INCLUDED, END "
- "point is EXCLUDED), listlike of integers, boolean array"
- )
- _takeable = True
+ # make sure we have an ndarray
+ value = getattr(value, "values", value).ravel()
- # -------------------------------------------------------------------
- # Key Checks
+ # we can directly set the series here
+ obj._consolidate_inplace()
+ obj = obj.copy()
+ obj._data = obj._data.setitem(indexer=tuple([idx]), value=value)
+ self.obj[item] = obj
+ return
- def _validate_key(self, key, axis: int):
- if com.is_bool_indexer(key):
- if hasattr(key, "index") and isinstance(key.index, Index):
- if key.index.inferred_type == "integer":
- raise NotImplementedError(
- "iLocation based boolean "
- "indexing on an integer type "
- "is not available"
- )
- raise ValueError(
- "iLocation based boolean indexing cannot use "
- "an indexable as a mask"
- )
- return
+ # non-mi
+ else:
+ plane_indexer = indexer[:info_axis] + indexer[info_axis + 1 :]
+ plane_axis = self.obj.axes[:info_axis][0]
+ lplane_indexer = length_of_indexer(plane_indexer[0], plane_axis)
- if isinstance(key, slice):
- return
- elif is_integer(key):
- self._validate_integer(key, axis)
- elif isinstance(key, tuple):
- # a tuple should already have been caught by this point
- # so don't treat a tuple as a valid indexer
- raise IndexingError("Too many indexers")
- elif is_list_like_indexer(key):
- arr = np.array(key)
- len_axis = len(self.obj._get_axis(axis))
+ def setter(item, v):
+ s = self.obj[item]
+ pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
- # check that the key has a numeric dtype
- if not is_numeric_dtype(arr.dtype):
- raise IndexError(f".iloc requires numeric indexers, got {arr}")
+ # perform the equivalent of a setitem on the info axis
+ # as we have a null slice or a slice with full bounds
+ # which means essentially reassign to the columns of a
+ # multi-dim object
+ # GH6149 (null slice), GH10408 (full bounds)
+ if isinstance(pi, tuple) and all(
+ com.is_null_slice(idx) or com.is_full_slice(idx, len(self.obj))
+ for idx in pi
+ ):
+ s = v
+ else:
+ # set the item, possibly having a dtype change
+ s._consolidate_inplace()
+ s = s.copy()
+ s._data = s._data.setitem(indexer=pi, value=v)
+ s._maybe_update_cacher(clear=True)
+
+ # reset the sliced object if unique
+ self.obj[item] = s
+
+ # we need an iterable, with a ndim of at least 1
+ # eg. don't pass through np.array(0)
+ if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
+
+ # we have an equal len Frame
+ if isinstance(value, ABCDataFrame):
+ sub_indexer = list(indexer)
+ multiindex_indexer = isinstance(labels, ABCMultiIndex)
+
+ for item in labels:
+ if item in value:
+ sub_indexer[info_axis] = item
+ v = self._align_series(
+ tuple(sub_indexer), value[item], multiindex_indexer
+ )
+ else:
+ v = np.nan
+
+ setter(item, v)
+
+ # we have an equal len ndarray/convertible to our labels
+ # hasattr first, to avoid coercing to ndarray without reason.
+ # But we may be relying on the ndarray coercion to check ndim.
+ # Why not just convert to an ndarray earlier on if needed?
+ elif np.ndim(value) == 2:
+
+ # note that this coerces the dtype if we are mixed
+ # GH 7551
+ value = np.array(value, dtype=object)
+ if len(labels) != value.shape[1]:
+ raise ValueError(
+ "Must have equal len keys and value "
+ "when setting with an ndarray"
+ )
+
+ for i, item in enumerate(labels):
+
+ # setting with a list, recoerces
+ setter(item, value[:, i].tolist())
+
+ # we have an equal len list/ndarray
+ elif _can_do_equal_len(
+ labels, value, plane_indexer, lplane_indexer, self.obj
+ ):
+ setter(labels[0], value)
+
+ # per label values
+ else:
+
+ if len(labels) != len(value):
+ raise ValueError(
+ "Must have equal len keys and value "
+ "when setting with an iterable"
+ )
+
+ for item, v in zip(labels, value):
+ setter(item, v)
+ else:
+
+ # scalar
+ for item in labels:
+ setter(item, value)
- # check that the key does not exceed the maximum size of the index
- if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
- raise IndexError("positional indexers are out-of-bounds")
else:
- raise ValueError(f"Can only index by location with a [{self._valid_types}]")
+ if isinstance(indexer, tuple):
+ indexer = maybe_convert_ix(*indexer)
- def _has_valid_setitem_indexer(self, indexer):
- self._has_valid_positional_setitem_indexer(indexer)
+ # if we are setting on the info axis ONLY
+ # set using those methods to avoid block-splitting
+ # logic here
+ if (
+ len(indexer) > info_axis
+ and is_integer(indexer[info_axis])
+ and all(
+ com.is_null_slice(idx)
+ for i, idx in enumerate(indexer)
+ if i != info_axis
+ )
+ and item_labels.is_unique
+ ):
+ self.obj[item_labels[indexer[info_axis]]] = value
+ return
- def _has_valid_positional_setitem_indexer(self, indexer) -> bool:
- """
- Validate that a positional indexer cannot enlarge its target
- will raise if needed, does not modify the indexer externally.
+ if isinstance(value, (ABCSeries, dict)):
+ # TODO(EA): ExtensionBlock.setitem this causes issues with
+ # setting for extensionarrays that store dicts. Need to decide
+ # if it's worth supporting that.
+ value = self._align_series(indexer, Series(value))
- Returns
- -------
- bool
+ elif isinstance(value, ABCDataFrame):
+ value = self._align_frame(indexer, value)
+
+ # check for chained assignment
+ self.obj._check_is_chained_assignment_possible()
+
+ # actually do the set
+ self.obj._consolidate_inplace()
+ self.obj._data = self.obj._data.setitem(indexer=indexer, value=value)
+ self.obj._maybe_update_cacher(clear=True)
+
+ def _setitem_with_indexer_missing(self, indexer, value):
"""
- if isinstance(indexer, dict):
- raise IndexError(f"{self.name} cannot enlarge its target object")
- else:
- if not isinstance(indexer, tuple):
- indexer = _tuplify(self.ndim, indexer)
- for ax, i in zip(self.obj.axes, indexer):
- if isinstance(i, slice):
- # should check the stop slice?
- pass
- elif is_list_like_indexer(i):
- # should check the elements?
- pass
- elif is_integer(i):
- if i >= len(ax):
- raise IndexError(
- f"{self.name} cannot enlarge its target object"
- )
- elif isinstance(i, dict):
- raise IndexError(f"{self.name} cannot enlarge its target object")
+ Insert new row(s) or column(s) into the Series or DataFrame.
+ """
+ from pandas import Series
+
+ # reindex the axis to the new value
+ # and set inplace
+ if self.ndim == 1:
+ index = self.obj.index
+ new_index = index.insert(len(index), indexer)
+
+ # we have a coerced indexer, e.g. a float
+ # that matches in an Int64Index, so
+ # we will not create a duplicate index, rather
+ # index to that element
+ # e.g. 0.0 -> 0
+ # GH#12246
+ if index.is_unique:
+ new_indexer = index.get_indexer([new_index[-1]])
+ if (new_indexer != -1).any():
+ return self._setitem_with_indexer(new_indexer, value)
- return True
+ # this preserves dtype of the value
+ new_values = Series([value])._values
+ if len(self.obj._values):
+ # GH#22717 handle casting compatibility that np.concatenate
+ # does incorrectly
+ new_values = concat_compat([self.obj._values, new_values])
+ self.obj._data = self.obj._constructor(
+ new_values, index=new_index, name=self.obj.name
+ )._data
+ self.obj._maybe_update_cacher(clear=True)
- def _is_scalar_access(self, key: Tuple) -> bool:
- """
- Returns
- -------
- bool
- """
- # this is a shortcut accessor to both .loc and .iloc
- # that provide the equivalent access of .at and .iat
- # a) avoid getting things via sections and (to minimize dtype changes)
- # b) provide a performant path
- if len(key) != self.ndim:
- return False
+ elif self.ndim == 2:
- for i, k in enumerate(key):
- if not is_integer(k):
- return False
+ if not len(self.obj.columns):
+ # no columns and scalar
+ raise ValueError("cannot set a frame with no defined columns")
- ax = self.obj.axes[i]
- if not ax.is_unique:
- return False
+ if isinstance(value, ABCSeries):
+ # append a Series
+ value = value.reindex(index=self.obj.columns, copy=True)
+ value.name = indexer
- return True
+ else:
+ # a list-list
+ if is_list_like_indexer(value):
+ # must have conforming columns
+ if len(value) != len(self.obj.columns):
+ raise ValueError("cannot set a row with mismatched columns")
- def _validate_integer(self, key: int, axis: int) -> None:
- """
- Check that 'key' is a valid position in the desired axis.
+ value = Series(value, index=self.obj.columns, name=indexer)
+ self.obj._data = self.obj.append(value)._data
+ self.obj._maybe_update_cacher(clear=True)
+
+ def _align_series(self, indexer, ser: ABCSeries, multiindex_indexer: bool = False):
+ """
Parameters
----------
- key : int
- Requested position.
- axis : int
- Desired axis.
+ indexer : tuple, slice, scalar
+ Indexer used to get the locations that will be set to `ser`.
+ ser : pd.Series
+ Values to assign to the locations specified by `indexer`.
+ multiindex_indexer : boolean, optional
+ Defaults to False. Should be set to True if `indexer` was from
+ a `pd.MultiIndex`, to avoid unnecessary broadcasting.
- Raises
- ------
- IndexError
- If 'key' is not a valid position in axis 'axis'.
+ Returns
+ -------
+ `np.array` of `ser` broadcast to the appropriate shape for assignment
+ to the locations selected by `indexer`
"""
- len_axis = len(self.obj._get_axis(axis))
- if key >= len_axis or key < -len_axis:
- raise IndexError("single positional indexer is out-of-bounds")
+ if isinstance(indexer, (slice, np.ndarray, list, Index)):
+ indexer = tuple([indexer])
- # -------------------------------------------------------------------
+ if isinstance(indexer, tuple):
- def _getitem_tuple(self, tup: Tuple):
+ # flatten np.ndarray indexers
+ def ravel(i):
+ return i.ravel() if isinstance(i, np.ndarray) else i
- self._has_valid_tuple(tup)
- try:
- return self._getitem_lowerdim(tup)
- except IndexingError:
- pass
+ indexer = tuple(map(ravel, indexer))
- retval = self.obj
- axis = 0
- for i, key in enumerate(tup):
- if com.is_null_slice(key):
- axis += 1
- continue
+ aligners = [not com.is_null_slice(idx) for idx in indexer]
+ sum_aligners = sum(aligners)
+ single_aligner = sum_aligners == 1
+ is_frame = self.ndim == 2
+ obj = self.obj
- retval = getattr(retval, self.name)._getitem_axis(key, axis=axis)
+ # are we a single alignable value on a non-primary
+ # dim (e.g. panel: 1,2, or frame: 0) ?
+ # hence need to align to a single axis dimension
+ # rather that find all valid dims
- # if the dim was reduced, then pass a lower-dim the next time
- if retval.ndim < self.ndim:
- # TODO: this is never reached in tests; can we confirm that
- # it is impossible?
- axis -= 1
+ # frame
+ if is_frame:
+ single_aligner = single_aligner and aligners[0]
- # try to get for the next axis
- axis += 1
+ # we have a frame, with multiple indexers on both axes; and a
+ # series, so need to broadcast (see GH5206)
+ if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer):
+ ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
- return retval
+ # single indexer
+ if len(indexer) > 1 and not multiindex_indexer:
+ len_indexer = len(indexer[1])
+ ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T
- def _get_list_axis(self, key, axis: int):
- """
- Return Series values by list or array of integers.
+ return ser
- Parameters
- ----------
- key : list-like positional indexer
- axis : int
+ for i, idx in enumerate(indexer):
+ ax = obj.axes[i]
- Returns
- -------
- Series object
+ # multiple aligners (or null slices)
+ if is_sequence(idx) or isinstance(idx, slice):
+ if single_aligner and com.is_null_slice(idx):
+ continue
+ new_ix = ax[idx]
+ if not is_list_like_indexer(new_ix):
+ new_ix = Index([new_ix])
+ else:
+ new_ix = Index(new_ix)
+ if ser.index.equals(new_ix) or not len(new_ix):
+ return ser._values.copy()
- Notes
- -----
- `axis` can only be zero.
- """
- try:
- return self.obj._take_with_is_copy(key, axis=axis)
- except IndexError:
- # re-raise with different error message
- raise IndexError("positional indexers are out-of-bounds")
+ return ser.reindex(new_ix)._values
- def _getitem_axis(self, key, axis: int):
- if isinstance(key, slice):
- return self._get_slice_axis(key, axis=axis)
+ # 2 dims
+ elif single_aligner:
- if isinstance(key, list):
- key = np.asarray(key)
+ # reindex along index
+ ax = self.obj.axes[1]
+ if ser.index.equals(ax) or not len(ax):
+ return ser._values.copy()
+ return ser.reindex(ax)._values
- if com.is_bool_indexer(key):
- self._validate_key(key, axis)
- return self._getbool_axis(key, axis=axis)
+ elif is_scalar(indexer):
+ ax = self.obj._get_axis(1)
- # a list of integers
- elif is_list_like_indexer(key):
- return self._get_list_axis(key, axis=axis)
+ if ser.index.equals(ax):
+ return ser._values.copy()
- # a single integer
- else:
- key = item_from_zerodim(key)
- if not is_integer(key):
- raise TypeError("Cannot index by location index with a non-integer key")
+ return ser.reindex(ax)._values
- # validate the location
- self._validate_integer(key, axis)
+ raise ValueError("Incompatible indexer with Series")
- return self.obj._ixs(key, axis=axis)
+ def _align_frame(self, indexer, df: ABCDataFrame):
+ is_frame = self.ndim == 2
- def _get_slice_axis(self, slice_obj: slice, axis: int):
- # caller is responsible for ensuring non-None axis
- obj = self.obj
+ if isinstance(indexer, tuple):
- if not need_slice(slice_obj):
- return obj.copy(deep=False)
+ idx, cols = None, None
+ sindexers = []
+ for i, ix in enumerate(indexer):
+ ax = self.obj.axes[i]
+ if is_sequence(ix) or isinstance(ix, slice):
+ if isinstance(ix, np.ndarray):
+ ix = ix.ravel()
+ if idx is None:
+ idx = ax[ix]
+ elif cols is None:
+ cols = ax[ix]
+ else:
+ break
+ else:
+ sindexers.append(i)
- labels = obj._get_axis(axis)
- labels._validate_positional_slice(slice_obj)
- return self.obj._slice(slice_obj, axis=axis, kind="iloc")
+ if idx is not None and cols is not None:
- def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
- """
- Much simpler as we only have to deal with our valid types.
- """
- labels = self.obj._get_axis(axis)
+ if df.index.equals(idx) and df.columns.equals(cols):
+ val = df.copy()._values
+ else:
+ val = df.reindex(idx, columns=cols)._values
+ return val
- # make need to convert a float key
- if isinstance(key, slice):
- labels._validate_positional_slice(key)
- return key
+ elif (isinstance(indexer, slice) or is_list_like_indexer(indexer)) and is_frame:
+ ax = self.obj.index[indexer]
+ if df.index.equals(ax):
+ val = df.copy()._values
+ else:
- elif is_float(key):
- labels._validate_indexer("positional", key, "iloc")
- return key
+ # we have a multi-index and are trying to align
+ # with a particular, level GH3738
+ if (
+ isinstance(ax, ABCMultiIndex)
+ and isinstance(df.index, ABCMultiIndex)
+ and ax.nlevels != df.index.nlevels
+ ):
+ raise TypeError(
+ "cannot align on a multi-index with out "
+ "specifying the join levels"
+ )
- self._validate_key(key, axis)
- return key
+ val = df.reindex(index=ax)._values
+ return val
+
+ raise ValueError("Incompatible indexer with DataFrame")
class _ScalarAccessIndexer(_NDFrameIndexerBase):
| some overlap with #31797.
_setitem_with_indexer is used for positional indexers, so it is misleading for it to be in loc anyway.
_setitem_with_indexer is an absolute beast, will be easier to sort out when we dont have to worry about what class we're in.
The diff looks misleading. All this does is
- move _setitem_with_indexer, _setitem_with_indexer_missing, _align_series, _align_frame from LocationIndexer to _iLocIndexer
- make `LocationIndexer.__setitem__` call `_has_valid_setitem_indexer` before calling _setitem_with_indexer instead of inside of it, so we get the right class's _has_valid_setitem_indexer
- dispatch to iloc if we are not already iloc
- update usages in core.frame | https://api.github.com/repos/pandas-dev/pandas/pulls/31837 | 2020-02-09T21:16:26Z | 2020-02-10T19:45:15Z | 2020-02-10T19:45:15Z | 2020-02-10T19:45:35Z |
Fixed mypy errors in pandas/tests/extension/json/test_json.py | diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index dc03a1f1dcf72..f7ca99be2adea 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -79,7 +79,8 @@ class BaseJSON:
# The default assert_series_equal eventually does a
# Series.values, which raises. We work around it by
# converting the UserDicts to dicts.
- def assert_series_equal(self, left, right, **kwargs):
+ @classmethod
+ def assert_series_equal(cls, left, right, *args, **kwargs):
if left.dtype.name == "json":
assert left.dtype == right.dtype
left = pd.Series(
@@ -90,9 +91,10 @@ def assert_series_equal(self, left, right, **kwargs):
index=right.index,
name=right.name,
)
- tm.assert_series_equal(left, right, **kwargs)
+ tm.assert_series_equal(left, right, *args, **kwargs)
- def assert_frame_equal(self, left, right, *args, **kwargs):
+ @classmethod
+ def assert_frame_equal(cls, left, right, *args, **kwargs):
obj_type = kwargs.get("obj", "DataFrame")
tm.assert_index_equal(
left.columns,
@@ -107,7 +109,7 @@ def assert_frame_equal(self, left, right, *args, **kwargs):
jsons = (left.dtypes == "json").index
for col in jsons:
- self.assert_series_equal(left[col], right[col], *args, **kwargs)
+ cls.assert_series_equal(left[col], right[col], *args, **kwargs)
left = left.drop(columns=jsons)
right = right.drop(columns=jsons)
diff --git a/setup.cfg b/setup.cfg
index 9be09ae1076bb..4a900e581c353 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -135,9 +135,6 @@ ignore_errors=True
[mypy-pandas.tests.arithmetic.test_datetime64]
ignore_errors=True
-[mypy-pandas.tests.extension.json.test_json]
-ignore_errors=True
-
[mypy-pandas.tests.indexes.datetimes.test_tools]
ignore_errors=True
| Part of #28926
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/31836 | 2020-02-09T21:11:26Z | 2020-02-09T21:53:46Z | 2020-02-09T21:53:46Z | 2020-02-09T21:54:01Z |
REF: move loc-only validate_read_indexer to Loc | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 70092c70a76ad..9c0842f5536cb 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1304,66 +1304,6 @@ def _getitem_nested_tuple(self, tup: Tuple):
return obj
- def _validate_read_indexer(
- self, key, indexer, axis: int, raise_missing: bool = False
- ):
- """
- Check that indexer can be used to return a result.
-
- e.g. at least one element was found,
- unless the list of keys was actually empty.
-
- Parameters
- ----------
- key : list-like
- Targeted labels (only used to show correct error message).
- indexer: array-like of booleans
- Indices corresponding to the key,
- (with -1 indicating not found).
- axis: int
- Dimension on which the indexing is being made.
- raise_missing: bool
- Whether to raise a KeyError if some labels are not found. Will be
- removed in the future, and then this method will always behave as
- if raise_missing=True.
-
- Raises
- ------
- KeyError
- If at least one key was requested but none was found, and
- raise_missing=True.
- """
- ax = self.obj._get_axis(axis)
-
- if len(key) == 0:
- return
-
- # Count missing values:
- missing = (indexer < 0).sum()
-
- if missing:
- if missing == len(indexer):
- axis_name = self.obj._get_axis_name(axis)
- raise KeyError(f"None of [{key}] are in the [{axis_name}]")
-
- # We (temporarily) allow for some missing keys with .loc, except in
- # some cases (e.g. setting) in which "raise_missing" will be False
- if not (self.name == "loc" and not raise_missing):
- not_found = list(set(key) - set(ax))
- raise KeyError(f"{not_found} not in index")
-
- # we skip the warning on Categorical/Interval
- # as this check is actually done (check for
- # non-missing values), but a bit later in the
- # code, so we want to avoid warning & then
- # just raising
- if not (ax.is_categorical() or ax.is_interval()):
- raise KeyError(
- "Passing list-likes to .loc or [] with any missing labels "
- "is no longer supported, see "
- "https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
- )
-
def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
raise AbstractMethodError(self)
@@ -1822,6 +1762,66 @@ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False):
self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing)
return keyarr, indexer
+ def _validate_read_indexer(
+ self, key, indexer, axis: int, raise_missing: bool = False
+ ):
+ """
+ Check that indexer can be used to return a result.
+
+ e.g. at least one element was found,
+ unless the list of keys was actually empty.
+
+ Parameters
+ ----------
+ key : list-like
+ Targeted labels (only used to show correct error message).
+ indexer: array-like of booleans
+ Indices corresponding to the key,
+ (with -1 indicating not found).
+ axis: int
+ Dimension on which the indexing is being made.
+ raise_missing: bool
+ Whether to raise a KeyError if some labels are not found. Will be
+ removed in the future, and then this method will always behave as
+ if raise_missing=True.
+
+ Raises
+ ------
+ KeyError
+ If at least one key was requested but none was found, and
+ raise_missing=True.
+ """
+ ax = self.obj._get_axis(axis)
+
+ if len(key) == 0:
+ return
+
+ # Count missing values:
+ missing = (indexer < 0).sum()
+
+ if missing:
+ if missing == len(indexer):
+ axis_name = self.obj._get_axis_name(axis)
+ raise KeyError(f"None of [{key}] are in the [{axis_name}]")
+
+ # We (temporarily) allow for some missing keys with .loc, except in
+ # some cases (e.g. setting) in which "raise_missing" will be False
+ if not (self.name == "loc" and not raise_missing):
+ not_found = list(set(key) - set(ax))
+ raise KeyError(f"{not_found} not in index")
+
+ # we skip the warning on Categorical/Interval
+ # as this check is actually done (check for
+ # non-missing values), but a bit later in the
+ # code, so we want to avoid warning & then
+ # just raising
+ if not (ax.is_categorical() or ax.is_interval()):
+ raise KeyError(
+ "Passing list-likes to .loc or [] with any missing labels "
+ "is no longer supported, see "
+ "https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
+ )
+
@Appender(IndexingMixin.iloc.__doc__)
class _iLocIndexer(_LocationIndexer):
| straight cut/paste | https://api.github.com/repos/pandas-dev/pandas/pulls/31834 | 2020-02-09T19:36:02Z | 2020-02-09T22:29:34Z | 2020-02-09T22:29:34Z | 2020-02-09T23:51:52Z |
DOC Update documentation DataFrame.nsmallest | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e0efa93379bca..817c71cb814d6 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5230,7 +5230,7 @@ def nsmallest(self, n, columns, keep="first") -> "DataFrame":
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
- ... 434000, 434000, 337000, 11300,
+ ... 434000, 434000, 337000, 337000,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
@@ -5247,18 +5247,18 @@ def nsmallest(self, n, columns, keep="first") -> "DataFrame":
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
- Nauru 11300 182 NR
+ Nauru 337000 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
- three rows having the smallest values in column "a".
+ three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
- population GDP alpha-2
- Nauru 11300 182 NR
- Tuvalu 11300 38 TV
- Anguilla 11300 311 AI
+ population GDP alpha-2
+ Tuvalu 11300 38 TV
+ Anguilla 11300 311 AI
+ Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
@@ -5266,24 +5266,25 @@ def nsmallest(self, n, columns, keep="first") -> "DataFrame":
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
- Nauru 11300 182 NR
+ Nauru 337000 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
- population GDP alpha-2
- Nauru 11300 182 NR
- Tuvalu 11300 38 TV
- Anguilla 11300 311 AI
+ population GDP alpha-2
+ Tuvalu 11300 38 TV
+ Anguilla 11300 311 AI
+ Iceland 337000 17036 IS
+ Nauru 337000 182 NR
- To order by the largest values in column "a" and then "c", we can
+ To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
- Nauru 11300 182 NR
Anguilla 11300 311 AI
+ Nauru 337000 182 NR
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
| Modified the example, to better demonstrate the use of the nsmallest function.
| https://api.github.com/repos/pandas-dev/pandas/pulls/31833 | 2020-02-09T19:21:26Z | 2020-02-11T04:48:28Z | 2020-02-11T04:48:28Z | 2020-02-11T04:48:43Z |
CLN: disallow kind=None in _convert_slice_indexer | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 6bc15a5f89e2a..ff026583c5adb 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3152,7 +3152,7 @@ def _validate_positional_slice(self, key: slice):
self._validate_indexer("positional", key.stop, "iloc")
self._validate_indexer("positional", key.step, "iloc")
- def _convert_slice_indexer(self, key: slice, kind=None):
+ def _convert_slice_indexer(self, key: slice, kind: str_t):
"""
Convert a slice indexer.
@@ -3162,9 +3162,9 @@ def _convert_slice_indexer(self, key: slice, kind=None):
Parameters
----------
key : label of the slice bound
- kind : {'loc', 'getitem'} or None
+ kind : {'loc', 'getitem'}
"""
- assert kind in ["loc", "getitem", None], kind
+ assert kind in ["loc", "getitem"], kind
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 03fb8db2e1e1e..331c12b3bdab5 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -885,7 +885,7 @@ def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:
return self.get_indexer_non_unique(target)[0]
return self.get_indexer(target, **kwargs)
- def _convert_slice_indexer(self, key: slice, kind=None):
+ def _convert_slice_indexer(self, key: slice, kind: str):
if not (key.step is None or key.step == 1):
raise ValueError("cannot support not-default step in a slice")
return super()._convert_slice_indexer(key, kind)
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index f09713409c6cf..877b3d1d2ba30 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -393,8 +393,8 @@ def _convert_scalar_indexer(self, key, kind: str):
return key
@Appender(Index._convert_slice_indexer.__doc__)
- def _convert_slice_indexer(self, key: slice, kind=None):
- assert kind in ["loc", "getitem", None]
+ def _convert_slice_indexer(self, key: slice, kind: str):
+ assert kind in ["loc", "getitem"]
# We always treat __getitem__ slicing as label-based
# translate to locations
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 70092c70a76ad..8cd085ff54b22 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -843,9 +843,6 @@ def _setitem_with_indexer(self, indexer, value):
value = getattr(value, "values", value).ravel()
# we can directly set the series here
- # as we select a slice indexer on the mi
- if isinstance(idx, slice):
- idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]), value=value)
| The one place where _convert_slice_indexer is currently called without kind is within _setitem_with_indexer, which I've determined should be iloc-only so it is a no-op. With that usage removed, we can be stricter about what gets passed. | https://api.github.com/repos/pandas-dev/pandas/pulls/31832 | 2020-02-09T19:05:30Z | 2020-02-09T22:27:13Z | 2020-02-09T22:27:13Z | 2020-02-09T23:55:35Z |
Test messages test integer | diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index 7a0c9300a43a2..9f0e6407c25f0 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -330,26 +330,37 @@ def test_error(self, data, all_arithmetic_operators):
opa = getattr(data, op)
# invalid scalars
- with pytest.raises(TypeError):
+ msg = (
+ r"(:?can only perform ops with numeric values)"
+ r"|(:?IntegerArray cannot perform the operation mod)"
+ )
+ with pytest.raises(TypeError, match=msg):
ops("foo")
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
- with pytest.raises(TypeError):
+ msg = (
+ r"(:?can only perform ops with numeric values)"
+ r"|(:?cannot perform .* with this index type: DatetimeArray)"
+ r"|(:?Addition/subtraction of integers and integer-arrays"
+ r" with DatetimeArray is no longer supported. *)"
+ )
+ with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
- with pytest.raises(NotImplementedError):
+ msg = r"can only perform ops with 1-d structures"
+ with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
@@ -589,7 +600,8 @@ def test_astype(self, all_data):
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
- with pytest.raises(ValueError):
+ msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
+ with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
@@ -730,16 +742,17 @@ def test_integer_array_constructor():
expected = integer_array([1, 2, 3, np.nan], dtype="int64")
tm.assert_extension_array_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = r".* should be .* numpy array. Use the 'integer_array' function instead"
+ with pytest.raises(TypeError, match=msg):
IntegerArray(values.tolist(), mask)
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
IntegerArray(values, mask.tolist())
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
IntegerArray(values.astype(float), mask)
-
- with pytest.raises(TypeError):
+ msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
+ with pytest.raises(TypeError, match=msg):
IntegerArray(values)
@@ -787,7 +800,11 @@ def test_integer_array_constructor_copy():
)
def test_to_integer_array_error(values):
# error in converting existing arrays to IntegerArrays
- with pytest.raises(TypeError):
+ msg = (
+ r"(:?.* cannot be converted to an IntegerDtype)"
+ r"|(:?values must be a 1D list-like)"
+ )
+ with pytest.raises(TypeError, match=msg):
integer_array(values)
@@ -1002,7 +1019,8 @@ def test_ufuncs_binary_int(ufunc):
@pytest.mark.parametrize("values", [[0, 1], [0, None]])
def test_ufunc_reduce_raises(values):
a = integer_array(values)
- with pytest.raises(NotImplementedError):
+ msg = r"The 'reduce' method is not supported."
+ with pytest.raises(NotImplementedError, match=msg):
np.add.reduce(a)
diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py
index 1f4351c7e20ee..0b95d3aa19366 100644
--- a/pandas/tests/arrays/test_period.py
+++ b/pandas/tests/arrays/test_period.py
@@ -371,7 +371,8 @@ def test_arrow_array(data, freq):
assert result.equals(expected)
# unsupported conversions
- with pytest.raises(TypeError):
+ msg = "Not supported to convert PeriodArray to 'double' type"
+ with pytest.raises(TypeError, match=msg):
pa.array(periods, type="float64")
with pytest.raises(TypeError, match="different 'freq'"):
| - [ ] xref #30999
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31831 | 2020-02-09T18:55:32Z | 2020-02-09T22:28:28Z | 2020-02-09T22:28:28Z | 2020-02-09T22:28:31Z |
CLN: Use self.loc for Series __getitem__ with IntervalIndex | diff --git a/pandas/core/series.py b/pandas/core/series.py
index dd4c1bce5d64c..056cee4caed41 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -916,11 +916,12 @@ def _get_with(self, key):
# Note: The key_type == "boolean" case should be caught by the
# com.is_bool_indexer check in __getitem__
if key_type == "integer":
+ # We need to decide whether to treat this as a positional indexer
+ # (i.e. self.iloc) or label-based (i.e. self.loc)
if self.index.is_integer() or self.index.is_floating():
return self.loc[key]
elif isinstance(self.index, IntervalIndex):
- indexer = self.index.get_indexer_for(key)
- return self.iloc[indexer]
+ return self.loc[key]
else:
return self.iloc[key]
| Clarify that by the time we get to the affected code, the relevant task is determing whether to use loc or iloc. | https://api.github.com/repos/pandas-dev/pandas/pulls/31830 | 2020-02-09T18:52:18Z | 2020-02-09T22:17:19Z | 2020-02-09T22:17:19Z | 2020-02-09T23:58:56Z |
DOC: Use consistent casing in headers | diff --git a/doc/source/user_guide/boolean.rst b/doc/source/user_guide/boolean.rst
index 5276bc6142206..4f0ad0e8ceaeb 100644
--- a/doc/source/user_guide/boolean.rst
+++ b/doc/source/user_guide/boolean.rst
@@ -9,7 +9,7 @@
.. _boolean:
**************************
-Nullable Boolean Data Type
+Nullable Boolean data type
**************************
.. versionadded:: 1.0.0
| This is super nitty, but in the left side bar of the user guide the nullable Boolean data type section is the only one that uses all caps (other than Frequently Asked Questions, which possibly makes sense), so this might look a bit better.
https://pandas.pydata.org/pandas-docs/stable/user_guide/index.html#user-guide | https://api.github.com/repos/pandas-dev/pandas/pulls/31829 | 2020-02-09T17:41:36Z | 2020-02-09T22:26:19Z | 2020-02-09T22:26:19Z | 2020-02-09T22:28:15Z |
Backport PR #31820 on branch 1.0.x (correct redirections in doc/redirect.csv for rolling) | diff --git a/doc/redirects.csv b/doc/redirects.csv
index 3a990b09e7f7d..ef93955c14fe6 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -271,21 +271,21 @@ generated/pandas.core.window.Expanding.skew,../reference/api/pandas.core.window.
generated/pandas.core.window.Expanding.std,../reference/api/pandas.core.window.Expanding.std
generated/pandas.core.window.Expanding.sum,../reference/api/pandas.core.window.Expanding.sum
generated/pandas.core.window.Expanding.var,../reference/api/pandas.core.window.Expanding.var
-generated/pandas.core.window.Rolling.aggregate,../reference/api/pandas.core.window.Rolling.aggregate
-generated/pandas.core.window.Rolling.apply,../reference/api/pandas.core.window.Rolling.apply
-generated/pandas.core.window.Rolling.corr,../reference/api/pandas.core.window.Rolling.corr
-generated/pandas.core.window.Rolling.count,../reference/api/pandas.core.window.Rolling.count
-generated/pandas.core.window.Rolling.cov,../reference/api/pandas.core.window.Rolling.cov
-generated/pandas.core.window.Rolling.kurt,../reference/api/pandas.core.window.Rolling.kurt
-generated/pandas.core.window.Rolling.max,../reference/api/pandas.core.window.Rolling.max
-generated/pandas.core.window.Rolling.mean,../reference/api/pandas.core.window.Rolling.mean
-generated/pandas.core.window.Rolling.median,../reference/api/pandas.core.window.Rolling.median
-generated/pandas.core.window.Rolling.min,../reference/api/pandas.core.window.Rolling.min
-generated/pandas.core.window.Rolling.quantile,../reference/api/pandas.core.window.Rolling.quantile
-generated/pandas.core.window.Rolling.skew,../reference/api/pandas.core.window.Rolling.skew
-generated/pandas.core.window.Rolling.std,../reference/api/pandas.core.window.Rolling.std
-generated/pandas.core.window.Rolling.sum,../reference/api/pandas.core.window.Rolling.sum
-generated/pandas.core.window.Rolling.var,../reference/api/pandas.core.window.Rolling.var
+generated/pandas.core.window.Rolling.aggregate,../reference/api/pandas.core.window.rolling.Rolling.aggregate
+generated/pandas.core.window.Rolling.apply,../reference/api/pandas.core.window.rolling.Rolling.apply
+generated/pandas.core.window.Rolling.corr,../reference/api/pandas.core.window.rolling.Rolling.corr
+generated/pandas.core.window.Rolling.count,../reference/api/pandas.core.window.rolling.Rolling.count
+generated/pandas.core.window.Rolling.cov,../reference/api/pandas.core.window.rolling.Rolling.cov
+generated/pandas.core.window.Rolling.kurt,../reference/api/pandas.core.window.rolling.Rolling.kurt
+generated/pandas.core.window.Rolling.max,../reference/api/pandas.core.window.rolling.Rolling.max
+generated/pandas.core.window.Rolling.mean,../reference/api/pandas.core.window.rolling.Rolling.mean
+generated/pandas.core.window.Rolling.median,../reference/api/pandas.core.window.rolling.Rolling.median
+generated/pandas.core.window.Rolling.min,../reference/api/pandas.core.window.rolling.Rolling.min
+generated/pandas.core.window.Rolling.quantile,../reference/api/pandas.core.window.rolling.Rolling.quantile
+generated/pandas.core.window.Rolling.skew,../reference/api/pandas.core.window.rolling.Rolling.skew
+generated/pandas.core.window.Rolling.std,../reference/api/pandas.core.window.rolling.Rolling.std
+generated/pandas.core.window.Rolling.sum,../reference/api/pandas.core.window.rolling.Rolling.sum
+generated/pandas.core.window.Rolling.var,../reference/api/pandas.core.window.rolling.Rolling.var
generated/pandas.core.window.Window.mean,../reference/api/pandas.core.window.Window.mean
generated/pandas.core.window.Window.sum,../reference/api/pandas.core.window.Window.sum
generated/pandas.crosstab,../reference/api/pandas.crosstab
| Backport PR #31820: correct redirections in doc/redirect.csv for rolling | https://api.github.com/repos/pandas-dev/pandas/pulls/31826 | 2020-02-09T17:17:34Z | 2020-02-09T18:43:52Z | 2020-02-09T18:43:52Z | 2020-02-09T18:43:52Z |
Backport PR #31788 on branch 1.0.x (BUG: Too aggressive typing in NDFrame.align) | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index 70aaaa6d0a60d..b055b44274bd8 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -16,6 +16,7 @@ Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :meth:`DataFrame.to_excel` when ``columns`` kwarg is passed (:issue:`31677`)
+- Fixed regression in :meth:`Series.align` when ``other`` is a DataFrame and ``method`` is not None (:issue:`31785`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 18e6b913cc10d..c8a6c7c760498 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8539,9 +8539,7 @@ def _align_frame(
left = self._ensure_type(
left.fillna(method=method, axis=fill_axis, limit=limit)
)
- right = self._ensure_type(
- right.fillna(method=method, axis=fill_axis, limit=limit)
- )
+ right = right.fillna(method=method, axis=fill_axis, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py
index 47f40e24e1637..61d760052f87b 100644
--- a/pandas/tests/series/indexing/test_alter_index.py
+++ b/pandas/tests/series/indexing/test_alter_index.py
@@ -153,6 +153,17 @@ def test_align_multiindex():
tm.assert_series_equal(expr, res2l)
+@pytest.mark.parametrize("method", ["backfill", "bfill", "pad", "ffill", None])
+def test_align_method(method):
+ # GH31788
+ ser = pd.Series(range(3), index=range(3))
+ df = pd.DataFrame(0.0, index=range(3), columns=range(3))
+
+ result_ser, result_df = ser.align(df, method=method)
+ tm.assert_series_equal(result_ser, ser)
+ tm.assert_frame_equal(result_df, df)
+
+
def test_reindex(datetime_series, string_series):
identity = string_series.reindex(string_series.index)
| Backport PR #31788: BUG: Too aggressive typing in NDFrame.align | https://api.github.com/repos/pandas-dev/pandas/pulls/31825 | 2020-02-09T17:13:34Z | 2020-02-09T18:23:43Z | 2020-02-09T18:23:43Z | 2020-02-09T18:23:44Z |
Test messages test period | diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py
index abb667260f094..4cf1988a33de1 100644
--- a/pandas/tests/arithmetic/test_period.py
+++ b/pandas/tests/arithmetic/test_period.py
@@ -153,14 +153,17 @@ def test_eq_integer_disallowed(self, other):
result = idx == other
tm.assert_numpy_array_equal(result, expected)
-
- with pytest.raises(TypeError):
+ msg = (
+ r"(:?Invalid comparison between dtype=period\[D\] and .*)"
+ r"|(:?Cannot compare type Period with type .*)"
+ )
+ with pytest.raises(TypeError, match=msg):
idx < other
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
idx > other
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
idx <= other
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
idx >= other
def test_pi_cmp_period(self):
@@ -587,10 +590,11 @@ def test_parr_add_iadd_parr_raises(self, box_with_array):
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
- with pytest.raises(TypeError):
+ msg = r"unsupported operand type\(s\) for \+: .* and .*"
+ with pytest.raises(TypeError, match=msg):
rng + other
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
rng += other
def test_pi_sub_isub_pi(self):
@@ -625,7 +629,8 @@ def test_parr_sub_pi_mismatched_freq(self, box_with_array):
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
- with pytest.raises(IncompatibleFrequency):
+ msg = r"Input has different freq=[HD] from PeriodArray\(freq=[DH]\)"
+ with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@@ -677,7 +682,8 @@ def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
- with pytest.raises(TypeError):
+ msg = r"unsupported operand type\(s\) for [+-]: .* and .*"
+ with pytest.raises(TypeError, match=msg):
op(pi, other)
@pytest.mark.parametrize(
@@ -700,13 +706,18 @@ def test_parr_add_sub_invalid(self, other, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
- with pytest.raises(TypeError):
+ msg = (
+ r"(:?cannot add PeriodArray and .*)"
+ r"|(:?cannot subtract .* from (:?a\s)?.*)"
+ r"|(:?unsupported operand type\(s\) for \+: .* and .*)"
+ )
+ with pytest.raises(TypeError, match=msg):
rng + other
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
other + rng
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
rng - other
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
other - rng
# -----------------------------------------------------------------
@@ -717,14 +728,16 @@ def test_pi_add_sub_td64_array_non_tick_raises(self):
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
- with pytest.raises(IncompatibleFrequency):
+ msg = r"Input has different freq=None from PeriodArray\(freq=Q-DEC\)"
+ with pytest.raises(IncompatibleFrequency, match=msg):
rng + tdarr
- with pytest.raises(IncompatibleFrequency):
+ with pytest.raises(IncompatibleFrequency, match=msg):
tdarr + rng
- with pytest.raises(IncompatibleFrequency):
+ with pytest.raises(IncompatibleFrequency, match=msg):
rng - tdarr
- with pytest.raises(TypeError):
+ msg = r"cannot subtract PeriodArray from timedelta64\[ns\]"
+ with pytest.raises(TypeError, match=msg):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
@@ -751,10 +764,11 @@ def test_pi_add_sub_td64_array_tick(self):
result = rng - tdarr
tm.assert_index_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = r"cannot subtract .* from .*"
+ with pytest.raises(TypeError, match=msg):
tdarr - rng
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
tdi - rng
# -----------------------------------------------------------------
@@ -783,10 +797,11 @@ def test_pi_add_offset_array(self, box):
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
- with pytest.raises(IncompatibleFrequency):
+ msg = r"Input cannot be converted to Period\(freq=Q-DEC\)"
+ with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
- with pytest.raises(IncompatibleFrequency):
+ with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@@ -811,10 +826,11 @@ def test_pi_sub_offset_array(self, box):
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
- with pytest.raises(IncompatibleFrequency):
+ msg = r"Input has different freq=-1M from Period\(freq=Q-DEC\)"
+ with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
- with pytest.raises(IncompatibleFrequency):
+ with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
@@ -924,7 +940,8 @@ def test_pi_sub_intarray(self, int_holder):
expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = r"bad operand type for unary -: 'PeriodArray'"
+ with pytest.raises(TypeError, match=msg):
other - pi
# ---------------------------------------------------------------
@@ -952,7 +969,11 @@ def test_pi_add_timedeltalike_minute_gt1(self, three_days):
result = rng - other
tm.assert_index_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = (
+ r"(:?bad operand type for unary -: 'PeriodArray')"
+ r"|(:?cannot subtract PeriodArray from timedelta64\[[hD]\])"
+ )
+ with pytest.raises(TypeError, match=msg):
other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
@@ -974,8 +995,11 @@ def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
-
- with pytest.raises(TypeError):
+ msg = (
+ r"(:?bad operand type for unary -: 'PeriodArray')"
+ r"|(:?cannot subtract PeriodArray from timedelta64\[[hD]\])"
+ )
+ with pytest.raises(TypeError, match=msg):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
@@ -1110,7 +1134,8 @@ def test_parr_add_sub_td64_nat(self, box_with_array, transpose):
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = r"cannot subtract .* from .*"
+ with pytest.raises(TypeError, match=msg):
other - obj
@pytest.mark.parametrize(
@@ -1133,7 +1158,8 @@ def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other):
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
- with pytest.raises(TypeError):
+ msg = r"cannot subtract .* from .*"
+ with pytest.raises(TypeError, match=msg):
other - obj
# ---------------------------------------------------------------
| - [ ] xref #30999
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31824 | 2020-02-09T16:53:58Z | 2020-02-09T18:04:11Z | 2020-02-09T18:04:11Z | 2020-02-09T18:53:08Z |
DOC: Fix style guide typos | diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst
index a295038b5a0bd..bcddc033a61f5 100644
--- a/doc/source/development/code_style.rst
+++ b/doc/source/development/code_style.rst
@@ -119,14 +119,14 @@ For example:
.. code-block:: python
value = str
- f"Unknown recived value, got: {repr(value)}"
+ f"Unknown received value, got: {repr(value)}"
**Good:**
.. code-block:: python
value = str
- f"Unknown recived type, got: '{type(value).__name__}'"
+ f"Unknown received type, got: '{type(value).__name__}'"
Imports (aim for absolute)
@@ -135,11 +135,11 @@ Imports (aim for absolute)
In Python 3, absolute imports are recommended. In absolute import doing something
like ``import string`` will import the string module rather than ``string.py``
in the same directory. As much as possible, you should try to write out
-absolute imports that show the whole import chain from toplevel pandas.
+absolute imports that show the whole import chain from top-level pandas.
-Explicit relative imports are also supported in Python 3. But it is not
-recommended to use it. Implicit relative imports should never be used
-and is removed in Python 3.
+Explicit relative imports are also supported in Python 3 but it is not
+recommended to use them. Implicit relative imports should never be used
+and are removed in Python 3.
For example:
| A few minor spelling / grammar edits | https://api.github.com/repos/pandas-dev/pandas/pulls/31822 | 2020-02-09T14:49:47Z | 2020-02-09T17:11:05Z | 2020-02-09T17:11:05Z | 2020-02-09T17:12:14Z |
correct redirections in doc/redirect.csv for rolling | diff --git a/doc/redirects.csv b/doc/redirects.csv
index 3a990b09e7f7d..ef93955c14fe6 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -271,21 +271,21 @@ generated/pandas.core.window.Expanding.skew,../reference/api/pandas.core.window.
generated/pandas.core.window.Expanding.std,../reference/api/pandas.core.window.Expanding.std
generated/pandas.core.window.Expanding.sum,../reference/api/pandas.core.window.Expanding.sum
generated/pandas.core.window.Expanding.var,../reference/api/pandas.core.window.Expanding.var
-generated/pandas.core.window.Rolling.aggregate,../reference/api/pandas.core.window.Rolling.aggregate
-generated/pandas.core.window.Rolling.apply,../reference/api/pandas.core.window.Rolling.apply
-generated/pandas.core.window.Rolling.corr,../reference/api/pandas.core.window.Rolling.corr
-generated/pandas.core.window.Rolling.count,../reference/api/pandas.core.window.Rolling.count
-generated/pandas.core.window.Rolling.cov,../reference/api/pandas.core.window.Rolling.cov
-generated/pandas.core.window.Rolling.kurt,../reference/api/pandas.core.window.Rolling.kurt
-generated/pandas.core.window.Rolling.max,../reference/api/pandas.core.window.Rolling.max
-generated/pandas.core.window.Rolling.mean,../reference/api/pandas.core.window.Rolling.mean
-generated/pandas.core.window.Rolling.median,../reference/api/pandas.core.window.Rolling.median
-generated/pandas.core.window.Rolling.min,../reference/api/pandas.core.window.Rolling.min
-generated/pandas.core.window.Rolling.quantile,../reference/api/pandas.core.window.Rolling.quantile
-generated/pandas.core.window.Rolling.skew,../reference/api/pandas.core.window.Rolling.skew
-generated/pandas.core.window.Rolling.std,../reference/api/pandas.core.window.Rolling.std
-generated/pandas.core.window.Rolling.sum,../reference/api/pandas.core.window.Rolling.sum
-generated/pandas.core.window.Rolling.var,../reference/api/pandas.core.window.Rolling.var
+generated/pandas.core.window.Rolling.aggregate,../reference/api/pandas.core.window.rolling.Rolling.aggregate
+generated/pandas.core.window.Rolling.apply,../reference/api/pandas.core.window.rolling.Rolling.apply
+generated/pandas.core.window.Rolling.corr,../reference/api/pandas.core.window.rolling.Rolling.corr
+generated/pandas.core.window.Rolling.count,../reference/api/pandas.core.window.rolling.Rolling.count
+generated/pandas.core.window.Rolling.cov,../reference/api/pandas.core.window.rolling.Rolling.cov
+generated/pandas.core.window.Rolling.kurt,../reference/api/pandas.core.window.rolling.Rolling.kurt
+generated/pandas.core.window.Rolling.max,../reference/api/pandas.core.window.rolling.Rolling.max
+generated/pandas.core.window.Rolling.mean,../reference/api/pandas.core.window.rolling.Rolling.mean
+generated/pandas.core.window.Rolling.median,../reference/api/pandas.core.window.rolling.Rolling.median
+generated/pandas.core.window.Rolling.min,../reference/api/pandas.core.window.rolling.Rolling.min
+generated/pandas.core.window.Rolling.quantile,../reference/api/pandas.core.window.rolling.Rolling.quantile
+generated/pandas.core.window.Rolling.skew,../reference/api/pandas.core.window.rolling.Rolling.skew
+generated/pandas.core.window.Rolling.std,../reference/api/pandas.core.window.rolling.Rolling.std
+generated/pandas.core.window.Rolling.sum,../reference/api/pandas.core.window.rolling.Rolling.sum
+generated/pandas.core.window.Rolling.var,../reference/api/pandas.core.window.rolling.Rolling.var
generated/pandas.core.window.Window.mean,../reference/api/pandas.core.window.Window.mean
generated/pandas.core.window.Window.sum,../reference/api/pandas.core.window.Window.sum
generated/pandas.crosstab,../reference/api/pandas.crosstab
| - [x] closes #31762
| https://api.github.com/repos/pandas-dev/pandas/pulls/31820 | 2020-02-09T11:40:01Z | 2020-02-09T17:17:25Z | 2020-02-09T17:17:24Z | 2020-02-11T12:59:56Z |
DOC: '10 minutes to pandas' - <TAB> completion section now show a con… | diff --git a/doc/source/getting_started/10min.rst b/doc/source/getting_started/10min.rst
index 3055a22129b91..a635b5656bd2d 100644
--- a/doc/source/getting_started/10min.rst
+++ b/doc/source/getting_started/10min.rst
@@ -70,17 +70,17 @@ will be completed:
df2.abs df2.boxplot
df2.add df2.C
df2.add_prefix df2.clip
- df2.add_suffix df2.clip_lower
- df2.align df2.clip_upper
- df2.all df2.columns
+ df2.add_suffix df2.columns
+ df2.align df2.copy
+ df2.all df2.count
df2.any df2.combine
- df2.append df2.combine_first
- df2.apply df2.consolidate
- df2.applymap
- df2.D
+ df2.append df2.D
+ df2.apply df2.describe
+ df2.applymap df2.diff
+ df2.B df2.duplicated
As you can see, the columns ``A``, ``B``, ``C``, and ``D`` are automatically
-tab completed. ``E`` is there as well; the rest of the attributes have been
+tab completed. ``E`` and ``F`` are there as well; the rest of the attributes have been
truncated for brevity.
Viewing data
| …sistent block of suggestions. Solve issue #31526
- [x] closes #31526
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I didn't run those listed tests above, however I doubt it will break something.
This PR only correct a *Typo* I found on documentation. I hope I'll be useful to improve this project. | https://api.github.com/repos/pandas-dev/pandas/pulls/31818 | 2020-02-09T04:30:31Z | 2020-02-09T17:23:54Z | 2020-02-09T17:23:54Z | 2020-02-09T17:23:57Z |
BUG: Series[dim3array] failing to raise ValueError for some Index subclasses | diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0786674daf874..1b045b986ea01 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -919,7 +919,7 @@ def _get_with(self, key):
indexer = self.index.get_indexer_for(key)
return self.iloc[indexer]
else:
- return self._get_values(key)
+ return self.iloc[key]
if isinstance(key, (list, tuple)):
# TODO: de-dup with tuple case handled above?
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 98940b64330b4..21f8e3840e472 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -80,33 +80,18 @@ def test_getitem_ndarray_3d(self, index, obj, idxr, idxr_id):
idxr = idxr(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
- msg = (
- r"Buffer has wrong number of dimensions \(expected 1, "
- r"got 3\)|"
- "Cannot index with multidimensional key|"
- r"Wrong number of dimensions. values.ndim != ndim \[3 != 1\]|"
- "Index data must be 1-dimensional"
+ msg = "|".join(
+ [
+ r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
+ "Cannot index with multidimensional key",
+ r"Wrong number of dimensions. values.ndim != ndim \[3 != 1\]",
+ "Index data must be 1-dimensional",
+ ]
)
- if (
- isinstance(obj, Series)
- and idxr_id == "getitem"
- and index.inferred_type
- in [
- "string",
- "datetime64",
- "period",
- "timedelta64",
- "boolean",
- "categorical",
- ]
- ):
+ with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
idxr[nd3]
- else:
- with pytest.raises(ValueError, match=msg):
- with tm.assert_produces_warning(DeprecationWarning):
- idxr[nd3]
@pytest.mark.parametrize(
"index", tm.all_index_generator(5), ids=lambda x: type(x).__name__
| https://api.github.com/repos/pandas-dev/pandas/pulls/31816 | 2020-02-09T04:00:15Z | 2020-02-09T16:45:05Z | 2020-02-09T16:45:05Z | 2020-02-09T17:24:54Z | |
REF: use public indexers in groupby.ops | diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 43d253f632f0f..b27072aa66708 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -309,8 +309,7 @@ cdef class SeriesGrouper(_BaseGrouper):
def __init__(self, object series, object f, object labels,
Py_ssize_t ngroups, object dummy):
- # in practice we always pass either obj[:0] or the
- # safer obj._get_values(slice(None, 0))
+ # in practice we always pass obj.iloc[:0] or equivalent
assert dummy is not None
if len(series) == 0:
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 761353ca5a6ca..4e593ce543ea6 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -658,7 +658,7 @@ def _aggregate_series_fast(self, obj: Series, func):
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
- dummy = obj._get_values(slice(None, 0))
+ dummy = obj.iloc[:0]
indexer = get_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer)
group_index = algorithms.take_nd(group_index, indexer, allow_fill=False)
@@ -780,7 +780,11 @@ def get_iterator(self, data: FrameOrSeries, axis: int = 0):
Generator yielding sequence of (name, subsetted object)
for each group
"""
- slicer = lambda start, edge: data._slice(slice(start, edge), axis=axis)
+ if axis == 0:
+ slicer = lambda start, edge: data.iloc[start:edge]
+ else:
+ slicer = lambda start, edge: data.iloc[:, start:edge]
+
length = len(data.axes[axis])
start = 0
@@ -919,7 +923,7 @@ def _chop(self, sdata, slice_obj: slice) -> NDFrame:
class SeriesSplitter(DataSplitter):
def _chop(self, sdata: Series, slice_obj: slice) -> Series:
- return sdata._get_values(slice_obj)
+ return sdata.iloc[slice_obj]
class FrameSplitter(DataSplitter):
@@ -934,7 +938,7 @@ def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:
if self.axis == 0:
return sdata.iloc[slice_obj]
else:
- return sdata._slice(slice_obj, axis=1)
+ return sdata.iloc[:, slice_obj]
def get_splitter(data: FrameOrSeries, *args, **kwargs) -> DataSplitter:
diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py
index ad71f73e80e64..ff74d374e5e3f 100644
--- a/pandas/tests/groupby/test_bin_groupby.py
+++ b/pandas/tests/groupby/test_bin_groupby.py
@@ -11,7 +11,7 @@
def test_series_grouper():
obj = Series(np.random.randn(10))
- dummy = obj[:0]
+ dummy = obj.iloc[:0]
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
@@ -28,7 +28,7 @@ def test_series_grouper():
def test_series_grouper_requires_nonempty_raises():
# GH#29500
obj = Series(np.random.randn(10))
- dummy = obj[:0]
+ dummy = obj.iloc[:0]
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
with pytest.raises(ValueError, match="SeriesGrouper requires non-empty `series`"):
| https://api.github.com/repos/pandas-dev/pandas/pulls/31814 | 2020-02-09T02:53:12Z | 2020-02-09T17:43:31Z | 2020-02-09T17:43:31Z | 2020-02-09T17:48:33Z | |
BUG: iloc setitem with 3d indexer not raising | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 85a26179276f5..536aa53c95fba 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -830,6 +830,9 @@ def setitem(self, indexer, value):
"""
transpose = self.ndim == 2
+ if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:
+ raise ValueError(f"Cannot set values with ndim > {self.ndim}")
+
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 06bf906be7093..d651fe9f67773 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -1678,6 +1678,10 @@ def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFra
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
+ if loc.ndim == 2 and loc.shape[1] == 1:
+ # GH#31813 avoid trying to set Series values with wrong
+ # dimension
+ loc = loc[:, 0]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 98940b64330b4..476eb316471de 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -133,38 +133,24 @@ def test_setitem_ndarray_3d(self, index, obj, idxr, idxr_id):
idxr = idxr(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
- msg = (
- r"Buffer has wrong number of dimensions \(expected 1, "
- r"got 3\)|"
- "'pandas._libs.interval.IntervalTree' object has no attribute "
- "'get_loc'|" # AttributeError
- "unhashable type: 'numpy.ndarray'|" # TypeError
- "No matching signature found|" # TypeError
- r"^\[\[\[|" # pandas.core.indexing.IndexingError
- "Index data must be 1-dimensional"
- )
-
- if (idxr_id == "iloc") or (
- (
- isinstance(obj, Series)
- and idxr_id == "setitem"
- and index.inferred_type
- in [
- "floating",
- "string",
- "datetime64",
- "period",
- "timedelta64",
- "boolean",
- "categorical",
- ]
- )
+ if idxr_id == "iloc":
+ err = ValueError
+ msg = f"Cannot set values with ndim > {obj.ndim}"
+ elif (
+ isinstance(index, pd.IntervalIndex)
+ and idxr_id == "setitem"
+ and obj.ndim == 1
):
- idxr[nd3] = 0
+ err = AttributeError
+ msg = (
+ "'pandas._libs.interval.IntervalTree' object has no attribute 'get_loc'"
+ )
else:
- err = (ValueError, AttributeError)
- with pytest.raises(err, match=msg):
- idxr[nd3] = 0
+ err = ValueError
+ msg = r"Buffer has wrong number of dimensions \(expected 1, got 3\)|"
+
+ with pytest.raises(err, match=msg):
+ idxr[nd3] = 0
def test_inf_upcast(self):
# GH 16957
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
also get the exceptions/messages in test_setitem_ndarray_3d to be maximally-specific | https://api.github.com/repos/pandas-dev/pandas/pulls/31813 | 2020-02-09T00:44:16Z | 2020-02-09T16:44:18Z | 2020-02-09T16:44:18Z | 2020-02-09T17:24:29Z |
CLN: tests.indexing.common | diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 3c027b035c2b8..4804172a22529 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -1,11 +1,8 @@
""" common utilities """
import itertools
-from warnings import catch_warnings
import numpy as np
-from pandas.core.dtypes.common import is_scalar
-
from pandas import DataFrame, Float64Index, MultiIndex, Series, UInt64Index, date_range
import pandas._testing as tm
@@ -115,27 +112,6 @@ def generate_indices(self, f, values=False):
return itertools.product(*axes)
- def get_result(self, obj, method, key, axis):
- """ return the result for this obj with this key and this axis """
-
- if isinstance(key, dict):
- key = key[axis]
-
- # use an artificial conversion to map the key as integers to the labels
- # so ix can work for comparisons
- if method == "indexer":
- method = "ix"
- key = obj._get_axis(axis)[key]
-
- # in case we actually want 0 index slicing
- with catch_warnings(record=True):
- try:
- xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
- except AttributeError:
- xp = getattr(obj, method).__getitem__(key)
-
- return xp
-
def get_value(self, name, f, i, values=False):
""" return the value for the location i """
@@ -170,45 +146,30 @@ def check_values(self, f, func, values=False):
tm.assert_almost_equal(result, expected)
def check_result(
- self, method1, key1, method2, key2, typs=None, axes=None, fails=None,
+ self, method, key, typs=None, axes=None, fails=None,
):
- def _eq(axis, obj, key1, key2):
+ def _eq(axis, obj, key):
""" compare equal for these 2 keys """
- if axis > obj.ndim - 1:
- return
+ axified = _axify(obj, key, axis)
try:
- rs = getattr(obj, method1).__getitem__(_axify(obj, key1, axis))
-
- try:
- xp = self.get_result(obj=obj, method=method2, key=key2, axis=axis)
- except (KeyError, IndexError):
- # TODO: why is this allowed?
- return
-
- if is_scalar(rs) and is_scalar(xp):
- assert rs == xp
- else:
- tm.assert_equal(rs, xp)
+ getattr(obj, method).__getitem__(axified)
except (IndexError, TypeError, KeyError) as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
- result = f"ok ({type(detail).__name__})"
return
-
- result = type(detail).__name__
- raise AssertionError(result, detail)
+ raise
if typs is None:
typs = self._typs
if axes is None:
axes = [0, 1]
- elif not isinstance(axes, (tuple, list)):
- assert isinstance(axes, int)
+ else:
+ assert axes in [0, 1]
axes = [axes]
# check
@@ -217,8 +178,8 @@ def _eq(axis, obj, key1, key2):
d = getattr(self, kind)
for ax in axes:
for typ in typs:
- if typ not in self._typs:
- continue
+ assert typ in self._typs
obj = d[typ]
- _eq(axis=ax, obj=obj, key1=key1, key2=key2)
+ if ax < obj.ndim:
+ _eq(axis=ax, obj=obj, key=key)
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 08ea4c1579ef8..bc5ba3d9b03e5 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -18,8 +18,6 @@ class TestiLoc(Base):
def test_iloc_getitem_int(self):
# integer
self.check_result(
- "iloc",
- 2,
"iloc",
2,
typs=["labels", "mixed", "ts", "floats", "empty"],
@@ -29,8 +27,6 @@ def test_iloc_getitem_int(self):
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result(
- "iloc",
- -1,
"iloc",
-1,
typs=["labels", "mixed", "ts", "floats", "empty"],
@@ -39,8 +35,6 @@ def test_iloc_getitem_neg_int(self):
def test_iloc_getitem_list_int(self):
self.check_result(
- "iloc",
- [0, 1, 2],
"iloc",
[0, 1, 2],
typs=["labels", "mixed", "ts", "floats", "empty"],
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 3a726fb9923ee..02652d993e0f3 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -16,32 +16,27 @@ class TestLoc(Base):
def test_loc_getitem_int(self):
# int label
- self.check_result("loc", 2, "loc", 2, typs=["label"], fails=KeyError)
+ self.check_result("loc", 2, typs=["labels"], fails=TypeError)
def test_loc_getitem_label(self):
# label
- self.check_result("loc", "c", "loc", "c", typs=["empty"], fails=KeyError)
+ self.check_result("loc", "c", typs=["empty"], fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result(
- "loc",
- "f",
- "loc",
- "f",
- typs=["ints", "uints", "labels", "mixed", "ts"],
- fails=KeyError,
+ "loc", "f", typs=["ints", "uints", "labels", "mixed", "ts"], fails=KeyError,
)
- self.check_result("loc", "f", "ix", "f", typs=["floats"], fails=KeyError)
- self.check_result("loc", "f", "loc", "f", typs=["floats"], fails=KeyError)
+ self.check_result("loc", "f", typs=["floats"], fails=KeyError)
+ self.check_result("loc", "f", typs=["floats"], fails=KeyError)
self.check_result(
- "loc", 20, "loc", 20, typs=["ints", "uints", "mixed"], fails=KeyError,
+ "loc", 20, typs=["ints", "uints", "mixed"], fails=KeyError,
)
- self.check_result("loc", 20, "loc", 20, typs=["labels"], fails=TypeError)
- self.check_result("loc", 20, "loc", 20, typs=["ts"], axes=0, fails=TypeError)
- self.check_result("loc", 20, "loc", 20, typs=["floats"], axes=0, fails=KeyError)
+ self.check_result("loc", 20, typs=["labels"], fails=TypeError)
+ self.check_result("loc", 20, typs=["ts"], axes=0, fails=TypeError)
+ self.check_result("loc", 20, typs=["floats"], axes=0, fails=KeyError)
def test_loc_getitem_label_list(self):
# TODO: test something here?
@@ -50,49 +45,25 @@ def test_loc_getitem_label_list(self):
def test_loc_getitem_label_list_with_missing(self):
self.check_result(
- "loc", [0, 1, 2], "loc", [0, 1, 2], typs=["empty"], fails=KeyError,
+ "loc", [0, 1, 2], typs=["empty"], fails=KeyError,
)
self.check_result(
- "loc",
- [0, 2, 10],
- "ix",
- [0, 2, 10],
- typs=["ints", "uints", "floats"],
- axes=0,
- fails=KeyError,
+ "loc", [0, 2, 10], typs=["ints", "uints", "floats"], axes=0, fails=KeyError,
)
self.check_result(
- "loc",
- [3, 6, 7],
- "ix",
- [3, 6, 7],
- typs=["ints", "uints", "floats"],
- axes=1,
- fails=KeyError,
+ "loc", [3, 6, 7], typs=["ints", "uints", "floats"], axes=1, fails=KeyError,
)
# GH 17758 - MultiIndex and missing keys
self.check_result(
- "loc",
- [(1, 3), (1, 4), (2, 5)],
- "ix",
- [(1, 3), (1, 4), (2, 5)],
- typs=["multi"],
- axes=0,
- fails=KeyError,
+ "loc", [(1, 3), (1, 4), (2, 5)], typs=["multi"], axes=0, fails=KeyError,
)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result(
- "loc",
- [20, 30, 40],
- "loc",
- [20, 30, 40],
- typs=["ints", "uints"],
- axes=1,
- fails=KeyError,
+ "loc", [20, 30, 40], typs=["ints", "uints"], axes=1, fails=KeyError,
)
def test_loc_getitem_label_array_like(self):
@@ -104,7 +75,7 @@ def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
- self.check_result("loc", b, "loc", b, typs=["empty"], fails=IndexError)
+ self.check_result("loc", b, typs=["empty"], fails=IndexError)
def test_loc_getitem_label_slice(self):
@@ -115,8 +86,6 @@ def test_loc_getitem_label_slice(self):
# GH 14316
self.check_result(
- "loc",
- slice(1, 3),
"loc",
slice(1, 3),
typs=["labels", "mixed", "empty", "ts", "floats"],
@@ -124,42 +93,18 @@ def test_loc_getitem_label_slice(self):
)
self.check_result(
- "loc",
- slice("20130102", "20130104"),
- "loc",
- slice("20130102", "20130104"),
- typs=["ts"],
- axes=1,
- fails=TypeError,
+ "loc", slice("20130102", "20130104"), typs=["ts"], axes=1, fails=TypeError,
)
self.check_result(
- "loc",
- slice(2, 8),
- "loc",
- slice(2, 8),
- typs=["mixed"],
- axes=0,
- fails=TypeError,
+ "loc", slice(2, 8), typs=["mixed"], axes=0, fails=TypeError,
)
self.check_result(
- "loc",
- slice(2, 8),
- "loc",
- slice(2, 8),
- typs=["mixed"],
- axes=1,
- fails=KeyError,
+ "loc", slice(2, 8), typs=["mixed"], axes=1, fails=KeyError,
)
self.check_result(
- "loc",
- slice(2, 4, 2),
- "loc",
- slice(2, 4, 2),
- typs=["mixed"],
- axes=0,
- fails=TypeError,
+ "loc", slice(2, 4, 2), typs=["mixed"], axes=0, fails=TypeError,
)
| A lot of code in there not doing anything, AFAICT leftover from ix cross-comparisons. | https://api.github.com/repos/pandas-dev/pandas/pulls/31812 | 2020-02-08T23:43:54Z | 2020-02-09T15:00:05Z | 2020-02-09T15:00:05Z | 2020-02-09T15:41:21Z |
ENH: add 'origin' and 'offset' arguments to 'resample' and 'pd.Grouper' | diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 6ba58310000cb..076c1313eec4e 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -1572,10 +1572,9 @@ end of the interval is closed:
ts.resample('5Min', closed='left').mean()
-Parameters like ``label`` and ``loffset`` are used to manipulate the resulting
-labels. ``label`` specifies whether the result is labeled with the beginning or
-the end of the interval. ``loffset`` performs a time adjustment on the output
-labels.
+Parameters like ``label`` are used to manipulate the resulting labels.
+``label`` specifies whether the result is labeled with the beginning or
+the end of the interval.
.. ipython:: python
@@ -1583,8 +1582,6 @@ labels.
ts.resample('5Min', label='left').mean()
- ts.resample('5Min', label='left', loffset='1s').mean()
-
.. warning::
The default values for ``label`` and ``closed`` is '**left**' for all
@@ -1789,6 +1786,58 @@ natural and functions similarly to :py:func:`itertools.groupby`:
See :ref:`groupby.iterating-label` or :class:`Resampler.__iter__` for more.
+.. _timeseries.adjust-the-start-of-the-bins:
+
+Use `origin` or `offset` to adjust the start of the bins
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 1.1.0
+
+The bins of the grouping are adjusted based on the beginning of the day of the time series starting point. This works well with frequencies that are multiples of a day (like `30D`) or that divide a day evenly (like `90s` or `1min`). This can create inconsistencies with some frequencies that do not meet this criteria. To change this behavior you can specify a fixed Timestamp with the argument ``origin``.
+
+For example:
+
+.. ipython:: python
+
+ start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
+ middle = '2000-10-02 00:00:00'
+ rng = pd.date_range(start, end, freq='7min')
+ ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
+ ts
+
+Here we can see that, when using ``origin`` with its default value (``'start_day'``), the result after ``'2000-10-02 00:00:00'`` are not identical depending on the start of time series:
+
+.. ipython:: python
+
+ ts.resample('17min', origin='start_day').sum()
+ ts[middle:end].resample('17min', origin='start_day').sum()
+
+
+Here we can see that, when setting ``origin`` to ``'epoch'``, the result after ``'2000-10-02 00:00:00'`` are identical depending on the start of time series:
+
+.. ipython:: python
+
+ ts.resample('17min', origin='epoch').sum()
+ ts[middle:end].resample('17min', origin='epoch').sum()
+
+
+If needed you can use a custom timestamp for ``origin``:
+
+.. ipython:: python
+
+ ts.resample('17min', origin='2001-01-01').sum()
+ ts[middle:end].resample('17min', origin=pd.Timestamp('2001-01-01')).sum()
+
+If needed you can just adjust the bins with an ``offset`` Timedelta that would be added to the default ``origin``.
+Those two examples are equivalent for this time series:
+
+.. ipython:: python
+
+ ts.resample('17min', origin='start').sum()
+ ts.resample('17min', offset='23h30min').sum()
+
+
+Note the use of ``'start'`` for ``origin`` on the last example. In that case, ``origin`` will be set to the first value of the timeseries.
.. _timeseries.periods:
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 092bd3345efbc..44797d3296c80 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -152,6 +152,49 @@ For example:
pd.to_datetime(tz_strs, format='%Y-%m-%d %H:%M:%S %z', utc=True)
pd.to_datetime(tz_strs, format='%Y-%m-%d %H:%M:%S %z')
+.. _whatsnew_110.grouper_resample_origin:
+
+Grouper and resample now supports the arguments origin and offset
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:class:`Grouper` and :class:`DataFrame.resample` now supports the arguments ``origin`` and ``offset``. It let the user control the timestamp on which to adjust the grouping. (:issue:`31809`)
+
+The bins of the grouping are adjusted based on the beginning of the day of the time series starting point. This works well with frequencies that are multiples of a day (like `30D`) or that divides a day (like `90s` or `1min`). But it can create inconsistencies with some frequencies that do not meet this criteria. To change this behavior you can now specify a fixed timestamp with the argument ``origin``.
+
+Two arguments are now deprecated (more information in the documentation of :class:`DataFrame.resample`):
+
+- ``base`` should be replaced by ``offset``.
+- ``loffset`` should be replaced by directly adding an offset to the index DataFrame after being resampled.
+
+Small example of the use of ``origin``:
+
+.. ipython:: python
+
+ start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
+ middle = '2000-10-02 00:00:00'
+ rng = pd.date_range(start, end, freq='7min')
+ ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
+ ts
+
+Resample with the default behavior ``'start_day'`` (origin is ``2000-10-01 00:00:00``):
+
+.. ipython:: python
+
+ ts.resample('17min').sum()
+ ts.resample('17min', origin='start_day').sum()
+
+Resample using a fixed origin:
+
+.. ipython:: python
+
+ ts.resample('17min', origin='epoch').sum()
+ ts.resample('17min', origin='2000-01-01').sum()
+
+If needed you can adjust the bins with the argument ``offset`` (a Timedelta) that would be added to the default ``origin``.
+
+For a full example, see: :ref:`timeseries.adjust-the-start-of-the-bins`.
+
+
.. _whatsnew_110.enhancements.other:
Other enhancements
diff --git a/pandas/_typing.py b/pandas/_typing.py
index d225b845970cc..71df27119bd96 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -1,3 +1,4 @@
+from datetime import datetime, timedelta
from pathlib import Path
from typing import (
IO,
@@ -43,6 +44,15 @@
PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"]
Scalar = Union[PythonScalar, PandasScalar]
+# timestamp and timedelta convertible types
+
+TimestampConvertibleTypes = Union[
+ "Timestamp", datetime, np.datetime64, int, np.int64, float, str
+]
+TimedeltaConvertibleTypes = Union[
+ "Timedelta", timedelta, np.timedelta64, int, np.int64, float, str
+]
+
# other
Dtype = Union[
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 792e5a1228fe6..26691c3f1cc0c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -39,6 +39,8 @@
Label,
Level,
Renamer,
+ TimedeltaConvertibleTypes,
+ TimestampConvertibleTypes,
ValueKeyFunc,
)
from pandas.compat import set_function_name
@@ -7760,9 +7762,11 @@ def resample(
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
- base: int = 0,
+ base: Optional[int] = None,
on=None,
level=None,
+ origin: Union[str, TimestampConvertibleTypes] = "start_day",
+ offset: Optional[TimedeltaConvertibleTypes] = None,
) -> "Resampler":
"""
Resample time-series data.
@@ -7797,17 +7801,40 @@ def resample(
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
+
+ .. deprecated:: 1.1.0
+ You should add the loffset to the `df.index` after the resample.
+ See below.
+
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
+
+ .. deprecated:: 1.1.0
+ The new arguments that you should use are 'offset' or 'origin'.
+
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
-
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
+ origin : {'epoch', 'start', 'start_day'}, Timestamp or str, default 'start_day'
+ The timestamp on which to adjust the grouping. The timezone of origin
+ must match the timezone of the index.
+ If a timestamp is not used, these values are also supported:
+
+ - 'epoch': `origin` is 1970-01-01
+ - 'start': `origin` is the first value of the timeseries
+ - 'start_day': `origin` is the first day at midnight of the timeseries
+
+ .. versionadded:: 1.1.0
+
+ offset : Timedelta or str, default is None
+ An offset timedelta added to the origin.
+
+ .. versionadded:: 1.1.0
Returns
-------
@@ -8025,6 +8052,88 @@ def resample(
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
+
+ If you want to adjust the start of the bins based on a fixed timestamp:
+
+ >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
+ >>> rng = pd.date_range(start, end, freq='7min')
+ >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
+ >>> ts
+ 2000-10-01 23:30:00 0
+ 2000-10-01 23:37:00 3
+ 2000-10-01 23:44:00 6
+ 2000-10-01 23:51:00 9
+ 2000-10-01 23:58:00 12
+ 2000-10-02 00:05:00 15
+ 2000-10-02 00:12:00 18
+ 2000-10-02 00:19:00 21
+ 2000-10-02 00:26:00 24
+ Freq: 7T, dtype: int64
+
+ >>> ts.resample('17min').sum()
+ 2000-10-01 23:14:00 0
+ 2000-10-01 23:31:00 9
+ 2000-10-01 23:48:00 21
+ 2000-10-02 00:05:00 54
+ 2000-10-02 00:22:00 24
+ Freq: 17T, dtype: int64
+
+ >>> ts.resample('17min', origin='epoch').sum()
+ 2000-10-01 23:18:00 0
+ 2000-10-01 23:35:00 18
+ 2000-10-01 23:52:00 27
+ 2000-10-02 00:09:00 39
+ 2000-10-02 00:26:00 24
+ Freq: 17T, dtype: int64
+
+ >>> ts.resample('17min', origin='2000-01-01').sum()
+ 2000-10-01 23:24:00 3
+ 2000-10-01 23:41:00 15
+ 2000-10-01 23:58:00 45
+ 2000-10-02 00:15:00 45
+ Freq: 17T, dtype: int64
+
+ If you want to adjust the start of the bins with an `offset` Timedelta, the two
+ following lines are equivalent:
+
+ >>> ts.resample('17min', origin='start').sum()
+ 2000-10-01 23:30:00 9
+ 2000-10-01 23:47:00 21
+ 2000-10-02 00:04:00 54
+ 2000-10-02 00:21:00 24
+ Freq: 17T, dtype: int64
+
+ >>> ts.resample('17min', offset='23h30min').sum()
+ 2000-10-01 23:30:00 9
+ 2000-10-01 23:47:00 21
+ 2000-10-02 00:04:00 54
+ 2000-10-02 00:21:00 24
+ Freq: 17T, dtype: int64
+
+ To replace the use of the deprecated `base` argument, you can now use `offset`,
+ in this example it is equivalent to have `base=2`:
+
+ >>> ts.resample('17min', offset='2min').sum()
+ 2000-10-01 23:16:00 0
+ 2000-10-01 23:33:00 9
+ 2000-10-01 23:50:00 36
+ 2000-10-02 00:07:00 39
+ 2000-10-02 00:24:00 24
+ Freq: 17T, dtype: int64
+
+ To replace the use of the deprecated `loffset` argument:
+
+ >>> from pandas.tseries.frequencies import to_offset
+ >>> loffset = '19min'
+ >>> ts_out = ts.resample('17min').sum()
+ >>> ts_out.index = ts_out.index + to_offset(loffset)
+ >>> ts_out
+ 2000-10-01 23:33:00 0
+ 2000-10-01 23:50:00 9
+ 2000-10-02 00:07:00 21
+ 2000-10-02 00:24:00 54
+ 2000-10-02 00:41:00 24
+ Freq: 17T, dtype: int64
"""
from pandas.core.resample import get_resampler
@@ -8041,6 +8150,8 @@ def resample(
base=base,
key=on,
level=level,
+ origin=origin,
+ offset=offset,
)
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index b92ff1c7c8ca4..c71085cd4918a 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1646,15 +1646,6 @@ def resample(self, rule, *args, **kwargs):
0 2000-01-01 00:00:00 0 1
2000-01-01 00:03:00 0 2
5 2000-01-01 00:03:00 5 1
-
- Add an offset of twenty seconds.
-
- >>> df.groupby('a').resample('3T', loffset='20s').sum()
- a b
- a
- 0 2000-01-01 00:00:20 0 2
- 2000-01-01 00:03:20 0 1
- 5 2000-01-01 00:00:20 5 1
"""
from pandas.core.resample import get_resampler_for_grouping
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 948b4ba27f705..9660fb9c2e1b0 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -2,8 +2,8 @@
Provide user facing operators for doing the split part of the
split-apply-combine paradigm.
"""
-
from typing import Dict, Hashable, List, Optional, Tuple
+import warnings
import numpy as np
@@ -67,9 +67,38 @@ class Grouper:
If grouper is PeriodIndex and `freq` parameter is passed.
base : int, default 0
Only when `freq` parameter is passed.
+ For frequencies that evenly subdivide 1 day, the "origin" of the
+ aggregated intervals. For example, for '5min' frequency, base could
+ range from 0 through 4. Defaults to 0.
+
+ .. deprecated:: 1.1.0
+ The new arguments that you should use are 'offset' or 'origin'.
+
loffset : str, DateOffset, timedelta object
Only when `freq` parameter is passed.
+ .. deprecated:: 1.1.0
+ loffset is only working for ``.resample(...)`` and not for
+ Grouper (:issue:`28302`).
+ However, loffset is also deprecated for ``.resample(...)``
+ See: :class:`DataFrame.resample`
+
+ origin : {'epoch', 'start', 'start_day'}, Timestamp or str, default 'start_day'
+ The timestamp on which to adjust the grouping. The timezone of origin must
+ match the timezone of the index.
+ If a timestamp is not used, these values are also supported:
+
+ - 'epoch': `origin` is 1970-01-01
+ - 'start': `origin` is the first value of the timeseries
+ - 'start_day': `origin` is the first day at midnight of the timeseries
+
+ .. versionadded:: 1.1.0
+
+ offset : Timedelta or str, default is None
+ An offset timedelta added to the origin.
+
+ .. versionadded:: 1.1.0
+
Returns
-------
A specification for a groupby instruction
@@ -123,6 +152,74 @@ class Grouper:
2000-01-02 0.5 15.0
2000-01-09 2.0 30.0
2000-01-16 3.0 40.0
+
+ If you want to adjust the start of the bins based on a fixed timestamp:
+
+ >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
+ >>> rng = pd.date_range(start, end, freq='7min')
+ >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
+ >>> ts
+ 2000-10-01 23:30:00 0
+ 2000-10-01 23:37:00 3
+ 2000-10-01 23:44:00 6
+ 2000-10-01 23:51:00 9
+ 2000-10-01 23:58:00 12
+ 2000-10-02 00:05:00 15
+ 2000-10-02 00:12:00 18
+ 2000-10-02 00:19:00 21
+ 2000-10-02 00:26:00 24
+ Freq: 7T, dtype: int64
+
+ >>> ts.groupby(pd.Grouper(freq='17min')).sum()
+ 2000-10-01 23:14:00 0
+ 2000-10-01 23:31:00 9
+ 2000-10-01 23:48:00 21
+ 2000-10-02 00:05:00 54
+ 2000-10-02 00:22:00 24
+ Freq: 17T, dtype: int64
+
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
+ 2000-10-01 23:18:00 0
+ 2000-10-01 23:35:00 18
+ 2000-10-01 23:52:00 27
+ 2000-10-02 00:09:00 39
+ 2000-10-02 00:26:00 24
+ Freq: 17T, dtype: int64
+
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
+ 2000-10-01 23:24:00 3
+ 2000-10-01 23:41:00 15
+ 2000-10-01 23:58:00 45
+ 2000-10-02 00:15:00 45
+ Freq: 17T, dtype: int64
+
+ If you want to adjust the start of the bins with an `offset` Timedelta, the two
+ following lines are equivalent:
+
+ >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
+ 2000-10-01 23:30:00 9
+ 2000-10-01 23:47:00 21
+ 2000-10-02 00:04:00 54
+ 2000-10-02 00:21:00 24
+ Freq: 17T, dtype: int64
+
+ >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
+ 2000-10-01 23:30:00 9
+ 2000-10-01 23:47:00 21
+ 2000-10-02 00:04:00 54
+ 2000-10-02 00:21:00 24
+ Freq: 17T, dtype: int64
+
+ To replace the use of the deprecated `base` argument, you can now use `offset`,
+ in this example it is equivalent to have `base=2`:
+
+ >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
+ 2000-10-01 23:16:00 0
+ 2000-10-01 23:33:00 9
+ 2000-10-01 23:50:00 36
+ 2000-10-02 00:07:00 39
+ 2000-10-02 00:24:00 24
+ Freq: 17T, dtype: int64
"""
_attributes: Tuple[str, ...] = ("key", "level", "freq", "axis", "sort")
@@ -131,6 +228,43 @@ def __new__(cls, *args, **kwargs):
if kwargs.get("freq") is not None:
from pandas.core.resample import TimeGrouper
+ # Deprecation warning of `base` and `loffset` since v1.1.0:
+ # we are raising the warning here to be able to set the `stacklevel`
+ # properly since we need to raise the `base` and `loffset` deprecation
+ # warning from three different cases:
+ # core/generic.py::NDFrame.resample
+ # core/groupby/groupby.py::GroupBy.resample
+ # core/groupby/grouper.py::Grouper
+ # raising these warnings from TimeGrouper directly would fail the test:
+ # tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base
+
+ # hacky way to set the stacklevel: if cls is TimeGrouper it means
+ # that the call comes from a pandas internal call of resample,
+ # otherwise it comes from pd.Grouper
+ stacklevel = 4 if cls is TimeGrouper else 2
+ if kwargs.get("base", None) is not None:
+ warnings.warn(
+ "'base' in .resample() and in Grouper() is deprecated.\n"
+ "The new arguments that you should use are 'offset' or 'origin'.\n"
+ '\n>>> df.resample(freq="3s", base=2)\n'
+ "\nbecomes:\n"
+ '\n>>> df.resample(freq="3s", offset="2s")\n',
+ FutureWarning,
+ stacklevel=stacklevel,
+ )
+
+ if kwargs.get("loffset", None) is not None:
+ warnings.warn(
+ "'loffset' in .resample() and in Grouper() is deprecated.\n"
+ '\n>>> df.resample(freq="3s", loffset="8H")\n'
+ "\nbecomes:\n"
+ "\n>>> from pandas.tseries.frequencies import to_offset"
+ '\n>>> df = df.resample(freq="3s").mean()'
+ '\n>>> df.index = df.index.to_timestamp() + to_offset("8H")\n',
+ FutureWarning,
+ stacklevel=stacklevel,
+ )
+
cls = TimeGrouper
return super().__new__(cls)
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index b8c45f26301a4..755059bf0adf1 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -1,14 +1,15 @@
import copy
from datetime import timedelta
from textwrap import dedent
-from typing import Dict, no_type_check
+from typing import Dict, Optional, Union, no_type_check
import numpy as np
from pandas._libs import lib
-from pandas._libs.tslibs import NaT, Period, Timestamp
+from pandas._libs.tslibs import NaT, Period, Timedelta, Timestamp
from pandas._libs.tslibs.frequencies import is_subperiod, is_superperiod
from pandas._libs.tslibs.period import IncompatibleFrequency
+from pandas._typing import TimedeltaConvertibleTypes, TimestampConvertibleTypes
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, doc
@@ -66,8 +67,9 @@ class Resampler(_GroupBy, ShallowMixin):
"label",
"convention",
"loffset",
- "base",
"kind",
+ "origin",
+ "offset",
]
def __init__(self, obj, groupby=None, axis=0, kind=None, **kwargs):
@@ -244,7 +246,7 @@ def pipe(self, func, *args, **kwargs):
>>> r = s.resample('2s')
DatetimeIndexResampler [freq=<2 * Seconds>, axis=0, closed=left,
- label=left, convention=start, base=0]
+ label=left, convention=start]
>>> r.agg(np.sum)
2013-01-01 00:00:00 3
@@ -1298,22 +1300,25 @@ class TimeGrouper(Grouper):
"loffset",
"kind",
"convention",
- "base",
+ "origin",
+ "offset",
)
def __init__(
self,
freq="Min",
- closed=None,
- label=None,
+ closed: Optional[str] = None,
+ label: Optional[str] = None,
how="mean",
axis=0,
fill_method=None,
limit=None,
loffset=None,
- kind=None,
- convention=None,
- base=0,
+ kind: Optional[str] = None,
+ convention: Optional[str] = None,
+ base: Optional[int] = None,
+ origin: Union[str, TimestampConvertibleTypes] = "start_day",
+ offset: Optional[TimedeltaConvertibleTypes] = None,
**kwargs,
):
# Check for correctness of the keyword arguments which would
@@ -1347,18 +1352,48 @@ def __init__(
self.convention = convention or "E"
self.convention = self.convention.lower()
- if isinstance(loffset, str):
- loffset = to_offset(loffset)
- self.loffset = loffset
-
self.how = how
self.fill_method = fill_method
self.limit = limit
- self.base = base
+
+ if origin in ("epoch", "start", "start_day"):
+ self.origin = origin
+ else:
+ try:
+ self.origin = Timestamp(origin)
+ except Exception as e:
+ raise ValueError(
+ "'origin' should be equal to 'epoch', 'start', 'start_day' or "
+ f"should be a Timestamp convertible type. Got '{origin}' instead."
+ ) from e
+
+ try:
+ self.offset = Timedelta(offset) if offset is not None else None
+ except Exception as e:
+ raise ValueError(
+ "'offset' should be a Timedelta convertible type. "
+ f"Got '{offset}' instead."
+ ) from e
# always sort time groupers
kwargs["sort"] = True
+ # Handle deprecated arguments since v1.1.0 of `base` and `loffset` (GH #31809)
+ if base is not None and offset is not None:
+ raise ValueError("'offset' and 'base' cannot be present at the same time")
+
+ if base and isinstance(freq, Tick):
+ # this conversion handle the default behavior of base and the
+ # special case of GH #10530. Indeed in case when dealing with
+ # a TimedeltaIndex base was treated as a 'pure' offset even though
+ # the default behavior of base was equivalent of a modulo on
+ # freq_nanos.
+ self.offset = Timedelta(base * freq.nanos // freq.n)
+
+ if isinstance(loffset, str):
+ loffset = to_offset(loffset)
+ self.loffset = loffset
+
super().__init__(freq=freq, axis=axis, **kwargs)
def _get_resampler(self, obj, kind=None):
@@ -1414,7 +1449,12 @@ def _get_time_bins(self, ax):
return binner, [], labels
first, last = _get_timestamp_range_edges(
- ax.min(), ax.max(), self.freq, closed=self.closed, base=self.base
+ ax.min(),
+ ax.max(),
+ self.freq,
+ closed=self.closed,
+ origin=self.origin,
+ offset=self.offset,
)
# GH #12037
# use first/last directly instead of call replace() on them
@@ -1499,11 +1539,11 @@ def _get_time_delta_bins(self, ax):
end_stamps = labels + self.freq
bins = ax.searchsorted(end_stamps, side="left")
- if self.base > 0:
- # GH #10530
- labels += type(self.freq)(self.base)
+ if self.offset:
+ # GH 10530 & 31809
+ labels += self.offset
if self.loffset:
- # GH #33498
+ # GH 33498
labels += self.loffset
return binner, bins, labels
@@ -1556,11 +1596,18 @@ def _get_period_bins(self, ax):
end = ax.max().asfreq(self.freq, how="end")
bin_shift = 0
- # GH 23882
- if self.base:
- # get base adjusted bin edge labels
+ if isinstance(self.freq, Tick):
+ # GH 23882 & 31809: get adjusted bin edge labels with 'origin'
+ # and 'origin' support. This call only makes sense if the freq is a
+ # Tick since offset and origin are only used in those cases.
+ # Not doing this check could create an extra empty bin.
p_start, end = _get_period_range_edges(
- start, end, self.freq, closed=self.closed, base=self.base
+ start,
+ end,
+ self.freq,
+ closed=self.closed,
+ origin=self.origin,
+ offset=self.offset,
)
# Get offset for bin edge (not label edge) adjustment
@@ -1612,7 +1659,9 @@ def _take_new_index(obj, indexer, new_index, axis=0):
raise ValueError("'obj' should be either a Series or a DataFrame")
-def _get_timestamp_range_edges(first, last, offset, closed="left", base=0):
+def _get_timestamp_range_edges(
+ first, last, freq, closed="left", origin="start_day", offset=None
+):
"""
Adjust the `first` Timestamp to the preceding Timestamp that resides on
the provided offset. Adjust the `last` Timestamp to the following
@@ -1626,49 +1675,62 @@ def _get_timestamp_range_edges(first, last, offset, closed="left", base=0):
The beginning Timestamp of the range to be adjusted.
last : pd.Timestamp
The ending Timestamp of the range to be adjusted.
- offset : pd.DateOffset
+ freq : pd.DateOffset
The dateoffset to which the Timestamps will be adjusted.
closed : {'right', 'left'}, default None
Which side of bin interval is closed.
- base : int, default 0
- The "origin" of the adjusted Timestamps.
+ origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day'
+ The timestamp on which to adjust the grouping. The timezone of origin must
+ match the timezone of the index.
+ If a timestamp is not used, these values are also supported:
+
+ - 'epoch': `origin` is 1970-01-01
+ - 'start': `origin` is the first value of the timeseries
+ - 'start_day': `origin` is the first day at midnight of the timeseries
+ offset : pd.Timedelta, default is None
+ An offset timedelta added to the origin.
Returns
-------
A tuple of length 2, containing the adjusted pd.Timestamp objects.
"""
- if isinstance(offset, Tick):
- if isinstance(offset, Day):
+ index_tz = first.tz
+ if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None):
+ raise ValueError("The origin must have the same timezone as the index.")
+
+ if isinstance(freq, Tick):
+ if isinstance(freq, Day):
# _adjust_dates_anchored assumes 'D' means 24H, but first/last
# might contain a DST transition (23H, 24H, or 25H).
# So "pretend" the dates are naive when adjusting the endpoints
- tz = first.tz
first = first.tz_localize(None)
last = last.tz_localize(None)
+ if isinstance(origin, Timestamp):
+ origin = origin.tz_localize(None)
first, last = _adjust_dates_anchored(
- first, last, offset, closed=closed, base=base
+ first, last, freq, closed=closed, origin=origin, offset=offset,
)
- if isinstance(offset, Day):
- first = first.tz_localize(tz)
- last = last.tz_localize(tz)
- return first, last
-
+ if isinstance(freq, Day):
+ first = first.tz_localize(index_tz)
+ last = last.tz_localize(index_tz)
else:
first = first.normalize()
last = last.normalize()
- if closed == "left":
- first = Timestamp(offset.rollback(first))
- else:
- first = Timestamp(first - offset)
+ if closed == "left":
+ first = Timestamp(freq.rollback(first))
+ else:
+ first = Timestamp(first - freq)
- last = Timestamp(last + offset)
+ last = Timestamp(last + freq)
return first, last
-def _get_period_range_edges(first, last, offset, closed="left", base=0):
+def _get_period_range_edges(
+ first, last, freq, closed="left", origin="start_day", offset=None
+):
"""
Adjust the provided `first` and `last` Periods to the respective Period of
the given offset that encompasses them.
@@ -1679,12 +1741,21 @@ def _get_period_range_edges(first, last, offset, closed="left", base=0):
The beginning Period of the range to be adjusted.
last : pd.Period
The ending Period of the range to be adjusted.
- offset : pd.DateOffset
- The dateoffset to which the Periods will be adjusted.
+ freq : pd.DateOffset
+ The freq to which the Periods will be adjusted.
closed : {'right', 'left'}, default None
Which side of bin interval is closed.
- base : int, default 0
- The "origin" of the adjusted Periods.
+ origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day'
+ The timestamp on which to adjust the grouping. The timezone of origin must
+ match the timezone of the index.
+
+ If a timestamp is not used, these values are also supported:
+
+ - 'epoch': `origin` is 1970-01-01
+ - 'start': `origin` is the first value of the timeseries
+ - 'start_day': `origin` is the first day at midnight of the timeseries
+ offset : pd.Timedelta, default is None
+ An offset timedelta added to the origin.
Returns
-------
@@ -1696,52 +1767,58 @@ def _get_period_range_edges(first, last, offset, closed="left", base=0):
# GH 23882
first = first.to_timestamp()
last = last.to_timestamp()
- adjust_first = not offset.is_on_offset(first)
- adjust_last = offset.is_on_offset(last)
+ adjust_first = not freq.is_on_offset(first)
+ adjust_last = freq.is_on_offset(last)
first, last = _get_timestamp_range_edges(
- first, last, offset, closed=closed, base=base
+ first, last, freq, closed=closed, origin=origin, offset=offset,
)
- first = (first + int(adjust_first) * offset).to_period(offset)
- last = (last - int(adjust_last) * offset).to_period(offset)
+ first = (first + int(adjust_first) * freq).to_period(freq)
+ last = (last - int(adjust_last) * freq).to_period(freq)
return first, last
-def _adjust_dates_anchored(first, last, offset, closed="right", base=0):
+def _adjust_dates_anchored(
+ first, last, freq, closed="right", origin="start_day", offset=None
+):
# First and last offsets should be calculated from the start day to fix an
# error cause by resampling across multiple days when a one day period is
- # not a multiple of the frequency.
- #
- # See https://github.com/pandas-dev/pandas/issues/8683
+ # not a multiple of the frequency. See GH 8683
+ # To handle frequencies that are not multiple or divisible by a day we let
+ # the possibility to define a fixed origin timestamp. See GH 31809
+ origin_nanos = 0 # origin == "epoch"
+ if origin == "start_day":
+ origin_nanos = first.normalize().value
+ elif origin == "start":
+ origin_nanos = first.value
+ elif isinstance(origin, Timestamp):
+ origin_nanos = origin.value
+ origin_nanos += offset.value if offset else 0
# GH 10117 & GH 19375. If first and last contain timezone information,
# Perform the calculation in UTC in order to avoid localizing on an
# Ambiguous or Nonexistent time.
first_tzinfo = first.tzinfo
last_tzinfo = last.tzinfo
- start_day_nanos = first.normalize().value
if first_tzinfo is not None:
first = first.tz_convert("UTC")
if last_tzinfo is not None:
last = last.tz_convert("UTC")
- base_nanos = (base % offset.n) * offset.nanos // offset.n
- start_day_nanos += base_nanos
-
- foffset = (first.value - start_day_nanos) % offset.nanos
- loffset = (last.value - start_day_nanos) % offset.nanos
+ foffset = (first.value - origin_nanos) % freq.nanos
+ loffset = (last.value - origin_nanos) % freq.nanos
if closed == "right":
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
- fresult = first.value - offset.nanos
+ fresult = first.value - freq.nanos
if loffset > 0:
# roll forward
- lresult = last.value + (offset.nanos - loffset)
+ lresult = last.value + (freq.nanos - loffset)
else:
# already the end of the road
lresult = last.value
@@ -1754,9 +1831,9 @@ def _adjust_dates_anchored(first, last, offset, closed="right", base=0):
if loffset > 0:
# roll forward
- lresult = last.value + (offset.nanos - loffset)
+ lresult = last.value + (freq.nanos - loffset)
else:
- lresult = last.value + offset.nanos
+ lresult = last.value + freq.nanos
fresult = Timestamp(fresult)
lresult = Timestamp(lresult)
if first_tzinfo is not None:
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index d0559923fec51..485535bec20d0 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -1,4 +1,4 @@
-from datetime import datetime, timedelta
+from datetime import datetime
import numpy as np
import pytest
@@ -9,7 +9,7 @@
from pandas.core.groupby.groupby import DataError
from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
-from pandas.core.indexes.period import PeriodIndex, period_range
+from pandas.core.indexes.period import period_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.resample import _asfreq_compat
@@ -194,29 +194,6 @@ def test_resample_empty_dtypes(index, dtype, resample_method):
pass
-@all_ts
-@pytest.mark.parametrize("arg", ["mean", {"value": "mean"}, ["mean"]])
-def test_resample_loffset_arg_type(frame, create_index, arg):
- # GH 13218, 15002
- df = frame
- expected_means = [df.values[i : i + 2].mean() for i in range(0, len(df.values), 2)]
- expected_index = create_index(df.index[0], periods=len(df.index) / 2, freq="2D")
-
- # loffset coerces PeriodIndex to DateTimeIndex
- if isinstance(expected_index, PeriodIndex):
- expected_index = expected_index.to_timestamp()
-
- expected_index += timedelta(hours=2)
- expected = DataFrame({"value": expected_means}, index=expected_index)
-
- result_agg = df.resample("2D", loffset="2H").agg(arg)
-
- if isinstance(arg, list):
- expected.columns = pd.MultiIndex.from_tuples([("value", "mean")])
-
- tm.assert_frame_equal(result_agg, expected)
-
-
@all_ts
def test_apply_to_empty_series(empty_series_dti):
# GH 14313
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 0c364d37f039e..fe005801aaa53 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -1,4 +1,4 @@
-from datetime import datetime, timedelta
+from datetime import datetime
from functools import partial
from io import StringIO
@@ -18,7 +18,7 @@
from pandas.core.resample import DatetimeIndex, _get_timestamp_range_edges
import pandas.tseries.offsets as offsets
-from pandas.tseries.offsets import BDay, Minute
+from pandas.tseries.offsets import Minute
@pytest.fixture()
@@ -412,70 +412,6 @@ def test_resample_frame_basic():
df.resample("W-WED", kind="period").mean()
-@pytest.mark.parametrize(
- "loffset", [timedelta(minutes=1), "1min", Minute(1), np.timedelta64(1, "m")]
-)
-def test_resample_loffset(loffset):
- # GH 7687
- rng = date_range("1/1/2000 00:00:00", "1/1/2000 00:13:00", freq="min")
- s = Series(np.random.randn(14), index=rng)
-
- result = s.resample("5min", closed="right", label="right", loffset=loffset).mean()
- idx = date_range("1/1/2000", periods=4, freq="5min")
- expected = Series(
- [s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
- index=idx + timedelta(minutes=1),
- )
- tm.assert_series_equal(result, expected)
- assert result.index.freq == Minute(5)
-
- # from daily
- dti = date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D")
- ser = Series(np.random.rand(len(dti)), dti)
-
- # to weekly
- result = ser.resample("w-sun").last()
- business_day_offset = BDay()
- expected = ser.resample("w-sun", loffset=-business_day_offset).last()
- assert result.index[0] - business_day_offset == expected.index[0]
-
-
-def test_resample_loffset_upsample():
- # GH 20744
- rng = date_range("1/1/2000 00:00:00", "1/1/2000 00:13:00", freq="min")
- s = Series(np.random.randn(14), index=rng)
-
- result = s.resample(
- "5min", closed="right", label="right", loffset=timedelta(minutes=1)
- ).ffill()
- idx = date_range("1/1/2000", periods=4, freq="5min")
- expected = Series([s[0], s[5], s[10], s[-1]], index=idx + timedelta(minutes=1))
-
- tm.assert_series_equal(result, expected)
-
-
-def test_resample_loffset_count():
- # GH 12725
- start_time = "1/1/2000 00:00:00"
- rng = date_range(start_time, periods=100, freq="S")
- ts = Series(np.random.randn(len(rng)), index=rng)
-
- result = ts.resample("10S", loffset="1s").count()
-
- expected_index = date_range(start_time, periods=10, freq="10S") + timedelta(
- seconds=1
- )
- expected = Series(10, index=expected_index)
-
- tm.assert_series_equal(result, expected)
-
- # Same issue should apply to .size() since it goes through
- # same code path
- result = ts.resample("10S", loffset="1s").size()
-
- tm.assert_series_equal(result, expected)
-
-
def test_resample_upsample():
# from daily
dti = date_range(
@@ -791,27 +727,177 @@ def test_resample_single_group():
tm.assert_series_equal(result, expected)
-def test_resample_base():
+def test_resample_offset():
+ # GH 31809
+
rng = date_range("1/1/2000 00:00:00", "1/1/2000 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
- resampled = ts.resample("5min", base=2).mean()
+ resampled = ts.resample("5min", offset="2min").mean()
exp_rng = date_range("12/31/1999 23:57:00", "1/1/2000 01:57", freq="5min")
tm.assert_index_equal(resampled.index, exp_rng)
-def test_resample_float_base():
- # GH25161
- dt = pd.to_datetime(
- ["2018-11-26 16:17:43.51", "2018-11-26 16:17:44.51", "2018-11-26 16:17:45.51"]
- )
- s = Series(np.arange(3), index=dt)
+def test_resample_origin():
+ # GH 31809
+ rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
+ ts = Series(np.random.randn(len(rng)), index=rng)
- base = 17 + 43.51 / 60
- result = s.resample("3min", base=base).size()
- expected = Series(
- 3, index=pd.DatetimeIndex(["2018-11-26 16:17:43.51"], freq="3min")
+ exp_rng = date_range("1999-12-31 23:57:00", "2000-01-01 01:57", freq="5min")
+
+ resampled = ts.resample("5min", origin="1999-12-31 23:57:00").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+ offset_timestamp = pd.Timestamp(0) + pd.Timedelta("2min")
+ resampled = ts.resample("5min", origin=offset_timestamp).mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+ resampled = ts.resample("5min", origin="epoch", offset="2m").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+ # origin of '1999-31-12 12:02:00' should be equivalent for this case
+ resampled = ts.resample("5min", origin="1999-12-31 12:02:00").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+ resampled = ts.resample("5min", offset="-3m").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+
+@pytest.mark.parametrize(
+ "origin", ["invalid_value", "epch", "startday", "startt", "2000-30-30", object()],
+)
+def test_resample_bad_origin(origin):
+ rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
+ ts = Series(np.random.randn(len(rng)), index=rng)
+ msg = (
+ "'origin' should be equal to 'epoch', 'start', 'start_day' or "
+ f"should be a Timestamp convertible type. Got '{origin}' instead."
)
+ with pytest.raises(ValueError, match=msg):
+ ts.resample("5min", origin=origin)
+
+
+@pytest.mark.parametrize(
+ "offset", ["invalid_value", "12dayys", "2000-30-30", object()],
+)
+def test_resample_bad_offset(offset):
+ rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
+ ts = Series(np.random.randn(len(rng)), index=rng)
+ msg = f"'offset' should be a Timedelta convertible type. Got '{offset}' instead."
+ with pytest.raises(ValueError, match=msg):
+ ts.resample("5min", offset=offset)
+
+
+def test_resample_origin_prime_freq():
+ # GH 31809
+ start, end = "2000-10-01 23:30:00", "2000-10-02 00:30:00"
+ rng = pd.date_range(start, end, freq="7min")
+ ts = Series(np.random.randn(len(rng)), index=rng)
+
+ exp_rng = date_range("2000-10-01 23:14:00", "2000-10-02 00:22:00", freq="17min")
+ resampled = ts.resample("17min").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+ resampled = ts.resample("17min", origin="start_day").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+ exp_rng = date_range("2000-10-01 23:30:00", "2000-10-02 00:21:00", freq="17min")
+ resampled = ts.resample("17min", origin="start").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+ resampled = ts.resample("17min", offset="23h30min").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+ resampled = ts.resample("17min", origin="start_day", offset="23h30min").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+ exp_rng = date_range("2000-10-01 23:18:00", "2000-10-02 00:26:00", freq="17min")
+ resampled = ts.resample("17min", origin="epoch").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+ exp_rng = date_range("2000-10-01 23:24:00", "2000-10-02 00:15:00", freq="17min")
+ resampled = ts.resample("17min", origin="2000-01-01").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+
+def test_resample_origin_with_tz():
+ # GH 31809
+ msg = "The origin must have the same timezone as the index."
+
+ tz = "Europe/Paris"
+ rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s", tz=tz)
+ ts = Series(np.random.randn(len(rng)), index=rng)
+
+ exp_rng = date_range("1999-12-31 23:57:00", "2000-01-01 01:57", freq="5min", tz=tz)
+ resampled = ts.resample("5min", origin="1999-12-31 23:57:00+00:00").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+ # origin of '1999-31-12 12:02:00+03:00' should be equivalent for this case
+ resampled = ts.resample("5min", origin="1999-12-31 12:02:00+03:00").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+ resampled = ts.resample("5min", origin="epoch", offset="2m").mean()
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+ with pytest.raises(ValueError, match=msg):
+ ts.resample("5min", origin="12/31/1999 23:57:00").mean()
+
+ # if the series is not tz aware, origin should not be tz aware
+ rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
+ ts = Series(np.random.randn(len(rng)), index=rng)
+ with pytest.raises(ValueError, match=msg):
+ ts.resample("5min", origin="12/31/1999 23:57:00+03:00").mean()
+
+
+def test_resample_origin_with_day_freq_on_dst():
+ # GH 31809
+ tz = "America/Chicago"
+
+ def _create_series(values, timestamps, freq="D"):
+ return pd.Series(
+ values,
+ index=pd.DatetimeIndex(
+ [Timestamp(t, tz=tz) for t in timestamps], freq=freq, ambiguous=True
+ ),
+ )
+
+ # test classical behavior of origin in a DST context
+ start = pd.Timestamp("2013-11-02", tz=tz)
+ end = pd.Timestamp("2013-11-03 23:59", tz=tz)
+ rng = pd.date_range(start, end, freq="1h")
+ ts = pd.Series(np.ones(len(rng)), index=rng)
+
+ expected = _create_series([24.0, 25.0], ["2013-11-02", "2013-11-03"])
+ for origin in ["epoch", "start", "start_day", start, None]:
+ result = ts.resample("D", origin=origin).sum()
+ tm.assert_series_equal(result, expected)
+
+ # test complex behavior of origin/offset in a DST context
+ start = pd.Timestamp("2013-11-03", tz=tz)
+ end = pd.Timestamp("2013-11-03 23:59", tz=tz)
+ rng = pd.date_range(start, end, freq="1h")
+ ts = pd.Series(np.ones(len(rng)), index=rng)
+
+ expected_ts = ["2013-11-02 22:00-05:00", "2013-11-03 22:00-06:00"]
+ expected = _create_series([23.0, 2.0], expected_ts)
+ result = ts.resample("D", origin="start", offset="-2H").sum()
+ tm.assert_series_equal(result, expected)
+
+ expected_ts = ["2013-11-02 22:00-05:00", "2013-11-03 21:00-06:00"]
+ expected = _create_series([22.0, 3.0], expected_ts, freq="24H")
+ result = ts.resample("24H", origin="start", offset="-2H").sum()
+ tm.assert_series_equal(result, expected)
+
+ expected_ts = ["2013-11-02 02:00-05:00", "2013-11-03 02:00-06:00"]
+ expected = _create_series([3.0, 22.0], expected_ts)
+ result = ts.resample("D", origin="start", offset="2H").sum()
+ tm.assert_series_equal(result, expected)
+
+ expected_ts = ["2013-11-02 23:00-05:00", "2013-11-03 23:00-06:00"]
+ expected = _create_series([24.0, 1.0], expected_ts)
+ result = ts.resample("D", origin="start", offset="-1H").sum()
+ tm.assert_series_equal(result, expected)
+
+ expected_ts = ["2013-11-02 01:00-05:00", "2013-11-03 01:00:00-0500"]
+ expected = _create_series([1.0, 24.0], expected_ts)
+ result = ts.resample("D", origin="start", offset="1H").sum()
tm.assert_series_equal(result, expected)
@@ -1588,7 +1674,7 @@ def test_resample_equivalent_offsets(n1, freq1, n2, freq2, k):
@pytest.mark.parametrize(
- "first,last,offset,exp_first,exp_last",
+ "first,last,freq,exp_first,exp_last",
[
("19910905", "19920406", "D", "19910905", "19920407"),
("19910905 00:00", "19920406 06:00", "D", "19910905", "19920407"),
@@ -1598,17 +1684,17 @@ def test_resample_equivalent_offsets(n1, freq1, n2, freq2, k):
("1991-08", "1992-04", "M", "19910831", "19920531"),
],
)
-def test_get_timestamp_range_edges(first, last, offset, exp_first, exp_last):
+def test_get_timestamp_range_edges(first, last, freq, exp_first, exp_last):
first = pd.Period(first)
first = first.to_timestamp(first.freq)
last = pd.Period(last)
last = last.to_timestamp(last.freq)
- exp_first = pd.Timestamp(exp_first, freq=offset)
- exp_last = pd.Timestamp(exp_last, freq=offset)
+ exp_first = pd.Timestamp(exp_first, freq=freq)
+ exp_last = pd.Timestamp(exp_last, freq=freq)
- offset = pd.tseries.frequencies.to_offset(offset)
- result = _get_timestamp_range_edges(first, last, offset)
+ freq = pd.tseries.frequencies.to_offset(freq)
+ result = _get_timestamp_range_edges(first, last, freq)
expected = (exp_first, exp_last)
assert result == expected
diff --git a/pandas/tests/resample/test_deprecated.py b/pandas/tests/resample/test_deprecated.py
new file mode 100644
index 0000000000000..8b3adbf08d157
--- /dev/null
+++ b/pandas/tests/resample/test_deprecated.py
@@ -0,0 +1,263 @@
+from datetime import datetime, timedelta
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame, Series
+import pandas._testing as tm
+from pandas.core.indexes.datetimes import date_range
+from pandas.core.indexes.period import PeriodIndex, period_range
+from pandas.core.indexes.timedeltas import timedelta_range
+
+from pandas.tseries.offsets import BDay, Minute
+
+DATE_RANGE = (date_range, "dti", datetime(2005, 1, 1), datetime(2005, 1, 10))
+PERIOD_RANGE = (period_range, "pi", datetime(2005, 1, 1), datetime(2005, 1, 10))
+TIMEDELTA_RANGE = (timedelta_range, "tdi", "1 day", "10 day")
+
+all_ts = pytest.mark.parametrize(
+ "_index_factory,_series_name,_index_start,_index_end",
+ [DATE_RANGE, PERIOD_RANGE, TIMEDELTA_RANGE],
+)
+
+
+@pytest.fixture()
+def _index_factory():
+ return period_range
+
+
+@pytest.fixture
+def create_index(_index_factory):
+ def _create_index(*args, **kwargs):
+ """ return the _index_factory created using the args, kwargs """
+ return _index_factory(*args, **kwargs)
+
+ return _create_index
+
+
+# new test to check that all FutureWarning are triggered
+def test_deprecating_on_loffset_and_base():
+ # GH 31809
+
+ idx = pd.date_range("2001-01-01", periods=4, freq="T")
+ df = pd.DataFrame(data=4 * [range(2)], index=idx, columns=["a", "b"])
+
+ with tm.assert_produces_warning(FutureWarning):
+ pd.Grouper(freq="10s", base=0)
+ with tm.assert_produces_warning(FutureWarning):
+ pd.Grouper(freq="10s", loffset="0s")
+ with tm.assert_produces_warning(FutureWarning):
+ df.groupby("a").resample("3T", base=0).sum()
+ with tm.assert_produces_warning(FutureWarning):
+ df.groupby("a").resample("3T", loffset="0s").sum()
+ with tm.assert_produces_warning(FutureWarning):
+ df.resample("3T", base=0).sum()
+ with tm.assert_produces_warning(FutureWarning):
+ df.resample("3T", loffset="0s").sum()
+ msg = "'offset' and 'base' cannot be present at the same time"
+ with tm.assert_produces_warning(FutureWarning):
+ with pytest.raises(ValueError, match=msg):
+ df.groupby("a").resample("3T", base=0, offset=0).sum()
+
+
+@all_ts
+@pytest.mark.parametrize("arg", ["mean", {"value": "mean"}, ["mean"]])
+def test_resample_loffset_arg_type(frame, create_index, arg):
+ # GH 13218, 15002
+ df = frame
+ expected_means = [df.values[i : i + 2].mean() for i in range(0, len(df.values), 2)]
+ expected_index = create_index(df.index[0], periods=len(df.index) / 2, freq="2D")
+
+ # loffset coerces PeriodIndex to DateTimeIndex
+ if isinstance(expected_index, PeriodIndex):
+ expected_index = expected_index.to_timestamp()
+
+ expected_index += timedelta(hours=2)
+ expected = DataFrame({"value": expected_means}, index=expected_index)
+
+ with tm.assert_produces_warning(FutureWarning):
+ result_agg = df.resample("2D", loffset="2H").agg(arg)
+
+ if isinstance(arg, list):
+ expected.columns = pd.MultiIndex.from_tuples([("value", "mean")])
+
+ tm.assert_frame_equal(result_agg, expected)
+
+
+@pytest.mark.parametrize(
+ "loffset", [timedelta(minutes=1), "1min", Minute(1), np.timedelta64(1, "m")]
+)
+def test_resample_loffset(loffset):
+ # GH 7687
+ rng = date_range("1/1/2000 00:00:00", "1/1/2000 00:13:00", freq="min")
+ s = Series(np.random.randn(14), index=rng)
+
+ with tm.assert_produces_warning(FutureWarning):
+ result = s.resample(
+ "5min", closed="right", label="right", loffset=loffset
+ ).mean()
+ idx = date_range("1/1/2000", periods=4, freq="5min")
+ expected = Series(
+ [s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
+ index=idx + timedelta(minutes=1),
+ )
+ tm.assert_series_equal(result, expected)
+ assert result.index.freq == Minute(5)
+
+ # from daily
+ dti = date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D")
+ ser = Series(np.random.rand(len(dti)), dti)
+
+ # to weekly
+ result = ser.resample("w-sun").last()
+ business_day_offset = BDay()
+ with tm.assert_produces_warning(FutureWarning):
+ expected = ser.resample("w-sun", loffset=-business_day_offset).last()
+ assert result.index[0] - business_day_offset == expected.index[0]
+
+
+def test_resample_loffset_upsample():
+ # GH 20744
+ rng = date_range("1/1/2000 00:00:00", "1/1/2000 00:13:00", freq="min")
+ s = Series(np.random.randn(14), index=rng)
+
+ with tm.assert_produces_warning(FutureWarning):
+ result = s.resample(
+ "5min", closed="right", label="right", loffset=timedelta(minutes=1)
+ ).ffill()
+ idx = date_range("1/1/2000", periods=4, freq="5min")
+ expected = Series([s[0], s[5], s[10], s[-1]], index=idx + timedelta(minutes=1))
+
+ tm.assert_series_equal(result, expected)
+
+
+def test_resample_loffset_count():
+ # GH 12725
+ start_time = "1/1/2000 00:00:00"
+ rng = date_range(start_time, periods=100, freq="S")
+ ts = Series(np.random.randn(len(rng)), index=rng)
+
+ with tm.assert_produces_warning(FutureWarning):
+ result = ts.resample("10S", loffset="1s").count()
+
+ expected_index = date_range(start_time, periods=10, freq="10S") + timedelta(
+ seconds=1
+ )
+ expected = Series(10, index=expected_index)
+
+ tm.assert_series_equal(result, expected)
+
+ # Same issue should apply to .size() since it goes through
+ # same code path
+ with tm.assert_produces_warning(FutureWarning):
+ result = ts.resample("10S", loffset="1s").size()
+
+ tm.assert_series_equal(result, expected)
+
+
+def test_resample_base():
+ rng = date_range("1/1/2000 00:00:00", "1/1/2000 02:00", freq="s")
+ ts = Series(np.random.randn(len(rng)), index=rng)
+
+ with tm.assert_produces_warning(FutureWarning):
+ resampled = ts.resample("5min", base=2).mean()
+ exp_rng = date_range("12/31/1999 23:57:00", "1/1/2000 01:57", freq="5min")
+ tm.assert_index_equal(resampled.index, exp_rng)
+
+
+def test_resample_float_base():
+ # GH25161
+ dt = pd.to_datetime(
+ ["2018-11-26 16:17:43.51", "2018-11-26 16:17:44.51", "2018-11-26 16:17:45.51"]
+ )
+ s = Series(np.arange(3), index=dt)
+
+ base = 17 + 43.51 / 60
+ with tm.assert_produces_warning(FutureWarning):
+ result = s.resample("3min", base=base).size()
+ expected = Series(
+ 3, index=pd.DatetimeIndex(["2018-11-26 16:17:43.51"], freq="3min")
+ )
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("kind", ["period", None, "timestamp"])
+@pytest.mark.parametrize("agg_arg", ["mean", {"value": "mean"}, ["mean"]])
+def test_loffset_returns_datetimeindex(frame, kind, agg_arg):
+ # make sure passing loffset returns DatetimeIndex in all cases
+ # basic method taken from Base.test_resample_loffset_arg_type()
+ df = frame
+ expected_means = [df.values[i : i + 2].mean() for i in range(0, len(df.values), 2)]
+ expected_index = period_range(df.index[0], periods=len(df.index) / 2, freq="2D")
+
+ # loffset coerces PeriodIndex to DateTimeIndex
+ expected_index = expected_index.to_timestamp()
+ expected_index += timedelta(hours=2)
+ expected = DataFrame({"value": expected_means}, index=expected_index)
+
+ with tm.assert_produces_warning(FutureWarning):
+ result_agg = df.resample("2D", loffset="2H", kind=kind).agg(agg_arg)
+ if isinstance(agg_arg, list):
+ expected.columns = pd.MultiIndex.from_tuples([("value", "mean")])
+ tm.assert_frame_equal(result_agg, expected)
+
+
+@pytest.mark.parametrize(
+ "start,end,start_freq,end_freq,base,offset",
+ [
+ ("19910905", "19910909 03:00", "H", "24H", 10, "10H"),
+ ("19910905", "19910909 12:00", "H", "24H", 10, "10H"),
+ ("19910905", "19910909 23:00", "H", "24H", 10, "10H"),
+ ("19910905 10:00", "19910909", "H", "24H", 10, "10H"),
+ ("19910905 10:00", "19910909 10:00", "H", "24H", 10, "10H"),
+ ("19910905", "19910909 10:00", "H", "24H", 10, "10H"),
+ ("19910905 12:00", "19910909", "H", "24H", 10, "10H"),
+ ("19910905 12:00", "19910909 03:00", "H", "24H", 10, "10H"),
+ ("19910905 12:00", "19910909 12:00", "H", "24H", 10, "10H"),
+ ("19910905 12:00", "19910909 12:00", "H", "24H", 34, "34H"),
+ ("19910905 12:00", "19910909 12:00", "H", "17H", 10, "10H"),
+ ("19910905 12:00", "19910909 12:00", "H", "17H", 3, "3H"),
+ ("19910905 12:00", "19910909 1:00", "H", "M", 3, "3H"),
+ ("19910905", "19910913 06:00", "2H", "24H", 10, "10H"),
+ ("19910905", "19910905 01:39", "Min", "5Min", 3, "3Min"),
+ ("19910905", "19910905 03:18", "2Min", "5Min", 3, "3Min"),
+ ],
+)
+def test_resample_with_non_zero_base(start, end, start_freq, end_freq, base, offset):
+ # GH 23882
+ s = pd.Series(0, index=pd.period_range(start, end, freq=start_freq))
+ s = s + np.arange(len(s))
+ with tm.assert_produces_warning(FutureWarning):
+ result = s.resample(end_freq, base=base).mean()
+ result = result.to_timestamp(end_freq)
+
+ # test that the replacement argument 'offset' works
+ result_offset = s.resample(end_freq, offset=offset).mean()
+ result_offset = result_offset.to_timestamp(end_freq)
+ tm.assert_series_equal(result, result_offset)
+
+ # to_timestamp casts 24H -> D
+ result = result.asfreq(end_freq) if end_freq == "24H" else result
+ with tm.assert_produces_warning(FutureWarning):
+ expected = s.to_timestamp().resample(end_freq, base=base).mean()
+ if end_freq == "M":
+ # TODO: is non-tick the relevant characteristic? (GH 33815)
+ expected.index = expected.index._with_freq(None)
+ tm.assert_series_equal(result, expected)
+
+
+def test_resample_base_with_timedeltaindex():
+ # GH 10530
+ rng = timedelta_range(start="0s", periods=25, freq="s")
+ ts = Series(np.random.randn(len(rng)), index=rng)
+
+ with tm.assert_produces_warning(FutureWarning):
+ with_base = ts.resample("2s", base=5).mean()
+ without_base = ts.resample("2s").mean()
+
+ exp_without_base = timedelta_range(start="0s", end="25s", freq="2s")
+ exp_with_base = timedelta_range(start="5s", end="29s", freq="2s")
+
+ tm.assert_index_equal(without_base.index, exp_without_base)
+ tm.assert_index_equal(with_base.index, exp_with_base)
diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py
index ebc75018bb52d..3db9a91118ebc 100644
--- a/pandas/tests/resample/test_period_index.py
+++ b/pandas/tests/resample/test_period_index.py
@@ -1,4 +1,4 @@
-from datetime import datetime, timedelta
+from datetime import datetime
import dateutil
import numpy as np
@@ -719,27 +719,6 @@ def test_evenly_divisible_with_no_extra_bins(self):
result = df.resample("7D").sum()
tm.assert_frame_equal(result, expected)
- @pytest.mark.parametrize("kind", ["period", None, "timestamp"])
- @pytest.mark.parametrize("agg_arg", ["mean", {"value": "mean"}, ["mean"]])
- def test_loffset_returns_datetimeindex(self, frame, kind, agg_arg):
- # make sure passing loffset returns DatetimeIndex in all cases
- # basic method taken from Base.test_resample_loffset_arg_type()
- df = frame
- expected_means = [
- df.values[i : i + 2].mean() for i in range(0, len(df.values), 2)
- ]
- expected_index = period_range(df.index[0], periods=len(df.index) / 2, freq="2D")
-
- # loffset coerces PeriodIndex to DateTimeIndex
- expected_index = expected_index.to_timestamp()
- expected_index += timedelta(hours=2)
- expected = DataFrame({"value": expected_means}, index=expected_index)
-
- result_agg = df.resample("2D", loffset="2H", kind=kind).agg(agg_arg)
- if isinstance(agg_arg, list):
- expected.columns = pd.MultiIndex.from_tuples([("value", "mean")])
- tm.assert_frame_equal(result_agg, expected)
-
@pytest.mark.parametrize("freq, period_mult", [("H", 24), ("12H", 2)])
@pytest.mark.parametrize("kind", [None, "period"])
def test_upsampling_ohlc(self, freq, period_mult, kind):
@@ -815,42 +794,41 @@ def test_resample_with_only_nat(self):
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
- "start,end,start_freq,end_freq,base",
+ "start,end,start_freq,end_freq,offset",
[
- ("19910905", "19910909 03:00", "H", "24H", 10),
- ("19910905", "19910909 12:00", "H", "24H", 10),
- ("19910905", "19910909 23:00", "H", "24H", 10),
- ("19910905 10:00", "19910909", "H", "24H", 10),
- ("19910905 10:00", "19910909 10:00", "H", "24H", 10),
- ("19910905", "19910909 10:00", "H", "24H", 10),
- ("19910905 12:00", "19910909", "H", "24H", 10),
- ("19910905 12:00", "19910909 03:00", "H", "24H", 10),
- ("19910905 12:00", "19910909 12:00", "H", "24H", 10),
- ("19910905 12:00", "19910909 12:00", "H", "24H", 34),
- ("19910905 12:00", "19910909 12:00", "H", "17H", 10),
- ("19910905 12:00", "19910909 12:00", "H", "17H", 3),
- ("19910905 12:00", "19910909 1:00", "H", "M", 3),
- ("19910905", "19910913 06:00", "2H", "24H", 10),
- ("19910905", "19910905 01:39", "Min", "5Min", 3),
- ("19910905", "19910905 03:18", "2Min", "5Min", 3),
+ ("19910905", "19910909 03:00", "H", "24H", "10H"),
+ ("19910905", "19910909 12:00", "H", "24H", "10H"),
+ ("19910905", "19910909 23:00", "H", "24H", "10H"),
+ ("19910905 10:00", "19910909", "H", "24H", "10H"),
+ ("19910905 10:00", "19910909 10:00", "H", "24H", "10H"),
+ ("19910905", "19910909 10:00", "H", "24H", "10H"),
+ ("19910905 12:00", "19910909", "H", "24H", "10H"),
+ ("19910905 12:00", "19910909 03:00", "H", "24H", "10H"),
+ ("19910905 12:00", "19910909 12:00", "H", "24H", "10H"),
+ ("19910905 12:00", "19910909 12:00", "H", "24H", "34H"),
+ ("19910905 12:00", "19910909 12:00", "H", "17H", "10H"),
+ ("19910905 12:00", "19910909 12:00", "H", "17H", "3H"),
+ ("19910905 12:00", "19910909 1:00", "H", "M", "3H"),
+ ("19910905", "19910913 06:00", "2H", "24H", "10H"),
+ ("19910905", "19910905 01:39", "Min", "5Min", "3Min"),
+ ("19910905", "19910905 03:18", "2Min", "5Min", "3Min"),
],
)
- def test_resample_with_non_zero_base(self, start, end, start_freq, end_freq, base):
- # GH 23882
+ def test_resample_with_offset(self, start, end, start_freq, end_freq, offset):
+ # GH 23882 & 31809
s = pd.Series(0, index=pd.period_range(start, end, freq=start_freq))
s = s + np.arange(len(s))
- result = s.resample(end_freq, base=base).mean()
+ result = s.resample(end_freq, offset=offset).mean()
result = result.to_timestamp(end_freq)
- # to_timestamp casts 24H -> D
- result = result.asfreq(end_freq) if end_freq == "24H" else result
- expected = s.to_timestamp().resample(end_freq, base=base).mean()
+
+ expected = s.to_timestamp().resample(end_freq, offset=offset).mean()
if end_freq == "M":
- # TODO: is non-tick the relevant characteristic?
+ # TODO: is non-tick the relevant characteristic? (GH 33815)
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
- "first,last,offset,exp_first,exp_last",
+ "first,last,freq,exp_first,exp_last",
[
("19910905", "19920406", "D", "19910905", "19920406"),
("19910905 00:00", "19920406 06:00", "D", "19910905", "19920406"),
@@ -866,15 +844,15 @@ def test_resample_with_non_zero_base(self, start, end, start_freq, end_freq, bas
("1991-08", "1992-04", "M", "1991-08", "1992-04"),
],
)
- def test_get_period_range_edges(self, first, last, offset, exp_first, exp_last):
+ def test_get_period_range_edges(self, first, last, freq, exp_first, exp_last):
first = pd.Period(first)
last = pd.Period(last)
- exp_first = pd.Period(exp_first, freq=offset)
- exp_last = pd.Period(exp_last, freq=offset)
+ exp_first = pd.Period(exp_first, freq=freq)
+ exp_last = pd.Period(exp_last, freq=freq)
- offset = pd.tseries.frequencies.to_offset(offset)
- result = _get_period_range_edges(first, last, offset)
+ freq = pd.tseries.frequencies.to_offset(freq)
+ result = _get_period_range_edges(first, last, freq)
expected = (exp_first, exp_last)
assert result == expected
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 5044a18e33248..73aa01cff84fa 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -25,7 +25,13 @@ def test_str():
r = test_series.resample("H")
assert (
"DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, "
- "label=left, convention=start, base=0]" in str(r)
+ "label=left, convention=start, origin=start_day]" in str(r)
+ )
+
+ r = test_series.resample("H", origin="2000-01-01")
+ assert (
+ "DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, "
+ "label=left, convention=start, origin=2000-01-01 00:00:00]" in str(r)
)
diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py
index 035698687cfc2..cbf3a778f9ae0 100644
--- a/pandas/tests/resample/test_resampler_grouper.py
+++ b/pandas/tests/resample/test_resampler_grouper.py
@@ -1,6 +1,7 @@
from textwrap import dedent
import numpy as np
+import pytest
from pandas.util._test_decorators import async_mark
@@ -131,6 +132,47 @@ def test_groupby_resample_on_api_with_getitem():
tm.assert_series_equal(result, exp)
+def test_groupby_with_origin():
+ # GH 31809
+
+ freq = "1399min" # prime number that is smaller than 24h
+ start, end = "1/1/2000 00:00:00", "1/31/2000 00:00"
+ middle = "1/15/2000 00:00:00"
+
+ rng = pd.date_range(start, end, freq="1231min") # prime number
+ ts = pd.Series(np.random.randn(len(rng)), index=rng)
+ ts2 = ts[middle:end]
+
+ # proves that grouper without a fixed origin does not work
+ # when dealing with unusual frequencies
+ simple_grouper = pd.Grouper(freq=freq)
+ count_ts = ts.groupby(simple_grouper).agg("count")
+ count_ts = count_ts[middle:end]
+ count_ts2 = ts2.groupby(simple_grouper).agg("count")
+ with pytest.raises(AssertionError):
+ tm.assert_index_equal(count_ts.index, count_ts2.index)
+
+ # test origin on 1970-01-01 00:00:00
+ origin = pd.Timestamp(0)
+ adjusted_grouper = pd.Grouper(freq=freq, origin=origin)
+ adjusted_count_ts = ts.groupby(adjusted_grouper).agg("count")
+ adjusted_count_ts = adjusted_count_ts[middle:end]
+ adjusted_count_ts2 = ts2.groupby(adjusted_grouper).agg("count")
+ tm.assert_series_equal(adjusted_count_ts, adjusted_count_ts2)
+
+ # test origin on 2049-10-18 20:00:00
+ origin_future = pd.Timestamp(0) + pd.Timedelta("1399min") * 30_000
+ adjusted_grouper2 = pd.Grouper(freq=freq, origin=origin_future)
+ adjusted2_count_ts = ts.groupby(adjusted_grouper2).agg("count")
+ adjusted2_count_ts = adjusted2_count_ts[middle:end]
+ adjusted2_count_ts2 = ts2.groupby(adjusted_grouper2).agg("count")
+ tm.assert_series_equal(adjusted2_count_ts, adjusted2_count_ts2)
+
+ # both grouper use an adjusted timestamp that is a multiple of 1399 min
+ # they should be equals even if the adjusted_timestamp is in the future
+ tm.assert_series_equal(adjusted_count_ts, adjusted2_count_ts2)
+
+
def test_nearest():
# GH 17496
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index 49ac5f81f9c02..26e429c47b494 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -166,7 +166,7 @@ def test_aggregate_normal(resample_method):
("prod", dict(min_count=1), np.nan),
],
)
-def test_resample_entirly_nat_window(method, method_args, unit):
+def test_resample_entirely_nat_window(method, method_args, unit):
s = pd.Series([0] * 2 + [np.nan] * 2, index=pd.date_range("2017", periods=4))
result = methodcaller(method, **method_args)(s.resample("2d"))
expected = pd.Series(
@@ -251,7 +251,15 @@ def test_repr():
expected = (
"TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, "
"closed='left', label='left', how='mean', "
- "convention='e', base=0)"
+ "convention='e', origin='start_day')"
+ )
+ assert result == expected
+
+ result = repr(Grouper(key="A", freq="H", origin="2000-01-01"))
+ expected = (
+ "TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, "
+ "closed='left', label='left', how='mean', "
+ "convention='e', origin=Timestamp('2000-01-01 00:00:00'))"
)
assert result == expected
diff --git a/pandas/tests/resample/test_timedelta.py b/pandas/tests/resample/test_timedelta.py
index 1b4a625f078c9..0fbb60c176b30 100644
--- a/pandas/tests/resample/test_timedelta.py
+++ b/pandas/tests/resample/test_timedelta.py
@@ -80,13 +80,12 @@ def test_resample_timedelta_idempotency():
tm.assert_series_equal(result, expected)
-def test_resample_base_with_timedeltaindex():
-
- # GH 10530
+def test_resample_offset_with_timedeltaindex():
+ # GH 10530 & 31809
rng = timedelta_range(start="0s", periods=25, freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
- with_base = ts.resample("2s", base=5).mean()
+ with_base = ts.resample("2s", offset="5s").mean()
without_base = ts.resample("2s").mean()
exp_without_base = timedelta_range(start="0s", end="25s", freq="2s")
| **EDIT:** this PR has changed, now instead of adding `adjust_timestamp` we are adding `origin` and `offset` arguments to `resample` and `pd.Grouper` (see https://github.com/pandas-dev/pandas/pull/31809#issuecomment-583884772)
----
Hello,
This enhancement is an alternative to the `base` argument present in `pd.Grouper` or in the method `resample`. It adds the `adjust_timestamp` argument to change the current behavior of: https://github.com/pandas-dev/pandas/blob/master/pandas/core/resample.py#L1728
- `adjust_timestamp` is the timestamp on which to adjust the grouping. If None is passed, the first day of the time series at midnight is used.
Currently the bins of the grouping are adjusted based on the beginning of the day of the time series starting point. This works well with frequencies that are multiples of a day (like `30D`) or that divides a day (like `90s` or `1min`). But it can create inconsistencies with some frequencies that do not meet this criteria.
Here is a simple snippet from a test that I added that proves that the current behavior can lead to some inconsistencies. Inconsistencies that can be fixed if we use `adjust_timestamp`:
```python
import pandas as pd
import numpy as np
import pandas._testing as tm
import pytest
freq = "1399min" # prime number that is smaller than 24h
start, end = "1/1/2000 00:00:00", "1/31/2000 00:00"
middle = "1/15/2000 00:00:00"
rng = pd.date_range(start, end, freq="1231min") # prime number
ts = pd.Series(np.random.randn(len(rng)), index=rng)
ts2 = ts[middle:end]
# proves that grouper without a fixed adjust_timestamp does not work
# when dealing with unusual frequencies
simple_grouper = pd.Grouper(freq=freq)
count_ts = ts.groupby(simple_grouper).agg("count")
count_ts = count_ts[middle:end]
count_ts2 = ts2.groupby(simple_grouper).agg("count")
with pytest.raises(AssertionError):
tm.assert_index_equal(count_ts.index, count_ts2.index)
# test adjusted_timestamp on 1970-01-01 00:00:00
adjust_timestamp = pd.Timestamp(0)
adjusted_grouper = pd.Grouper(freq=freq, adjust_timestamp=adjust_timestamp)
adjusted_count_ts = ts.groupby(adjusted_grouper).agg("count")
adjusted_count_ts = adjusted_count_ts[middle:end]
adjusted_count_ts2 = ts2.groupby(adjusted_grouper).agg("count")
tm.assert_series_equal(adjusted_count_ts, adjusted_count_ts2)
```
I think this PR is ready to be merged, but I am of course open to any suggestions or criticism. :wink:
For instance, I am not sure if the naming of `adjust_timestamp` is correct. An alternative could be `base_timestamp` or `ref_timestamp` :thinking:?
Cheers,
----
- [X] closes #25226
closes #28302
closes #28675
closes #4197
closes #8521
- [X] Add 'origin' and 'offset' arguments to 'resample' and 'pd.Grouper'
- [X] tests added / passed
- [X] Add deprecation warning for `loffset` and `base` in the code
- [X] Add deprecation warning for `loffset` and `base` in the doc
- [x] Add examples in the doc for `origin` and `offset`
- [x] whatsnew entry (add deprecation notice with `offset` example)
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/31809 | 2020-02-08T19:11:11Z | 2020-05-10T15:52:55Z | 2020-05-10T15:52:54Z | 2020-05-10T16:45:31Z |
CLN: some code cleanups in pandas/_libs/ | diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 5f3d946a1e024..b7f17aee35a44 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -7,13 +7,30 @@ from libc.math cimport fabs, sqrt
import numpy as np
cimport numpy as cnp
-from numpy cimport (ndarray,
- NPY_INT64, NPY_INT32, NPY_INT16, NPY_INT8,
- NPY_UINT64, NPY_UINT32, NPY_UINT16, NPY_UINT8,
- NPY_FLOAT32, NPY_FLOAT64,
- NPY_OBJECT,
- int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t,
- uint32_t, uint64_t, float32_t, float64_t)
+from numpy cimport (
+ NPY_FLOAT32,
+ NPY_FLOAT64,
+ NPY_INT8,
+ NPY_INT16,
+ NPY_INT32,
+ NPY_INT64,
+ NPY_OBJECT,
+ NPY_UINT8,
+ NPY_UINT16,
+ NPY_UINT32,
+ NPY_UINT64,
+ float32_t,
+ float64_t,
+ int8_t,
+ int16_t,
+ int32_t,
+ int64_t,
+ ndarray,
+ uint8_t,
+ uint16_t,
+ uint32_t,
+ uint64_t,
+)
cnp.import_array()
diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index 093c53790cd35..dfa7aa708d681 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -3,13 +3,25 @@ from cython import Py_ssize_t
import numpy as np
cimport numpy as cnp
-from numpy cimport (ndarray,
- int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t,
- uint32_t, uint64_t, float32_t, float64_t)
+from numpy cimport (
+ float32_t,
+ float64_t,
+ int8_t,
+ int16_t,
+ int32_t,
+ int64_t,
+ ndarray,
+ uint8_t,
+ uint16_t,
+ uint32_t,
+ uint64_t,
+)
cnp.import_array()
from pandas._libs.algos import (
- groupsort_indexer, ensure_platform_int, take_1d_int64_int64
+ ensure_platform_int,
+ groupsort_indexer,
+ take_1d_int64_int64,
)
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 1990ef66a6bf1..7a18429f21a18 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -15,18 +15,33 @@ from cpython.iterator cimport PyIter_Check
from cpython.sequence cimport PySequence_Check
from cpython.number cimport PyNumber_Check
-from cpython.datetime cimport (PyDateTime_Check, PyDate_Check,
- PyTime_Check, PyDelta_Check,
- PyDateTime_IMPORT)
+from cpython.datetime cimport (
+ PyDateTime_Check,
+ PyDate_Check,
+ PyTime_Check,
+ PyDelta_Check,
+ PyDateTime_IMPORT,
+)
PyDateTime_IMPORT
import numpy as np
cimport numpy as cnp
-from numpy cimport (ndarray, PyArray_Check, PyArray_GETITEM,
- PyArray_ITER_DATA, PyArray_ITER_NEXT, PyArray_IterNew,
- flatiter, NPY_OBJECT,
- int64_t, float32_t, float64_t,
- uint8_t, uint64_t, complex128_t)
+from numpy cimport (
+ NPY_OBJECT,
+ PyArray_Check,
+ PyArray_GETITEM,
+ PyArray_ITER_DATA,
+ PyArray_ITER_NEXT,
+ PyArray_IterNew,
+ complex128_t,
+ flatiter,
+ float32_t,
+ float64_t,
+ int64_t,
+ ndarray,
+ uint8_t,
+ uint64_t,
+)
cnp.import_array()
cdef extern from "numpy/arrayobject.h":
@@ -60,7 +75,12 @@ from pandas._libs.tslibs.timedeltas cimport convert_to_timedelta64
from pandas._libs.tslibs.timezones cimport get_timezone, tz_compare
from pandas._libs.missing cimport (
- checknull, isnaobj, is_null_datetime64, is_null_timedelta64, is_null_period, C_NA
+ checknull,
+ isnaobj,
+ is_null_datetime64,
+ is_null_timedelta64,
+ is_null_period,
+ C_NA,
)
@@ -246,7 +266,7 @@ def item_from_zerodim(val: object) -> object:
@cython.wraparound(False)
@cython.boundscheck(False)
-def fast_unique_multiple(list arrays, sort: bool=True):
+def fast_unique_multiple(list arrays, sort: bool = True):
"""
Generate a list of unique values from a list of arrays.
@@ -277,6 +297,7 @@ def fast_unique_multiple(list arrays, sort: bool=True):
if val not in table:
table[val] = stub
uniques.append(val)
+
if sort is None:
try:
uniques.sort()
@@ -289,7 +310,7 @@ def fast_unique_multiple(list arrays, sort: bool=True):
@cython.wraparound(False)
@cython.boundscheck(False)
-def fast_unique_multiple_list(lists: list, sort: bool=True) -> list:
+def fast_unique_multiple_list(lists: list, sort: bool = True) -> list:
cdef:
list buf
Py_ssize_t k = len(lists)
diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx
index 4e831081c8e54..e74b5919a4590 100644
--- a/pandas/_libs/reshape.pyx
+++ b/pandas/_libs/reshape.pyx
@@ -1,8 +1,20 @@
import cython
from cython import Py_ssize_t
-from numpy cimport (int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t,
- uint32_t, uint64_t, float32_t, float64_t, ndarray)
+from numpy cimport (
+ float32_t,
+ float64_t,
+ int8_t,
+ int16_t,
+ int32_t,
+ int64_t,
+ ndarray,
+ uint8_t,
+ uint16_t,
+ uint32_t,
+ uint64_t,
+)
+
cimport numpy as cnp
import numpy as np
from pandas._libs.lib cimport c_is_list_like
diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx
index 3a6dd506b2428..4ca053a0ee83a 100644
--- a/pandas/_libs/sparse.pyx
+++ b/pandas/_libs/sparse.pyx
@@ -448,7 +448,7 @@ cdef class BlockIndex(SparseIndex):
ylen = y.blengths
# block may be split, but can't exceed original len / 2 + 1
- max_len = int(min(self.length, y.length) / 2) + 1
+ max_len = min(self.length, y.length) // 2 + 1
out_bloc = np.empty(max_len, dtype=np.int32)
out_blen = np.empty(max_len, dtype=np.int32)
@@ -672,7 +672,7 @@ cdef class BlockUnion(BlockMerge):
ystart = self.ystart
yend = self.yend
- max_len = int(min(self.x.length, self.y.length) / 2) + 1
+ max_len = min(self.x.length, self.y.length) // 2 + 1
out_bloc = np.empty(max_len, dtype=np.int32)
out_blen = np.empty(max_len, dtype=np.int32)
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 9419f0eba39aa..c3a47902cff0f 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1,8 +1,6 @@
from datetime import datetime
-from cpython.object cimport (
- PyObject_RichCompareBool,
- Py_EQ, Py_NE)
+from cpython.object cimport PyObject_RichCompareBool, Py_EQ, Py_NE
from numpy cimport int64_t, import_array, ndarray
import numpy as np
@@ -14,15 +12,25 @@ from libc.string cimport strlen, memset
import cython
-from cpython.datetime cimport (PyDateTime_Check, PyDelta_Check, PyDate_Check,
- PyDateTime_IMPORT)
+from cpython.datetime cimport (
+ PyDate_Check,
+ PyDateTime_Check,
+ PyDateTime_IMPORT,
+ PyDelta_Check,
+)
# import datetime C API
PyDateTime_IMPORT
from pandas._libs.tslibs.np_datetime cimport (
- npy_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct,
- pandas_datetime_to_datetimestruct, check_dts_bounds,
- NPY_DATETIMEUNIT, NPY_FR_D, NPY_FR_us)
+ npy_datetimestruct,
+ dtstruct_to_dt64,
+ dt64_to_dtstruct,
+ pandas_datetime_to_datetimestruct,
+ check_dts_bounds,
+ NPY_DATETIMEUNIT,
+ NPY_FR_D,
+ NPY_FR_us,
+)
cdef extern from "src/datetime/np_datetime.h":
int64_t npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr,
@@ -37,12 +45,15 @@ from pandas._libs.tslibs.timedeltas import Timedelta
from pandas._libs.tslibs.timedeltas cimport delta_to_nanoseconds
cimport pandas._libs.tslibs.ccalendar as ccalendar
-from pandas._libs.tslibs.ccalendar cimport (
- dayofweek, get_day_of_year, is_leapyear)
+from pandas._libs.tslibs.ccalendar cimport dayofweek, get_day_of_year, is_leapyear
from pandas._libs.tslibs.ccalendar import MONTH_NUMBERS
from pandas._libs.tslibs.frequencies cimport (
- get_freq_code, get_base_alias, get_to_timestamp_base, get_freq_str,
- get_rule_month)
+ get_base_alias,
+ get_freq_code,
+ get_freq_str,
+ get_rule_month,
+ get_to_timestamp_base,
+)
from pandas._libs.tslibs.parsing import parse_time_string
from pandas._libs.tslibs.resolution import Resolution
from pandas._libs.tslibs.nattype import nat_strings
@@ -55,7 +66,7 @@ from pandas._libs.tslibs.tzconversion cimport tz_convert_utc_to_tzlocal
cdef:
enum:
- INT32_MIN = -2147483648
+ INT32_MIN = -2_147_483_648
ctypedef struct asfreq_info:
@@ -179,8 +190,7 @@ cdef freq_conv_func get_asfreq_func(int from_freq, int to_freq) nogil:
return <freq_conv_func>asfreq_MtoB
elif from_group == FR_WK:
return <freq_conv_func>asfreq_WtoB
- elif from_group in [FR_DAY, FR_HR, FR_MIN, FR_SEC,
- FR_MS, FR_US, FR_NS]:
+ elif from_group in [FR_DAY, FR_HR, FR_MIN, FR_SEC, FR_MS, FR_US, FR_NS]:
return <freq_conv_func>asfreq_DTtoB
else:
return <freq_conv_func>nofunc
@@ -289,17 +299,15 @@ cdef int64_t DtoB(npy_datetimestruct *dts, int roll_back,
return DtoB_weekday(unix_date)
-cdef inline int64_t upsample_daytime(int64_t ordinal,
- asfreq_info *af_info) nogil:
- if (af_info.is_end):
+cdef inline int64_t upsample_daytime(int64_t ordinal, asfreq_info *af_info) nogil:
+ if af_info.is_end:
return (ordinal + 1) * af_info.intraday_conversion_factor - 1
else:
return ordinal * af_info.intraday_conversion_factor
-cdef inline int64_t downsample_daytime(int64_t ordinal,
- asfreq_info *af_info) nogil:
- return ordinal // (af_info.intraday_conversion_factor)
+cdef inline int64_t downsample_daytime(int64_t ordinal, asfreq_info *af_info) nogil:
+ return ordinal // af_info.intraday_conversion_factor
cdef inline int64_t transform_via_day(int64_t ordinal,
@@ -1464,24 +1472,24 @@ def extract_freq(ndarray[object] values):
cdef:
Py_ssize_t i, n = len(values)
- object p
+ object value
for i in range(n):
- p = values[i]
+ value = values[i]
try:
# now Timestamp / NaT has freq attr
- if is_period_object(p):
- return p.freq
+ if is_period_object(value):
+ return value.freq
except AttributeError:
pass
raise ValueError('freq not specified and cannot be inferred')
-
# -----------------------------------------------------------------------
# period helpers
+
@cython.wraparound(False)
@cython.boundscheck(False)
cdef int64_t[:] localize_dt64arr_to_period(const int64_t[:] stamps,
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index 5508b208de00a..dfe050c7bbff7 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -45,8 +45,7 @@ cdef dict _parse_code_table = {'y': 0,
'u': 22}
-def array_strptime(object[:] values, object fmt,
- bint exact=True, errors='raise'):
+def array_strptime(object[:] values, object fmt, bint exact=True, errors='raise'):
"""
Calculates the datetime structs represented by the passed array of strings
@@ -78,12 +77,9 @@ def array_strptime(object[:] values, object fmt,
if fmt is not None:
if '%W' in fmt or '%U' in fmt:
if '%Y' not in fmt and '%y' not in fmt:
- raise ValueError("Cannot use '%W' or '%U' without "
- "day and year")
- if ('%A' not in fmt and '%a' not in fmt and '%w' not
- in fmt):
- raise ValueError("Cannot use '%W' or '%U' without "
- "day and year")
+ raise ValueError("Cannot use '%W' or '%U' without day and year")
+ if '%A' not in fmt and '%a' not in fmt and '%w' not in fmt:
+ raise ValueError("Cannot use '%W' or '%U' without day and year")
elif '%Z' in fmt and '%z' in fmt:
raise ValueError("Cannot parse both %Z and %z")
@@ -749,6 +745,6 @@ cdef parse_timezone_directive(str z):
microseconds = int(gmtoff_remainder + gmtoff_remainder_padding)
total_minutes = ((hours * 60) + minutes + (seconds // 60) +
- (microseconds // 60000000))
+ (microseconds // 60_000_000))
total_minutes = -total_minutes if z.startswith("-") else total_minutes
return pytz.FixedOffset(total_minutes)
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index 35ee87e714fa8..07947f6677c04 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -196,7 +196,7 @@ cdef int64_t[:] unbox_utcoffsets(object transinfo):
arr = np.empty(sz, dtype='i8')
for i in range(sz):
- arr[i] = int(transinfo[i][0].total_seconds()) * 1000000000
+ arr[i] = int(transinfo[i][0].total_seconds()) * 1_000_000_000
return arr
@@ -217,7 +217,7 @@ cdef object get_dst_info(object tz):
if cache_key is None:
# e.g. pytz.FixedOffset, matplotlib.dates._UTC,
# psycopg2.tz.FixedOffsetTimezone
- num = int(get_utcoffset(tz, None).total_seconds()) * 1000000000
+ num = int(get_utcoffset(tz, None).total_seconds()) * 1_000_000_000
return (np.array([NPY_NAT + 1], dtype=np.int64),
np.array([num], dtype=np.int64),
None)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31808 | 2020-02-08T13:40:45Z | 2020-02-22T15:45:31Z | 2020-02-22T15:45:31Z | 2020-02-29T10:27:10Z |
Backport PR #31748: BUG: Fixed encoding of pd.NA with to_json | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index 94dc1e0c007ca..70aaaa6d0a60d 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -25,8 +25,9 @@ Fixed regressions
Bug fixes
~~~~~~~~~
--
--
+**I/O**
+
+- Using ``pd.NA`` with :meth:`DataFrame.to_json` now correctly outputs a null value instead of an empty object (:issue:`31615`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 5c5b80648aed1..5cb782a0051af 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -54,6 +54,7 @@ static PyTypeObject *cls_dataframe;
static PyTypeObject *cls_series;
static PyTypeObject *cls_index;
static PyTypeObject *cls_nat;
+static PyTypeObject *cls_na;
PyObject *cls_timedelta;
npy_int64 get_nat(void) { return NPY_MIN_INT64; }
@@ -151,6 +152,7 @@ int PdBlock_iterNext(JSOBJ, JSONTypeContext *);
void *initObjToJSON(void) {
PyObject *mod_pandas;
PyObject *mod_nattype;
+ PyObject *mod_natype;
PyObject *mod_decimal = PyImport_ImportModule("decimal");
type_decimal =
(PyTypeObject *)PyObject_GetAttrString(mod_decimal, "Decimal");
@@ -176,6 +178,12 @@ void *initObjToJSON(void) {
Py_DECREF(mod_nattype);
}
+ mod_natype = PyImport_ImportModule("pandas._libs.missing");
+ if (mod_natype) {
+ cls_na = (PyTypeObject *)PyObject_GetAttrString(mod_natype, "NAType");
+ Py_DECREF(mod_natype);
+ }
+
/* Initialise numpy API */
import_array();
// GH 31463
@@ -1909,6 +1917,10 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
"%R (0d array) is not JSON serializable at the moment",
obj);
goto INVALID;
+ } else if (PyObject_TypeCheck(obj, cls_na)) {
+ PRINTMARK();
+ tc->type = JT_NULL;
+ return;
}
ISITERABLE:
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index bb873c71e8a35..4b7936d3159a4 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1640,3 +1640,13 @@ def test_deprecate_numpy_argument_read_json(self):
with tm.assert_produces_warning(FutureWarning):
result = read_json(expected.to_json(), numpy=True)
tm.assert_frame_equal(result, expected)
+
+ def test_json_pandas_na(self):
+ # GH 31615
+ result = pd.DataFrame([[pd.NA]]).to_json()
+ assert result == '{"0":{"0":null}}'
+
+ def test_json_pandas_nulls(self, nulls_fixture):
+ # GH 31615
+ result = pd.DataFrame([[nulls_fixture]]).to_json()
+ assert result == '{"0":{"0":null}}'
| Backport PR #31748 | https://api.github.com/repos/pandas-dev/pandas/pulls/31804 | 2020-02-08T08:15:36Z | 2020-02-08T10:31:37Z | 2020-02-08T10:31:37Z | 2020-02-08T10:31:41Z |
Added pd.NA to nulls_fixture | diff --git a/pandas/conftest.py b/pandas/conftest.py
index 7851cba9cd91a..d19bf85877140 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -441,7 +441,7 @@ def other_closed(request):
return request.param
-@pytest.fixture(params=[None, np.nan, pd.NaT, float("nan"), np.float("NaN")])
+@pytest.fixture(params=[None, np.nan, pd.NaT, float("nan"), np.float("NaN"), pd.NA])
def nulls_fixture(request):
"""
Fixture for each null type in pandas.
diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py
index f9e1a515277d5..3f85ac8c190db 100644
--- a/pandas/tests/arithmetic/test_interval.py
+++ b/pandas/tests/arithmetic/test_interval.py
@@ -129,6 +129,10 @@ def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed):
def test_compare_scalar_na(self, op, array, nulls_fixture):
result = op(array, nulls_fixture)
expected = self.elementwise_comparison(op, array, nulls_fixture)
+
+ if nulls_fixture is pd.NA and array.dtype != pd.IntervalDtype("int"):
+ pytest.xfail("broken for non-integer IntervalArray; see GH 31882")
+
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
@@ -207,6 +211,10 @@ def test_compare_list_like_nan(self, op, array, nulls_fixture):
other = [nulls_fixture] * 4
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
+
+ if nulls_fixture is pd.NA:
+ pytest.xfail("broken for non-integer IntervalArray; see GH 31882")
+
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index 21a4773fa3683..3c9b34a4a1439 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -384,6 +384,10 @@ def test_get_loc_nan(level, nulls_fixture):
key = ["b", "d"]
levels[level] = np.array([0, nulls_fixture], dtype=type(nulls_fixture))
key[level] = nulls_fixture
+
+ if nulls_fixture is pd.NA:
+ pytest.xfail("MultiIndex from pd.NA in np.array broken; see GH 31883")
+
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index c64a70af6f2a4..3b4b6b09dcda5 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -305,6 +305,10 @@ def test_index_ctor_infer_nat_dt_like(self, pos, klass, dtype, ctor, nulls_fixtu
data = [ctor]
data.insert(pos, nulls_fixture)
+ if nulls_fixture is pd.NA:
+ expected = Index([pd.NA, pd.NaT])
+ pytest.xfail("Broken with np.NaT ctor; see GH 31884")
+
result = Index(data)
tm.assert_index_equal(result, expected)
@@ -1964,6 +1968,9 @@ def test_isin_nan_common_float64(self, nulls_fixture):
pytest.skip("pd.NaT not compatible with Float64Index")
# Float64Index overrides isin, so must be checked separately
+ if nulls_fixture is pd.NA:
+ pytest.xfail("Float64Index cannot contain pd.NA")
+
tm.assert_numpy_array_equal(
Float64Index([1.0, nulls_fixture]).isin([np.nan]), np.array([False, True])
)
| @jorisvandenbossche
I think we should do this. Simply xfailed issues up for discussion on how to resolve
| https://api.github.com/repos/pandas-dev/pandas/pulls/31799 | 2020-02-08T00:09:08Z | 2020-02-20T04:51:08Z | 2020-02-20T04:51:08Z | 2023-04-12T20:17:36Z |
CLN: assorted indexing-related cleanups | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e0efa93379bca..c6f77591de033 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2933,12 +2933,12 @@ def __setitem__(self, key, value):
# set column
self._set_item(key, value)
- def _setitem_slice(self, key, value):
+ def _setitem_slice(self, key: slice, value):
# NB: we can't just use self.loc[key] = value because that
# operates on labels and we need to operate positional for
# backwards-compat, xref GH#31469
self._check_setitem_copy()
- self.loc._setitem_with_indexer(key, value)
+ self.iloc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
@@ -2950,7 +2950,7 @@ def _setitem_array(self, key, value):
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
- self.loc._setitem_with_indexer(indexer, value)
+ self.iloc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
@@ -2962,7 +2962,7 @@ def _setitem_array(self, key, value):
key, axis=1, raise_missing=False
)[1]
self._check_setitem_copy()
- self.loc._setitem_with_indexer((slice(None), indexer), value)
+ self.iloc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index e8ad2bef099a1..174f30a68ece0 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3190,17 +3190,16 @@ def is_int(v):
# convert the slice to an indexer here
# if we are mixed and have integers
- try:
- if is_positional and self.is_mixed():
+ if is_positional and self.is_mixed():
+ try:
# Validate start & stop
if start is not None:
self.get_loc(start)
if stop is not None:
self.get_loc(stop)
is_positional = False
- except KeyError:
- if self.inferred_type in ["mixed-integer-float", "integer-na"]:
- raise
+ except KeyError:
+ pass
if is_null_slicer:
indexer = key
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 5c0f893554957..9c113f8310de5 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -26,8 +26,7 @@
is_list_like_indexer,
length_of_indexer,
)
-from pandas.core.indexes.api import Index
-from pandas.core.indexes.base import InvalidIndexError
+from pandas.core.indexes.api import Index, InvalidIndexError
# "null slice"
_NS = slice(None, None)
@@ -592,6 +591,9 @@ def _get_label(self, label, axis: int):
return self.obj._xs(label, axis=axis)
def _get_setitem_indexer(self, key):
+ """
+ Convert a potentially-label-based key into a positional indexer.
+ """
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
@@ -756,7 +758,7 @@ def _setitem_with_indexer(self, indexer, value):
"defined index and a scalar"
)
self.obj[key] = value
- return self.obj
+ return
# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
@@ -766,7 +768,7 @@ def _setitem_with_indexer(self, indexer, value):
)
self._setitem_with_indexer(new_indexer, value)
- return self.obj
+ return
# reindex the axis
# make sure to clear the cache because we are
@@ -789,7 +791,8 @@ def _setitem_with_indexer(self, indexer, value):
indexer, missing = convert_missing_indexer(indexer)
if missing:
- return self._setitem_with_indexer_missing(indexer, value)
+ self._setitem_with_indexer_missing(indexer, value)
+ return
# set
item_labels = self.obj._get_axis(info_axis)
@@ -1015,7 +1018,6 @@ def _setitem_with_indexer_missing(self, indexer, value):
new_values, index=new_index, name=self.obj.name
)._data
self.obj._maybe_update_cacher(clear=True)
- return self.obj
elif self.ndim == 2:
@@ -1039,7 +1041,6 @@ def _setitem_with_indexer_missing(self, indexer, value):
self.obj._data = self.obj.append(value)._data
self.obj._maybe_update_cacher(clear=True)
- return self.obj
def _align_series(self, indexer, ser: ABCSeries, multiindex_indexer: bool = False):
"""
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0786674daf874..c89456d43d1d6 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -862,7 +862,9 @@ def __getitem__(self, key):
return result
except InvalidIndexError:
- pass
+ if not isinstance(self.index, MultiIndex):
+ raise
+
except (KeyError, ValueError):
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
| - _setitem_with_indexer isnt consistent about whether or not it returns anything, make it always-None
- avoid using private loc method from DataFrame
| https://api.github.com/repos/pandas-dev/pandas/pulls/31797 | 2020-02-07T22:26:40Z | 2020-02-10T10:39:25Z | 2020-02-10T10:39:25Z | 2020-02-10T16:20:43Z |
BUG: Avoid casting Int to object in Categorical.from_codes | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index f4bb8c580fb08..8f04032e1ca09 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -28,6 +28,10 @@ Fixed regressions
Bug fixes
~~~~~~~~~
+**Categorical**
+
+- Fixed bug where :meth:`Categorical.from_codes` improperly raised a ``ValueError`` when passed nullable integer codes. (:issue:`31779`)
+
**I/O**
- Using ``pd.NA`` with :meth:`DataFrame.to_json` now correctly outputs a null value instead of an empty object (:issue:`31615`)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index d26ff7490e714..0e04354ae7c89 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -644,7 +644,13 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
)
raise ValueError(msg)
- codes = np.asarray(codes) # #21767
+ if is_extension_array_dtype(codes) and is_integer_dtype(codes):
+ # Avoid the implicit conversion of Int to object
+ if isna(codes).any():
+ raise ValueError("codes cannot contain NA values")
+ codes = codes.to_numpy(dtype=np.int64)
+ else:
+ codes = np.asarray(codes)
if len(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 70e1421c8dcf4..dbd8fd8df67c1 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -560,6 +560,23 @@ def test_from_codes_neither(self):
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([0, 1])
+ def test_from_codes_with_nullable_int(self):
+ codes = pd.array([0, 1], dtype="Int64")
+ categories = ["a", "b"]
+
+ result = Categorical.from_codes(codes, categories=categories)
+ expected = Categorical.from_codes(codes.to_numpy(int), categories=categories)
+
+ tm.assert_categorical_equal(result, expected)
+
+ def test_from_codes_with_nullable_int_na_raises(self):
+ codes = pd.array([0, None], dtype="Int64")
+ categories = ["a", "b"]
+
+ msg = "codes cannot contain NA values"
+ with pytest.raises(ValueError, match=msg):
+ Categorical.from_codes(codes, categories=categories)
+
@pytest.mark.parametrize("dtype", [None, "category"])
def test_from_inferred_categories(self, dtype):
cats = ["a", "b"]
| - [x] closes #31779
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31794 | 2020-02-07T21:37:33Z | 2020-02-12T12:36:51Z | 2020-02-12T12:36:50Z | 2020-02-12T15:34:46Z |
Some code cleanups | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 85a26179276f5..e6685ea200992 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -85,8 +85,6 @@
import pandas.core.missing as missing
from pandas.core.nanops import nanpercentile
-from pandas.io.formats.printing import pprint_thing
-
class Block(PandasObject):
"""
@@ -159,7 +157,8 @@ def _check_ndim(self, values, ndim):
@property
def _holder(self):
- """The array-like that can hold the underlying values.
+ """
+ The array-like that can hold the underlying values.
None for 'Block', overridden by subclasses that don't
use an ndarray.
@@ -284,16 +283,11 @@ def __repr__(self) -> str:
# don't want to print out all of the items here
name = type(self).__name__
if self._is_single_block:
-
result = f"{name}: {len(self)} dtype: {self.dtype}"
-
else:
- shape = " x ".join(pprint_thing(s) for s in self.shape)
- result = (
- f"{name}: {pprint_thing(self.mgr_locs.indexer)}, "
- f"{shape}, dtype: {self.dtype}"
- )
+ shape = " x ".join(str(s) for s in self.shape)
+ result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}"
return result
@@ -319,10 +313,7 @@ def getitem_block(self, slicer, new_mgr_locs=None):
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
- if isinstance(slicer, tuple):
- axis0_slicer = slicer[0]
- else:
- axis0_slicer = slicer
+ axis0_slicer = slicer[0] if isinstance(slicer, tuple) else slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index c75373b82305c..9fd7ff073afdc 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -204,10 +204,9 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
missing_arr.fill(fill_value)
return missing_arr
- if not self.indexers:
- if not self.block._can_consolidate:
- # preserve these for validation in concat_compat
- return self.block.values
+ if (not self.indexers) and (not self.block._can_consolidate):
+ # preserve these for validation in concat_compat
+ return self.block.values
if self.block.is_bool and not self.block.is_categorical:
# External code requested filling/upcasting, bool values must
@@ -372,7 +371,7 @@ def _get_empty_dtype_and_na(join_units):
raise AssertionError(msg)
-def is_uniform_join_units(join_units):
+def is_uniform_join_units(join_units) -> bool:
"""
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 08ae0b02169d4..0ec471cf366fe 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -589,7 +589,7 @@ def comp(s, regex=False):
)
return _compare_or_regex_search(values, s, regex)
- masks = [comp(s, regex) for i, s in enumerate(src_list)]
+ masks = [comp(s, regex) for s in src_list]
result_blocks = []
src_len = len(src_list) - 1
@@ -755,10 +755,7 @@ def copy(self, deep=True):
# hit in e.g. tests.io.json.test_pandas
def copy_func(ax):
- if deep == "all":
- return ax.copy(deep=True)
- else:
- return ax.view()
+ return ax.copy(deep=True) if deep == "all" else ax.view()
new_axes = [copy_func(ax) for ax in self.axes]
else:
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 8bc8470ae7658..f83964abdf9a2 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1492,11 +1492,10 @@ def extract(r):
# level, then our header was too long.
for n in range(len(columns[0])):
if all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
+ header = ",".join(str(x) for x in self.header)
raise ParserError(
- "Passed header=[{header}] are too many rows for this "
- "multi_index of columns".format(
- header=",".join(str(x) for x in self.header)
- )
+ f"Passed header=[{header}] are too many rows "
+ "for this multi_index of columns"
)
# Clean the column names (if we have an index_col).
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index c1e12887b0150..1988b86e6db91 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3085,9 +3085,8 @@ def write(self, obj, **kwargs):
self.attrs.ndim = data.ndim
for i, ax in enumerate(data.axes):
- if i == 0:
- if not ax.is_unique:
- raise ValueError("Columns index has to be unique for fixed format")
+ if i == 0 and (not ax.is_unique):
+ raise ValueError("Columns index has to be unique for fixed format")
self.write_index(f"axis{i}", ax)
# Supporting mixed-type DataFrame objects...nontrivial
@@ -4216,7 +4215,7 @@ def write_data(self, chunksize: Optional[int], dropna: bool = False):
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
- chunks = int(nrows / chunksize) + 1
+ chunks = nrows // chunksize + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31792 | 2020-02-07T20:50:04Z | 2020-02-12T16:09:01Z | 2020-02-12T16:09:01Z | 2020-02-29T10:32:22Z |
CI: fix feather test | diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index d7a21b27308e8..404f5a477187b 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -141,24 +141,7 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext):
pytest.importorskip(module)
path = os.path.join(HERE, "data", "does_not_exist." + fn_ext)
- msg1 = r"File (b')?.+does_not_exist\.{}'? does not exist".format(fn_ext)
- msg2 = fr"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
- msg3 = "Expected object or value"
- msg4 = "path_or_buf needs to be a string file path or file-like"
- msg5 = (
- fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
- fr"'.+does_not_exist\.{fn_ext}'"
- )
- msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
- msg7 = (
- fr"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
- )
- msg8 = fr"Failed to open local file.+does_not_exist\.{fn_ext}.?, error: .*"
-
- with pytest.raises(
- error_class,
- match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
- ):
+ with tm.external_error_raised(error_class):
reader(path)
@pytest.mark.parametrize(
@@ -184,24 +167,7 @@ def test_read_expands_user_home_dir(
path = os.path.join("~", "does_not_exist." + fn_ext)
monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))
- msg1 = fr"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
- msg2 = fr"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
- msg3 = "Unexpected character found when decoding 'false'"
- msg4 = "path_or_buf needs to be a string file path or file-like"
- msg5 = (
- fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
- fr"'.+does_not_exist\.{fn_ext}'"
- )
- msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
- msg7 = (
- fr"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
- )
- msg8 = fr"Failed to open local file.+does_not_exist\.{fn_ext}.?, error: .*"
-
- with pytest.raises(
- error_class,
- match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
- ):
+ with tm.external_error_raised(error_class):
reader(path)
@pytest.mark.parametrize(
| https://api.github.com/repos/pandas-dev/pandas/pulls/31791 | 2020-02-07T20:44:58Z | 2020-02-08T00:13:10Z | 2020-02-08T00:13:10Z | 2020-03-12T13:31:17Z | |
BUG: Too aggressive typing in NDFrame.align | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index 70aaaa6d0a60d..b055b44274bd8 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -16,6 +16,7 @@ Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression in :meth:`DataFrame.to_excel` when ``columns`` kwarg is passed (:issue:`31677`)
+- Fixed regression in :meth:`Series.align` when ``other`` is a DataFrame and ``method`` is not None (:issue:`31785`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 313d40b575629..35a6643a80a25 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8360,9 +8360,7 @@ def _align_frame(
left = self._ensure_type(
left.fillna(method=method, axis=fill_axis, limit=limit)
)
- right = self._ensure_type(
- right.fillna(method=method, axis=fill_axis, limit=limit)
- )
+ right = right.fillna(method=method, axis=fill_axis, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py
index dc8b91de3d09b..05bd967903e9d 100644
--- a/pandas/tests/series/indexing/test_alter_index.py
+++ b/pandas/tests/series/indexing/test_alter_index.py
@@ -153,6 +153,17 @@ def test_align_multiindex():
tm.assert_series_equal(expr, res2l)
+@pytest.mark.parametrize("method", ["backfill", "bfill", "pad", "ffill", None])
+def test_align_method(method):
+ # GH31788
+ ser = pd.Series(range(3), index=range(3))
+ df = pd.DataFrame(0.0, index=range(3), columns=range(3))
+
+ result_ser, result_df = ser.align(df, method=method)
+ tm.assert_series_equal(result_ser, ser)
+ tm.assert_frame_equal(result_df, df)
+
+
def test_reindex(datetime_series, string_series):
identity = string_series.reindex(string_series.index)
| - [x] closes #31785
The type checking was too aggressive. ``right`` has type ``Any``, so the wrapping in ``_ensure_type`` should not be done. | https://api.github.com/repos/pandas-dev/pandas/pulls/31788 | 2020-02-07T18:50:27Z | 2020-02-09T17:12:59Z | 2020-02-09T17:12:59Z | 2020-02-12T21:49:41Z |
REF: remove iloc case from _convert_slice_indexer | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index e8ad2bef099a1..c094996f69419 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3142,6 +3142,15 @@ def _convert_scalar_indexer(self, key, kind: str_t):
return key
+ def _validate_positional_slice(self, key: slice):
+ """
+ For positional indexing, a slice must have either int or None
+ for each of start, stop, and step.
+ """
+ self._validate_indexer("positional", key.start, "iloc")
+ self._validate_indexer("positional", key.stop, "iloc")
+ self._validate_indexer("positional", key.step, "iloc")
+
def _convert_slice_indexer(self, key: slice, kind=None):
"""
Convert a slice indexer.
@@ -3152,16 +3161,9 @@ def _convert_slice_indexer(self, key: slice, kind=None):
Parameters
----------
key : label of the slice bound
- kind : {'loc', 'getitem', 'iloc'} or None
+ kind : {'loc', 'getitem'} or None
"""
- assert kind in ["loc", "getitem", "iloc", None]
-
- # validate iloc
- if kind == "iloc":
- self._validate_indexer("positional", key.start, "iloc")
- self._validate_indexer("positional", key.stop, "iloc")
- self._validate_indexer("positional", key.step, "iloc")
- return key
+ assert kind in ["loc", "getitem", None], kind
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index d67c40a78d807..f09713409c6cf 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -394,10 +394,9 @@ def _convert_scalar_indexer(self, key, kind: str):
@Appender(Index._convert_slice_indexer.__doc__)
def _convert_slice_indexer(self, key: slice, kind=None):
+ assert kind in ["loc", "getitem", None]
- if kind == "iloc":
- return super()._convert_slice_indexer(key, kind=kind)
-
+ # We always treat __getitem__ slicing as label-based
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step, kind=kind)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 5c0f893554957..70092c70a76ad 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1685,7 +1685,7 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(
- slice_obj.start, slice_obj.stop, slice_obj.step, kind=self.name
+ slice_obj.start, slice_obj.stop, slice_obj.step, kind="loc"
)
if isinstance(indexer, slice):
@@ -2035,8 +2035,8 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
- indexer = labels._convert_slice_indexer(slice_obj, kind="iloc")
- return self.obj._slice(indexer, axis=axis, kind="iloc")
+ labels._validate_positional_slice(slice_obj)
+ return self.obj._slice(slice_obj, axis=axis, kind="iloc")
def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
"""
@@ -2046,7 +2046,8 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
# make need to convert a float key
if isinstance(key, slice):
- return labels._convert_slice_indexer(key, kind="iloc")
+ labels._validate_positional_slice(key)
+ return key
elif is_float(key):
labels._validate_indexer("positional", key, "iloc")
diff --git a/pandas/core/series.py b/pandas/core/series.py
index c54331f867a9c..100500264be73 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -842,7 +842,10 @@ def _ixs(self, i: int, axis: int = 0):
def _slice(self, slobj: slice, axis: int = 0, kind: str = "getitem") -> "Series":
assert kind in ["getitem", "iloc"]
- slobj = self.index._convert_slice_indexer(slobj, kind=kind)
+ if kind == "getitem":
+ # If called from getitem, we need to determine whether
+ # this slice is positional or label-based.
+ slobj = self.index._convert_slice_indexer(slobj, kind="getitem")
return self._get_values(slobj)
def __getitem__(self, key):
@@ -884,7 +887,7 @@ def __getitem__(self, key):
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, slice):
- return self._slice(key)
+ return self._slice(key, kind="getitem")
elif isinstance(key, ABCDataFrame):
raise TypeError(
"Indexing a Series with DataFrame is not "
| _convert_slice_indexer is turning out to be one of the stickier methods to figure out, xref #31658.
The case with kind=None is only called from one place in core.indexing, and that is not reached in the tests. I'd like to either a) figure out a non-None kind to pass to it, or b) determine that it can never be reached so can be removed. Any thoughts on this are very welcome. | https://api.github.com/repos/pandas-dev/pandas/pulls/31786 | 2020-02-07T18:12:27Z | 2020-02-09T17:06:36Z | 2020-02-09T17:06:36Z | 2020-02-10T20:10:01Z |
BUG: fix StringArray/PandasArray setitem with slice | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index f4bb8c580fb08..6d91ba4e43ac1 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -32,6 +32,10 @@ Bug fixes
- Using ``pd.NA`` with :meth:`DataFrame.to_json` now correctly outputs a null value instead of an empty object (:issue:`31615`)
+**Experimental dtypes**
+
+- Fixed bug in setting values using a slice indexer with string dtype (:issue:`31772`)
+
.. ---------------------------------------------------------------------------
.. _whatsnew_102.contributors:
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index e573fe661106e..0e64967ce93a6 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -263,12 +263,8 @@ def __setitem__(self, key, value) -> None:
value = extract_array(value, extract_numpy=True)
key = check_array_indexer(self, key)
- scalar_key = lib.is_scalar(key)
scalar_value = lib.is_scalar(value)
- if not scalar_key and scalar_value:
- key = np.asarray(key)
-
if not scalar_value:
value = np.asarray(value, dtype=self._ndarray.dtype)
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index e0ca603aaa0ed..590bcd586900a 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -173,6 +173,29 @@ def test_setitem_tuple_index(self, data):
s[(0, 1)] = data[1]
self.assert_series_equal(s, expected)
+ def test_setitem_slice(self, data, box_in_series):
+ arr = data[:5].copy()
+ expected = data.take([0, 0, 0, 3, 4])
+ if box_in_series:
+ arr = pd.Series(arr)
+ expected = pd.Series(expected)
+
+ arr[:3] = data[0]
+ self.assert_equal(arr, expected)
+
+ def test_setitem_loc_iloc_slice(self, data):
+ arr = data[:5].copy()
+ s = pd.Series(arr, index=["a", "b", "c", "d", "e"])
+ expected = pd.Series(data.take([0, 0, 0, 3, 4]), index=s.index)
+
+ result = s.copy()
+ result.iloc[:3] = data[0]
+ self.assert_equal(result, expected)
+
+ result = s.copy()
+ result.loc[:"c"] = data[0]
+ self.assert_equal(result, expected)
+
def test_setitem_slice_mismatch_length_raises(self, data):
arr = data[:5]
with pytest.raises(ValueError):
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 8a820c8746857..76573242a2506 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -396,6 +396,14 @@ def test_setitem_scalar_key_sequence_raise(self, data):
# Failed: DID NOT RAISE <class 'ValueError'>
super().test_setitem_scalar_key_sequence_raise(data)
+ @skip_nested
+ def test_setitem_slice(self, data, box_in_series):
+ super().test_setitem_slice(data, box_in_series)
+
+ @skip_nested
+ def test_setitem_loc_iloc_slice(self, data):
+ super().test_setitem_loc_iloc_slice(data)
+
@skip_nested
class TestParsing(BaseNumPyTests, base.BaseParsingTests):
| Closes #31772 | https://api.github.com/repos/pandas-dev/pandas/pulls/31773 | 2020-02-07T12:03:45Z | 2020-02-12T12:38:27Z | 2020-02-12T12:38:27Z | 2020-02-12T13:14:26Z |
ERR: improve error message for invalid indexer | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index e431d0bcf7e9b..e8ad2bef099a1 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3158,9 +3158,9 @@ def _convert_slice_indexer(self, key: slice, kind=None):
# validate iloc
if kind == "iloc":
- self._validate_indexer("slice", key.start, "iloc")
- self._validate_indexer("slice", key.stop, "iloc")
- self._validate_indexer("slice", key.step, "iloc")
+ self._validate_indexer("positional", key.start, "iloc")
+ self._validate_indexer("positional", key.stop, "iloc")
+ self._validate_indexer("positional", key.step, "iloc")
return key
# potentially cast the bounds to integers
@@ -3285,8 +3285,8 @@ def _invalid_indexer(self, form: str_t, key):
Consistent invalid indexer message.
"""
raise TypeError(
- f"cannot do {form} indexing on {type(self)} with these "
- f"indexers [{key}] of {type(key)}"
+ f"cannot do {form} indexing on {type(self).__name__} with these "
+ f"indexers [{key}] of type {type(key).__name__}"
)
# --------------------------------------------------------------------
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index b143ff0aa9c02..d06d0d499ef47 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -406,9 +406,9 @@ def _convert_scalar_indexer(self, key, kind: str):
is_int = is_integer(key)
is_flt = is_float(key)
if kind == "loc" and (is_int or is_flt):
- self._invalid_indexer("index", key)
+ self._invalid_indexer("label", key)
elif kind == "getitem" and is_flt:
- self._invalid_indexer("index", key)
+ self._invalid_indexer("label", key)
return super()._convert_scalar_indexer(key, kind=kind)
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index ca4d1ff067f3d..6fc8c0e9ad459 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -1050,9 +1050,8 @@ def test_getitem_setitem_float_labels(self):
# positional slicing only via iloc!
msg = (
- "cannot do slice indexing on "
- r"<class 'pandas\.core\.indexes\.numeric\.Float64Index'> with "
- r"these indexers \[1.0\] of <class 'float'>"
+ "cannot do positional indexing on Float64Index with "
+ r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 7b1a9d8ff6ae3..5f4c78449f71d 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1860,9 +1860,8 @@ def check(df):
# No NaN found -> error
if len(indexer) == 0:
msg = (
- "cannot do label indexing on "
- r"<class 'pandas\.core\.indexes\.range\.RangeIndex'> "
- r"with these indexers \[nan\] of <class 'float'>"
+ "cannot do label indexing on RangeIndex "
+ r"with these indexers \[nan\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.loc[:, np.nan]
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 8c8dece53277e..da935b1c911d0 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -83,8 +83,8 @@ def test_loc_scalar(self):
df.loc["d", "C"] = 10
msg = (
- r"cannot do label indexing on <class 'pandas\.core\.indexes\.category"
- r"\.CategoricalIndex'> with these indexers \[1\] of <class 'int'>"
+ "cannot do label indexing on CategoricalIndex with these "
+ r"indexers \[1\] of type int"
)
with pytest.raises(TypeError, match=msg):
df.loc[1]
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 199d9e1013e23..8bb88cd9fd63a 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -54,7 +54,7 @@ def test_scalar_error(self, index_func):
msg = (
"cannot do positional indexing on {klass} with these "
- r"indexers \[3\.0\] of {kind}".format(klass=type(i), kind=str(float))
+ r"indexers \[3\.0\] of type float".format(klass=type(i).__name__)
)
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
@@ -92,11 +92,11 @@ def test_scalar_non_numeric(self):
else:
error = TypeError
msg = (
- r"cannot do (label|index|positional) indexing "
+ r"cannot do (label|positional) indexing "
r"on {klass} with these indexers \[3\.0\] of "
- r"{kind}|"
+ r"type float|"
"Cannot index by location index with a "
- "non-integer key".format(klass=type(i), kind=str(float))
+ "non-integer key".format(klass=type(i).__name__)
)
with pytest.raises(error, match=msg):
idxr(s)[3.0]
@@ -113,9 +113,9 @@ def test_scalar_non_numeric(self):
else:
error = TypeError
msg = (
- r"cannot do (label|index) indexing "
+ r"cannot do label indexing "
r"on {klass} with these indexers \[3\.0\] of "
- r"{kind}".format(klass=type(i), kind=str(float))
+ r"type float".format(klass=type(i).__name__)
)
with pytest.raises(error, match=msg):
s.loc[3.0]
@@ -125,9 +125,9 @@ def test_scalar_non_numeric(self):
# setting with a float fails with iloc
msg = (
- r"cannot do (label|index|positional) indexing "
+ r"cannot do (label|positional) indexing "
r"on {klass} with these indexers \[3\.0\] of "
- r"{kind}".format(klass=type(i), kind=str(float))
+ r"type float".format(klass=type(i).__name__)
)
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
@@ -162,9 +162,9 @@ def test_scalar_non_numeric(self):
s = Series(np.arange(len(i)), index=i)
s[3]
msg = (
- r"cannot do (label|index) indexing "
+ r"cannot do label indexing "
r"on {klass} with these indexers \[3\.0\] of "
- r"{kind}".format(klass=type(i), kind=str(float))
+ r"type float".format(klass=type(i).__name__)
)
with pytest.raises(TypeError, match=msg):
s[3.0]
@@ -181,9 +181,9 @@ def test_scalar_with_mixed(self):
msg = (
r"cannot do label indexing "
r"on {klass} with these indexers \[1\.0\] of "
- r"{kind}|"
+ r"type float|"
"Cannot index by location index with a non-integer key".format(
- klass=str(Index), kind=str(float)
+ klass=Index.__name__
)
)
with pytest.raises(TypeError, match=msg):
@@ -203,7 +203,7 @@ def test_scalar_with_mixed(self):
msg = (
r"cannot do label indexing "
r"on {klass} with these indexers \[1\.0\] of "
- r"{kind}".format(klass=str(Index), kind=str(float))
+ r"type float".format(klass=Index.__name__)
)
with pytest.raises(TypeError, match=msg):
idxr(s3)[1.0]
@@ -317,7 +317,7 @@ def test_scalar_float(self):
msg = (
r"cannot do positional indexing "
r"on {klass} with these indexers \[3\.0\] of "
- r"{kind}".format(klass=str(Float64Index), kind=str(float))
+ r"type float".format(klass=Float64Index.__name__)
)
with pytest.raises(TypeError, match=msg):
s2.iloc[3.0] = 0
@@ -346,9 +346,9 @@ def test_slice_non_numeric(self):
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
msg = (
- "cannot do slice indexing "
+ "cannot do positional indexing "
r"on {klass} with these indexers \[(3|4)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s.iloc[l]
@@ -356,14 +356,10 @@ def test_slice_non_numeric(self):
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
- "cannot do slice indexing "
+ "cannot do (slice|positional) indexing "
r"on {klass} with these indexers "
r"\[(3|4)(\.0)?\] "
- r"of ({kind_float}|{kind_int})".format(
- klass=type(index),
- kind_float=str(float),
- kind_int=str(int),
- )
+ r"of type (float|int)".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
@@ -372,23 +368,19 @@ def test_slice_non_numeric(self):
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
msg = (
- "cannot do slice indexing "
+ "cannot do positional indexing "
r"on {klass} with these indexers \[(3|4)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s.iloc[l] = 0
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
- "cannot do slice indexing "
+ "cannot do (slice|positional) indexing "
r"on {klass} with these indexers "
r"\[(3|4)(\.0)?\] "
- r"of ({kind_float}|{kind_int})".format(
- klass=type(index),
- kind_float=str(float),
- kind_int=str(int),
- )
+ r"of type (float|int)".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l] = 0
@@ -428,7 +420,7 @@ def test_slice_integer(self):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[(3|4)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -452,7 +444,7 @@ def test_slice_integer(self):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[-6\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[slice(-6.0, 6.0)]
@@ -478,7 +470,7 @@ def test_slice_integer(self):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[(2|3)\.5\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -496,7 +488,7 @@ def test_slice_integer(self):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[(3|4)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l] = 0
@@ -517,9 +509,9 @@ def test_integer_positional_indexing(self):
klass = RangeIndex
msg = (
- "cannot do slice indexing "
+ "cannot do (slice|positional) indexing "
r"on {klass} with these indexers \[(2|4)\.0\] of "
- "{kind}".format(klass=str(klass), kind=str(float))
+ "type float".format(klass=klass.__name__)
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
@@ -544,7 +536,7 @@ def f(idxr):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[(0|1)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -559,7 +551,7 @@ def f(idxr):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[-10\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[slice(-10.0, 10.0)]
@@ -578,7 +570,7 @@ def f(idxr):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[0\.5\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -595,7 +587,7 @@ def f(idxr):
msg = (
"cannot do slice indexing "
r"on {klass} with these indexers \[(3|4)\.0\] of "
- "{kind}".format(klass=type(index), kind=str(float))
+ "type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
s[l] = 0
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index 312a0c6531cfb..3622b12b853a4 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -140,8 +140,8 @@ def test_series_at_raises_type_error(self):
assert result == 1
msg = (
- "cannot do label indexing on <class 'pandas.core.indexes.base.Index'> "
- r"with these indexers \[0\] of <class 'int'>"
+ "cannot do label indexing on Index "
+ r"with these indexers \[0\] of type int"
)
with pytest.raises(TypeError, match=msg):
ser.at[0]
@@ -157,8 +157,8 @@ def test_frame_raises_type_error(self):
assert result == 1
msg = (
- "cannot do label indexing on <class 'pandas.core.indexes.base.Index'> "
- r"with these indexers \[0\] of <class 'int'>"
+ "cannot do label indexing on Index "
+ r"with these indexers \[0\] of type int"
)
with pytest.raises(TypeError, match=msg):
df.at["a", 0]
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index 0af574bc39d83..7e73e6366438b 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -128,9 +128,8 @@ def test_setitem_float_labels():
def test_slice_float_get_set(datetime_series):
msg = (
- r"cannot do slice indexing on <class 'pandas\.core\.indexes"
- r"\.datetimes\.DatetimeIndex'> with these indexers \[{key}\] "
- r"of <class 'float'>"
+ "cannot do slice indexing on DatetimeIndex with these indexers "
+ r"\[{key}\] of type float"
)
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
datetime_series[4.0:10.0]
| Convert the long repr of the Index into just the name. So something like
```
cannot do slice indexing on <class 'pandas.core.indexes.datetimes.DatetimeIndex'> with these indexers [key] of <class 'float'>
```
becomes
```
cannot do slice indexing on DatetimeIndex with these indexers [key] of type float"
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/31769 | 2020-02-07T08:47:08Z | 2020-02-08T10:26:59Z | 2020-02-08T10:26:59Z | 2020-02-08T10:27:06Z |
CLN: trim unreachable indexing code | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e0efa93379bca..c67679b5ecc41 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3011,17 +3011,12 @@ def _set_value(self, index, col, value, takeable: bool = False):
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
-
- Returns
- -------
- DataFrame
- If label pair is contained, will be reference to calling DataFrame,
- otherwise a new object.
"""
try:
if takeable is True:
series = self._iget_item_cache(col)
- return series._set_value(index, value, takeable=True)
+ series._set_value(index, value, takeable=True)
+ return
series = self._get_item_cache(col)
engine = self.index._engine
@@ -3031,7 +3026,6 @@ def _set_value(self, index, col, value, takeable: bool = False):
series._values[loc] = value
# Note: trying to use series._set_value breaks tests in
# tests.frame.indexing.test_indexing and tests.indexing.test_partial
- return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
@@ -3040,8 +3034,6 @@ def _set_value(self, index, col, value, takeable: bool = False):
self.loc[index, col] = value
self._item_cache.pop(col, None)
- return self
-
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, that we can create one from the
diff --git a/pandas/core/series.py b/pandas/core/series.py
index dd4c1bce5d64c..75ac48fa81bec 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -905,7 +905,7 @@ def _get_with(self, key):
return self._get_values(key)
raise
- if not isinstance(key, (list, np.ndarray, Series, Index)):
+ if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)):
key = list(key)
if isinstance(key, Index):
@@ -1004,8 +1004,6 @@ def __setitem__(self, key, value):
try:
self._set_with_engine(key, value)
- except com.SettingWithCopyError:
- raise
except (KeyError, ValueError):
values = self._values
if is_integer(key) and not self.index.inferred_type == "integer":
@@ -1014,9 +1012,6 @@ def __setitem__(self, key, value):
self[:] = value
else:
self.loc[key] = value
- except InvalidIndexError:
- # e.g. slice
- self._set_with(key, value)
except TypeError as e:
if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):
@@ -1087,7 +1082,7 @@ def _set_with(self, key, value):
def _set_labels(self, key, value):
key = com.asarray_tuplesafe(key)
- indexer = self.index.get_indexer(key)
+ indexer: np.ndarray = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise ValueError(f"{key[mask]} not contained in the index")
@@ -1113,12 +1108,6 @@ def _set_value(self, label, value, takeable: bool = False):
value : object
Scalar value.
takeable : interpret the index as indexers, default False
-
- Returns
- -------
- Series
- If label is contained, will be reference to calling Series,
- otherwise a new object.
"""
try:
if takeable:
@@ -1132,8 +1121,6 @@ def _set_value(self, label, value, takeable: bool = False):
# set using a non-recursive method
self.loc[label] = value
- return self
-
# ----------------------------------------------------------------------
# Unsorted
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 6700d9c261791..d892e3d637772 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -1377,28 +1377,28 @@ def test_set_value(self, float_frame):
def test_set_value_resize(self, float_frame):
res = float_frame._set_value("foobar", "B", 0)
- assert res is float_frame
- assert res.index[-1] == "foobar"
- assert res._get_value("foobar", "B") == 0
+ assert res is None
+ assert float_frame.index[-1] == "foobar"
+ assert float_frame._get_value("foobar", "B") == 0
float_frame.loc["foobar", "qux"] = 0
assert float_frame._get_value("foobar", "qux") == 0
res = float_frame.copy()
- res3 = res._set_value("foobar", "baz", "sam")
- assert res3["baz"].dtype == np.object_
+ res._set_value("foobar", "baz", "sam")
+ assert res["baz"].dtype == np.object_
res = float_frame.copy()
- res3 = res._set_value("foobar", "baz", True)
- assert res3["baz"].dtype == np.object_
+ res._set_value("foobar", "baz", True)
+ assert res["baz"].dtype == np.object_
res = float_frame.copy()
- res3 = res._set_value("foobar", "baz", 5)
- assert is_float_dtype(res3["baz"])
- assert isna(res3["baz"].drop(["foobar"])).all()
+ res._set_value("foobar", "baz", 5)
+ assert is_float_dtype(res["baz"])
+ assert isna(res["baz"].drop(["foobar"])).all()
msg = "could not convert string to float: 'sam'"
with pytest.raises(ValueError, match=msg):
- res3._set_value("foobar", "baz", "sam")
+ res._set_value("foobar", "baz", "sam")
def test_set_value_with_index_dtype_change(self):
df_orig = DataFrame(np.random.randn(3, 3), index=range(3), columns=list("ABC"))
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index f01e9409d9333..fc9d4ec5290a5 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -73,17 +73,13 @@ def test_series_set_value():
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
- s = Series(dtype=object)._set_value(dates[0], 1.0)
- s2 = s._set_value(dates[1], np.nan)
+ s = Series(dtype=object)
+ s._set_value(dates[0], 1.0)
+ s._set_value(dates[1], np.nan)
expected = Series([1.0, np.nan], index=index)
- tm.assert_series_equal(s2, expected)
-
- # FIXME: dont leave commented-out
- # s = Series(index[:1], index[:1])
- # s2 = s._set_value(dates[1], index[1])
- # assert s2.values.dtype == 'M8[ns]'
+ tm.assert_series_equal(s, expected)
@pytest.mark.slow
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index c318b26aaeb67..18fcbea683dd3 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -375,15 +375,15 @@ def test_setitem_dtypes():
def test_set_value(datetime_series, string_series):
idx = datetime_series.index[10]
res = datetime_series._set_value(idx, 0)
- assert res is datetime_series
+ assert res is None
assert datetime_series[idx] == 0
# equiv
s = string_series.copy()
res = s._set_value("foobar", 0)
- assert res is s
- assert res.index[-1] == "foobar"
- assert res["foobar"] == 0
+ assert res is None
+ assert s.index[-1] == "foobar"
+ assert s["foobar"] == 0
s = string_series.copy()
s.loc["foobar"] = 0
| make _set_value not return anything (the DataFrame docstring in particular is misleading) | https://api.github.com/repos/pandas-dev/pandas/pulls/31768 | 2020-02-07T04:11:11Z | 2020-02-09T20:42:31Z | 2020-02-09T20:42:31Z | 2020-02-09T20:58:48Z |
TST: parametrize some indexing tests | diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 8bb88cd9fd63a..6cc18a3989266 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -59,115 +59,117 @@ def test_scalar_error(self, index_func):
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
- def test_scalar_non_numeric(self):
-
- # GH 4892
- # float_indexers should raise exceptions
- # on appropriate Index types & accessors
-
- for index in [
+ @pytest.mark.parametrize(
+ "index_func",
+ [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex,
tm.makeTimedeltaIndex,
tm.makePeriodIndex,
- ]:
+ ],
+ )
+ def test_scalar_non_numeric(self, index_func):
- i = index(5)
+ # GH 4892
+ # float_indexers should raise exceptions
+ # on appropriate Index types & accessors
- for s in [
- Series(np.arange(len(i)), index=i),
- DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i),
- ]:
+ i = index_func(5)
- # getting
- for idxr, getitem in [(lambda x: x.iloc, False), (lambda x: x, True)]:
+ for s in [
+ Series(np.arange(len(i)), index=i),
+ DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i),
+ ]:
- # gettitem on a DataFrame is a KeyError as it is indexing
- # via labels on the columns
- if getitem and isinstance(s, DataFrame):
- error = KeyError
- msg = r"^3(\.0)?$"
- else:
- error = TypeError
- msg = (
- r"cannot do (label|positional) indexing "
- r"on {klass} with these indexers \[3\.0\] of "
- r"type float|"
- "Cannot index by location index with a "
- "non-integer key".format(klass=type(i).__name__)
- )
- with pytest.raises(error, match=msg):
- idxr(s)[3.0]
-
- # label based can be a TypeError or KeyError
- if s.index.inferred_type in {
- "categorical",
- "string",
- "unicode",
- "mixed",
- }:
+ # getting
+ for idxr, getitem in [(lambda x: x.iloc, False), (lambda x: x, True)]:
+
+ # gettitem on a DataFrame is a KeyError as it is indexing
+ # via labels on the columns
+ if getitem and isinstance(s, DataFrame):
error = KeyError
- msg = r"^3\.0$"
+ msg = r"^3(\.0)?$"
else:
error = TypeError
msg = (
- r"cannot do label indexing "
+ r"cannot do (label|positional) indexing "
r"on {klass} with these indexers \[3\.0\] of "
- r"type float".format(klass=type(i).__name__)
+ r"type float|"
+ "Cannot index by location index with a "
+ "non-integer key".format(klass=type(i).__name__)
)
with pytest.raises(error, match=msg):
- s.loc[3.0]
-
- # contains
- assert 3.0 not in s
-
- # setting with a float fails with iloc
+ idxr(s)[3.0]
+
+ # label based can be a TypeError or KeyError
+ if s.index.inferred_type in {
+ "categorical",
+ "string",
+ "unicode",
+ "mixed",
+ }:
+ error = KeyError
+ msg = r"^3\.0$"
+ else:
+ error = TypeError
msg = (
r"cannot do (label|positional) indexing "
r"on {klass} with these indexers \[3\.0\] of "
r"type float".format(klass=type(i).__name__)
)
- with pytest.raises(TypeError, match=msg):
- s.iloc[3.0] = 0
-
- # setting with an indexer
- if s.index.inferred_type in ["categorical"]:
- # Value or Type Error
- pass
- elif s.index.inferred_type in ["datetime64", "timedelta64", "period"]:
-
- # these should prob work
- # and are inconsistent between series/dataframe ATM
- # for idxr in [lambda x: x]:
- # s2 = s.copy()
- #
- # with pytest.raises(TypeError):
- # idxr(s2)[3.0] = 0
- pass
-
- else:
+ with pytest.raises(error, match=msg):
+ s.loc[3.0]
- s2 = s.copy()
- s2.loc[3.0] = 10
- assert s2.index.is_object()
-
- for idxr in [lambda x: x]:
- s2 = s.copy()
- idxr(s2)[3.0] = 0
- assert s2.index.is_object()
+ # contains
+ assert 3.0 not in s
- # fallsback to position selection, series only
- s = Series(np.arange(len(i)), index=i)
- s[3]
+ # setting with a float fails with iloc
msg = (
- r"cannot do label indexing "
+ r"cannot do (label|positional) indexing "
r"on {klass} with these indexers \[3\.0\] of "
r"type float".format(klass=type(i).__name__)
)
with pytest.raises(TypeError, match=msg):
- s[3.0]
+ s.iloc[3.0] = 0
+
+ # setting with an indexer
+ if s.index.inferred_type in ["categorical"]:
+ # Value or Type Error
+ pass
+ elif s.index.inferred_type in ["datetime64", "timedelta64", "period"]:
+
+ # these should prob work
+ # and are inconsistent between series/dataframe ATM
+ # for idxr in [lambda x: x]:
+ # s2 = s.copy()
+ #
+ # with pytest.raises(TypeError):
+ # idxr(s2)[3.0] = 0
+ pass
+
+ else:
+
+ s2 = s.copy()
+ s2.loc[3.0] = 10
+ assert s2.index.is_object()
+
+ for idxr in [lambda x: x]:
+ s2 = s.copy()
+ idxr(s2)[3.0] = 0
+ assert s2.index.is_object()
+
+ # fallsback to position selection, series only
+ s = Series(np.arange(len(i)), index=i)
+ s[3]
+ msg = (
+ r"cannot do (label|positional) indexing "
+ r"on {klass} with these indexers \[3\.0\] of "
+ r"type float".format(klass=type(i).__name__)
+ )
+ with pytest.raises(TypeError, match=msg):
+ s[3.0]
def test_scalar_with_mixed(self):
@@ -222,52 +224,56 @@ def test_scalar_with_mixed(self):
expected = 3
assert result == expected
- def test_scalar_integer(self):
+ @pytest.mark.parametrize(
+ "index_func", [tm.makeIntIndex, tm.makeRangeIndex],
+ )
+ @pytest.mark.parametrize("klass", [Series, DataFrame])
+ def test_scalar_integer(self, index_func, klass):
# test how scalar float indexers work on int indexes
# integer index
- for i in [Int64Index(range(5)), RangeIndex(5)]:
+ i = index_func(5)
- for s in [
- Series(np.arange(len(i))),
- DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i),
- ]:
+ if klass is Series:
+ obj = Series(np.arange(len(i)))
+ else:
+ obj = DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i)
- # coerce to equal int
- for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
+ # coerce to equal int
+ for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
- result = idxr(s)[3.0]
- self.check(result, s, 3, getitem)
+ result = idxr(obj)[3.0]
+ self.check(result, obj, 3, getitem)
- # coerce to equal int
- for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
+ # coerce to equal int
+ for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]:
- if isinstance(s, Series):
+ if isinstance(obj, Series):
- def compare(x, y):
- assert x == y
+ def compare(x, y):
+ assert x == y
- expected = 100
- else:
- compare = tm.assert_series_equal
- if getitem:
- expected = Series(100, index=range(len(s)), name=3)
- else:
- expected = Series(100.0, index=range(len(s)), name=3)
+ expected = 100
+ else:
+ compare = tm.assert_series_equal
+ if getitem:
+ expected = Series(100, index=range(len(obj)), name=3)
+ else:
+ expected = Series(100.0, index=range(len(obj)), name=3)
- s2 = s.copy()
- idxr(s2)[3.0] = 100
+ s2 = obj.copy()
+ idxr(s2)[3.0] = 100
- result = idxr(s2)[3.0]
- compare(result, expected)
+ result = idxr(s2)[3.0]
+ compare(result, expected)
- result = idxr(s2)[3]
- compare(result, expected)
+ result = idxr(s2)[3]
+ compare(result, expected)
- # contains
- # coerce to equal int
- assert 3.0 in s
+ # contains
+ # coerce to equal int
+ assert 3.0 in obj
def test_scalar_float(self):
@@ -322,68 +328,70 @@ def test_scalar_float(self):
with pytest.raises(TypeError, match=msg):
s2.iloc[3.0] = 0
- def test_slice_non_numeric(self):
-
- # GH 4892
- # float_indexers should raise exceptions
- # on appropriate Index types & accessors
-
- for index in [
+ @pytest.mark.parametrize(
+ "index_func",
+ [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeDateIndex,
tm.makeTimedeltaIndex,
tm.makePeriodIndex,
+ ],
+ )
+ def test_slice_non_numeric(self, index_func):
+
+ # GH 4892
+ # float_indexers should raise exceptions
+ # on appropriate Index types & accessors
+
+ index = index_func(5)
+ for s in [
+ Series(range(5), index=index),
+ DataFrame(np.random.randn(5, 2), index=index),
]:
- index = index(5)
- for s in [
- Series(range(5), index=index),
- DataFrame(np.random.randn(5, 2), index=index),
- ]:
+ # getitem
+ for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
+
+ msg = (
+ "cannot do positional indexing "
+ r"on {klass} with these indexers \[(3|4)\.0\] of "
+ "type float".format(klass=type(index).__name__)
+ )
+ with pytest.raises(TypeError, match=msg):
+ s.iloc[l]
- # getitem
- for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
+ for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
- "cannot do positional indexing "
- r"on {klass} with these indexers \[(3|4)\.0\] of "
- "type float".format(klass=type(index).__name__)
+ "cannot do (slice|positional) indexing "
+ r"on {klass} with these indexers "
+ r"\[(3|4)(\.0)?\] "
+ r"of type (float|int)".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
- s.iloc[l]
+ idxr(s)[l]
- for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
-
- msg = (
- "cannot do (slice|positional) indexing "
- r"on {klass} with these indexers "
- r"\[(3|4)(\.0)?\] "
- r"of type (float|int)".format(klass=type(index).__name__)
- )
- with pytest.raises(TypeError, match=msg):
- idxr(s)[l]
+ # setitem
+ for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
- # setitem
- for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
+ msg = (
+ "cannot do positional indexing "
+ r"on {klass} with these indexers \[(3|4)\.0\] of "
+ "type float".format(klass=type(index).__name__)
+ )
+ with pytest.raises(TypeError, match=msg):
+ s.iloc[l] = 0
+ for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
- "cannot do positional indexing "
- r"on {klass} with these indexers \[(3|4)\.0\] of "
- "type float".format(klass=type(index).__name__)
+ "cannot do (slice|positional) indexing "
+ r"on {klass} with these indexers "
+ r"\[(3|4)(\.0)?\] "
+ r"of type (float|int)".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
- s.iloc[l] = 0
-
- for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
- msg = (
- "cannot do (slice|positional) indexing "
- r"on {klass} with these indexers "
- r"\[(3|4)(\.0)?\] "
- r"of type (float|int)".format(klass=type(index).__name__)
- )
- with pytest.raises(TypeError, match=msg):
- idxr(s)[l] = 0
+ idxr(s)[l] = 0
def test_slice_integer(self):
@@ -516,83 +524,86 @@ def test_integer_positional_indexing(self):
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
- def test_slice_integer_frame_getitem(self):
+ @pytest.mark.parametrize(
+ "index_func", [tm.makeIntIndex, tm.makeRangeIndex],
+ )
+ def test_slice_integer_frame_getitem(self, index_func):
# similar to above, but on the getitem dim (of a DataFrame)
- for index in [Int64Index(range(5)), RangeIndex(5)]:
-
- s = DataFrame(np.random.randn(5, 2), index=index)
-
- def f(idxr):
+ index = index_func(5)
- # getitem
- for l in [slice(0.0, 1), slice(0, 1.0), slice(0.0, 1.0)]:
+ s = DataFrame(np.random.randn(5, 2), index=index)
- result = idxr(s)[l]
- indexer = slice(0, 2)
- self.check(result, s, indexer, False)
-
- # positional indexing
- msg = (
- "cannot do slice indexing "
- r"on {klass} with these indexers \[(0|1)\.0\] of "
- "type float".format(klass=type(index).__name__)
- )
- with pytest.raises(TypeError, match=msg):
- s[l]
+ def f(idxr):
- # getitem out-of-bounds
- for l in [slice(-10, 10), slice(-10.0, 10.0)]:
+ # getitem
+ for l in [slice(0.0, 1), slice(0, 1.0), slice(0.0, 1.0)]:
- result = idxr(s)[l]
- self.check(result, s, slice(-10, 10), True)
+ result = idxr(s)[l]
+ indexer = slice(0, 2)
+ self.check(result, s, indexer, False)
# positional indexing
msg = (
"cannot do slice indexing "
- r"on {klass} with these indexers \[-10\.0\] of "
+ r"on {klass} with these indexers \[(0|1)\.0\] of "
"type float".format(klass=type(index).__name__)
)
with pytest.raises(TypeError, match=msg):
- s[slice(-10.0, 10.0)]
+ s[l]
- # getitem odd floats
- for l, res in [
- (slice(0.5, 1), slice(1, 2)),
- (slice(0, 0.5), slice(0, 1)),
- (slice(0.5, 1.5), slice(1, 2)),
- ]:
+ # getitem out-of-bounds
+ for l in [slice(-10, 10), slice(-10.0, 10.0)]:
- result = idxr(s)[l]
- self.check(result, s, res, False)
+ result = idxr(s)[l]
+ self.check(result, s, slice(-10, 10), True)
- # positional indexing
- msg = (
- "cannot do slice indexing "
- r"on {klass} with these indexers \[0\.5\] of "
- "type float".format(klass=type(index).__name__)
- )
- with pytest.raises(TypeError, match=msg):
- s[l]
+ # positional indexing
+ msg = (
+ "cannot do slice indexing "
+ r"on {klass} with these indexers \[-10\.0\] of "
+ "type float".format(klass=type(index).__name__)
+ )
+ with pytest.raises(TypeError, match=msg):
+ s[slice(-10.0, 10.0)]
- # setitem
- for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
+ # getitem odd floats
+ for l, res in [
+ (slice(0.5, 1), slice(1, 2)),
+ (slice(0, 0.5), slice(0, 1)),
+ (slice(0.5, 1.5), slice(1, 2)),
+ ]:
- sc = s.copy()
- idxr(sc)[l] = 0
- result = idxr(sc)[l].values.ravel()
- assert (result == 0).all()
+ result = idxr(s)[l]
+ self.check(result, s, res, False)
- # positional indexing
- msg = (
- "cannot do slice indexing "
- r"on {klass} with these indexers \[(3|4)\.0\] of "
- "type float".format(klass=type(index).__name__)
- )
- with pytest.raises(TypeError, match=msg):
- s[l] = 0
+ # positional indexing
+ msg = (
+ "cannot do slice indexing "
+ r"on {klass} with these indexers \[0\.5\] of "
+ "type float".format(klass=type(index).__name__)
+ )
+ with pytest.raises(TypeError, match=msg):
+ s[l]
+
+ # setitem
+ for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
+
+ sc = s.copy()
+ idxr(sc)[l] = 0
+ result = idxr(sc)[l].values.ravel()
+ assert (result == 0).all()
+
+ # positional indexing
+ msg = (
+ "cannot do slice indexing "
+ r"on {klass} with these indexers \[(3|4)\.0\] of "
+ "type float".format(klass=type(index).__name__)
+ )
+ with pytest.raises(TypeError, match=msg):
+ s[l] = 0
- f(lambda x: x.loc)
+ f(lambda x: x.loc)
def test_slice_float(self):
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index 3622b12b853a4..899c58eb5edea 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -9,61 +9,59 @@
class TestScalar(Base):
- def test_at_and_iat_get(self):
+ @pytest.mark.parametrize("kind", ["series", "frame"])
+ def test_at_and_iat_get(self, kind):
def _check(f, func, values=False):
if f is not None:
- indicies = self.generate_indices(f, values)
- for i in indicies:
+ indices = self.generate_indices(f, values)
+ for i in indices:
result = getattr(f, func)[i]
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(result, expected)
- for kind in self._kinds:
+ d = getattr(self, kind)
- d = getattr(self, kind)
+ # iat
+ for f in [d["ints"], d["uints"]]:
+ _check(f, "iat", values=True)
- # iat
- for f in [d["ints"], d["uints"]]:
- _check(f, "iat", values=True)
-
- for f in [d["labels"], d["ts"], d["floats"]]:
- if f is not None:
- msg = "iAt based indexing can only have integer indexers"
- with pytest.raises(ValueError, match=msg):
- self.check_values(f, "iat")
+ for f in [d["labels"], d["ts"], d["floats"]]:
+ if f is not None:
+ msg = "iAt based indexing can only have integer indexers"
+ with pytest.raises(ValueError, match=msg):
+ self.check_values(f, "iat")
- # at
- for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
- _check(f, "at")
+ # at
+ for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
+ _check(f, "at")
- def test_at_and_iat_set(self):
+ @pytest.mark.parametrize("kind", ["series", "frame"])
+ def test_at_and_iat_set(self, kind):
def _check(f, func, values=False):
if f is not None:
- indicies = self.generate_indices(f, values)
- for i in indicies:
+ indices = self.generate_indices(f, values)
+ for i in indices:
getattr(f, func)[i] = 1
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(expected, 1)
- for kind in self._kinds:
+ d = getattr(self, kind)
- d = getattr(self, kind)
+ # iat
+ for f in [d["ints"], d["uints"]]:
+ _check(f, "iat", values=True)
- # iat
- for f in [d["ints"], d["uints"]]:
- _check(f, "iat", values=True)
-
- for f in [d["labels"], d["ts"], d["floats"]]:
- if f is not None:
- msg = "iAt based indexing can only have integer indexers"
- with pytest.raises(ValueError, match=msg):
- _check(f, "iat")
+ for f in [d["labels"], d["ts"], d["floats"]]:
+ if f is not None:
+ msg = "iAt based indexing can only have integer indexers"
+ with pytest.raises(ValueError, match=msg):
+ _check(f, "iat")
- # at
- for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
- _check(f, "at")
+ # at
+ for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
+ _check(f, "at")
class TestScalar2:
| 2/many | https://api.github.com/repos/pandas-dev/pandas/pulls/31767 | 2020-02-07T03:29:37Z | 2020-02-09T16:42:15Z | 2020-02-09T16:42:15Z | 2020-02-09T17:23:42Z |
REF: share _partial_date_slice between PeriodIndex/DatetimeIndex | diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index b143ff0aa9c02..d622baf515b82 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -1,6 +1,7 @@
"""
Base and utility classes for tseries type pandas objects.
"""
+from datetime import datetime
from typing import Any, List, Optional, Union
import numpy as np
@@ -412,6 +413,57 @@ def _convert_scalar_indexer(self, key, kind: str):
return super()._convert_scalar_indexer(key, kind=kind)
+ def _validate_partial_date_slice(self, reso: str):
+ raise NotImplementedError
+
+ def _parsed_string_to_bounds(self, reso: str, parsed: datetime):
+ raise NotImplementedError
+
+ def _partial_date_slice(
+ self, reso: str, parsed: datetime, use_lhs: bool = True, use_rhs: bool = True
+ ):
+ """
+ Parameters
+ ----------
+ reso : str
+ parsed : datetime
+ use_lhs : bool, default True
+ use_rhs : bool, default True
+
+ Returns
+ -------
+ slice or ndarray[intp]
+ """
+ self._validate_partial_date_slice(reso)
+
+ t1, t2 = self._parsed_string_to_bounds(reso, parsed)
+ i8vals = self.asi8
+ unbox = self._data._unbox_scalar
+
+ if self.is_monotonic:
+
+ if len(self) and (
+ (use_lhs and t1 < self[0] and t2 < self[0])
+ or ((use_rhs and t1 > self[-1] and t2 > self[-1]))
+ ):
+ # we are out of range
+ raise KeyError
+
+ # TODO: does this depend on being monotonic _increasing_?
+
+ # a monotonic (sorted) series can be sliced
+ # Use asi8.searchsorted to avoid re-validating Periods/Timestamps
+ left = i8vals.searchsorted(unbox(t1), side="left") if use_lhs else None
+ right = i8vals.searchsorted(unbox(t2), side="right") if use_rhs else None
+ return slice(left, right)
+
+ else:
+ lhs_mask = (i8vals >= unbox(t1)) if use_lhs else True
+ rhs_mask = (i8vals <= unbox(t2)) if use_rhs else True
+
+ # try to find the dates
+ return (lhs_mask & rhs_mask).nonzero()[0]
+
# --------------------------------------------------------------------
__add__ = make_wrapped_arith_op("__add__")
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 3d57f0944b318..b67d0dcea0ac6 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -503,19 +503,9 @@ def _parsed_string_to_bounds(self, reso: str, parsed: datetime):
end = end.tz_localize(self.tz)
return start, end
- def _partial_date_slice(
- self, reso: str, parsed: datetime, use_lhs: bool = True, use_rhs: bool = True
- ):
- """
- Parameters
- ----------
- reso : str
- use_lhs : bool, default True
- use_rhs : bool, default True
- """
- is_monotonic = self.is_monotonic
+ def _validate_partial_date_slice(self, reso: str):
if (
- is_monotonic
+ self.is_monotonic
and reso in ["day", "hour", "minute", "second"]
and self._resolution >= Resolution.get_reso(reso)
):
@@ -530,31 +520,6 @@ def _partial_date_slice(
# _parsed_string_to_bounds allows it.
raise KeyError
- t1, t2 = self._parsed_string_to_bounds(reso, parsed)
- stamps = self.asi8
-
- if is_monotonic:
-
- # we are out of range
- if len(stamps) and (
- (use_lhs and t1.value < stamps[0] and t2.value < stamps[0])
- or ((use_rhs and t1.value > stamps[-1] and t2.value > stamps[-1]))
- ):
- raise KeyError
-
- # a monotonic (sorted) series can be sliced
- # Use asi8.searchsorted to avoid re-validating
- left = stamps.searchsorted(t1.value, side="left") if use_lhs else None
- right = stamps.searchsorted(t2.value, side="right") if use_rhs else None
-
- return slice(left, right)
-
- lhs_mask = (stamps >= t1.value) if use_lhs else True
- rhs_mask = (stamps <= t2.value) if use_rhs else True
-
- # try to find a the dates
- return (lhs_mask & rhs_mask).nonzero()[0]
-
def _maybe_promote(self, other):
if other.inferred_type == "date":
other = DatetimeIndex(other)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 42f0a012902a3..cc8acd463e6c8 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -606,9 +606,7 @@ def _parsed_string_to_bounds(self, reso: str, parsed: datetime):
iv = Period(parsed, freq=(grp, 1))
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
- def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
- # TODO: Check for non-True use_lhs/use_rhs
- parsed, reso = parse_time_string(key, self.freq)
+ def _validate_partial_date_slice(self, reso: str):
grp = resolution.Resolution.get_freq_group(reso)
freqn = resolution.get_freq_group(self.freq)
@@ -616,35 +614,16 @@ def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
- raise ValueError(key)
-
- t1, t2 = self._parsed_string_to_bounds(reso, parsed)
- i8vals = self.asi8
-
- if self.is_monotonic:
-
- # we are out of range
- if len(self) and (
- (use_lhs and t1 < self[0] and t2 < self[0])
- or ((use_rhs and t1 > self[-1] and t2 > self[-1]))
- ):
- raise KeyError(key)
-
- # TODO: does this depend on being monotonic _increasing_?
- # If so, DTI will also be affected.
+ raise ValueError
- # a monotonic (sorted) series can be sliced
- # Use asi8.searchsorted to avoid re-validating Periods
- left = i8vals.searchsorted(t1.ordinal, side="left") if use_lhs else None
- right = i8vals.searchsorted(t2.ordinal, side="right") if use_rhs else None
- return slice(left, right)
-
- else:
- lhs_mask = (i8vals >= t1.ordinal) if use_lhs else True
- rhs_mask = (i8vals <= t2.ordinal) if use_rhs else True
+ def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
+ # TODO: Check for non-True use_lhs/use_rhs
+ parsed, reso = parse_time_string(key, self.freq)
- # try to find a the dates
- return (lhs_mask & rhs_mask).nonzero()[0]
+ try:
+ return self._partial_date_slice(reso, parsed, use_lhs, use_rhs)
+ except KeyError:
+ raise KeyError(key)
def _convert_tolerance(self, tolerance, target):
tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance, target)
| https://api.github.com/repos/pandas-dev/pandas/pulls/31766 | 2020-02-07T01:51:47Z | 2020-02-09T16:56:08Z | 2020-02-09T16:56:08Z | 2020-02-09T17:29:46Z | |
REF: Remove CategoricalIndex.get_value | diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 85229c728848f..7373f41daefa4 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any, List
+from typing import Any, List
import warnings
import numpy as np
@@ -29,9 +29,6 @@
from pandas.core.indexes.extension import ExtensionIndex, inherit_names
import pandas.core.missing as missing
-if TYPE_CHECKING:
- from pandas import Series
-
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass="CategoricalIndex"))
@@ -444,35 +441,6 @@ def _maybe_cast_indexer(self, key):
code = self.codes.dtype.type(code)
return code
- def get_value(self, series: "Series", key: Any):
- """
- Fast lookup of value from 1-dimensional ndarray. Only use this if you
- know what you're doing
-
- Parameters
- ----------
- series : Series
- 1-dimensional array to take values from
- key: : scalar
- The value of this index at the position of the desired value,
- otherwise the positional index of the desired value
-
- Returns
- -------
- Any
- The element of the series at the position indicated by the key
- """
- k = key
- try:
- k = self._convert_scalar_indexer(k, kind="getitem")
- indexer = self.get_loc(k)
- return series.take([indexer])[0]
- except (KeyError, TypeError):
- pass
-
- # we might be a positional inexer
- return Index.get_value(self, series, key)
-
@Appender(Index.where.__doc__)
def where(self, cond, other=None):
# TODO: Investigate an alternative implementation with
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 0786674daf874..c54331f867a9c 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -979,6 +979,9 @@ def _get_value(self, label, takeable: bool = False):
"""
if takeable:
return self._values[label]
+
+ # We assume that _convert_scalar_indexer has already been called,
+ # with kind="loc", if necessary, by the time we get here
return self.index.get_value(self, label)
def __setitem__(self, key, value):
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index fa5c75d5e4ad9..bb10b12d94628 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -564,6 +564,18 @@ def test_categorical_assigning_ops():
tm.assert_series_equal(s, exp)
+def test_getitem_categorical_str():
+ # GH#31765
+ ser = pd.Series(range(5), index=pd.Categorical(["a", "b", "c", "a", "b"]))
+ result = ser["a"]
+ expected = ser.iloc[[0, 3]]
+ tm.assert_series_equal(result, expected)
+
+ # Check the intermediate steps work as expected
+ result = ser.index.get_value(ser, "a")
+ tm.assert_series_equal(result, expected)
+
+
def test_slice(string_series, object_series):
numSlice = string_series[10:20]
numSliceEnd = string_series[-10:]
| It is no longer needed following #31724. | https://api.github.com/repos/pandas-dev/pandas/pulls/31765 | 2020-02-07T01:47:17Z | 2020-02-09T14:58:33Z | 2020-02-09T14:58:33Z | 2020-02-09T15:47:49Z |
Backport PR #31729 on branch 1.0.x (BUG: Fix to_excel writers handling of cols) | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index 07a837829c384..94dc1e0c007ca 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -15,7 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
--
+- Fixed regression in :meth:`DataFrame.to_excel` when ``columns`` kwarg is passed (:issue:`31677`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 9b0f100c1b041..3d5b571cc58a0 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -400,7 +400,7 @@ def __init__(
# Deprecated in GH#17295, enforced in 1.0.0
raise KeyError("Not all names specified in 'columns' are found")
- self.df = df
+ self.df = df.reindex(columns=cols)
self.columns = self.df.columns
self.float_format = float_format
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 55b987a599670..31382d29ce615 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -1018,6 +1018,27 @@ def test_invalid_columns(self, path):
):
write_frame.to_excel(path, "test1", columns=["C", "D"])
+ @pytest.mark.parametrize(
+ "to_excel_index,read_excel_index_col",
+ [
+ (True, 0), # Include index in write to file
+ (False, None), # Dont include index in write to file
+ ],
+ )
+ def test_write_subset_columns(self, path, to_excel_index, read_excel_index_col):
+ # GH 31677
+ write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2], "C": [3, 3, 3]})
+ write_frame.to_excel(
+ path, "col_subset_bug", columns=["A", "B"], index=to_excel_index
+ )
+
+ expected = write_frame[["A", "B"]]
+ read_frame = pd.read_excel(
+ path, "col_subset_bug", index_col=read_excel_index_col
+ )
+
+ tm.assert_frame_equal(expected, read_frame)
+
def test_comment_arg(self, path):
# see gh-18735
#
| Backport PR #31729: BUG: Fix to_excel writers handling of cols | https://api.github.com/repos/pandas-dev/pandas/pulls/31764 | 2020-02-06T23:41:26Z | 2020-02-07T00:38:54Z | 2020-02-07T00:38:54Z | 2020-02-07T00:38:54Z |
REF/TST: misplaced tests in tests.indexes.period | diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py
index 6139726dc34e4..916f722247a14 100644
--- a/pandas/tests/indexes/datetimes/test_astype.py
+++ b/pandas/tests/indexes/datetimes/test_astype.py
@@ -1,7 +1,6 @@
from datetime import datetime
import dateutil
-from dateutil.tz import tzlocal
import numpy as np
import pytest
import pytz
@@ -12,7 +11,7 @@
Index,
Int64Index,
NaT,
- Period,
+ PeriodIndex,
Series,
Timestamp,
date_range,
@@ -278,81 +277,19 @@ def test_integer_index_astype_datetime(self, tz, dtype):
expected = pd.DatetimeIndex(["2018-01-01"], tz=tz)
tm.assert_index_equal(result, expected)
+ def test_dti_astype_period(self):
+ idx = DatetimeIndex([NaT, "2011-01-01", "2011-02-01"], name="idx")
-class TestToPeriod:
- def setup_method(self, method):
- data = [
- Timestamp("2007-01-01 10:11:12.123456Z"),
- Timestamp("2007-01-01 10:11:13.789123Z"),
- ]
- self.index = DatetimeIndex(data)
-
- def test_to_period_millisecond(self):
- index = self.index
-
- with tm.assert_produces_warning(UserWarning):
- # warning that timezone info will be lost
- period = index.to_period(freq="L")
- assert 2 == len(period)
- assert period[0] == Period("2007-01-01 10:11:12.123Z", "L")
- assert period[1] == Period("2007-01-01 10:11:13.789Z", "L")
-
- def test_to_period_microsecond(self):
- index = self.index
+ res = idx.astype("period[M]")
+ exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="M", name="idx")
+ tm.assert_index_equal(res, exp)
- with tm.assert_produces_warning(UserWarning):
- # warning that timezone info will be lost
- period = index.to_period(freq="U")
- assert 2 == len(period)
- assert period[0] == Period("2007-01-01 10:11:12.123456Z", "U")
- assert period[1] == Period("2007-01-01 10:11:13.789123Z", "U")
-
- @pytest.mark.parametrize(
- "tz",
- ["US/Eastern", pytz.utc, tzlocal(), "dateutil/US/Eastern", dateutil.tz.tzutc()],
- )
- def test_to_period_tz(self, tz):
- ts = date_range("1/1/2000", "2/1/2000", tz=tz)
-
- with tm.assert_produces_warning(UserWarning):
- # GH#21333 warning that timezone info will be lost
- result = ts.to_period()[0]
- expected = ts[0].to_period()
-
- assert result == expected
-
- expected = date_range("1/1/2000", "2/1/2000").to_period()
-
- with tm.assert_produces_warning(UserWarning):
- # GH#21333 warning that timezone info will be lost
- result = ts.to_period()
-
- tm.assert_index_equal(result, expected)
+ res = idx.astype("period[3M]")
+ exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="3M", name="idx")
+ tm.assert_index_equal(res, exp)
- @pytest.mark.parametrize("tz", ["Etc/GMT-1", "Etc/GMT+1"])
- def test_to_period_tz_utc_offset_consistency(self, tz):
- # GH 22905
- ts = pd.date_range("1/1/2000", "2/1/2000", tz="Etc/GMT-1")
- with tm.assert_produces_warning(UserWarning):
- result = ts.to_period()[0]
- expected = ts[0].to_period()
- assert result == expected
-
- def test_to_period_nofreq(self):
- idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"])
- with pytest.raises(ValueError):
- idx.to_period()
-
- idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"], freq="infer")
- assert idx.freqstr == "D"
- expected = pd.PeriodIndex(["2000-01-01", "2000-01-02", "2000-01-03"], freq="D")
- tm.assert_index_equal(idx.to_period(), expected)
-
- # GH 7606
- idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"])
- assert idx.freqstr is None
- tm.assert_index_equal(idx.to_period(), expected)
+class TestAstype:
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_astype_category(self, tz):
obj = pd.date_range("2000", periods=2, tz=tz)
diff --git a/pandas/tests/indexes/datetimes/test_to_period.py b/pandas/tests/indexes/datetimes/test_to_period.py
new file mode 100644
index 0000000000000..5567f98c52211
--- /dev/null
+++ b/pandas/tests/indexes/datetimes/test_to_period.py
@@ -0,0 +1,161 @@
+import dateutil.tz
+from dateutil.tz import tzlocal
+import pytest
+import pytz
+
+from pandas._libs.tslibs.ccalendar import MONTHS
+from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG
+
+from pandas import (
+ DatetimeIndex,
+ Period,
+ PeriodIndex,
+ Timestamp,
+ date_range,
+ period_range,
+)
+import pandas._testing as tm
+
+
+class TestToPeriod:
+ def test_dti_to_period(self):
+ dti = date_range(start="1/1/2005", end="12/1/2005", freq="M")
+ pi1 = dti.to_period()
+ pi2 = dti.to_period(freq="D")
+ pi3 = dti.to_period(freq="3D")
+
+ assert pi1[0] == Period("Jan 2005", freq="M")
+ assert pi2[0] == Period("1/31/2005", freq="D")
+ assert pi3[0] == Period("1/31/2005", freq="3D")
+
+ assert pi1[-1] == Period("Nov 2005", freq="M")
+ assert pi2[-1] == Period("11/30/2005", freq="D")
+        assert pi3[-1] == Period("11/30/2005", freq="3D")
+
+ tm.assert_index_equal(pi1, period_range("1/1/2005", "11/1/2005", freq="M"))
+ tm.assert_index_equal(
+ pi2, period_range("1/1/2005", "11/1/2005", freq="M").asfreq("D")
+ )
+ tm.assert_index_equal(
+ pi3, period_range("1/1/2005", "11/1/2005", freq="M").asfreq("3D")
+ )
+
+ @pytest.mark.parametrize("month", MONTHS)
+ def test_to_period_quarterly(self, month):
+ # make sure we can make the round trip
+ freq = "Q-{month}".format(month=month)
+ rng = period_range("1989Q3", "1991Q3", freq=freq)
+ stamps = rng.to_timestamp()
+ result = stamps.to_period(freq)
+ tm.assert_index_equal(rng, result)
+
+ @pytest.mark.parametrize("off", ["BQ", "QS", "BQS"])
+ def test_to_period_quarterlyish(self, off):
+ rng = date_range("01-Jan-2012", periods=8, freq=off)
+ prng = rng.to_period()
+ assert prng.freq == "Q-DEC"
+
+ @pytest.mark.parametrize("off", ["BA", "AS", "BAS"])
+ def test_to_period_annualish(self, off):
+ rng = date_range("01-Jan-2012", periods=8, freq=off)
+ prng = rng.to_period()
+ assert prng.freq == "A-DEC"
+
+ def test_to_period_monthish(self):
+ offsets = ["MS", "BM"]
+ for off in offsets:
+ rng = date_range("01-Jan-2012", periods=8, freq=off)
+ prng = rng.to_period()
+ assert prng.freq == "M"
+
+ rng = date_range("01-Jan-2012", periods=8, freq="M")
+ prng = rng.to_period()
+ assert prng.freq == "M"
+
+ with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
+ date_range("01-Jan-2012", periods=8, freq="EOM")
+
+ def test_period_dt64_round_trip(self):
+ dti = date_range("1/1/2000", "1/7/2002", freq="B")
+ pi = dti.to_period()
+ tm.assert_index_equal(pi.to_timestamp(), dti)
+
+ dti = date_range("1/1/2000", "1/7/2002", freq="B")
+ pi = dti.to_period(freq="H")
+ tm.assert_index_equal(pi.to_timestamp(), dti)
+
+ def test_to_period_millisecond(self):
+ index = DatetimeIndex(
+ [
+ Timestamp("2007-01-01 10:11:12.123456Z"),
+ Timestamp("2007-01-01 10:11:13.789123Z"),
+ ]
+ )
+
+ with tm.assert_produces_warning(UserWarning):
+ # warning that timezone info will be lost
+ period = index.to_period(freq="L")
+ assert 2 == len(period)
+ assert period[0] == Period("2007-01-01 10:11:12.123Z", "L")
+ assert period[1] == Period("2007-01-01 10:11:13.789Z", "L")
+
+ def test_to_period_microsecond(self):
+ index = DatetimeIndex(
+ [
+ Timestamp("2007-01-01 10:11:12.123456Z"),
+ Timestamp("2007-01-01 10:11:13.789123Z"),
+ ]
+ )
+
+ with tm.assert_produces_warning(UserWarning):
+ # warning that timezone info will be lost
+ period = index.to_period(freq="U")
+ assert 2 == len(period)
+ assert period[0] == Period("2007-01-01 10:11:12.123456Z", "U")
+ assert period[1] == Period("2007-01-01 10:11:13.789123Z", "U")
+
+ @pytest.mark.parametrize(
+ "tz",
+ ["US/Eastern", pytz.utc, tzlocal(), "dateutil/US/Eastern", dateutil.tz.tzutc()],
+ )
+ def test_to_period_tz(self, tz):
+ ts = date_range("1/1/2000", "2/1/2000", tz=tz)
+
+ with tm.assert_produces_warning(UserWarning):
+ # GH#21333 warning that timezone info will be lost
+ result = ts.to_period()[0]
+ expected = ts[0].to_period()
+
+ assert result == expected
+
+ expected = date_range("1/1/2000", "2/1/2000").to_period()
+
+ with tm.assert_produces_warning(UserWarning):
+ # GH#21333 warning that timezone info will be lost
+ result = ts.to_period()
+
+ tm.assert_index_equal(result, expected)
+
+ @pytest.mark.parametrize("tz", ["Etc/GMT-1", "Etc/GMT+1"])
+ def test_to_period_tz_utc_offset_consistency(self, tz):
+ # GH#22905
+ ts = date_range("1/1/2000", "2/1/2000", tz="Etc/GMT-1")
+ with tm.assert_produces_warning(UserWarning):
+ result = ts.to_period()[0]
+ expected = ts[0].to_period()
+ assert result == expected
+
+ def test_to_period_nofreq(self):
+ idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"])
+ with pytest.raises(ValueError):
+ idx.to_period()
+
+ idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"], freq="infer")
+ assert idx.freqstr == "D"
+ expected = PeriodIndex(["2000-01-01", "2000-01-02", "2000-01-03"], freq="D")
+ tm.assert_index_equal(idx.to_period(), expected)
+
+ # GH#7606
+ idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"])
+ assert idx.freqstr is None
+ tm.assert_index_equal(idx.to_period(), expected)
diff --git a/pandas/tests/indexes/period/test_asfreq.py b/pandas/tests/indexes/period/test_asfreq.py
index 88e800d66f3ad..8c04ac1177676 100644
--- a/pandas/tests/indexes/period/test_asfreq.py
+++ b/pandas/tests/indexes/period/test_asfreq.py
@@ -1,8 +1,6 @@
-import numpy as np
import pytest
-import pandas as pd
-from pandas import DataFrame, PeriodIndex, Series, period_range
+from pandas import PeriodIndex, period_range
import pandas._testing as tm
@@ -98,7 +96,7 @@ def test_asfreq_mult_pi(self, freq):
assert result.freq == exp.freq
def test_asfreq_combined_pi(self):
- pi = pd.PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq="H")
+ pi = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq="H")
exp = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq="25H")
for freq, how in zip(["1D1H", "1H1D"], ["S", "E"]):
result = pi.asfreq(freq, how=how)
@@ -106,38 +104,18 @@ def test_asfreq_combined_pi(self):
assert result.freq == exp.freq
for freq in ["1D1H", "1H1D"]:
- pi = pd.PeriodIndex(
- ["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq=freq
- )
+ pi = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq=freq)
result = pi.asfreq("H")
exp = PeriodIndex(["2001-01-02 00:00", "2001-01-03 02:00", "NaT"], freq="H")
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
- pi = pd.PeriodIndex(
- ["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq=freq
- )
+ pi = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq=freq)
result = pi.asfreq("H", how="S")
exp = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq="H")
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
- def test_asfreq_ts(self):
- index = period_range(freq="A", start="1/1/2001", end="12/31/2010")
- ts = Series(np.random.randn(len(index)), index=index)
- df = DataFrame(np.random.randn(len(index), 3), index=index)
-
- result = ts.asfreq("D", how="end")
- df_result = df.asfreq("D", how="end")
- exp_index = index.asfreq("D", how="end")
- assert len(result) == len(ts)
- tm.assert_index_equal(result.index, exp_index)
- tm.assert_index_equal(df_result.index, exp_index)
-
- result = ts.asfreq("D", how="start")
- assert len(result) == len(ts)
- tm.assert_index_equal(result.index, index.asfreq("D", how="start"))
-
def test_astype_asfreq(self):
pi1 = PeriodIndex(["2011-01-01", "2011-02-01", "2011-03-01"], freq="D")
exp = PeriodIndex(["2011-01", "2011-02", "2011-03"], freq="M")
diff --git a/pandas/tests/indexes/period/test_astype.py b/pandas/tests/indexes/period/test_astype.py
index ec386dd9dd11c..2f10e45193d5d 100644
--- a/pandas/tests/indexes/period/test_astype.py
+++ b/pandas/tests/indexes/period/test_astype.py
@@ -1,8 +1,18 @@
import numpy as np
import pytest
-import pandas as pd
-from pandas import Index, Int64Index, NaT, Period, PeriodIndex, period_range
+from pandas import (
+ CategoricalIndex,
+ DatetimeIndex,
+ Index,
+ Int64Index,
+ NaT,
+ Period,
+ PeriodIndex,
+ Timedelta,
+ UInt64Index,
+ period_range,
+)
import pandas._testing as tm
@@ -41,39 +51,39 @@ def test_astype_conversion(self):
def test_astype_uint(self):
arr = period_range("2000", periods=2)
- expected = pd.UInt64Index(np.array([10957, 10958], dtype="uint64"))
+ expected = UInt64Index(np.array([10957, 10958], dtype="uint64"))
tm.assert_index_equal(arr.astype("uint64"), expected)
tm.assert_index_equal(arr.astype("uint32"), expected)
def test_astype_object(self):
- idx = pd.PeriodIndex([], freq="M")
+ idx = PeriodIndex([], freq="M")
exp = np.array([], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
- idx = pd.PeriodIndex(["2011-01", pd.NaT], freq="M")
+ idx = PeriodIndex(["2011-01", NaT], freq="M")
- exp = np.array([pd.Period("2011-01", freq="M"), pd.NaT], dtype=object)
+ exp = np.array([Period("2011-01", freq="M"), NaT], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
- exp = np.array([pd.Period("2011-01-01", freq="D"), pd.NaT], dtype=object)
- idx = pd.PeriodIndex(["2011-01-01", pd.NaT], freq="D")
+ exp = np.array([Period("2011-01-01", freq="D"), NaT], dtype=object)
+ idx = PeriodIndex(["2011-01-01", NaT], freq="D")
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
# TODO: de-duplicate this version (from test_ops) with the one above
# (from test_period)
def test_astype_object2(self):
- idx = pd.period_range(start="2013-01-01", periods=4, freq="M", name="idx")
+ idx = period_range(start="2013-01-01", periods=4, freq="M", name="idx")
expected_list = [
- pd.Period("2013-01-31", freq="M"),
- pd.Period("2013-02-28", freq="M"),
- pd.Period("2013-03-31", freq="M"),
- pd.Period("2013-04-30", freq="M"),
+ Period("2013-01-31", freq="M"),
+ Period("2013-02-28", freq="M"),
+ Period("2013-03-31", freq="M"),
+ Period("2013-04-30", freq="M"),
]
- expected = pd.Index(expected_list, dtype=object, name="idx")
+ expected = Index(expected_list, dtype=object, name="idx")
result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
@@ -85,31 +95,31 @@ def test_astype_object2(self):
["2013-01-01", "2013-01-02", "NaT", "2013-01-04"], freq="D", name="idx"
)
expected_list = [
- pd.Period("2013-01-01", freq="D"),
- pd.Period("2013-01-02", freq="D"),
- pd.Period("NaT", freq="D"),
- pd.Period("2013-01-04", freq="D"),
+ Period("2013-01-01", freq="D"),
+ Period("2013-01-02", freq="D"),
+ Period("NaT", freq="D"),
+ Period("2013-01-04", freq="D"),
]
- expected = pd.Index(expected_list, dtype=object, name="idx")
+ expected = Index(expected_list, dtype=object, name="idx")
result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
assert result[i] == expected[i]
- assert result[2] is pd.NaT
+ assert result[2] is NaT
assert result.name == expected.name
result_list = idx.tolist()
for i in [0, 1, 3]:
assert result_list[i] == expected_list[i]
- assert result_list[2] is pd.NaT
+ assert result_list[2] is NaT
def test_astype_category(self):
- obj = pd.period_range("2000", periods=2)
+ obj = period_range("2000", periods=2)
result = obj.astype("category")
- expected = pd.CategoricalIndex(
- [pd.Period("2000-01-01", freq="D"), pd.Period("2000-01-02", freq="D")]
+ expected = CategoricalIndex(
+ [Period("2000-01-01", freq="D"), Period("2000-01-02", freq="D")]
)
tm.assert_index_equal(result, expected)
@@ -118,11 +128,30 @@ def test_astype_category(self):
tm.assert_categorical_equal(result, expected)
def test_astype_array_fallback(self):
- obj = pd.period_range("2000", periods=2)
+ obj = period_range("2000", periods=2)
result = obj.astype(bool)
- expected = pd.Index(np.array([True, True]))
+ expected = Index(np.array([True, True]))
tm.assert_index_equal(result, expected)
result = obj._data.astype(bool)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
+
+ def test_period_astype_to_timestamp(self):
+ pi = PeriodIndex(["2011-01", "2011-02", "2011-03"], freq="M")
+
+ exp = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"])
+ tm.assert_index_equal(pi.astype("datetime64[ns]"), exp)
+
+ exp = DatetimeIndex(["2011-01-31", "2011-02-28", "2011-03-31"])
+ exp = exp + Timedelta(1, "D") - Timedelta(1, "ns")
+ tm.assert_index_equal(pi.astype("datetime64[ns]", how="end"), exp)
+
+ exp = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], tz="US/Eastern")
+        res = pi.astype("datetime64[ns, US/Eastern]")
+        tm.assert_index_equal(res, exp)
+
+ exp = DatetimeIndex(["2011-01-31", "2011-02-28", "2011-03-31"], tz="US/Eastern")
+ exp = exp + Timedelta(1, "D") - Timedelta(1, "ns")
+ res = pi.astype("datetime64[ns, US/Eastern]", how="end")
+ tm.assert_index_equal(res, exp)
diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py
index dcd3c8e946e9a..fcbadce3d63b1 100644
--- a/pandas/tests/indexes/period/test_constructors.py
+++ b/pandas/tests/indexes/period/test_constructors.py
@@ -6,7 +6,16 @@
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
-from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
+from pandas import (
+ Index,
+ NaT,
+ Period,
+ PeriodIndex,
+ Series,
+ date_range,
+ offsets,
+ period_range,
+)
import pandas._testing as tm
from pandas.core.arrays import PeriodArray
@@ -14,27 +23,25 @@
class TestPeriodIndex:
def test_construction_base_constructor(self):
# GH 13664
- arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
- tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
- tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
+ arr = [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")]
+ tm.assert_index_equal(Index(arr), PeriodIndex(arr))
+ tm.assert_index_equal(Index(np.array(arr)), PeriodIndex(np.array(arr)))
- arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
- tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
- tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
+ arr = [np.nan, NaT, Period("2011-03", freq="M")]
+ tm.assert_index_equal(Index(arr), PeriodIndex(arr))
+ tm.assert_index_equal(Index(np.array(arr)), PeriodIndex(np.array(arr)))
- arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
- tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
+ arr = [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="D")]
+ tm.assert_index_equal(Index(arr), Index(arr, dtype=object))
- tm.assert_index_equal(
- pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
- )
+ tm.assert_index_equal(Index(np.array(arr)), Index(np.array(arr), dtype=object))
def test_base_constructor_with_period_dtype(self):
dtype = PeriodDtype("D")
values = ["2011-01-01", "2012-03-04", "2014-05-01"]
- result = pd.Index(values, dtype=dtype)
+ result = Index(values, dtype=dtype)
- expected = pd.PeriodIndex(values, dtype=dtype)
+ expected = PeriodIndex(values, dtype=dtype)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
@@ -43,9 +50,9 @@ def test_base_constructor_with_period_dtype(self):
def test_index_object_dtype(self, values_constructor):
# Index(periods, dtype=object) is an Index (not an PeriodIndex)
periods = [
- pd.Period("2011-01", freq="M"),
- pd.NaT,
- pd.Period("2011-03", freq="M"),
+ Period("2011-01", freq="M"),
+ NaT,
+ Period("2011-03", freq="M"),
]
values = values_constructor(periods)
result = Index(values, dtype=object)
@@ -118,8 +125,8 @@ def test_constructor_arrays_negative_year(self):
pindex = PeriodIndex(year=years, quarter=quarters)
- tm.assert_index_equal(pindex.year, pd.Index(years))
- tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
+ tm.assert_index_equal(pindex.year, Index(years))
+ tm.assert_index_equal(pindex.quarter, Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
@@ -184,7 +191,7 @@ def test_constructor_datetime64arr(self):
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
- data = pd.date_range("2017", periods=4, freq="M")
+ data = date_range("2017", periods=4, freq="M")
if box is None:
data = data._values
elif box == "series":
@@ -226,52 +233,47 @@ def test_constructor_dtype(self):
PeriodIndex(["2011-01"], freq="M", dtype="period[D]")
def test_constructor_empty(self):
- idx = pd.PeriodIndex([], freq="M")
+ idx = PeriodIndex([], freq="M")
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == "M"
with pytest.raises(ValueError, match="freq not specified"):
- pd.PeriodIndex([])
+ PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex(
- [Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")]
+ [Period("2011-01", freq="M"), NaT, Period("2011-01", freq="M")]
)
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
- np.array([Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")])
+ np.array([Period("2011-01", freq="M"), NaT, Period("2011-01", freq="M")])
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
- [pd.NaT, pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
+ [NaT, NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array(
- [
- pd.NaT,
- pd.NaT,
- Period("2011-01", freq="M"),
- Period("2011-01", freq="M"),
- ]
+ [NaT, NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
)
tm.assert_index_equal(idx, exp)
- idx = PeriodIndex([pd.NaT, pd.NaT, "2011-01", "2011-01"], freq="M")
+ idx = PeriodIndex([NaT, NaT, "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match="freq not specified"):
- PeriodIndex([pd.NaT, pd.NaT])
+ PeriodIndex([NaT, NaT])
with pytest.raises(ValueError, match="freq not specified"):
- PeriodIndex(np.array([pd.NaT, pd.NaT]))
+ PeriodIndex(np.array([NaT, NaT]))
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(["NaT", "NaT"])
@@ -283,40 +285,36 @@ def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
- PeriodIndex(
- [Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
- )
+ PeriodIndex([Period("2011-01", freq="M"), NaT, Period("2011-01", freq="D")])
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
- [Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
+ [Period("2011-01", freq="M"), NaT, Period("2011-01", freq="D")]
)
)
- # first element is pd.NaT
+ # first element is NaT
with pytest.raises(IncompatibleFrequency, match=msg):
- PeriodIndex(
- [pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
- )
+ PeriodIndex([NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")])
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
- [pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
+ [NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
)
def test_constructor_mixed(self):
- idx = PeriodIndex(["2011-01", pd.NaT, Period("2011-01", freq="M")])
+ idx = PeriodIndex(["2011-01", NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
- idx = PeriodIndex(["NaT", pd.NaT, Period("2011-01", freq="M")])
+ idx = PeriodIndex(["NaT", NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["NaT", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
- idx = PeriodIndex([Period("2011-01-01", freq="D"), pd.NaT, "2012-01-01"])
+ idx = PeriodIndex([Period("2011-01-01", freq="D"), NaT, "2012-01-01"])
exp = PeriodIndex(["2011-01-01", "NaT", "2012-01-01"], freq="D")
tm.assert_index_equal(idx, exp)
@@ -349,11 +347,11 @@ def test_constructor_simple_new_empty(self):
@pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
with pytest.raises(AssertionError, match="<class "):
- pd.PeriodIndex._simple_new(floats, freq="M")
+ PeriodIndex._simple_new(floats, freq="M")
msg = "PeriodIndex does not allow floating point in construction"
with pytest.raises(TypeError, match=msg):
- pd.PeriodIndex(floats, freq="M")
+ PeriodIndex(floats, freq="M")
def test_constructor_nat(self):
msg = "start and end must not be NaT"
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index fffc4a7562306..077fa2a0b1c56 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -7,7 +7,17 @@
from pandas._libs.tslibs import period as libperiod
import pandas as pd
-from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
+from pandas import (
+ DatetimeIndex,
+ NaT,
+ Period,
+ PeriodIndex,
+ Series,
+ Timedelta,
+ date_range,
+ notna,
+ period_range,
+)
import pandas._testing as tm
from pandas.core.indexes.base import InvalidIndexError
@@ -22,23 +32,23 @@ def test_ellipsis(self):
assert result is not idx
def test_getitem(self):
- idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
+ idx1 = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
- assert result == pd.Period("2011-01-01", freq="D")
+ assert result == Period("2011-01-01", freq="D")
result = idx[-1]
- assert result == pd.Period("2011-01-31", freq="D")
+ assert result == Period("2011-01-31", freq="D")
result = idx[0:5]
- expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
+ expected = period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
- expected = pd.PeriodIndex(
+ expected = PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
@@ -48,7 +58,7 @@ def test_getitem(self):
assert result.freq == "D"
result = idx[-20:-5:3]
- expected = pd.PeriodIndex(
+ expected = PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
@@ -71,11 +81,11 @@ def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
- exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
+ exp = PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
- exp = pd.PeriodIndex(
+ exp = PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
@@ -125,16 +135,16 @@ def test_getitem_datetime(self):
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
- idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
- assert idx[0] == pd.Period("2011-01", freq="M")
- assert idx[1] is pd.NaT
+ idx = PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
+ assert idx[0] == Period("2011-01", freq="M")
+ assert idx[1] is NaT
s = pd.Series([0, 1, 2], index=idx)
- assert s[pd.NaT] == 1
+ assert s[NaT] == 1
s = pd.Series(idx, index=idx)
- assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
- assert s[pd.NaT] is pd.NaT
+ assert s[Period("2011-01", freq="M")] == Period("2011-01", freq="M")
+ assert s[NaT] is NaT
def test_getitem_list_periods(self):
# GH 7710
@@ -145,7 +155,7 @@ def test_getitem_list_periods(self):
def test_getitem_seconds(self):
# GH#6716
- didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
+ didx = date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
@@ -174,7 +184,7 @@ def test_getitem_seconds(self):
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
- didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
+ didx = date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
@@ -216,24 +226,24 @@ def test_where(self, klass):
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
- expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
+ expected = PeriodIndex([NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
- for arr in [np.nan, pd.NaT]:
+ for arr in [np.nan, NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
- i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
+ i2 = PeriodIndex([NaT, NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
- i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
+ i2 = PeriodIndex([NaT, NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
@@ -241,7 +251,7 @@ def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
i2 = pi.copy()
- i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + pi[2:].tolist(), freq="D")
+ i2 = PeriodIndex([NaT, NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8)
@@ -256,23 +266,23 @@ def test_where_invalid_dtypes(self):
class TestTake:
def test_take(self):
# GH#10295
- idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
+ idx1 = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
- assert result == pd.Period("2011-01-01", freq="D")
+ assert result == Period("2011-01-01", freq="D")
result = idx.take([5])
- assert result == pd.Period("2011-01-06", freq="D")
+ assert result == Period("2011-01-06", freq="D")
result = idx.take([0, 1, 2])
- expected = pd.period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
+ expected = period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
- expected = pd.PeriodIndex(
+ expected = PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
@@ -280,7 +290,7 @@ def test_take(self):
assert result.freq == "D"
result = idx.take([7, 4, 1])
- expected = pd.PeriodIndex(
+ expected = PeriodIndex(
["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
@@ -327,25 +337,25 @@ def test_take_misc(self):
def test_take_fill_value(self):
# GH#12631
- idx = pd.PeriodIndex(
+ idx = PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
)
result = idx.take(np.array([1, 0, -1]))
- expected = pd.PeriodIndex(
+ expected = PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
- expected = pd.PeriodIndex(
+ expected = PeriodIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
- expected = pd.PeriodIndex(
+ expected = PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
@@ -382,20 +392,20 @@ def test_get_loc_nat(self):
# check DatetimeIndex compat
for idx in [didx, pidx]:
- assert idx.get_loc(pd.NaT) == 1
+ assert idx.get_loc(NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float("nan")) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
- p0 = pd.Period("2017-09-01")
- p1 = pd.Period("2017-09-02")
- p2 = pd.Period("2017-09-03")
+ p0 = Period("2017-09-01")
+ p1 = Period("2017-09-02")
+ p2 = Period("2017-09-03")
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
- idx0 = pd.PeriodIndex([p0, p1, p2])
+ idx0 = PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
@@ -415,7 +425,7 @@ def test_get_loc(self):
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
- idx1 = pd.PeriodIndex([p1, p1, p2])
+ idx1 = PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
@@ -436,7 +446,7 @@ def test_get_loc(self):
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
- idx2 = pd.PeriodIndex([p2, p1, p2])
+ idx2 = PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
@@ -446,7 +456,7 @@ def test_get_loc(self):
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_get_loc_integer(self):
- dti = pd.date_range("2016-01-01", periods=3)
+ dti = date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
with pytest.raises(KeyError, match="16801"):
pi.get_loc(16801)
@@ -458,7 +468,7 @@ def test_get_loc_integer(self):
@pytest.mark.parametrize("freq", ["H", "D"])
def test_get_value_datetime_hourly(self, freq):
# get_loc and get_value should treat datetime objects symmetrically
- dti = pd.date_range("2016-01-01", periods=3, freq="MS")
+ dti = date_range("2016-01-01", periods=3, freq="MS")
pi = dti.to_period(freq)
ser = pd.Series(range(7, 10), index=pi)
@@ -469,7 +479,7 @@ def test_get_value_datetime_hourly(self, freq):
assert ser[ts] == 7
assert ser.loc[ts] == 7
- ts2 = ts + pd.Timedelta(hours=3)
+ ts2 = ts + Timedelta(hours=3)
if freq == "H":
with pytest.raises(KeyError, match="2016-01-01 03:00"):
pi.get_loc(ts2)
@@ -487,7 +497,7 @@ def test_get_value_datetime_hourly(self, freq):
def test_get_value_integer(self):
msg = "index 16801 is out of bounds for axis 0 with size 3"
- dti = pd.date_range("2016-01-01", periods=3)
+ dti = date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
ser = pd.Series(range(3), index=pi)
with pytest.raises(IndexError, match=msg):
@@ -501,15 +511,15 @@ def test_get_value_integer(self):
def test_is_monotonic_increasing(self):
# GH 17717
- p0 = pd.Period("2017-09-01")
- p1 = pd.Period("2017-09-02")
- p2 = pd.Period("2017-09-03")
+ p0 = Period("2017-09-01")
+ p1 = Period("2017-09-02")
+ p2 = Period("2017-09-03")
- idx_inc0 = pd.PeriodIndex([p0, p1, p2])
- idx_inc1 = pd.PeriodIndex([p0, p1, p1])
- idx_dec0 = pd.PeriodIndex([p2, p1, p0])
- idx_dec1 = pd.PeriodIndex([p2, p1, p1])
- idx = pd.PeriodIndex([p1, p2, p0])
+ idx_inc0 = PeriodIndex([p0, p1, p2])
+ idx_inc1 = PeriodIndex([p0, p1, p1])
+ idx_dec0 = PeriodIndex([p2, p1, p0])
+ idx_dec1 = PeriodIndex([p2, p1, p1])
+ idx = PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_increasing is True
assert idx_inc1.is_monotonic_increasing is True
@@ -519,15 +529,15 @@ def test_is_monotonic_increasing(self):
def test_is_monotonic_decreasing(self):
# GH 17717
- p0 = pd.Period("2017-09-01")
- p1 = pd.Period("2017-09-02")
- p2 = pd.Period("2017-09-03")
+ p0 = Period("2017-09-01")
+ p1 = Period("2017-09-02")
+ p2 = Period("2017-09-03")
- idx_inc0 = pd.PeriodIndex([p0, p1, p2])
- idx_inc1 = pd.PeriodIndex([p0, p1, p1])
- idx_dec0 = pd.PeriodIndex([p2, p1, p0])
- idx_dec1 = pd.PeriodIndex([p2, p1, p1])
- idx = pd.PeriodIndex([p1, p2, p0])
+ idx_inc0 = PeriodIndex([p0, p1, p2])
+ idx_inc1 = PeriodIndex([p0, p1, p1])
+ idx_dec0 = PeriodIndex([p2, p1, p0])
+ idx_dec1 = PeriodIndex([p2, p1, p1])
+ idx = PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_decreasing is False
assert idx_inc1.is_monotonic_decreasing is False
@@ -537,13 +547,13 @@ def test_is_monotonic_decreasing(self):
def test_contains(self):
# GH 17717
- p0 = pd.Period("2017-09-01")
- p1 = pd.Period("2017-09-02")
- p2 = pd.Period("2017-09-03")
- p3 = pd.Period("2017-09-04")
+ p0 = Period("2017-09-01")
+ p1 = Period("2017-09-02")
+ p2 = Period("2017-09-03")
+ p3 = Period("2017-09-04")
ps0 = [p0, p1, p2]
- idx0 = pd.PeriodIndex(ps0)
+ idx0 = PeriodIndex(ps0)
ser = pd.Series(range(6, 9), index=idx0)
for p in ps0:
@@ -565,25 +575,25 @@ def test_contains(self):
def test_get_value(self):
# GH 17717
- p0 = pd.Period("2017-09-01")
- p1 = pd.Period("2017-09-02")
- p2 = pd.Period("2017-09-03")
+ p0 = Period("2017-09-01")
+ p1 = Period("2017-09-02")
+ p2 = Period("2017-09-03")
- idx0 = pd.PeriodIndex([p0, p1, p2])
+ idx0 = PeriodIndex([p0, p1, p2])
input0 = pd.Series(np.array([1, 2, 3]), index=idx0)
expected0 = 2
result0 = idx0.get_value(input0, p1)
assert result0 == expected0
- idx1 = pd.PeriodIndex([p1, p1, p2])
+ idx1 = PeriodIndex([p1, p1, p2])
input1 = pd.Series(np.array([1, 2, 3]), index=idx1)
expected1 = input1.iloc[[0, 1]]
result1 = idx1.get_value(input1, p1)
tm.assert_series_equal(result1, expected1)
- idx2 = pd.PeriodIndex([p1, p2, p1])
+ idx2 = PeriodIndex([p1, p2, p1])
input2 = pd.Series(np.array([1, 2, 3]), index=idx2)
expected2 = input2.iloc[[0, 2]]
@@ -592,22 +602,22 @@ def test_get_value(self):
def test_get_indexer(self):
# GH 17717
- p1 = pd.Period("2017-09-01")
- p2 = pd.Period("2017-09-04")
- p3 = pd.Period("2017-09-07")
+ p1 = Period("2017-09-01")
+ p2 = Period("2017-09-04")
+ p3 = Period("2017-09-07")
- tp0 = pd.Period("2017-08-31")
- tp1 = pd.Period("2017-09-02")
- tp2 = pd.Period("2017-09-05")
- tp3 = pd.Period("2017-09-09")
+ tp0 = Period("2017-08-31")
+ tp1 = Period("2017-09-02")
+ tp2 = Period("2017-09-05")
+ tp3 = Period("2017-09-09")
- idx = pd.PeriodIndex([p1, p2, p3])
+ idx = PeriodIndex([p1, p2, p3])
tm.assert_numpy_array_equal(
idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
)
- target = pd.PeriodIndex([tp0, tp1, tp2, tp3])
+ target = PeriodIndex([tp0, tp1, tp2, tp3])
tm.assert_numpy_array_equal(
idx.get_indexer(target, "pad"), np.array([-1, 0, 1, 2], dtype=np.intp)
)
@@ -618,13 +628,13 @@ def test_get_indexer(self):
idx.get_indexer(target, "nearest"), np.array([0, 0, 1, 2], dtype=np.intp)
)
- res = idx.get_indexer(target, "nearest", tolerance=pd.Timedelta("1 day"))
+ res = idx.get_indexer(target, "nearest", tolerance=Timedelta("1 day"))
tm.assert_numpy_array_equal(res, np.array([0, 0, 1, -1], dtype=np.intp))
def test_get_indexer_mismatched_dtype(self):
# Check that we return all -1s and do not raise or cast incorrectly
- dti = pd.date_range("2016-01-01", periods=3)
+ dti = date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
pi2 = dti.to_period("W")
@@ -652,13 +662,13 @@ def test_get_indexer_mismatched_dtype(self):
def test_get_indexer_non_unique(self):
# GH 17717
- p1 = pd.Period("2017-09-02")
- p2 = pd.Period("2017-09-03")
- p3 = pd.Period("2017-09-04")
- p4 = pd.Period("2017-09-05")
+ p1 = Period("2017-09-02")
+ p2 = Period("2017-09-03")
+ p3 = Period("2017-09-04")
+ p4 = Period("2017-09-05")
- idx1 = pd.PeriodIndex([p1, p2, p1])
- idx2 = pd.PeriodIndex([p2, p1, p3, p4])
+ idx1 = PeriodIndex([p1, p2, p1])
+ idx2 = PeriodIndex([p2, p1, p3, p4])
result = idx1.get_indexer_non_unique(idx2)
expected_indexer = np.array([1, 0, 2, -1, -1], dtype=np.intp)
@@ -669,7 +679,7 @@ def test_get_indexer_non_unique(self):
# TODO: This method came from test_period; de-dup with version above
def test_get_loc2(self):
- idx = pd.period_range("2000-01-01", periods=3)
+ idx = period_range("2000-01-01", periods=3)
for method in [None, "pad", "backfill", "nearest"]:
assert idx.get_loc(idx[1], method) == 1
@@ -678,10 +688,10 @@ def test_get_loc2(self):
assert idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
- idx = pd.period_range("2000-01-01", periods=5)[::2]
+ idx = period_range("2000-01-01", periods=5)[::2]
assert idx.get_loc("2000-01-02T12", method="nearest", tolerance="1 day") == 1
assert (
- idx.get_loc("2000-01-02T12", method="nearest", tolerance=pd.Timedelta("1D"))
+ idx.get_loc("2000-01-02T12", method="nearest", tolerance=Timedelta("1D"))
== 1
)
assert (
@@ -710,19 +720,19 @@ def test_get_loc2(self):
"2000-01-10",
method="nearest",
tolerance=[
- pd.Timedelta("1 day").to_timedelta64(),
- pd.Timedelta("1 day").to_timedelta64(),
+ Timedelta("1 day").to_timedelta64(),
+ Timedelta("1 day").to_timedelta64(),
],
)
# TODO: This method came from test_period; de-dup with version above
def test_get_indexer2(self):
- idx = pd.period_range("2000-01-01", periods=3).asfreq("H", how="start")
+ idx = period_range("2000-01-01", periods=3).asfreq("H", how="start")
tm.assert_numpy_array_equal(
idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
)
- target = pd.PeriodIndex(
+ target = PeriodIndex(
["1999-12-31T23", "2000-01-01T12", "2000-01-02T01"], freq="H"
)
tm.assert_numpy_array_equal(
@@ -748,8 +758,8 @@ def test_get_indexer2(self):
np.array([0, 1, 1], dtype=np.intp),
)
tol_raw = [
- pd.Timedelta("1 hour"),
- pd.Timedelta("1 hour"),
+ Timedelta("1 hour"),
+ Timedelta("1 hour"),
np.timedelta64(1, "D"),
]
tm.assert_numpy_array_equal(
@@ -759,8 +769,8 @@ def test_get_indexer2(self):
np.array([0, -1, 1], dtype=np.intp),
)
tol_bad = [
- pd.Timedelta("2 hour").to_timedelta64(),
- pd.Timedelta("1 hour").to_timedelta64(),
+ Timedelta("2 hour").to_timedelta64(),
+ Timedelta("1 hour").to_timedelta64(),
np.timedelta64(1, "M"),
]
with pytest.raises(
@@ -778,7 +788,7 @@ def test_indexing(self):
def test_period_index_indexer(self):
# GH4125
- idx = pd.period_range("2002-01", "2003-12", freq="M")
+ idx = period_range("2002-01", "2003-12", freq="M")
df = pd.DataFrame(np.random.randn(24, 10), index=idx)
tm.assert_frame_equal(df, df.loc[idx])
tm.assert_frame_equal(df, df.loc[list(idx)])
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index 2e4bed598b807..196946e696c8d 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -266,10 +266,6 @@ def test_order(self):
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq == "D"
- def test_shift(self):
- # This is tested in test_arithmetic
- pass
-
def test_nat(self):
assert pd.PeriodIndex._na_value is NaT
assert pd.PeriodIndex([], freq="M")._na_value is NaT
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 248df3291f040..4db93e850f579 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -55,9 +55,9 @@ def test_where(self):
@pytest.mark.parametrize(
"index",
[
- pd.period_range("2000-01-01", periods=3, freq="D"),
- pd.period_range("2001-01-01", periods=3, freq="2D"),
- pd.PeriodIndex(["2001-01", "NaT", "2003-01"], freq="M"),
+ period_range("2000-01-01", periods=3, freq="D"),
+ period_range("2001-01-01", periods=3, freq="2D"),
+ PeriodIndex(["2001-01", "NaT", "2003-01"], freq="M"),
],
)
def test_repeat_freqstr(self, index, use_numpy):
@@ -69,32 +69,32 @@ def test_repeat_freqstr(self, index, use_numpy):
def test_fillna_period(self):
# GH 11343
- idx = pd.PeriodIndex(["2011-01-01 09:00", pd.NaT, "2011-01-01 11:00"], freq="H")
+ idx = PeriodIndex(["2011-01-01 09:00", NaT, "2011-01-01 11:00"], freq="H")
- exp = pd.PeriodIndex(
+ exp = PeriodIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], freq="H"
)
- tm.assert_index_equal(idx.fillna(pd.Period("2011-01-01 10:00", freq="H")), exp)
+ tm.assert_index_equal(idx.fillna(Period("2011-01-01 10:00", freq="H")), exp)
- exp = pd.Index(
+ exp = Index(
[
- pd.Period("2011-01-01 09:00", freq="H"),
+ Period("2011-01-01 09:00", freq="H"),
"x",
- pd.Period("2011-01-01 11:00", freq="H"),
+ Period("2011-01-01 11:00", freq="H"),
],
dtype=object,
)
tm.assert_index_equal(idx.fillna("x"), exp)
- exp = pd.Index(
+ exp = Index(
[
- pd.Period("2011-01-01 09:00", freq="H"),
- pd.Period("2011-01-01", freq="D"),
- pd.Period("2011-01-01 11:00", freq="H"),
+ Period("2011-01-01 09:00", freq="H"),
+ Period("2011-01-01", freq="D"),
+ Period("2011-01-01 11:00", freq="H"),
],
dtype=object,
)
- tm.assert_index_equal(idx.fillna(pd.Period("2011-01-01", freq="D")), exp)
+ tm.assert_index_equal(idx.fillna(Period("2011-01-01", freq="D")), exp)
def test_no_millisecond_field(self):
msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
@@ -138,25 +138,25 @@ def test_shallow_copy_changing_freq_raises(self):
pi._shallow_copy(pi, freq="H")
def test_view_asi8(self):
- idx = pd.PeriodIndex([], freq="M")
+ idx = PeriodIndex([], freq="M")
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view("i8"), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
- idx = pd.PeriodIndex(["2011-01", pd.NaT], freq="M")
+ idx = PeriodIndex(["2011-01", NaT], freq="M")
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view("i8"), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
- idx = pd.PeriodIndex(["2011-01-01", pd.NaT], freq="D")
+ idx = PeriodIndex(["2011-01-01", NaT], freq="D")
tm.assert_numpy_array_equal(idx.view("i8"), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
def test_values(self):
- idx = pd.PeriodIndex([], freq="M")
+ idx = PeriodIndex([], freq="M")
exp = np.array([], dtype=np.object)
tm.assert_numpy_array_equal(idx.values, exp)
@@ -165,17 +165,17 @@ def test_values(self):
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx._ndarray_values, exp)
- idx = pd.PeriodIndex(["2011-01", pd.NaT], freq="M")
+ idx = PeriodIndex(["2011-01", NaT], freq="M")
- exp = np.array([pd.Period("2011-01", freq="M"), pd.NaT], dtype=object)
+ exp = np.array([Period("2011-01", freq="M"), NaT], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.to_numpy(), exp)
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._ndarray_values, exp)
- idx = pd.PeriodIndex(["2011-01-01", pd.NaT], freq="D")
+ idx = PeriodIndex(["2011-01-01", NaT], freq="D")
- exp = np.array([pd.Period("2011-01-01", freq="D"), pd.NaT], dtype=object)
+ exp = np.array([Period("2011-01-01", freq="D"), NaT], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.to_numpy(), exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
@@ -371,7 +371,7 @@ def test_factorize(self):
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
- idx2 = pd.PeriodIndex(
+ idx2 = PeriodIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"], freq="M"
)
@@ -414,13 +414,13 @@ def test_contains(self):
def test_contains_nat(self):
# see gh-13582
idx = period_range("2007-01", freq="M", periods=10)
- assert pd.NaT not in idx
+ assert NaT not in idx
assert None not in idx
assert float("nan") not in idx
assert np.nan not in idx
- idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
- assert pd.NaT in idx
+ idx = PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
+ assert NaT in idx
assert None in idx
assert float("nan") in idx
assert np.nan in idx
@@ -433,19 +433,6 @@ def test_periods_number_check(self):
with pytest.raises(ValueError, match=msg):
period_range("2011-1-1", "2012-1-1", "B")
- def test_start_time(self):
- # GH 17157
- index = period_range(freq="M", start="2016-01-01", end="2016-05-31")
- expected_index = date_range("2016-01-01", end="2016-05-31", freq="MS")
- tm.assert_index_equal(index.start_time, expected_index)
-
- def test_end_time(self):
- # GH 17157
- index = period_range(freq="M", start="2016-01-01", end="2016-05-31")
- expected_index = date_range("2016-01-01", end="2016-05-31", freq="M")
- expected_index = expected_index.shift(1, freq="D").shift(-1, freq="ns")
- tm.assert_index_equal(index.end_time, expected_index)
-
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN")
@@ -565,7 +552,7 @@ def test_convert_array_of_periods(self):
rng = period_range("1/1/2000", periods=20, freq="D")
periods = list(rng)
- result = pd.Index(periods)
+ result = Index(periods)
assert isinstance(result, PeriodIndex)
def test_append_concat(self):
@@ -606,10 +593,8 @@ def test_join_self(self, join_type):
def test_insert(self):
# GH 18295 (test missing)
- expected = PeriodIndex(
- ["2017Q1", pd.NaT, "2017Q2", "2017Q3", "2017Q4"], freq="Q"
- )
- for na in (np.nan, pd.NaT, None):
+ expected = PeriodIndex(["2017Q1", NaT, "2017Q2", "2017Q3", "2017Q4"], freq="Q")
+ for na in (np.nan, NaT, None):
result = period_range("2017Q1", periods=4, freq="Q").insert(1, na)
tm.assert_index_equal(result, expected)
@@ -668,36 +653,36 @@ def test_is_monotonic_with_nat():
# GH#31437
# PeriodIndex.is_monotonic should behave analogously to DatetimeIndex,
# in particular never be monotonic when we have NaT
- dti = pd.date_range("2016-01-01", periods=3)
+ dti = date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
- tdi = pd.Index(dti.view("timedelta64[ns]"))
+ tdi = Index(dti.view("timedelta64[ns]"))
for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]:
- if isinstance(obj, pd.Index):
+ if isinstance(obj, Index):
# i.e. not Engines
assert obj.is_monotonic
assert obj.is_monotonic_increasing
assert not obj.is_monotonic_decreasing
assert obj.is_unique
- dti1 = dti.insert(0, pd.NaT)
+ dti1 = dti.insert(0, NaT)
pi1 = dti1.to_period("D")
- tdi1 = pd.Index(dti1.view("timedelta64[ns]"))
+ tdi1 = Index(dti1.view("timedelta64[ns]"))
for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]:
- if isinstance(obj, pd.Index):
+ if isinstance(obj, Index):
# i.e. not Engines
assert not obj.is_monotonic
assert not obj.is_monotonic_increasing
assert not obj.is_monotonic_decreasing
assert obj.is_unique
- dti2 = dti.insert(3, pd.NaT)
+ dti2 = dti.insert(3, NaT)
pi2 = dti2.to_period("H")
- tdi2 = pd.Index(dti2.view("timedelta64[ns]"))
+ tdi2 = Index(dti2.view("timedelta64[ns]"))
for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]:
- if isinstance(obj, pd.Index):
+ if isinstance(obj, Index):
# i.e. not Engines
assert not obj.is_monotonic
assert not obj.is_monotonic_increasing
diff --git a/pandas/tests/indexes/period/test_scalar_compat.py b/pandas/tests/indexes/period/test_scalar_compat.py
index d9809f0f75611..0f92b7a4e168b 100644
--- a/pandas/tests/indexes/period/test_scalar_compat.py
+++ b/pandas/tests/indexes/period/test_scalar_compat.py
@@ -6,11 +6,13 @@
class TestPeriodIndexOps:
def test_start_time(self):
+ # GH#17157
index = period_range(freq="M", start="2016-01-01", end="2016-05-31")
expected_index = date_range("2016-01-01", end="2016-05-31", freq="MS")
tm.assert_index_equal(index.start_time, expected_index)
def test_end_time(self):
+ # GH#17157
index = period_range(freq="M", start="2016-01-01", end="2016-05-31")
expected_index = date_range("2016-01-01", end="2016-05-31", freq="M")
expected_index += Timedelta(1, "D") - Timedelta(1, "ns")
diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py
index dc7805880784f..6f254b7b4408d 100644
--- a/pandas/tests/indexes/period/test_setops.py
+++ b/pandas/tests/indexes/period/test_setops.py
@@ -44,9 +44,9 @@ def test_join_does_not_recur(self):
@pytest.mark.parametrize("sort", [None, False])
def test_union(self, sort):
# union
- other1 = pd.period_range("1/1/2000", freq="D", periods=5)
- rng1 = pd.period_range("1/6/2000", freq="D", periods=5)
- expected1 = pd.PeriodIndex(
+ other1 = period_range("1/1/2000", freq="D", periods=5)
+ rng1 = period_range("1/6/2000", freq="D", periods=5)
+ expected1 = PeriodIndex(
[
"2000-01-06",
"2000-01-07",
@@ -62,17 +62,17 @@ def test_union(self, sort):
freq="D",
)
- rng2 = pd.period_range("1/1/2000", freq="D", periods=5)
- other2 = pd.period_range("1/4/2000", freq="D", periods=5)
- expected2 = pd.period_range("1/1/2000", freq="D", periods=8)
+ rng2 = period_range("1/1/2000", freq="D", periods=5)
+ other2 = period_range("1/4/2000", freq="D", periods=5)
+ expected2 = period_range("1/1/2000", freq="D", periods=8)
- rng3 = pd.period_range("1/1/2000", freq="D", periods=5)
- other3 = pd.PeriodIndex([], freq="D")
- expected3 = pd.period_range("1/1/2000", freq="D", periods=5)
+ rng3 = period_range("1/1/2000", freq="D", periods=5)
+ other3 = PeriodIndex([], freq="D")
+ expected3 = period_range("1/1/2000", freq="D", periods=5)
- rng4 = pd.period_range("2000-01-01 09:00", freq="H", periods=5)
- other4 = pd.period_range("2000-01-02 09:00", freq="H", periods=5)
- expected4 = pd.PeriodIndex(
+ rng4 = period_range("2000-01-01 09:00", freq="H", periods=5)
+ other4 = period_range("2000-01-02 09:00", freq="H", periods=5)
+ expected4 = PeriodIndex(
[
"2000-01-01 09:00",
"2000-01-01 10:00",
@@ -88,13 +88,13 @@ def test_union(self, sort):
freq="H",
)
- rng5 = pd.PeriodIndex(
+ rng5 = PeriodIndex(
["2000-01-01 09:01", "2000-01-01 09:03", "2000-01-01 09:05"], freq="T"
)
- other5 = pd.PeriodIndex(
+ other5 = PeriodIndex(
["2000-01-01 09:01", "2000-01-01 09:05", "2000-01-01 09:08"], freq="T"
)
- expected5 = pd.PeriodIndex(
+ expected5 = PeriodIndex(
[
"2000-01-01 09:01",
"2000-01-01 09:03",
@@ -104,13 +104,13 @@ def test_union(self, sort):
freq="T",
)
- rng6 = pd.period_range("2000-01-01", freq="M", periods=7)
- other6 = pd.period_range("2000-04-01", freq="M", periods=7)
- expected6 = pd.period_range("2000-01-01", freq="M", periods=10)
+ rng6 = period_range("2000-01-01", freq="M", periods=7)
+ other6 = period_range("2000-04-01", freq="M", periods=7)
+ expected6 = period_range("2000-01-01", freq="M", periods=10)
- rng7 = pd.period_range("2003-01-01", freq="A", periods=5)
- other7 = pd.period_range("1998-01-01", freq="A", periods=8)
- expected7 = pd.PeriodIndex(
+ rng7 = period_range("2003-01-01", freq="A", periods=5)
+ other7 = period_range("1998-01-01", freq="A", periods=8)
+ expected7 = PeriodIndex(
[
"2003",
"2004",
@@ -126,11 +126,11 @@ def test_union(self, sort):
freq="A",
)
- rng8 = pd.PeriodIndex(
+ rng8 = PeriodIndex(
["1/3/2000", "1/2/2000", "1/1/2000", "1/5/2000", "1/4/2000"], freq="D"
)
- other8 = pd.period_range("1/6/2000", freq="D", periods=5)
- expected8 = pd.PeriodIndex(
+ other8 = period_range("1/6/2000", freq="D", periods=5)
+ expected8 = PeriodIndex(
[
"1/3/2000",
"1/2/2000",
@@ -185,15 +185,16 @@ def test_union_misc(self, sort):
with pytest.raises(IncompatibleFrequency):
index.join(index3)
+ # TODO: belongs elsewhere
def test_union_dataframe_index(self):
- rng1 = pd.period_range("1/1/1999", "1/1/2012", freq="M")
+ rng1 = period_range("1/1/1999", "1/1/2012", freq="M")
s1 = pd.Series(np.random.randn(len(rng1)), rng1)
- rng2 = pd.period_range("1/1/1980", "12/1/2001", freq="M")
+ rng2 = period_range("1/1/1980", "12/1/2001", freq="M")
s2 = pd.Series(np.random.randn(len(rng2)), rng2)
df = pd.DataFrame({"s1": s1, "s2": s2})
- exp = pd.period_range("1/1/1980", "1/1/2012", freq="M")
+ exp = period_range("1/1/1980", "1/1/2012", freq="M")
tm.assert_index_equal(df.index, exp)
@pytest.mark.parametrize("sort", [None, False])
@@ -294,16 +295,16 @@ def test_intersection_cases(self, sort):
def test_difference(self, sort):
# diff
period_rng = ["1/3/2000", "1/2/2000", "1/1/2000", "1/5/2000", "1/4/2000"]
- rng1 = pd.PeriodIndex(period_rng, freq="D")
- other1 = pd.period_range("1/6/2000", freq="D", periods=5)
+ rng1 = PeriodIndex(period_rng, freq="D")
+ other1 = period_range("1/6/2000", freq="D", periods=5)
expected1 = rng1
- rng2 = pd.PeriodIndex(period_rng, freq="D")
- other2 = pd.period_range("1/4/2000", freq="D", periods=5)
- expected2 = pd.PeriodIndex(["1/3/2000", "1/2/2000", "1/1/2000"], freq="D")
+ rng2 = PeriodIndex(period_rng, freq="D")
+ other2 = period_range("1/4/2000", freq="D", periods=5)
+ expected2 = PeriodIndex(["1/3/2000", "1/2/2000", "1/1/2000"], freq="D")
- rng3 = pd.PeriodIndex(period_rng, freq="D")
- other3 = pd.PeriodIndex([], freq="D")
+ rng3 = PeriodIndex(period_rng, freq="D")
+ other3 = PeriodIndex([], freq="D")
expected3 = rng3
period_rng = [
@@ -313,15 +314,15 @@ def test_difference(self, sort):
"2000-01-01 11:00",
"2000-01-01 13:00",
]
- rng4 = pd.PeriodIndex(period_rng, freq="H")
- other4 = pd.period_range("2000-01-02 09:00", freq="H", periods=5)
+ rng4 = PeriodIndex(period_rng, freq="H")
+ other4 = period_range("2000-01-02 09:00", freq="H", periods=5)
expected4 = rng4
- rng5 = pd.PeriodIndex(
+ rng5 = PeriodIndex(
["2000-01-01 09:03", "2000-01-01 09:01", "2000-01-01 09:05"], freq="T"
)
- other5 = pd.PeriodIndex(["2000-01-01 09:01", "2000-01-01 09:05"], freq="T")
- expected5 = pd.PeriodIndex(["2000-01-01 09:03"], freq="T")
+ other5 = PeriodIndex(["2000-01-01 09:01", "2000-01-01 09:05"], freq="T")
+ expected5 = PeriodIndex(["2000-01-01 09:03"], freq="T")
period_rng = [
"2000-02-01",
@@ -332,14 +333,14 @@ def test_difference(self, sort):
"2000-03-01",
"2000-04-01",
]
- rng6 = pd.PeriodIndex(period_rng, freq="M")
- other6 = pd.period_range("2000-04-01", freq="M", periods=7)
- expected6 = pd.PeriodIndex(["2000-02-01", "2000-01-01", "2000-03-01"], freq="M")
+ rng6 = PeriodIndex(period_rng, freq="M")
+ other6 = period_range("2000-04-01", freq="M", periods=7)
+ expected6 = PeriodIndex(["2000-02-01", "2000-01-01", "2000-03-01"], freq="M")
period_rng = ["2003", "2007", "2006", "2005", "2004"]
- rng7 = pd.PeriodIndex(period_rng, freq="A")
- other7 = pd.period_range("1998-01-01", freq="A", periods=8)
- expected7 = pd.PeriodIndex(["2007", "2006"], freq="A")
+ rng7 = PeriodIndex(period_rng, freq="A")
+ other7 = period_range("1998-01-01", freq="A", periods=8)
+ expected7 = PeriodIndex(["2007", "2006"], freq="A")
for rng, other, expected in [
(rng1, other1, expected1),
diff --git a/pandas/tests/indexes/period/test_shift.py b/pandas/tests/indexes/period/test_shift.py
index 5689e98c33455..b4c9810f3a554 100644
--- a/pandas/tests/indexes/period/test_shift.py
+++ b/pandas/tests/indexes/period/test_shift.py
@@ -1,7 +1,6 @@
import numpy as np
import pytest
-import pandas as pd
from pandas import PeriodIndex, period_range
import pandas._testing as tm
@@ -62,7 +61,7 @@ def test_shift(self):
def test_shift_corner_cases(self):
# GH#9903
- idx = pd.PeriodIndex([], name="xxx", freq="H")
+ idx = PeriodIndex([], name="xxx", freq="H")
with pytest.raises(TypeError):
# period shift doesn't accept freq
@@ -71,19 +70,19 @@ def test_shift_corner_cases(self):
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
- idx = pd.PeriodIndex(
+ idx = PeriodIndex(
["2011-01-01 10:00", "2011-01-01 11:00", "2011-01-01 12:00"],
name="xxx",
freq="H",
)
tm.assert_index_equal(idx.shift(0), idx)
- exp = pd.PeriodIndex(
+ exp = PeriodIndex(
["2011-01-01 13:00", "2011-01-01 14:00", "2011-01-01 15:00"],
name="xxx",
freq="H",
)
tm.assert_index_equal(idx.shift(3), exp)
- exp = pd.PeriodIndex(
+ exp = PeriodIndex(
["2011-01-01 07:00", "2011-01-01 08:00", "2011-01-01 09:00"],
name="xxx",
freq="H",
@@ -104,7 +103,7 @@ def test_shift_nat(self):
def test_shift_gh8083(self):
# test shift for PeriodIndex
# GH#8083
- drange = pd.period_range("20130101", periods=5, freq="D")
+ drange = period_range("20130101", periods=5, freq="D")
result = drange.shift(1)
expected = PeriodIndex(
["2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05", "2013-01-06"],
diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py
index 23350fdff4b78..dae220006ebe0 100644
--- a/pandas/tests/indexes/period/test_tools.py
+++ b/pandas/tests/indexes/period/test_tools.py
@@ -1,22 +1,19 @@
-from datetime import datetime, timedelta
+from datetime import datetime
import numpy as np
import pytest
from pandas._libs.tslibs import IncompatibleFrequency
-from pandas._libs.tslibs.ccalendar import MONTHS
-import pandas as pd
from pandas import (
DatetimeIndex,
+ NaT,
Period,
PeriodIndex,
- Series,
Timedelta,
Timestamp,
date_range,
period_range,
- to_datetime,
)
import pandas._testing as tm
@@ -42,207 +39,33 @@ def test_monthly(self):
def test_freq(self, freq):
self._check_freq(freq, "1970-01-01")
- def test_negone_ordinals(self):
- freqs = ["A", "M", "Q", "D", "H", "T", "S"]
-
- period = Period(ordinal=-1, freq="D")
- for freq in freqs:
- repr(period.asfreq(freq))
-
- for freq in freqs:
- period = Period(ordinal=-1, freq=freq)
- repr(period)
- assert period.year == 1969
-
- period = Period(ordinal=-1, freq="B")
- repr(period)
- period = Period(ordinal=-1, freq="W")
- repr(period)
-
-
-class TestPeriodIndex:
- def test_to_timestamp(self):
- index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
- series = Series(1, index=index, name="foo")
-
- exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
- result = series.to_timestamp(how="end")
- exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
- assert result.name == "foo"
-
- exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
- result = series.to_timestamp(how="start")
- tm.assert_index_equal(result.index, exp_index)
-
- def _get_with_delta(delta, freq="A-DEC"):
- return date_range(
- to_datetime("1/1/2001") + delta,
- to_datetime("12/31/2009") + delta,
- freq=freq,
- )
-
- delta = timedelta(hours=23)
- result = series.to_timestamp("H", "end")
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
-
- delta = timedelta(hours=23, minutes=59)
- result = series.to_timestamp("T", "end")
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
-
- result = series.to_timestamp("S", "end")
- delta = timedelta(hours=23, minutes=59, seconds=59)
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
-
- index = period_range(freq="H", start="1/1/2001", end="1/2/2001")
- series = Series(1, index=index, name="foo")
-
- exp_index = date_range("1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="H")
- result = series.to_timestamp(how="end")
- exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
- assert result.name == "foo"
-
- def test_to_timestamp_freq(self):
- idx = pd.period_range("2017", periods=12, freq="A-DEC")
- result = idx.to_timestamp()
- expected = pd.date_range("2017", periods=12, freq="AS-JAN")
- tm.assert_index_equal(result, expected)
-
- def test_to_timestamp_repr_is_code(self):
- zs = [
- Timestamp("99-04-17 00:00:00", tz="UTC"),
- Timestamp("2001-04-17 00:00:00", tz="UTC"),
- Timestamp("2001-04-17 00:00:00", tz="America/Los_Angeles"),
- Timestamp("2001-04-17 00:00:00", tz=None),
- ]
- for z in zs:
- assert eval(repr(z)) == z
-
- def test_to_timestamp_to_period_astype(self):
- idx = DatetimeIndex([pd.NaT, "2011-01-01", "2011-02-01"], name="idx")
-
- res = idx.astype("period[M]")
- exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="M", name="idx")
- tm.assert_index_equal(res, exp)
-
- res = idx.astype("period[3M]")
- exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="3M", name="idx")
- tm.assert_index_equal(res, exp)
-
- def test_dti_to_period(self):
- dti = pd.date_range(start="1/1/2005", end="12/1/2005", freq="M")
- pi1 = dti.to_period()
- pi2 = dti.to_period(freq="D")
- pi3 = dti.to_period(freq="3D")
-
- assert pi1[0] == Period("Jan 2005", freq="M")
- assert pi2[0] == Period("1/31/2005", freq="D")
- assert pi3[0] == Period("1/31/2005", freq="3D")
-
- assert pi1[-1] == Period("Nov 2005", freq="M")
- assert pi2[-1] == Period("11/30/2005", freq="D")
- assert pi3[-1], Period("11/30/2005", freq="3D")
-
- tm.assert_index_equal(pi1, period_range("1/1/2005", "11/1/2005", freq="M"))
- tm.assert_index_equal(
- pi2, period_range("1/1/2005", "11/1/2005", freq="M").asfreq("D")
- )
- tm.assert_index_equal(
- pi3, period_range("1/1/2005", "11/1/2005", freq="M").asfreq("3D")
- )
-
- @pytest.mark.parametrize("month", MONTHS)
- def test_to_period_quarterly(self, month):
- # make sure we can make the round trip
- freq = "Q-{month}".format(month=month)
- rng = period_range("1989Q3", "1991Q3", freq=freq)
- stamps = rng.to_timestamp()
- result = stamps.to_period(freq)
- tm.assert_index_equal(rng, result)
-
- @pytest.mark.parametrize("off", ["BQ", "QS", "BQS"])
- def test_to_period_quarterlyish(self, off):
- rng = date_range("01-Jan-2012", periods=8, freq=off)
- prng = rng.to_period()
- assert prng.freq == "Q-DEC"
-
- @pytest.mark.parametrize("off", ["BA", "AS", "BAS"])
- def test_to_period_annualish(self, off):
- rng = date_range("01-Jan-2012", periods=8, freq=off)
- prng = rng.to_period()
- assert prng.freq == "A-DEC"
-
- def test_to_period_monthish(self):
- offsets = ["MS", "BM"]
- for off in offsets:
- rng = date_range("01-Jan-2012", periods=8, freq=off)
- prng = rng.to_period()
- assert prng.freq == "M"
-
- rng = date_range("01-Jan-2012", periods=8, freq="M")
- prng = rng.to_period()
- assert prng.freq == "M"
-
- msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG
- with pytest.raises(ValueError, match=msg):
- date_range("01-Jan-2012", periods=8, freq="EOM")
-
- def test_period_dt64_round_trip(self):
- dti = date_range("1/1/2000", "1/7/2002", freq="B")
- pi = dti.to_period()
- tm.assert_index_equal(pi.to_timestamp(), dti)
-
- dti = date_range("1/1/2000", "1/7/2002", freq="B")
- pi = dti.to_period(freq="H")
- tm.assert_index_equal(pi.to_timestamp(), dti)
-
- def test_combine_first(self):
- # GH#3367
- didx = pd.date_range(start="1950-01-31", end="1950-07-31", freq="M")
- pidx = pd.period_range(
- start=pd.Period("1950-1"), end=pd.Period("1950-7"), freq="M"
- )
- # check to be consistent with DatetimeIndex
- for idx in [didx, pidx]:
- a = pd.Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)
- b = pd.Series([9, 9, 9, 9, 9, 9, 9], index=idx)
-
- result = a.combine_first(b)
- expected = pd.Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64)
- tm.assert_series_equal(result, expected)
+class TestSearchsorted:
@pytest.mark.parametrize("freq", ["D", "2D"])
def test_searchsorted(self, freq):
- pidx = pd.PeriodIndex(
+ pidx = PeriodIndex(
["2014-01-01", "2014-01-02", "2014-01-03", "2014-01-04", "2014-01-05"],
freq=freq,
)
- p1 = pd.Period("2014-01-01", freq=freq)
+ p1 = Period("2014-01-01", freq=freq)
assert pidx.searchsorted(p1) == 0
- p2 = pd.Period("2014-01-04", freq=freq)
+ p2 = Period("2014-01-04", freq=freq)
assert pidx.searchsorted(p2) == 3
- assert pidx.searchsorted(pd.NaT) == 0
+ assert pidx.searchsorted(NaT) == 0
msg = "Input has different freq=H from PeriodArray"
with pytest.raises(IncompatibleFrequency, match=msg):
- pidx.searchsorted(pd.Period("2014-01-01", freq="H"))
+ pidx.searchsorted(Period("2014-01-01", freq="H"))
msg = "Input has different freq=5D from PeriodArray"
with pytest.raises(IncompatibleFrequency, match=msg):
- pidx.searchsorted(pd.Period("2014-01-01", freq="5D"))
+ pidx.searchsorted(Period("2014-01-01", freq="5D"))
def test_searchsorted_invalid(self):
- pidx = pd.PeriodIndex(
+ pidx = PeriodIndex(
["2014-01-01", "2014-01-02", "2014-01-03", "2014-01-04", "2014-01-05"],
freq="D",
)
@@ -284,13 +107,21 @@ def test_tolist(self):
recon = PeriodIndex(rs)
tm.assert_index_equal(index, recon)
+
+class TestToTimestamp:
+ def test_to_timestamp_freq(self):
+ idx = period_range("2017", periods=12, freq="A-DEC")
+ result = idx.to_timestamp()
+ expected = date_range("2017", periods=12, freq="AS-JAN")
+ tm.assert_index_equal(result, expected)
+
def test_to_timestamp_pi_nat(self):
# GH#7228
index = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="M", name="idx")
result = index.to_timestamp("D")
expected = DatetimeIndex(
- [pd.NaT, datetime(2011, 1, 1), datetime(2011, 2, 1)], name="idx"
+ [NaT, datetime(2011, 1, 1), datetime(2011, 2, 1)], name="idx"
)
tm.assert_index_equal(result, expected)
assert result.name == "idx"
@@ -356,29 +187,6 @@ def test_to_timestamp_pi_combined(self):
expected = expected + Timedelta(1, "h") - Timedelta(1, "ns")
tm.assert_index_equal(result, expected)
- def test_period_astype_to_timestamp(self):
- pi = pd.PeriodIndex(["2011-01", "2011-02", "2011-03"], freq="M")
-
- exp = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"])
- tm.assert_index_equal(pi.astype("datetime64[ns]"), exp)
-
- exp = pd.DatetimeIndex(["2011-01-31", "2011-02-28", "2011-03-31"])
- exp = exp + Timedelta(1, "D") - Timedelta(1, "ns")
- tm.assert_index_equal(pi.astype("datetime64[ns]", how="end"), exp)
-
- exp = pd.DatetimeIndex(
- ["2011-01-01", "2011-02-01", "2011-03-01"], tz="US/Eastern"
- )
- res = pi.astype("datetime64[ns, US/Eastern]")
- tm.assert_index_equal(pi.astype("datetime64[ns, US/Eastern]"), exp)
-
- exp = pd.DatetimeIndex(
- ["2011-01-31", "2011-02-28", "2011-03-31"], tz="US/Eastern"
- )
- exp = exp + Timedelta(1, "D") - Timedelta(1, "ns")
- res = pi.astype("datetime64[ns, US/Eastern]", how="end")
- tm.assert_index_equal(res, exp)
-
def test_to_timestamp_1703(self):
index = period_range("1/1/2012", periods=4, freq="D")
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index 995d47c1473be..b396a88e6eb6a 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -1565,3 +1565,21 @@ def test_small_year_parsing():
per1 = Period("0001-01-07", "D")
assert per1.year == 1
assert per1.day == 7
+
+
+def test_negone_ordinals():
+ freqs = ["A", "M", "Q", "D", "H", "T", "S"]
+
+ period = Period(ordinal=-1, freq="D")
+ for freq in freqs:
+ repr(period.asfreq(freq))
+
+ for freq in freqs:
+ period = Period(ordinal=-1, freq=freq)
+ repr(period)
+ assert period.year == 1969
+
+ period = Period(ordinal=-1, freq="B")
+ repr(period)
+ period = Period(ordinal=-1, freq="W")
+ repr(period)
diff --git a/pandas/tests/scalar/timestamp/test_rendering.py b/pandas/tests/scalar/timestamp/test_rendering.py
index 6b64b230a0bb9..cab6946bb8d02 100644
--- a/pandas/tests/scalar/timestamp/test_rendering.py
+++ b/pandas/tests/scalar/timestamp/test_rendering.py
@@ -85,3 +85,13 @@ def test_pprint(self):
{'w': {'a': Timestamp('2011-01-01 00:00:00')}}],
'foo': 1}"""
assert result == expected
+
+ def test_to_timestamp_repr_is_code(self):
+ zs = [
+ Timestamp("99-04-17 00:00:00", tz="UTC"),
+ Timestamp("2001-04-17 00:00:00", tz="UTC"),
+ Timestamp("2001-04-17 00:00:00", tz="America/Los_Angeles"),
+ Timestamp("2001-04-17 00:00:00", tz=None),
+ ]
+ for z in zs:
+ assert eval(repr(z)) == z
diff --git a/pandas/tests/series/methods/test_asfreq.py b/pandas/tests/series/methods/test_asfreq.py
new file mode 100644
index 0000000000000..05ec56cf02182
--- /dev/null
+++ b/pandas/tests/series/methods/test_asfreq.py
@@ -0,0 +1,23 @@
+import numpy as np
+
+from pandas import DataFrame, Series, period_range
+import pandas._testing as tm
+
+
+class TestAsFreq:
+ # TODO: de-duplicate/parametrize or move DataFrame test
+ def test_asfreq_ts(self):
+ index = period_range(freq="A", start="1/1/2001", end="12/31/2010")
+ ts = Series(np.random.randn(len(index)), index=index)
+ df = DataFrame(np.random.randn(len(index), 3), index=index)
+
+ result = ts.asfreq("D", how="end")
+ df_result = df.asfreq("D", how="end")
+ exp_index = index.asfreq("D", how="end")
+ assert len(result) == len(ts)
+ tm.assert_index_equal(result.index, exp_index)
+ tm.assert_index_equal(df_result.index, exp_index)
+
+ result = ts.asfreq("D", how="start")
+ assert len(result) == len(ts)
+ tm.assert_index_equal(result.index, index.asfreq("D", how="start"))
diff --git a/pandas/tests/series/methods/test_combine_first.py b/pandas/tests/series/methods/test_combine_first.py
new file mode 100644
index 0000000000000..aed6425e50117
--- /dev/null
+++ b/pandas/tests/series/methods/test_combine_first.py
@@ -0,0 +1,19 @@
+import numpy as np
+
+from pandas import Period, Series, date_range, period_range
+import pandas._testing as tm
+
+
+class TestCombineFirst:
+ def test_combine_first_period_datetime(self):
+ # GH#3367
+ didx = date_range(start="1950-01-31", end="1950-07-31", freq="M")
+ pidx = period_range(start=Period("1950-1"), end=Period("1950-7"), freq="M")
+ # check to be consistent with DatetimeIndex
+ for idx in [didx, pidx]:
+ a = Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)
+ b = Series([9, 9, 9, 9, 9, 9, 9], index=idx)
+
+ result = a.combine_first(b)
+ expected = Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64)
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_to_timestamp.py b/pandas/tests/series/methods/test_to_timestamp.py
new file mode 100644
index 0000000000000..44caf1f082a4f
--- /dev/null
+++ b/pandas/tests/series/methods/test_to_timestamp.py
@@ -0,0 +1,54 @@
+from datetime import timedelta
+
+from pandas import Series, Timedelta, date_range, period_range, to_datetime
+import pandas._testing as tm
+
+
+class TestToTimestamp:
+ def test_to_timestamp(self):
+ index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
+ series = Series(1, index=index, name="foo")
+
+ exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
+ result = series.to_timestamp(how="end")
+ exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
+ tm.assert_index_equal(result.index, exp_index)
+ assert result.name == "foo"
+
+ exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
+ result = series.to_timestamp(how="start")
+ tm.assert_index_equal(result.index, exp_index)
+
+ def _get_with_delta(delta, freq="A-DEC"):
+ return date_range(
+ to_datetime("1/1/2001") + delta,
+ to_datetime("12/31/2009") + delta,
+ freq=freq,
+ )
+
+ delta = timedelta(hours=23)
+ result = series.to_timestamp("H", "end")
+ exp_index = _get_with_delta(delta)
+ exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
+ tm.assert_index_equal(result.index, exp_index)
+
+ delta = timedelta(hours=23, minutes=59)
+ result = series.to_timestamp("T", "end")
+ exp_index = _get_with_delta(delta)
+ exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
+ tm.assert_index_equal(result.index, exp_index)
+
+ result = series.to_timestamp("S", "end")
+ delta = timedelta(hours=23, minutes=59, seconds=59)
+ exp_index = _get_with_delta(delta)
+ exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
+ tm.assert_index_equal(result.index, exp_index)
+
+ index = period_range(freq="H", start="1/1/2001", end="1/2/2001")
+ series = Series(1, index=index, name="foo")
+
+ exp_index = date_range("1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="H")
+ result = series.to_timestamp(how="end")
+ exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
+ tm.assert_index_equal(result.index, exp_index)
+ assert result.name == "foo"
| https://api.github.com/repos/pandas-dev/pandas/pulls/31758 | 2020-02-06T20:10:22Z | 2020-02-09T17:04:58Z | 2020-02-09T17:04:58Z | 2020-02-09T17:23:01Z | |
BUG Decode to UTF-8 the dtype string read from a hdf file | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 64f0cb3f2e26d..0ac26ca65f468 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -184,6 +184,7 @@ I/O
- Bug in :meth:`DataFrame.to_parquet` overwriting pyarrow's default for
``coerce_timestamps``; following pyarrow's default allows writing nanosecond
timestamps with ``version="2.0"`` (:issue:`31652`).
+- Bug in :class:`HDFStore` that caused it to set to ``int64`` the dtype of a ``datetime64`` column when reading a DataFrame in Python 3 from fixed format written in Python 2 (:issue:`31750`)
Plotting
^^^^^^^^
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index c1e12887b0150..0e2b909d5cdc7 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2722,7 +2722,7 @@ def read_array(
if isinstance(node, tables.VLArray):
ret = node[0][start:stop]
else:
- dtype = getattr(attrs, "value_type", None)
+ dtype = _ensure_decoded(getattr(attrs, "value_type", None))
shape = getattr(attrs, "shape", None)
if shape is not None:
diff --git a/pandas/tests/io/data/legacy_hdf/legacy_table_fixed_datetime_py2.h5 b/pandas/tests/io/data/legacy_hdf/legacy_table_fixed_datetime_py2.h5
new file mode 100644
index 0000000000000..18cfae15a3a78
Binary files /dev/null and b/pandas/tests/io/data/legacy_hdf/legacy_table_fixed_datetime_py2.h5 differ
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index f56d042093886..547de39eec5e0 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -4074,6 +4074,21 @@ def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path):
)
tm.assert_frame_equal(expected, result)
+ def test_legacy_table_fixed_format_read_datetime_py2(self, datapath, setup_path):
+ # GH 31750
+ # legacy table with fixed format and datetime64 column written in Python 2
+ with ensure_clean_store(
+ datapath("io", "data", "legacy_hdf", "legacy_table_fixed_datetime_py2.h5"),
+ mode="r",
+ ) as store:
+ result = store.select("df")
+ expected = pd.DataFrame(
+ [[pd.Timestamp("2020-02-06T18:00")]],
+ columns=["A"],
+ index=pd.Index(["date"]),
+ )
+ tm.assert_frame_equal(expected, result)
+
def test_legacy_table_read_py2(self, datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
| Fixes GH31750
The dtype value wasn't being decoded to `UTF-8` when reading a DataFrame
from a hdf file. This was a problem when reading a hdf that was
created from python 2 with a fixed format as the dtype was being read as `b'datetime'`
instead of `datetime`, which caused `HDFStore` to read the data as
`int64` instead of coercing it to the correct `datetime64` dtype.
- [x] closes #31750
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31756 | 2020-02-06T20:02:14Z | 2020-02-09T21:34:20Z | 2020-02-09T21:34:20Z | 2020-02-10T13:49:36Z |
CLN: misplaced TimedeltaIndex tests | diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index ca4d1ff067f3d..19b72d42062aa 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -94,6 +94,14 @@ def test_loc_iterable(self, float_frame, key_type):
expected = float_frame.loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
+ def test_loc_timedelta_0seconds(self):
+ # GH#10583
+ df = pd.DataFrame(np.random.normal(size=(10, 4)))
+ df.index = pd.timedelta_range(start="0s", periods=10, freq="s")
+ expected = df.loc[pd.Timedelta("0s") :, :]
+ result = df.loc["0s":, :]
+ tm.assert_frame_equal(expected, result)
+
@pytest.mark.parametrize(
"idx_type",
[
@@ -204,7 +212,7 @@ def test_setitem_list_of_tuples(self, float_frame):
expected = Series(tuples, index=float_frame.index, name="tuples")
tm.assert_series_equal(result, expected)
- def test_setitem_mulit_index(self):
+ def test_setitem_multi_index(self):
# GH7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"]
diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py
index 32e6821e87f05..0de10b5d82171 100644
--- a/pandas/tests/indexes/timedeltas/test_constructors.py
+++ b/pandas/tests/indexes/timedeltas/test_constructors.py
@@ -10,6 +10,12 @@
class TestTimedeltaIndex:
+ @pytest.mark.parametrize("unit", ["Y", "y", "M"])
+ def test_unit_m_y_raises(self, unit):
+ msg = "Units 'M' and 'Y' are no longer supported"
+ with pytest.raises(ValueError, match=msg):
+ TimedeltaIndex([1, 3, 7], unit)
+
def test_int64_nocopy(self):
# GH#23539 check that a copy isn't made when we pass int64 data
# and copy=False
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index a3e390fc941c7..6606507dabc29 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -113,15 +113,6 @@ def test_order(self):
["1 day", "3 day", "5 day", "2 day", "1 day"], name="idx2"
)
- # TODO(wesm): unused?
- # exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
- # '3 day', '5 day'], name='idx2')
-
- # idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
- # '2 minute', pd.NaT], name='idx3')
- # exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
- # '5 minute'], name='idx3')
-
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
@@ -189,9 +180,6 @@ def test_infer_freq(self, freq):
tm.assert_index_equal(idx, result)
assert result.freq == freq
- def test_shift(self):
- pass # handled in test_arithmetic.py
-
def test_repeat(self):
index = pd.timedelta_range("1 days", periods=2, freq="D")
exp = pd.TimedeltaIndex(["1 days", "1 days", "2 days", "2 days"])
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index 3b52b93fa6369..8a91c9d5e09c8 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -284,17 +284,3 @@ def test_freq_conversion(self):
result = td.astype("timedelta64[s]")
tm.assert_index_equal(result, expected)
-
- @pytest.mark.parametrize("unit", ["Y", "y", "M"])
- def test_unit_m_y_raises(self, unit):
- msg = "Units 'M' and 'Y' are no longer supported"
- with pytest.raises(ValueError, match=msg):
- TimedeltaIndex([1, 3, 7], unit)
-
-
-class TestTimeSeries:
- def test_series_box_timedelta(self):
- rng = timedelta_range("1 day 1 s", periods=5, freq="h")
- s = Series(rng)
- assert isinstance(s[1], Timedelta)
- assert isinstance(s.iat[2], Timedelta)
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
index 1cef9de6a3a77..9f12af9a96104 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
@@ -1,7 +1,6 @@
import numpy as np
import pytest
-import pandas as pd
from pandas import timedelta_range, to_timedelta
import pandas._testing as tm
@@ -31,23 +30,6 @@ def test_timedelta_range(self):
result = timedelta_range("0 days", freq="30T", periods=50)
tm.assert_index_equal(result, expected)
- # GH 11776
- arr = np.arange(10).reshape(2, 5)
- df = pd.DataFrame(np.arange(10).reshape(2, 5))
- for arg in (arr, df):
- with pytest.raises(TypeError, match="1-d array"):
- to_timedelta(arg)
- for errors in ["ignore", "raise", "coerce"]:
- with pytest.raises(TypeError, match="1-d array"):
- to_timedelta(arg, errors=errors)
-
- # issue10583
- df = pd.DataFrame(np.random.normal(size=(10, 4)))
- df.index = pd.timedelta_range(start="0s", periods=10, freq="s")
- expected = df.loc[pd.Timedelta("0s") :, :]
- result = df.loc["0s":, :]
- tm.assert_frame_equal(expected, result)
-
@pytest.mark.parametrize(
"periods, freq", [(3, "2D"), (5, "D"), (6, "19H12T"), (7, "16H"), (9, "12H")]
)
diff --git a/pandas/tests/indexes/timedeltas/test_tools.py b/pandas/tests/indexes/timedeltas/test_tools.py
index 477fc092a4e16..e3cf3a7f16a82 100644
--- a/pandas/tests/indexes/timedeltas/test_tools.py
+++ b/pandas/tests/indexes/timedeltas/test_tools.py
@@ -57,6 +57,17 @@ def test_to_timedelta(self):
expected = TimedeltaIndex([np.timedelta64(1, "D")] * 5)
tm.assert_index_equal(result, expected)
+ def test_to_timedelta_dataframe(self):
+ # GH 11776
+ arr = np.arange(10).reshape(2, 5)
+ df = pd.DataFrame(np.arange(10).reshape(2, 5))
+ for arg in (arr, df):
+ with pytest.raises(TypeError, match="1-d array"):
+ to_timedelta(arg)
+ for errors in ["ignore", "raise", "coerce"]:
+ with pytest.raises(TypeError, match="1-d array"):
+ to_timedelta(arg, errors=errors)
+
def test_to_timedelta_invalid(self):
# bad value for errors parameter
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 1b21e2419595f..321df29176728 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -241,6 +241,16 @@ def test_series_box_timestamp():
assert isinstance(ser.iat[5], pd.Timestamp)
+def test_series_box_timedelta():
+ rng = pd.timedelta_range("1 day 1 s", periods=5, freq="h")
+ ser = pd.Series(rng)
+ assert isinstance(ser[0], Timedelta)
+ assert isinstance(ser.at[1], Timedelta)
+ assert isinstance(ser.iat[2], Timedelta)
+ assert isinstance(ser.loc[3], Timedelta)
+ assert isinstance(ser.iloc[4], Timedelta)
+
+
def test_getitem_ambiguous_keyerror():
s = Series(range(10), index=list(range(0, 20, 2)))
with pytest.raises(KeyError, match=r"^1$"):
| https://api.github.com/repos/pandas-dev/pandas/pulls/31755 | 2020-02-06T19:24:38Z | 2020-02-09T15:02:58Z | 2020-02-09T15:02:58Z | 2020-02-09T15:39:46Z | |
CLN/TST: organize DatetimeIndex tests | diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py
index 95d14ad4c86f7..1d1d371fcec1e 100644
--- a/pandas/tests/indexes/datetimes/test_constructors.py
+++ b/pandas/tests/indexes/datetimes/test_constructors.py
@@ -950,3 +950,12 @@ def test_datetimeindex_constructor_misc(self):
)
assert len(idx1) == len(idx2)
assert idx1.freq == idx2.freq
+
+ def test_pass_datetimeindex_to_index(self):
+ # Bugs in #1396
+ rng = date_range("1/1/2000", "3/1/2000")
+ idx = Index(rng, dtype=object)
+
+ expected = Index(rng.to_pydatetime(), dtype=object)
+
+ tm.assert_numpy_array_equal(idx.values, expected.values)
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 2f954117f48d7..c358e72538788 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -344,6 +344,115 @@ def test_take_fill_value_with_timezone(self):
idx.take(np.array([1, -5]))
+class TestGetLoc:
+ @pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"])
+ def test_get_loc_method_exact_match(self, method):
+ idx = pd.date_range("2000-01-01", periods=3)
+ assert idx.get_loc(idx[1], method) == 1
+ assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
+ assert idx.get_loc(str(idx[1]), method) == 1
+
+ if method is not None:
+ assert idx.get_loc(idx[1], method, tolerance=pd.Timedelta("0 days")) == 1
+
+ def test_get_loc(self):
+ idx = pd.date_range("2000-01-01", periods=3)
+
+ assert idx.get_loc("2000-01-01", method="nearest") == 0
+ assert idx.get_loc("2000-01-01T12", method="nearest") == 1
+
+ assert idx.get_loc("2000-01-01T12", method="nearest", tolerance="1 day") == 1
+ assert (
+ idx.get_loc("2000-01-01T12", method="nearest", tolerance=pd.Timedelta("1D"))
+ == 1
+ )
+ assert (
+ idx.get_loc(
+ "2000-01-01T12", method="nearest", tolerance=np.timedelta64(1, "D")
+ )
+ == 1
+ )
+ assert (
+ idx.get_loc("2000-01-01T12", method="nearest", tolerance=timedelta(1)) == 1
+ )
+ with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
+ idx.get_loc("2000-01-01T12", method="nearest", tolerance="foo")
+ with pytest.raises(KeyError, match="'2000-01-01T03'"):
+ idx.get_loc("2000-01-01T03", method="nearest", tolerance="2 hours")
+ with pytest.raises(
+ ValueError, match="tolerance size must match target index size"
+ ):
+ idx.get_loc(
+ "2000-01-01",
+ method="nearest",
+ tolerance=[
+ pd.Timedelta("1day").to_timedelta64(),
+ pd.Timedelta("1day").to_timedelta64(),
+ ],
+ )
+
+ assert idx.get_loc("2000", method="nearest") == slice(0, 3)
+ assert idx.get_loc("2000-01", method="nearest") == slice(0, 3)
+
+ assert idx.get_loc("1999", method="nearest") == 0
+ assert idx.get_loc("2001", method="nearest") == 2
+
+ with pytest.raises(KeyError, match="'1999'"):
+ idx.get_loc("1999", method="pad")
+ with pytest.raises(KeyError, match="'2001'"):
+ idx.get_loc("2001", method="backfill")
+
+ with pytest.raises(KeyError, match="'foobar'"):
+ idx.get_loc("foobar")
+ with pytest.raises(InvalidIndexError, match=r"slice\(None, 2, None\)"):
+ idx.get_loc(slice(2))
+
+ idx = pd.to_datetime(["2000-01-01", "2000-01-04"])
+ assert idx.get_loc("2000-01-02", method="nearest") == 0
+ assert idx.get_loc("2000-01-03", method="nearest") == 1
+ assert idx.get_loc("2000-01", method="nearest") == slice(0, 2)
+
+ # time indexing
+ idx = pd.date_range("2000-01-01", periods=24, freq="H")
+ tm.assert_numpy_array_equal(
+ idx.get_loc(time(12)), np.array([12]), check_dtype=False
+ )
+ tm.assert_numpy_array_equal(
+ idx.get_loc(time(12, 30)), np.array([]), check_dtype=False
+ )
+ with pytest.raises(NotImplementedError):
+ idx.get_loc(time(12, 30), method="pad")
+
+ def test_get_loc_nat(self):
+ # GH#20464
+ index = DatetimeIndex(["1/3/2000", "NaT"])
+ assert index.get_loc(pd.NaT) == 1
+
+ assert index.get_loc(None) == 1
+
+ assert index.get_loc(np.nan) == 1
+
+ assert index.get_loc(pd.NA) == 1
+
+ assert index.get_loc(np.datetime64("NaT")) == 1
+
+ with pytest.raises(KeyError, match="NaT"):
+ index.get_loc(np.timedelta64("NaT"))
+
+ @pytest.mark.parametrize("key", [pd.Timedelta(0), pd.Timedelta(1), timedelta(0)])
+ def test_get_loc_timedelta_invalid_key(self, key):
+ # GH#20464
+ dti = pd.date_range("1970-01-01", periods=10)
+ with pytest.raises(TypeError):
+ dti.get_loc(key)
+
+ def test_get_loc_reasonable_key_error(self):
+ # GH#1062
+ index = DatetimeIndex(["1/3/2000"])
+ with pytest.raises(KeyError, match="2000"):
+ index.get_loc("1/1/2000")
+
+
class TestDatetimeIndex:
@pytest.mark.parametrize(
"null", [None, np.nan, np.datetime64("NaT"), pd.NaT, pd.NA]
@@ -639,84 +748,6 @@ def test_get_value(self):
result = dti.get_value(ser, key.to_datetime64())
assert result == 7
- def test_get_loc(self):
- idx = pd.date_range("2000-01-01", periods=3)
-
- for method in [None, "pad", "backfill", "nearest"]:
- assert idx.get_loc(idx[1], method) == 1
- assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
- assert idx.get_loc(str(idx[1]), method) == 1
-
- if method is not None:
- assert (
- idx.get_loc(idx[1], method, tolerance=pd.Timedelta("0 days")) == 1
- )
-
- assert idx.get_loc("2000-01-01", method="nearest") == 0
- assert idx.get_loc("2000-01-01T12", method="nearest") == 1
-
- assert idx.get_loc("2000-01-01T12", method="nearest", tolerance="1 day") == 1
- assert (
- idx.get_loc("2000-01-01T12", method="nearest", tolerance=pd.Timedelta("1D"))
- == 1
- )
- assert (
- idx.get_loc(
- "2000-01-01T12", method="nearest", tolerance=np.timedelta64(1, "D")
- )
- == 1
- )
- assert (
- idx.get_loc("2000-01-01T12", method="nearest", tolerance=timedelta(1)) == 1
- )
- with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
- idx.get_loc("2000-01-01T12", method="nearest", tolerance="foo")
- with pytest.raises(KeyError, match="'2000-01-01T03'"):
- idx.get_loc("2000-01-01T03", method="nearest", tolerance="2 hours")
- with pytest.raises(
- ValueError, match="tolerance size must match target index size"
- ):
- idx.get_loc(
- "2000-01-01",
- method="nearest",
- tolerance=[
- pd.Timedelta("1day").to_timedelta64(),
- pd.Timedelta("1day").to_timedelta64(),
- ],
- )
-
- assert idx.get_loc("2000", method="nearest") == slice(0, 3)
- assert idx.get_loc("2000-01", method="nearest") == slice(0, 3)
-
- assert idx.get_loc("1999", method="nearest") == 0
- assert idx.get_loc("2001", method="nearest") == 2
-
- with pytest.raises(KeyError, match="'1999'"):
- idx.get_loc("1999", method="pad")
- with pytest.raises(KeyError, match="'2001'"):
- idx.get_loc("2001", method="backfill")
-
- with pytest.raises(KeyError, match="'foobar'"):
- idx.get_loc("foobar")
- with pytest.raises(InvalidIndexError, match=r"slice\(None, 2, None\)"):
- idx.get_loc(slice(2))
-
- idx = pd.to_datetime(["2000-01-01", "2000-01-04"])
- assert idx.get_loc("2000-01-02", method="nearest") == 0
- assert idx.get_loc("2000-01-03", method="nearest") == 1
- assert idx.get_loc("2000-01", method="nearest") == slice(0, 2)
-
- # time indexing
- idx = pd.date_range("2000-01-01", periods=24, freq="H")
- tm.assert_numpy_array_equal(
- idx.get_loc(time(12)), np.array([12]), check_dtype=False
- )
- tm.assert_numpy_array_equal(
- idx.get_loc(time(12, 30)), np.array([]), check_dtype=False
- )
- with pytest.raises(NotImplementedError):
- idx.get_loc(time(12, 30), method="pad")
-
def test_get_indexer(self):
idx = pd.date_range("2000-01-01", periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
@@ -756,32 +787,3 @@ def test_get_indexer(self):
idx.get_indexer(target, "nearest", tolerance=tol_bad)
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method="nearest", tolerance="foo")
-
- def test_reasonable_key_error(self):
- # GH#1062
- index = DatetimeIndex(["1/3/2000"])
- with pytest.raises(KeyError, match="2000"):
- index.get_loc("1/1/2000")
-
- @pytest.mark.parametrize("key", [pd.Timedelta(0), pd.Timedelta(1), timedelta(0)])
- def test_timedelta_invalid_key(self, key):
- # GH#20464
- dti = pd.date_range("1970-01-01", periods=10)
- with pytest.raises(TypeError):
- dti.get_loc(key)
-
- def test_get_loc_nat(self):
- # GH#20464
- index = DatetimeIndex(["1/3/2000", "NaT"])
- assert index.get_loc(pd.NaT) == 1
-
- assert index.get_loc(None) == 1
-
- assert index.get_loc(np.nan) == 1
-
- assert index.get_loc(pd.NA) == 1
-
- assert index.get_loc(np.datetime64("NaT")) == 1
-
- with pytest.raises(KeyError, match="NaT"):
- index.get_loc(np.timedelta64("NaT"))
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 340f53b2868bd..d0464698e3f24 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -12,15 +12,6 @@
class TestTimeSeries:
- def test_pass_datetimeindex_to_index(self):
- # Bugs in #1396
- rng = date_range("1/1/2000", "3/1/2000")
- idx = Index(rng, dtype=object)
-
- expected = Index(rng.to_pydatetime(), dtype=object)
-
- tm.assert_numpy_array_equal(idx.values, expected.values)
-
def test_range_edges(self):
# GH#13672
idx = pd.date_range(
| 1/many | https://api.github.com/repos/pandas-dev/pandas/pulls/31753 | 2020-02-06T18:54:23Z | 2020-02-06T23:48:19Z | 2020-02-06T23:48:19Z | 2020-02-06T23:53:21Z |
F string fixes | diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 04fd4835469a9..78b630bb5ada1 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -60,9 +60,7 @@ def compare_element(result, expected, typ, version=None):
assert result == expected
assert result.freq == expected.freq
else:
- comparator = getattr(
- tm, "assert_{typ}_equal".format(typ=typ), tm.assert_almost_equal
- )
+ comparator = getattr(tm, f"assert_{typ}_equal", tm.assert_almost_equal)
comparator(result, expected)
@@ -77,7 +75,7 @@ def compare(data, vf, version):
# use a specific comparator
# if available
- comparator = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
+ comparator = f"compare_{typ}_{dt}"
comparator = m.get(comparator, m["compare_element"])
comparator(result, expected, typ, version)
@@ -234,7 +232,7 @@ def test_legacy_sparse_warning(datapath):
@pytest.fixture
def get_random_path():
- return "__{}__.pickle".format(tm.rands(10))
+ return f"__{tm.rands(10)}__.pickle"
class TestCompression:
@@ -262,7 +260,7 @@ def compress_file(self, src_path, dest_path, compression):
elif compression == "xz":
f = _get_lzma_file(lzma)(dest_path, "w")
else:
- msg = "Unrecognized compression type: {}".format(compression)
+ msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
if compression != "zip":
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 1b21e2419595f..fa5c75d5e4ad9 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -429,7 +429,7 @@ def test_basic_getitem_setitem_corner(datetime_series):
@pytest.mark.parametrize("tz", ["US/Eastern", "UTC", "Asia/Tokyo"])
def test_setitem_with_tz(tz):
orig = pd.Series(pd.date_range("2016-01-01", freq="H", periods=3, tz=tz))
- assert orig.dtype == "datetime64[ns, {0}]".format(tz)
+ assert orig.dtype == f"datetime64[ns, {tz}]"
# scalar
s = orig.copy()
@@ -456,7 +456,7 @@ def test_setitem_with_tz(tz):
[pd.Timestamp("2011-01-01", tz=tz), pd.Timestamp("2012-01-01", tz=tz)],
index=[1, 2],
)
- assert vals.dtype == "datetime64[ns, {0}]".format(tz)
+ assert vals.dtype == f"datetime64[ns, {tz}]"
s[[1, 2]] = vals
exp = pd.Series(
@@ -481,7 +481,7 @@ def test_setitem_with_tz_dst():
# GH XXX
tz = "US/Eastern"
orig = pd.Series(pd.date_range("2016-11-06", freq="H", periods=3, tz=tz))
- assert orig.dtype == "datetime64[ns, {0}]".format(tz)
+ assert orig.dtype == f"datetime64[ns, {tz}]"
# scalar
s = orig.copy()
@@ -508,7 +508,7 @@ def test_setitem_with_tz_dst():
[pd.Timestamp("2011-01-01", tz=tz), pd.Timestamp("2012-01-01", tz=tz)],
index=[1, 2],
)
- assert vals.dtype == "datetime64[ns, {0}]".format(tz)
+ assert vals.dtype == f"datetime64[ns, {tz}]"
s[[1, 2]] = vals
exp = pd.Series(
| Related to issue https://github.com/pandas-dev/pandas/issues/29547 | https://api.github.com/repos/pandas-dev/pandas/pulls/31751 | 2020-02-06T17:33:01Z | 2020-02-06T21:43:30Z | 2020-02-06T21:43:30Z | 2020-02-06T21:43:36Z |
Backport PR #31745 on branch 1.0.x (CI: Update travis-37.yaml Conda channel ) | diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml
index 73e2c20b31438..682b1016ff3a2 100644
--- a/ci/deps/travis-37.yaml
+++ b/ci/deps/travis-37.yaml
@@ -2,7 +2,6 @@ name: pandas-dev
channels:
- defaults
- conda-forge
- - c3i_test
dependencies:
- python=3.7.*
| Backport PR #31745: CI: Update travis-37.yaml Conda channel | https://api.github.com/repos/pandas-dev/pandas/pulls/31749 | 2020-02-06T15:44:25Z | 2020-02-06T23:41:55Z | 2020-02-06T23:41:55Z | 2020-02-06T23:41:55Z |
BUG: Fixed encoding of pd.NA with to_json | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index 07a837829c384..182ad7231983e 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -25,8 +25,9 @@ Fixed regressions
Bug fixes
~~~~~~~~~
--
--
+**I/O**
+
+- Using ``pd.NA`` with :meth:`DataFrame.to_json` now correctly outputs a null value instead of an empty object (:issue:`31615`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 0fc146d25459b..8cfc20ffd2c1c 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -53,6 +53,7 @@ static PyTypeObject *cls_dataframe;
static PyTypeObject *cls_series;
static PyTypeObject *cls_index;
static PyTypeObject *cls_nat;
+static PyTypeObject *cls_na;
PyObject *cls_timedelta;
npy_int64 get_nat(void) { return NPY_MIN_INT64; }
@@ -149,6 +150,7 @@ int PdBlock_iterNext(JSOBJ, JSONTypeContext *);
void *initObjToJSON(void) {
PyObject *mod_pandas;
PyObject *mod_nattype;
+ PyObject *mod_natype;
PyObject *mod_decimal = PyImport_ImportModule("decimal");
type_decimal =
(PyTypeObject *)PyObject_GetAttrString(mod_decimal, "Decimal");
@@ -174,6 +176,12 @@ void *initObjToJSON(void) {
Py_DECREF(mod_nattype);
}
+ mod_natype = PyImport_ImportModule("pandas._libs.missing");
+ if (mod_natype) {
+ cls_na = (PyTypeObject *)PyObject_GetAttrString(mod_natype, "NAType");
+ Py_DECREF(mod_natype);
+ }
+
/* Initialise numpy API */
import_array();
// GH 31463
@@ -1789,6 +1797,10 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) {
"%R (0d array) is not JSON serializable at the moment",
obj);
goto INVALID;
+ } else if (PyObject_TypeCheck(obj, cls_na)) {
+ PRINTMARK();
+ tc->type = JT_NULL;
+ return;
}
ISITERABLE:
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 602022a21c4a6..f2d35bfb3b5ae 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -1671,3 +1671,13 @@ def test_to_s3(self, s3_resource):
assert target_file in (
obj.key for obj in s3_resource.Bucket("pandas-test").objects.all()
)
+
+ def test_json_pandas_na(self):
+ # GH 31615
+ result = pd.DataFrame([[pd.NA]]).to_json()
+ assert result == '{"0":{"0":null}}'
+
+ def test_json_pandas_nulls(self, nulls_fixture):
+ # GH 31615
+ result = pd.DataFrame([[nulls_fixture]]).to_json()
+ assert result == '{"0":{"0":null}}'
| - [x] closes #31615
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31748 | 2020-02-06T13:57:26Z | 2020-02-08T08:11:34Z | 2020-02-08T08:11:34Z | 2020-02-08T08:17:10Z |
CI: Update travis-37.yaml Conda channel | diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml
index 73e2c20b31438..682b1016ff3a2 100644
--- a/ci/deps/travis-37.yaml
+++ b/ci/deps/travis-37.yaml
@@ -2,7 +2,6 @@ name: pandas-dev
channels:
- defaults
- conda-forge
- - c3i_test
dependencies:
- python=3.7.*
| Remove unrequired channel
- [x] closes #31739
Cc @TomAugspurger
| https://api.github.com/repos/pandas-dev/pandas/pulls/31745 | 2020-02-06T13:36:53Z | 2020-02-06T15:44:12Z | 2020-02-06T15:44:11Z | 2020-02-06T15:44:14Z |
TST: expand tests for ExtensionArray setitem with nullable arrays | diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 590bcd586900a..af70799c0236e 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -4,7 +4,7 @@
import pytest
import pandas as pd
-from pandas.core.arrays.numpy_ import PandasDtype
+import pandas._testing as tm
from .base import BaseExtensionTests
@@ -93,6 +93,92 @@ def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
df.iloc[10, 1] = data[1]
assert df.loc[10, "B"] == data[1]
+ @pytest.mark.parametrize(
+ "mask",
+ [
+ np.array([True, True, True, False, False]),
+ pd.array([True, True, True, False, False], dtype="boolean"),
+ ],
+ ids=["numpy-array", "boolean-array"],
+ )
+ def test_setitem_mask(self, data, mask, box_in_series):
+ arr = data[:5].copy()
+ expected = arr.take([0, 0, 0, 3, 4])
+ if box_in_series:
+ arr = pd.Series(arr)
+ expected = pd.Series(expected)
+ arr[mask] = data[0]
+ self.assert_equal(expected, arr)
+
+ def test_setitem_mask_raises(self, data, box_in_series):
+ # wrong length
+ mask = np.array([True, False])
+
+ if box_in_series:
+ data = pd.Series(data)
+
+ with pytest.raises(IndexError, match="wrong length"):
+ data[mask] = data[0]
+
+ mask = pd.array(mask, dtype="boolean")
+ with pytest.raises(IndexError, match="wrong length"):
+ data[mask] = data[0]
+
+ def test_setitem_mask_boolean_array_raises(self, data, box_in_series):
+ # missing values in mask
+ mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
+ mask[:2] = pd.NA
+
+ if box_in_series:
+ data = pd.Series(data)
+
+ msg = (
+ "Cannot mask with a boolean indexer containing NA values|"
+ "cannot mask with array containing NA / NaN values"
+ )
+ with pytest.raises(ValueError, match=msg):
+ data[mask] = data[0]
+
+ @pytest.mark.parametrize(
+ "idx",
+ [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
+ ids=["list", "integer-array", "numpy-array"],
+ )
+ def test_setitem_integer_array(self, data, idx, box_in_series):
+ arr = data[:5].copy()
+ expected = data.take([0, 0, 0, 3, 4])
+
+ if box_in_series:
+ arr = pd.Series(arr)
+ expected = pd.Series(expected)
+
+ arr[idx] = arr[0]
+ self.assert_equal(arr, expected)
+
+ @pytest.mark.parametrize(
+ "idx, box_in_series",
+ [
+ ([0, 1, 2, pd.NA], False),
+ pytest.param(
+ [0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948")
+ ),
+ (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
+ (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
+ ],
+ ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
+ )
+ def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
+ arr = data.copy()
+
+ # TODO(xfail) this raises KeyError about labels not found (it tries label-based)
+ # for list of labels with Series
+ if box_in_series:
+ arr = pd.Series(data, index=[tm.rands(4) for _ in range(len(data))])
+
+ msg = "Cannot index with an integer indexer containing NA values"
+ with pytest.raises(ValueError, match=msg):
+ arr[idx] = arr[0]
+
@pytest.mark.parametrize("as_callable", [True, False])
@pytest.mark.parametrize("setter", ["loc", None])
def test_setitem_mask_aligned(self, data, as_callable, setter):
@@ -219,14 +305,3 @@ def test_setitem_preserves_views(self, data):
data[0] = data[1]
assert view1[0] == data[1]
assert view2[0] == data[1]
-
- def test_setitem_nullable_mask(self, data):
- # GH 31446
- # TODO: there is some issue with PandasArray, therefore,
- # TODO: skip the setitem test for now, and fix it later
- if data.dtype != PandasDtype("object"):
- arr = data[:5]
- expected = data.take([0, 0, 0, 3, 4])
- mask = pd.array([True, True, True, False, False])
- arr[mask] = data[0]
- self.assert_extension_array_equal(expected, arr)
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index 76573242a2506..80a093530a8cd 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -396,6 +396,52 @@ def test_setitem_scalar_key_sequence_raise(self, data):
# Failed: DID NOT RAISE <class 'ValueError'>
super().test_setitem_scalar_key_sequence_raise(data)
+ # TODO: there is some issue with PandasArray, therefore,
+ # skip the setitem test for now, and fix it later (GH 31446)
+
+ @skip_nested
+ @pytest.mark.parametrize(
+ "mask",
+ [
+ np.array([True, True, True, False, False]),
+ pd.array([True, True, True, False, False], dtype="boolean"),
+ ],
+ ids=["numpy-array", "boolean-array"],
+ )
+ def test_setitem_mask(self, data, mask, box_in_series):
+ super().test_setitem_mask(data, mask, box_in_series)
+
+ @skip_nested
+ def test_setitem_mask_raises(self, data, box_in_series):
+ super().test_setitem_mask_raises(data, box_in_series)
+
+ @skip_nested
+ def test_setitem_mask_boolean_array_raises(self, data, box_in_series):
+ super().test_setitem_mask_boolean_array_raises(data, box_in_series)
+
+ @skip_nested
+ @pytest.mark.parametrize(
+ "idx",
+ [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
+ ids=["list", "integer-array", "numpy-array"],
+ )
+ def test_setitem_integer_array(self, data, idx, box_in_series):
+ super().test_setitem_integer_array(data, idx, box_in_series)
+
+ @skip_nested
+ @pytest.mark.parametrize(
+ "idx, box_in_series",
+ [
+ ([0, 1, 2, pd.NA], False),
+ pytest.param([0, 1, 2, pd.NA], True, marks=pytest.mark.xfail),
+ (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
+ (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
+ ],
+ ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
+ )
+ def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
+ super().test_setitem_integer_with_missing_raises(data, idx, box_in_series)
+
@skip_nested
def test_setitem_slice(self, data, box_in_series):
super().test_setitem_slice(data, box_in_series)
| Follow-up on https://github.com/pandas-dev/pandas/pull/31484 to add some more test cases (eg also test the validation done in the `check_array_indexer` (wrong length, missing values), as well for nullable integer arrays), similarly to the tests I added for `__getitem__`.
cc @charlesdong1991 | https://api.github.com/repos/pandas-dev/pandas/pulls/31741 | 2020-02-06T10:22:46Z | 2020-02-13T13:39:47Z | 2020-02-13T13:39:47Z | 2020-02-13T13:41:55Z |
Backport PR #31723 on branch 1.0.x (DOC: Add 1.0.2 whatsnew) | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index c9495d5b137fd..76d13478612ee 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -18,6 +18,7 @@ Version 1.0
v1.0.0
v1.0.1
+ v1.0.2
Version 0.25
------------
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
new file mode 100644
index 0000000000000..07a837829c384
--- /dev/null
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -0,0 +1,38 @@
+.. _whatsnew_102:
+
+What's new in 1.0.2 (February ??, 2020)
+---------------------------------------
+
+These are the changes in pandas 1.0.2. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_102.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_102.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_102.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.0.1..v1.0.2|HEAD
\ No newline at end of file
| Backport PR #31723: DOC: Add 1.0.2 whatsnew | https://api.github.com/repos/pandas-dev/pandas/pulls/31737 | 2020-02-06T07:49:29Z | 2020-02-06T11:12:07Z | 2020-02-06T11:12:07Z | 2020-02-06T11:12:07Z |
REF: turn _try_mi into MultiIndex._get_values_for_loc | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 3c735fc0309b6..cb827851cc595 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4601,7 +4601,7 @@ def get_value(self, series: "Series", key):
else:
raise
- return self._get_values_for_loc(series, loc)
+ return self._get_values_for_loc(series, loc, key)
def _should_fallback_to_positional(self) -> bool:
"""
@@ -4611,12 +4611,14 @@ def _should_fallback_to_positional(self) -> bool:
return False
return True
- def _get_values_for_loc(self, series: "Series", loc):
+ def _get_values_for_loc(self, series: "Series", loc, key):
"""
Do a positional lookup on the given Series, returning either a scalar
or a Series.
Assumes that `series.index is self`
+
+ key is included for MultiIndex compat.
"""
if is_integer(loc):
return series._values[loc]
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 708bea7d132a2..81e89441e92d8 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1,5 +1,15 @@
from sys import getsizeof
-from typing import Any, Hashable, Iterable, List, Optional, Sequence, Tuple, Union
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Hashable,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
import warnings
import numpy as np
@@ -56,6 +66,9 @@
pprint_thing,
)
+if TYPE_CHECKING:
+ from pandas import Series # noqa:F401
+
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass="MultiIndex", target_klass="MultiIndex or list of tuples")
@@ -2326,28 +2339,32 @@ def get_value(self, series, key):
# We have to explicitly exclude generators, as these are hashable.
raise InvalidIndexError(key)
- def _try_mi(k):
- # TODO: what if a level contains tuples??
- loc = self.get_loc(k)
-
- new_values = series._values[loc]
- if is_scalar(loc):
- return new_values
-
- new_index = self[loc]
- new_index = maybe_droplevels(new_index, k)
- return series._constructor(
- new_values, index=new_index, name=series.name
- ).__finalize__(self)
-
try:
- return _try_mi(key)
+ loc = self.get_loc(key)
except KeyError:
if is_integer(key):
- return series._values[key]
+ loc = key
else:
raise
+ return self._get_values_for_loc(series, loc, key)
+
+ def _get_values_for_loc(self, series: "Series", loc, key):
+ """
+ Do a positional lookup on the given Series, returning either a scalar
+ or a Series.
+
+ Assumes that `series.index is self`
+ """
+ new_values = series._values[loc]
+ if is_scalar(loc):
+ return new_values
+
+ new_index = self[loc]
+ new_index = maybe_droplevels(new_index, key)
+ new_ser = series._constructor(new_values, index=new_index, name=series.name)
+ return new_ser.__finalize__(series)
+
def _convert_listlike_indexer(self, keyarr):
"""
Parameters
| Discussed in #31640. | https://api.github.com/repos/pandas-dev/pandas/pulls/31736 | 2020-02-06T05:18:42Z | 2020-02-09T17:04:00Z | 2020-02-09T17:04:00Z | 2020-02-09T17:26:37Z |
BUG: list-like to_replace on Categorical.replace is ignored or crash | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index 0216007ea5ba8..19358689a2186 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -31,6 +31,7 @@ Bug fixes
**Categorical**
- Fixed bug where :meth:`Categorical.from_codes` improperly raised a ``ValueError`` when passed nullable integer codes. (:issue:`31779`)
+- Bug in :class:`Categorical` that would ignore or crash when calling :meth:`Series.replace` with a list-like ``to_replace`` (:issue:`31720`)
**I/O**
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 01d2bfe0458c8..5d0574e2b4cfd 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -1070,6 +1070,7 @@ def assert_series_equal(
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
+ check_category_order=True,
obj="Series",
):
"""
@@ -1104,6 +1105,10 @@ def assert_series_equal(
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
+ check_category_order : bool, default True
+ Whether to compare category order of internal Categoricals
+
+ .. versionadded:: 1.0.2
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
@@ -1206,7 +1211,12 @@ def assert_series_equal(
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
- assert_categorical_equal(left.values, right.values, obj=f"{obj} category")
+ assert_categorical_equal(
+ left.values,
+ right.values,
+ obj=f"{obj} category",
+ check_category_order=check_category_order,
+ )
# This could be refactored to use the NDFrame.equals method
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 6c7c35e9b4763..b25202addb54f 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2440,18 +2440,30 @@ def replace(self, to_replace, value, inplace: bool = False):
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
- if to_replace in cat.categories:
- if isna(value):
- cat.remove_categories(to_replace, inplace=True)
- else:
+
+ # build a dict of (to replace -> value) pairs
+ if is_list_like(to_replace):
+ # if to_replace is list-like and value is scalar
+ replace_dict = {replace_value: value for replace_value in to_replace}
+ else:
+ # if both to_replace and value are scalar
+ replace_dict = {to_replace: value}
+
+ # other cases, like if both to_replace and value are list-like or if
+ # to_replace is a dict, are handled separately in NDFrame
+ for replace_value, new_value in replace_dict.items():
+ if replace_value in cat.categories:
+ if isna(new_value):
+ cat.remove_categories(replace_value, inplace=True)
+ continue
categories = cat.categories.tolist()
- index = categories.index(to_replace)
- if value in cat.categories:
- value_index = categories.index(value)
+ index = categories.index(replace_value)
+ if new_value in cat.categories:
+ value_index = categories.index(new_value)
cat._codes[cat._codes == index] = value_index
- cat.remove_categories(to_replace, inplace=True)
+ cat.remove_categories(replace_value, inplace=True)
else:
- categories[index] = value
+ categories[index] = new_value
cat.rename_categories(categories, inplace=True)
if not inplace:
return cat
diff --git a/pandas/tests/arrays/categorical/test_replace.py b/pandas/tests/arrays/categorical/test_replace.py
new file mode 100644
index 0000000000000..52530123bd52f
--- /dev/null
+++ b/pandas/tests/arrays/categorical/test_replace.py
@@ -0,0 +1,48 @@
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize(
+ "to_replace,value,expected,check_types,check_categorical",
+ [
+ # one-to-one
+ (1, 2, [2, 2, 3], True, True),
+ (1, 4, [4, 2, 3], True, True),
+ (4, 1, [1, 2, 3], True, True),
+ (5, 6, [1, 2, 3], True, True),
+ # many-to-one
+ ([1], 2, [2, 2, 3], True, True),
+ ([1, 2], 3, [3, 3, 3], True, True),
+ ([1, 2], 4, [4, 4, 3], True, True),
+ ((1, 2, 4), 5, [5, 5, 3], True, True),
+ ((5, 6), 2, [1, 2, 3], True, True),
+ # many-to-many, handled outside of Categorical and results in separate dtype
+ ([1], [2], [2, 2, 3], False, False),
+ ([1, 4], [5, 2], [5, 2, 3], False, False),
+ # check_categorical sorts categories, which crashes on mixed dtypes
+ (3, "4", [1, 2, "4"], True, False),
+ ([1, 2, "3"], "5", ["5", "5", 3], True, False),
+ ],
+)
+def test_replace(to_replace, value, expected, check_types, check_categorical):
+ # GH 31720
+ s = pd.Series([1, 2, 3], dtype="category")
+ result = s.replace(to_replace, value)
+ expected = pd.Series(expected, dtype="category")
+ s.replace(to_replace, value, inplace=True)
+ tm.assert_series_equal(
+ expected,
+ result,
+ check_dtype=check_types,
+ check_categorical=check_categorical,
+ check_category_order=False,
+ )
+ tm.assert_series_equal(
+ expected,
+ s,
+ check_dtype=check_types,
+ check_categorical=check_categorical,
+ check_category_order=False,
+ )
| - [X] closes #31720
- [X] tests added / passed
- [x] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
Covers the case where `to_replace` is a list-like and `value` is a string. Other cases, like "`to_replace` is dict and `value` is None", or "`to_replace` and `value` are both lists" are handled earlier in generic.py | https://api.github.com/repos/pandas-dev/pandas/pulls/31734 | 2020-02-06T04:41:51Z | 2020-02-17T16:59:53Z | 2020-02-17T16:59:53Z | 2020-02-17T17:00:54Z |
Add --quiet option to isort command | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 0cc42be42d61e..b46989894ae12 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -113,7 +113,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
# Imports - Check formatting using isort see setup.cfg for settings
MSG='Check import format using isort' ; echo $MSG
- ISORT_CMD="isort --recursive --check-only pandas asv_bench"
+ ISORT_CMD="isort --quiet --recursive --check-only pandas asv_bench"
if [[ "$GITHUB_ACTIONS" == "true" ]]; then
eval $ISORT_CMD | awk '{print "##[error]" $0}'; RET=$(($RET + ${PIPESTATUS[0]}))
else
| - [x] closes #30974
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/31733 | 2020-02-06T04:00:01Z | 2020-02-07T06:12:19Z | 2020-02-07T06:12:18Z | 2020-02-07T06:12:44Z |
fixed mypy errors in mypy-pandas.tests.extension.decimal.test_decimal | diff --git a/pandas/tests/extension/base/base.py b/pandas/tests/extension/base/base.py
index 144b0825b39a2..97d8e7c66dbdb 100644
--- a/pandas/tests/extension/base/base.py
+++ b/pandas/tests/extension/base/base.py
@@ -2,8 +2,20 @@
class BaseExtensionTests:
+ # classmethod and different signature is needed
+ # to make inheritance compliant with mypy
+ @classmethod
+ def assert_equal(cls, left, right, **kwargs):
+ return tm.assert_equal(left, right, **kwargs)
- assert_equal = staticmethod(tm.assert_equal)
- assert_series_equal = staticmethod(tm.assert_series_equal)
- assert_frame_equal = staticmethod(tm.assert_frame_equal)
- assert_extension_array_equal = staticmethod(tm.assert_extension_array_equal)
+ @classmethod
+ def assert_series_equal(cls, left, right, *args, **kwargs):
+ return tm.assert_series_equal(left, right, *args, **kwargs)
+
+ @classmethod
+ def assert_frame_equal(cls, left, right, *args, **kwargs):
+ return tm.assert_frame_equal(left, right, *args, **kwargs)
+
+ @classmethod
+ def assert_extension_array_equal(cls, left, right, *args, **kwargs):
+ return tm.assert_extension_array_equal(left, right, *args, **kwargs)
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index de7c98ab96571..bd9b77a2bc419 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -66,7 +66,8 @@ def data_for_grouping():
class BaseDecimal:
- def assert_series_equal(self, left, right, *args, **kwargs):
+ @classmethod
+ def assert_series_equal(cls, left, right, *args, **kwargs):
def convert(x):
# need to convert array([Decimal(NaN)], dtype='object') to np.NaN
# because Series[object].isnan doesn't recognize decimal(NaN) as
@@ -88,7 +89,8 @@ def convert(x):
tm.assert_series_equal(left_na, right_na)
return tm.assert_series_equal(left[~left_na], right[~right_na], *args, **kwargs)
- def assert_frame_equal(self, left, right, *args, **kwargs):
+ @classmethod
+ def assert_frame_equal(cls, left, right, *args, **kwargs):
# TODO(EA): select_dtypes
tm.assert_index_equal(
left.columns,
@@ -103,7 +105,7 @@ def assert_frame_equal(self, left, right, *args, **kwargs):
decimals = (left.dtypes == "decimal").index
for col in decimals:
- self.assert_series_equal(left[col], right[col], *args, **kwargs)
+ cls.assert_series_equal(left[col], right[col], *args, **kwargs)
left = left.drop(columns=decimals)
right = right.drop(columns=decimals)
diff --git a/setup.cfg b/setup.cfg
index c298aa652824c..9be09ae1076bb 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -135,9 +135,6 @@ ignore_errors=True
[mypy-pandas.tests.arithmetic.test_datetime64]
ignore_errors=True
-[mypy-pandas.tests.extension.decimal.test_decimal]
-ignore_errors=True
-
[mypy-pandas.tests.extension.json.test_json]
ignore_errors=True
| Part of #28926
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/31730 | 2020-02-06T01:54:12Z | 2020-02-09T17:48:38Z | 2020-02-09T17:48:38Z | 2020-02-09T17:48:39Z |
BUG: Fix to_excel writers handling of cols | diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index 07a837829c384..94dc1e0c007ca 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -15,7 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
--
+- Fixed regression in :meth:`DataFrame.to_excel` when ``columns`` kwarg is passed (:issue:`31677`)
-
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py
index 14e79538541af..28a069bc9fc1b 100644
--- a/pandas/io/formats/excel.py
+++ b/pandas/io/formats/excel.py
@@ -403,7 +403,7 @@ def __init__(
# Deprecated in GH#17295, enforced in 1.0.0
raise KeyError("Not all names specified in 'columns' are found")
- self.df = df
+ self.df = df.reindex(columns=cols)
self.columns = self.df.columns
self.float_format = float_format
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index f7b49ccb1a72d..91665a24fc4c5 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -1048,6 +1048,27 @@ def test_invalid_columns(self, path):
):
write_frame.to_excel(path, "test1", columns=["C", "D"])
+ @pytest.mark.parametrize(
+ "to_excel_index,read_excel_index_col",
+ [
+ (True, 0), # Include index in write to file
+ (False, None), # Dont include index in write to file
+ ],
+ )
+ def test_write_subset_columns(self, path, to_excel_index, read_excel_index_col):
+ # GH 31677
+ write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2], "C": [3, 3, 3]})
+ write_frame.to_excel(
+ path, "col_subset_bug", columns=["A", "B"], index=to_excel_index
+ )
+
+ expected = write_frame[["A", "B"]]
+ read_frame = pd.read_excel(
+ path, "col_subset_bug", index_col=read_excel_index_col
+ )
+
+ tm.assert_frame_equal(expected, read_frame)
+
def test_comment_arg(self, path):
# see gh-18735
#
| - [x] closes #31677
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry - pending https://github.com/pandas-dev/pandas/pull/31723
Ref: https://github.com/pandas-dev/pandas/pull/31723
cc. @WillAyd @jbrockmendel | https://api.github.com/repos/pandas-dev/pandas/pulls/31729 | 2020-02-06T00:32:35Z | 2020-02-06T23:41:13Z | 2020-02-06T23:41:13Z | 2020-05-01T22:02:17Z |
TYP: partial typing of masked array | diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py
index db62136947250..590b40b0434e5 100644
--- a/pandas/core/arrays/boolean.py
+++ b/pandas/core/arrays/boolean.py
@@ -1,10 +1,11 @@
import numbers
-from typing import TYPE_CHECKING, Any, List, Tuple, Type, Union
+from typing import TYPE_CHECKING, List, Tuple, Type, Union
import warnings
import numpy as np
from pandas._libs import lib, missing as libmissing
+from pandas._typing import ArrayLike
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
@@ -281,20 +282,15 @@ def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not mask.ndim == 1:
raise ValueError("mask must be a 1D array")
- if copy:
- values = values.copy()
- mask = mask.copy()
-
- self._data = values
- self._mask = mask
self._dtype = BooleanDtype()
+ super().__init__(values, mask, copy=copy)
@property
- def dtype(self):
+ def dtype(self) -> BooleanDtype:
return self._dtype
@classmethod
- def _from_sequence(cls, scalars, dtype=None, copy: bool = False):
+ def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "BooleanArray":
if dtype:
assert dtype == "boolean"
values, mask = coerce_to_array(scalars, copy=copy)
@@ -303,7 +299,7 @@ def _from_sequence(cls, scalars, dtype=None, copy: bool = False):
@classmethod
def _from_sequence_of_strings(
cls, strings: List[str], dtype=None, copy: bool = False
- ):
+ ) -> "BooleanArray":
def map_string(s):
if isna(s):
return s
@@ -317,18 +313,18 @@ def map_string(s):
scalars = [map_string(x) for x in strings]
return cls._from_sequence(scalars, dtype, copy)
- def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
+ def _values_for_factorize(self) -> Tuple[np.ndarray, int]:
data = self._data.astype("int8")
data[self._mask] = -1
return data, -1
@classmethod
- def _from_factorized(cls, values, original: "BooleanArray"):
+ def _from_factorized(cls, values, original: "BooleanArray") -> "BooleanArray":
return cls._from_sequence(values, dtype=original.dtype)
_HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
- def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs):
# For BooleanArray inputs, we apply the ufunc to ._data
# and mask the result.
if method == "reduce":
@@ -373,7 +369,7 @@ def reconstruct(x):
else:
return reconstruct(result)
- def __setitem__(self, key, value):
+ def __setitem__(self, key, value) -> None:
_is_scalar = is_scalar(value)
if _is_scalar:
value = [value]
@@ -387,7 +383,7 @@ def __setitem__(self, key, value):
self._data[key] = value
self._mask[key] = mask
- def astype(self, dtype, copy=True):
+ def astype(self, dtype, copy: bool = True) -> ArrayLike:
"""
Cast to a NumPy array or ExtensionArray with 'dtype'.
@@ -402,8 +398,8 @@ def astype(self, dtype, copy=True):
Returns
-------
- array : ndarray or ExtensionArray
- NumPy ndarray, BooleanArray or IntergerArray with 'dtype' for its dtype.
+ ndarray or ExtensionArray
+ NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.
Raises
------
@@ -693,7 +689,7 @@ def cmp_method(self, other):
name = f"__{op.__name__}"
return set_function_name(cmp_method, name, cls)
- def _reduce(self, name, skipna=True, **kwargs):
+ def _reduce(self, name: str, skipna: bool = True, **kwargs):
if name in {"any", "all"}:
return getattr(self, name)(skipna=skipna, **kwargs)
@@ -722,7 +718,7 @@ def _reduce(self, name, skipna=True, **kwargs):
return result
- def _maybe_mask_result(self, result, mask, other, op_name):
+ def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
----------
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 4bfd5f5770b69..19ab43fc1c248 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -1,10 +1,11 @@
import numbers
-from typing import TYPE_CHECKING, Any, Dict, Tuple, Type, Union
+from typing import TYPE_CHECKING, Tuple, Type, Union
import warnings
import numpy as np
from pandas._libs import lib, missing as libmissing
+from pandas._typing import ArrayLike
from pandas.compat import set_function_name
from pandas.util._decorators import cache_readonly
@@ -347,13 +348,7 @@ def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
"mask should be boolean numpy array. Use "
"the 'integer_array' function instead"
)
-
- if copy:
- values = values.copy()
- mask = mask.copy()
-
- self._data = values
- self._mask = mask
+ super().__init__(values, mask, copy=copy)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "IntegerArray":
@@ -417,7 +412,7 @@ def reconstruct(x):
else:
return reconstruct(result)
- def __setitem__(self, key, value):
+ def __setitem__(self, key, value) -> None:
_is_scalar = is_scalar(value)
if _is_scalar:
value = [value]
@@ -431,9 +426,9 @@ def __setitem__(self, key, value):
self._data[key] = value
self._mask[key] = mask
- def astype(self, dtype, copy=True):
+ def astype(self, dtype, copy: bool = True) -> ArrayLike:
"""
- Cast to a NumPy array or IntegerArray with 'dtype'.
+ Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
@@ -446,8 +441,8 @@ def astype(self, dtype, copy=True):
Returns
-------
- array : ndarray or IntegerArray
- NumPy ndarray or IntergerArray with 'dtype' for its dtype.
+ ndarray or ExtensionArray
+ NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.
Raises
------
@@ -488,7 +483,7 @@ def _ndarray_values(self) -> np.ndarray:
"""
return self._data
- def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
+ def _values_for_factorize(self) -> Tuple[np.ndarray, float]:
# TODO: https://github.com/pandas-dev/pandas/issues/30037
# use masked algorithms, rather than object-dtype / np.nan.
return self.to_numpy(na_value=np.nan), np.nan
@@ -565,7 +560,7 @@ def cmp_method(self, other):
name = f"__{op.__name__}__"
return set_function_name(cmp_method, name, cls)
- def _reduce(self, name, skipna=True, **kwargs):
+ def _reduce(self, name: str, skipna: bool = True, **kwargs):
data = self._data
mask = self._mask
@@ -592,7 +587,7 @@ def _reduce(self, name, skipna=True, **kwargs):
return result
- def _maybe_mask_result(self, result, mask, other, op_name):
+ def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
----------
@@ -768,7 +763,7 @@ class UInt64Dtype(_IntegerDtype):
__doc__ = _dtype_docstring.format(dtype="uint64")
-_dtypes: Dict[str, _IntegerDtype] = {
+_dtypes = {
"int8": Int8Dtype(),
"int16": Int16Dtype(),
"int32": Int32Dtype(),
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 80e317123126a..47892b55b3ce8 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -1,8 +1,9 @@
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional, Type, TypeVar
import numpy as np
from pandas._libs import lib, missing as libmissing
+from pandas._typing import Scalar
from pandas.core.dtypes.common import is_integer, is_object_dtype, is_string_dtype
from pandas.core.dtypes.missing import isna, notna
@@ -12,7 +13,10 @@
from pandas.core.indexers import check_array_indexer
if TYPE_CHECKING:
- from pandas._typing import Scalar
+ from pandas import Series
+
+
+BaseMaskedArrayT = TypeVar("BaseMaskedArrayT", bound="BaseMaskedArray")
class BaseMaskedArray(ExtensionArray, ExtensionOpsMixin):
@@ -22,11 +26,16 @@ class BaseMaskedArray(ExtensionArray, ExtensionOpsMixin):
numpy based
"""
- _data: np.ndarray
- _mask: np.ndarray
-
# The value used to fill '_data' to avoid upcasting
- _internal_fill_value: "Scalar"
+ _internal_fill_value: Scalar
+
+ def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
+ if copy:
+ values = values.copy()
+ mask = mask.copy()
+
+ self._data = values
+ self._mask = mask
def __getitem__(self, item):
if is_integer(item):
@@ -48,12 +57,12 @@ def __iter__(self):
def __len__(self) -> int:
return len(self._data)
- def __invert__(self):
+ def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
return type(self)(~self._data, self._mask)
def to_numpy(
- self, dtype=None, copy=False, na_value: "Scalar" = lib.no_default,
- ):
+ self, dtype=None, copy: bool = False, na_value: Scalar = lib.no_default,
+ ) -> np.ndarray:
"""
Convert to a NumPy Array.
@@ -159,7 +168,7 @@ def _hasna(self) -> bool:
# source code using it..
return self._mask.any()
- def isna(self):
+ def isna(self) -> np.ndarray:
return self._mask
@property
@@ -167,16 +176,21 @@ def _na_value(self):
return self.dtype.na_value
@property
- def nbytes(self):
+ def nbytes(self) -> int:
return self._data.nbytes + self._mask.nbytes
@classmethod
- def _concat_same_type(cls, to_concat):
+ def _concat_same_type(cls: Type[BaseMaskedArrayT], to_concat) -> BaseMaskedArrayT:
data = np.concatenate([x._data for x in to_concat])
mask = np.concatenate([x._mask for x in to_concat])
return cls(data, mask)
- def take(self, indexer, allow_fill=False, fill_value=None):
+ def take(
+ self: BaseMaskedArrayT,
+ indexer,
+ allow_fill: bool = False,
+ fill_value: Optional[Scalar] = None,
+ ) -> BaseMaskedArrayT:
# we always fill with 1 internally
# to avoid upcasting
data_fill_value = self._internal_fill_value if isna(fill_value) else fill_value
@@ -197,13 +211,13 @@ def take(self, indexer, allow_fill=False, fill_value=None):
return type(self)(result, mask, copy=False)
- def copy(self):
+ def copy(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
data, mask = self._data, self._mask
data = data.copy()
mask = mask.copy()
return type(self)(data, mask, copy=False)
- def value_counts(self, dropna=True):
+ def value_counts(self, dropna: bool = True) -> "Series":
"""
Returns a Series containing counts of each unique value.
| https://api.github.com/repos/pandas-dev/pandas/pulls/31728 | 2020-02-05T23:42:50Z | 2020-02-12T01:32:21Z | 2020-02-12T01:32:21Z | 2020-02-12T20:56:43Z | |
TST/CLN: dtype test_construct_from_string | diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index 67c4fef7079e2..dd99b81fb6764 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -233,27 +233,27 @@ def test_compat(self, dtype):
def test_construction_from_string(self, dtype):
result = DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]")
assert is_dtype_equal(dtype, result)
- msg = "Cannot construct a 'DatetimeTZDtype' from 'foo'"
- with pytest.raises(TypeError, match=msg):
- DatetimeTZDtype.construct_from_string("foo")
-
- def test_construct_from_string_raises(self):
- with pytest.raises(TypeError, match="notatz"):
- DatetimeTZDtype.construct_from_string("datetime64[ns, notatz]")
-
- msg = "'construct_from_string' expects a string, got <class 'list'>"
- with pytest.raises(TypeError, match=re.escape(msg)):
- # list instead of string
- DatetimeTZDtype.construct_from_string(["datetime64[ns, notatz]"])
- msg = "^Cannot construct a 'DatetimeTZDtype'"
- with pytest.raises(TypeError, match=msg):
+ @pytest.mark.parametrize(
+ "string",
+ [
+ "foo",
+ "datetime64[ns, notatz]",
# non-nano unit
- DatetimeTZDtype.construct_from_string("datetime64[ps, UTC]")
+ "datetime64[ps, UTC]",
+ # dateutil str that returns None from gettz
+ "datetime64[ns, dateutil/invalid]",
+ ],
+ )
+ def test_construct_from_string_invalid_raises(self, string):
+ msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
+ with pytest.raises(TypeError, match=re.escape(msg)):
+ DatetimeTZDtype.construct_from_string(string)
+ def test_construct_from_string_wrong_type_raises(self):
+ msg = "'construct_from_string' expects a string, got <class 'list'>"
with pytest.raises(TypeError, match=msg):
- # dateutil str that returns None from gettz
- DatetimeTZDtype.construct_from_string("datetime64[ns, dateutil/invalid]")
+ DatetimeTZDtype.construct_from_string(["datetime64[ns, notatz]"])
def test_is_dtype(self, dtype):
assert not DatetimeTZDtype.is_dtype(None)
| https://api.github.com/repos/pandas-dev/pandas/pulls/31727 | 2020-02-05T23:38:33Z | 2020-02-06T08:37:13Z | 2020-02-06T08:37:13Z | 2020-02-06T23:41:32Z | |
API/BUG: make .at raise same exceptions as .loc | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 64f0cb3f2e26d..aea5695a96388 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -63,7 +63,8 @@ Backwards incompatible API changes
- :meth:`DataFrameGroupby.mean` and :meth:`SeriesGroupby.mean` (and similarly for :meth:`~DataFrameGroupby.median`, :meth:`~DataFrameGroupby.std`` and :meth:`~DataFrameGroupby.var``)
now raise a ``TypeError`` if a not-accepted keyword argument is passed into it.
Previously a ``UnsupportedFunctionCall`` was raised (``AssertionError`` if ``min_count`` passed into :meth:`~DataFrameGroupby.median``) (:issue:`31485`)
-
+- :meth:`DataFrame.at` and :meth:`Series.at` will raise a ``TypeError`` instead of a ``ValueError`` if an incompatible key is passed, and ``KeyError`` if a missing key is passed, matching the behavior of ``.loc[]`` (:issue:`31722`)
+-
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index acbf05a74d118..4969403c8ba40 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2100,21 +2100,11 @@ def _convert_key(self, key, is_setter: bool = False):
if is_setter:
return list(key)
- for ax, i in zip(self.obj.axes, key):
- if ax.is_integer():
- if not is_integer(i):
- raise ValueError(
- "At based indexing on an integer index "
- "can only have integer indexers"
- )
- else:
- if is_integer(i) and not (ax.holds_integer() or ax.is_floating()):
- raise ValueError(
- "At based indexing on an non-integer "
- "index can only have non-integer "
- "indexers"
- )
- return key
+ lkey = list(key)
+ for n, (ax, i) in enumerate(zip(self.obj.axes, key)):
+ lkey[n] = ax._convert_scalar_indexer(i, kind="loc")
+
+ return tuple(lkey)
@Appender(IndexingMixin.iat.__doc__)
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index 9e6446ebc8de7..312a0c6531cfb 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -129,38 +129,79 @@ def test_imethods_with_dups(self):
result = df.iat[2, 0]
assert result == 2
- def test_at_to_fail(self):
+ def test_series_at_raises_type_error(self):
# at should not fallback
# GH 7814
- s = Series([1, 2, 3], index=list("abc"))
- result = s.at["a"]
+ # GH#31724 .at should match .loc
+ ser = Series([1, 2, 3], index=list("abc"))
+ result = ser.at["a"]
assert result == 1
+ result = ser.loc["a"]
+ assert result == 1
+
msg = (
- "At based indexing on an non-integer index can only have "
- "non-integer indexers"
+ "cannot do label indexing on <class 'pandas.core.indexes.base.Index'> "
+ r"with these indexers \[0\] of <class 'int'>"
)
- with pytest.raises(ValueError, match=msg):
- s.at[0]
+ with pytest.raises(TypeError, match=msg):
+ ser.at[0]
+ with pytest.raises(TypeError, match=msg):
+ ser.loc[0]
+ def test_frame_raises_type_error(self):
+ # GH#31724 .at should match .loc
df = DataFrame({"A": [1, 2, 3]}, index=list("abc"))
result = df.at["a", "A"]
assert result == 1
- with pytest.raises(ValueError, match=msg):
+ result = df.loc["a", "A"]
+ assert result == 1
+
+ msg = (
+ "cannot do label indexing on <class 'pandas.core.indexes.base.Index'> "
+ r"with these indexers \[0\] of <class 'int'>"
+ )
+ with pytest.raises(TypeError, match=msg):
df.at["a", 0]
+ with pytest.raises(TypeError, match=msg):
+ df.loc["a", 0]
+
+ def test_series_at_raises_key_error(self):
+ # GH#31724 .at should match .loc
- s = Series([1, 2, 3], index=[3, 2, 1])
- result = s.at[1]
+ ser = Series([1, 2, 3], index=[3, 2, 1])
+ result = ser.at[1]
assert result == 3
- msg = "At based indexing on an integer index can only have integer indexers"
- with pytest.raises(ValueError, match=msg):
- s.at["a"]
+ result = ser.loc[1]
+ assert result == 3
+
+ with pytest.raises(KeyError, match="a"):
+ ser.at["a"]
+ with pytest.raises(KeyError, match="a"):
+ # .at should match .loc
+ ser.loc["a"]
+
+ def test_frame_at_raises_key_error(self):
+ # GH#31724 .at should match .loc
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
+
result = df.at[1, 0]
assert result == 3
- with pytest.raises(ValueError, match=msg):
+ result = df.loc[1, 0]
+ assert result == 3
+
+ with pytest.raises(KeyError, match="a"):
df.at["a", 0]
+ with pytest.raises(KeyError, match="a"):
+ df.loc["a", 0]
+
+ with pytest.raises(KeyError, match="a"):
+ df.at[1, "a"]
+ with pytest.raises(KeyError, match="a"):
+ df.loc[1, "a"]
+ # TODO: belongs somewhere else?
+ def test_getitem_list_missing_key(self):
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]})
| - [x] closes #31722
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This also (very) indirectly addresses #31683 which in turn will let us get rid of `CategoricalIndex.get_value` altogether. | https://api.github.com/repos/pandas-dev/pandas/pulls/31724 | 2020-02-05T21:51:48Z | 2020-02-06T23:58:06Z | 2020-02-06T23:58:05Z | 2020-02-07T08:49:26Z |
DOC: Add 1.0.2 whatsnew | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 111caa81f7169..68aabfe76d8de 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -26,6 +26,7 @@ Version 1.0
v1.0.0
v1.0.1
+ v1.0.2
Version 0.25
------------
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
new file mode 100644
index 0000000000000..07a837829c384
--- /dev/null
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -0,0 +1,38 @@
+.. _whatsnew_102:
+
+What's new in 1.0.2 (February ??, 2020)
+---------------------------------------
+
+These are the changes in pandas 1.0.2. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_102.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_102.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+-
+-
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_102.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.0.1..v1.0.2|HEAD
\ No newline at end of file
| - Need this for PR https://github.com/pandas-dev/pandas/issues/31677
cc. @TomAugspurger @jorisvandenbossche | https://api.github.com/repos/pandas-dev/pandas/pulls/31723 | 2020-02-05T21:43:43Z | 2020-02-06T07:49:01Z | 2020-02-06T07:49:01Z | 2020-02-06T21:05:06Z |
CLN: inconsistent kwarg name | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index acbf05a74d118..63e4679a85ade 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -599,7 +599,7 @@ def _slice(self, obj, axis: int, kind=None):
def _get_setitem_indexer(self, key):
if self.axis is not None:
- return self._convert_tuple(key, setting=True)
+ return self._convert_tuple(key, is_setter=True)
ax = self.obj._get_axis(0)
@@ -612,7 +612,7 @@ def _get_setitem_indexer(self, key):
if isinstance(key, tuple):
try:
- return self._convert_tuple(key, setting=True)
+ return self._convert_tuple(key, is_setter=True)
except IndexingError:
pass
@@ -620,7 +620,7 @@ def _get_setitem_indexer(self, key):
return list(key)
try:
- return self._convert_to_indexer(key, axis=0, setting=True)
+ return self._convert_to_indexer(key, axis=0, is_setter=True)
except TypeError as e:
# invalid indexer type vs 'other' indexing errors
@@ -683,14 +683,14 @@ def _is_nested_tuple_indexer(self, tup: Tuple) -> bool:
return any(is_nested_tuple(tup, ax) for ax in self.obj.axes)
return False
- def _convert_tuple(self, key, setting: bool = False):
+ def _convert_tuple(self, key, is_setter: bool = False):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
keyidx.append(
- self._convert_to_indexer(key, axis=axis, setting=setting)
+ self._convert_to_indexer(key, axis=axis, is_setter=is_setter)
)
else:
keyidx.append(slice(None))
@@ -698,7 +698,7 @@ def _convert_tuple(self, key, setting: bool = False):
for i, k in enumerate(key):
if i >= self.ndim:
raise IndexingError("Too many indexers")
- idx = self._convert_to_indexer(k, axis=i, setting=setting)
+ idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
keyidx.append(idx)
return tuple(keyidx)
@@ -1569,7 +1569,7 @@ def _validate_read_indexer(
"https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
)
- def _convert_to_indexer(self, key, axis: int, setting: bool = False):
+ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
raise AbstractMethodError(self)
def __getitem__(self, key):
@@ -1778,7 +1778,7 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
# return a DatetimeIndex instead of a slice object.
return self.obj.take(indexer, axis=axis)
- def _convert_to_indexer(self, key, axis: int, setting: bool = False):
+ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on a ndarray.
@@ -1804,7 +1804,7 @@ def _convert_to_indexer(self, key, axis: int, setting: bool = False):
key = self._convert_scalar_indexer(key, axis)
except TypeError:
# but we will allow setting
- if not setting:
+ if not is_setter:
raise
# see if we are positional in nature
@@ -2037,7 +2037,7 @@ def _get_slice_axis(self, slice_obj: slice, axis: int):
indexer = self._convert_slice_indexer(slice_obj, axis)
return self._slice(indexer, axis=axis, kind="iloc")
- def _convert_to_indexer(self, key, axis: int, setting: bool = False):
+ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
"""
Much simpler as we only have to deal with our valid types.
"""
| A couple of days ago the "setting" kwarg got introduced in core.indexing, but I should have called it "is_setter" to match the existing pattern in that module. This fixes that. | https://api.github.com/repos/pandas-dev/pandas/pulls/31721 | 2020-02-05T20:58:40Z | 2020-02-06T17:16:23Z | 2020-02-06T17:16:23Z | 2020-04-05T17:34:04Z |
fix type errors in pandas/tests/extension/json/array.py | diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 9e741bb7f267c..1ba1b872fa5e2 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -16,7 +16,7 @@
import random
import string
import sys
-from typing import Type
+from typing import Any, Mapping, Type
import numpy as np
@@ -27,7 +27,7 @@
class JSONDtype(ExtensionDtype):
type = abc.Mapping
name = "json"
- na_value = UserDict()
+ na_value: Mapping[str, Any] = UserDict()
@classmethod
def construct_array_type(cls) -> Type["JSONArray"]:
diff --git a/setup.cfg b/setup.cfg
index cf931f52489a8..c298aa652824c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -138,9 +138,6 @@ ignore_errors=True
[mypy-pandas.tests.extension.decimal.test_decimal]
ignore_errors=True
-[mypy-pandas.tests.extension.json.array]
-ignore_errors=True
-
[mypy-pandas.tests.extension.json.test_json]
ignore_errors=True
| Part of #28926
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/31718 | 2020-02-05T19:21:46Z | 2020-02-05T20:59:22Z | 2020-02-05T20:59:22Z | 2020-02-05T20:59:29Z |
REF: organize MultiIndex indexing tests | diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 26d120619defc..da27057a783ab 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -167,6 +167,10 @@ def test_create_index_existing_name(self):
def test_numeric_compat(self):
idx = self.create_index()
+ # Check that this doesn't cover MultiIndex case, if/when it does,
+ # we can remove multi.test_compat.test_numeric_compat
+ assert not isinstance(idx, MultiIndex)
+
with pytest.raises(TypeError, match="cannot perform __mul__"):
idx * 1
with pytest.raises(TypeError, match="cannot perform __rmul__"):
diff --git a/pandas/tests/indexes/multi/conftest.py b/pandas/tests/indexes/multi/conftest.py
index acaea4ff96ff5..67ebfcddf6c2d 100644
--- a/pandas/tests/indexes/multi/conftest.py
+++ b/pandas/tests/indexes/multi/conftest.py
@@ -49,12 +49,6 @@ def index_names():
return ["first", "second"]
-@pytest.fixture
-def holder():
- # the MultiIndex constructor used to base compatibility with pickle
- return MultiIndex
-
-
@pytest.fixture
def compat_props():
# a MultiIndex must have these properties associated with it
diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py
index e64511efd7ffb..a9e02934f27ab 100644
--- a/pandas/tests/indexes/multi/test_analytics.py
+++ b/pandas/tests/indexes/multi/test_analytics.py
@@ -146,83 +146,6 @@ def test_append_mixed_dtypes():
tm.assert_index_equal(res, exp)
-def test_take(idx):
- indexer = [4, 3, 0, 2]
- result = idx.take(indexer)
- expected = idx[indexer]
- assert result.equals(expected)
-
- # TODO: Remove Commented Code
- # if not isinstance(idx,
- # (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
- # GH 10791
- msg = "'MultiIndex' object has no attribute 'freq'"
- with pytest.raises(AttributeError, match=msg):
- idx.freq
-
-
-def test_take_invalid_kwargs(idx):
- idx = idx
- indices = [1, 2]
-
- msg = r"take\(\) got an unexpected keyword argument 'foo'"
- with pytest.raises(TypeError, match=msg):
- idx.take(indices, foo=2)
-
- msg = "the 'out' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- idx.take(indices, out=indices)
-
- msg = "the 'mode' parameter is not supported"
- with pytest.raises(ValueError, match=msg):
- idx.take(indices, mode="clip")
-
-
-def test_take_fill_value():
- # GH 12631
- vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]]
- idx = pd.MultiIndex.from_product(vals, names=["str", "dt"])
-
- result = idx.take(np.array([1, 0, -1]))
- exp_vals = [
- ("A", pd.Timestamp("2011-01-02")),
- ("A", pd.Timestamp("2011-01-01")),
- ("B", pd.Timestamp("2011-01-02")),
- ]
- expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
- tm.assert_index_equal(result, expected)
-
- # fill_value
- result = idx.take(np.array([1, 0, -1]), fill_value=True)
- exp_vals = [
- ("A", pd.Timestamp("2011-01-02")),
- ("A", pd.Timestamp("2011-01-01")),
- (np.nan, pd.NaT),
- ]
- expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
- tm.assert_index_equal(result, expected)
-
- # allow_fill=False
- result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
- exp_vals = [
- ("A", pd.Timestamp("2011-01-02")),
- ("A", pd.Timestamp("2011-01-01")),
- ("B", pd.Timestamp("2011-01-02")),
- ]
- expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
- tm.assert_index_equal(result, expected)
-
- msg = "When allow_fill=True and fill_value is not None, all indices must be >= -1"
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -2]), fill_value=True)
- with pytest.raises(ValueError, match=msg):
- idx.take(np.array([1, 0, -5]), fill_value=True)
-
- msg = "index -5 is out of bounds for( axis 0 with)? size 4"
- with pytest.raises(IndexError, match=msg):
- idx.take(np.array([1, -5]))
-
-
def test_iter(idx):
result = list(idx)
expected = [
diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py
index 545a7ddef29bb..9a76f0623eb31 100644
--- a/pandas/tests/indexes/multi/test_compat.py
+++ b/pandas/tests/indexes/multi/test_compat.py
@@ -112,8 +112,8 @@ def test_ndarray_compat_properties(idx, compat_props):
idx.values.nbytes
-def test_pickle_compat_construction(holder):
+def test_pickle_compat_construction():
# this is testing for pickle compat
# need an object to create with
with pytest.raises(TypeError, match="Must pass both levels and codes"):
- holder()
+ MultiIndex()
diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py
index 50242c1cac549..bb40612b9a55a 100644
--- a/pandas/tests/indexes/multi/test_sorting.py
+++ b/pandas/tests/indexes/multi/test_sorting.py
@@ -1,3 +1,5 @@
+import random
+
import numpy as np
import pytest
@@ -9,8 +11,6 @@
def test_sortlevel(idx):
- import random
-
tuples = list(idx)
random.shuffle(tuples)
diff --git a/pandas/tests/indexes/multi/test_take.py b/pandas/tests/indexes/multi/test_take.py
new file mode 100644
index 0000000000000..85043ff8812af
--- /dev/null
+++ b/pandas/tests/indexes/multi/test_take.py
@@ -0,0 +1,82 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+def test_take(idx):
+ indexer = [4, 3, 0, 2]
+ result = idx.take(indexer)
+ expected = idx[indexer]
+ assert result.equals(expected)
+
+ # FIXME: Remove Commented Code
+ # if not isinstance(idx,
+ # (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
+ # GH 10791
+ msg = "'MultiIndex' object has no attribute 'freq'"
+ with pytest.raises(AttributeError, match=msg):
+ idx.freq
+
+
+def test_take_invalid_kwargs(idx):
+ idx = idx
+ indices = [1, 2]
+
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
+ with pytest.raises(TypeError, match=msg):
+ idx.take(indices, foo=2)
+
+ msg = "the 'out' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ idx.take(indices, out=indices)
+
+ msg = "the 'mode' parameter is not supported"
+ with pytest.raises(ValueError, match=msg):
+ idx.take(indices, mode="clip")
+
+
+def test_take_fill_value():
+ # GH 12631
+ vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]]
+ idx = pd.MultiIndex.from_product(vals, names=["str", "dt"])
+
+ result = idx.take(np.array([1, 0, -1]))
+ exp_vals = [
+ ("A", pd.Timestamp("2011-01-02")),
+ ("A", pd.Timestamp("2011-01-01")),
+ ("B", pd.Timestamp("2011-01-02")),
+ ]
+ expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
+ tm.assert_index_equal(result, expected)
+
+ # fill_value
+ result = idx.take(np.array([1, 0, -1]), fill_value=True)
+ exp_vals = [
+ ("A", pd.Timestamp("2011-01-02")),
+ ("A", pd.Timestamp("2011-01-01")),
+ (np.nan, pd.NaT),
+ ]
+ expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
+ tm.assert_index_equal(result, expected)
+
+ # allow_fill=False
+ result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
+ exp_vals = [
+ ("A", pd.Timestamp("2011-01-02")),
+ ("A", pd.Timestamp("2011-01-01")),
+ ("B", pd.Timestamp("2011-01-02")),
+ ]
+ expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"])
+ tm.assert_index_equal(result, expected)
+
+ msg = "When allow_fill=True and fill_value is not None, all indices must be >= -1"
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -2]), fill_value=True)
+ with pytest.raises(ValueError, match=msg):
+ idx.take(np.array([1, 0, -5]), fill_value=True)
+
+ msg = "index -5 is out of bounds for( axis 0 with)? size 4"
+ with pytest.raises(IndexError, match=msg):
+ idx.take(np.array([1, -5]))
| https://api.github.com/repos/pandas-dev/pandas/pulls/31715 | 2020-02-05T18:44:09Z | 2020-02-07T00:01:27Z | 2020-02-07T00:01:27Z | 2020-02-07T00:02:47Z | |
DOC: Fix whatsnew in 1.0.x | diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index f3053452b4e6c..820a9d6285e0e 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -117,7 +117,7 @@ programming language.
:hidden:
{% endif %}
{% if not single_doc %}
- What's New in 1.0.0 <whatsnew/v1.0.0>
+ What's New in 1.0.1 <whatsnew/v1.0.1>
getting_started/index
user_guide/index
{% endif -%}
| Need to figure out a better system for this :/ I've already tagged and pushed 1.0.1, so I'm applying this to my local doc build. | https://api.github.com/repos/pandas-dev/pandas/pulls/31714 | 2020-02-05T18:22:20Z | 2020-02-05T19:08:08Z | 2020-02-05T19:08:08Z | 2020-02-05T19:08:12Z |
REF: Index.get_value call self.get_loc instead of self._engine.get_loc | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 891ae95db65a0..3c735fc0309b6 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4590,9 +4590,9 @@ def get_value(self, series: "Series", key):
# If that fails, raise a KeyError if an integer
# index, otherwise, see if key is an integer, and
# try that
- loc = self._engine.get_loc(key)
+ loc = self.get_loc(key)
except KeyError:
- if len(self) > 0 and (self.holds_integer() or self.is_boolean()):
+ if not self._should_fallback_to_positional():
raise
elif is_integer(key):
# If the Index cannot hold integer, then this is unambiguously
@@ -4603,6 +4603,14 @@ def get_value(self, series: "Series", key):
return self._get_values_for_loc(series, loc)
+ def _should_fallback_to_positional(self) -> bool:
+ """
+ If an integer key is not found, should we fall back to positional indexing?
+ """
+ if len(self) > 0 and (self.holds_integer() or self.is_boolean()):
+ return False
+ return True
+
def _get_values_for_loc(self, series: "Series", loc):
"""
Do a positional lookup on the given Series, returning either a scalar
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py
index c32889a9360bc..66b551f654bf1 100644
--- a/pandas/core/indexes/extension.py
+++ b/pandas/core/indexes/extension.py
@@ -1,7 +1,7 @@
"""
Shared methods for Index subclasses backed by ExtensionArray.
"""
-from typing import TYPE_CHECKING, List
+from typing import List
import numpy as np
@@ -11,7 +11,6 @@
from pandas.core.dtypes.common import (
ensure_platform_int,
is_dtype_equal,
- is_integer,
is_object_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
@@ -21,9 +20,6 @@
from pandas.core.indexes.base import Index
from pandas.core.ops import get_op_result_name
-if TYPE_CHECKING:
- from pandas import Series
-
def inherit_from_data(name: str, delegate, cache: bool = False, wrap: bool = False):
"""
@@ -297,26 +293,3 @@ def astype(self, dtype, copy=True):
# pass copy=False because any copying will be done in the
# _data.astype call above
return Index(new_values, dtype=new_values.dtype, name=self.name, copy=False)
-
- # --------------------------------------------------------------------
- # Indexing Methods
-
- @Appender(Index.get_value.__doc__)
- def get_value(self, series: "Series", key):
- """
- Fast lookup of value from 1-dimensional ndarray. Only use this if you
- know what you're doing
- """
- try:
- loc = self.get_loc(key)
- except KeyError:
- # e.g. DatetimeIndex doesn't hold integers
- if is_integer(key) and not self.holds_integer():
- # Fall back to positional
- loc = key
- else:
- raise
-
- return self._get_values_for_loc(series, loc)
-
- # --------------------------------------------------------------------
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 0252a13665b84..9ec72df140c85 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -523,9 +523,10 @@ def is_overlapping(self) -> bool:
# GH 23309
return self._engine.is_overlapping
- def holds_integer(self):
- return self.dtype.subtype.kind not in ["m", "M"]
- # TODO: There must already exist something for this?
+ def _should_fallback_to_positional(self):
+ # integer lookups in Series.__getitem__ are unambiguously
+ # positional in this case
+ return self.dtype.subtype.kind in ["m", "M"]
@Appender(Index._convert_scalar_indexer.__doc__)
def _convert_scalar_indexer(self, key, kind=None):
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index ebfe50327b479..2f4c48cc2e5a5 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any
+from typing import Any
import numpy as np
@@ -32,12 +32,9 @@
from pandas.core import algorithms
import pandas.core.common as com
-from pandas.core.indexes.base import Index, InvalidIndexError, maybe_extract_name
+from pandas.core.indexes.base import Index, maybe_extract_name
from pandas.core.ops import get_op_result_name
-if TYPE_CHECKING:
- from pandas import Series
-
_num_index_shared_docs = dict()
@@ -383,6 +380,13 @@ def astype(self, dtype, copy=True):
return Int64Index(arr)
return super().astype(dtype, copy=copy)
+ # ----------------------------------------------------------------
+ # Indexing Methods
+
+ @Appender(Index._should_fallback_to_positional.__doc__)
+ def _should_fallback_to_positional(self):
+ return False
+
@Appender(Index._convert_scalar_indexer.__doc__)
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ["loc", "getitem", "iloc", None]
@@ -401,6 +405,8 @@ def _convert_slice_indexer(self, key: slice, kind=None):
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step, kind=kind)
+ # ----------------------------------------------------------------
+
def _format_native_types(
self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs
):
@@ -416,17 +422,6 @@ def _format_native_types(
)
return formatter.get_result_as_array()
- @Appender(Index.get_value.__doc__)
- def get_value(self, series: "Series", key):
- """
- We always want to get an index value, never a value.
- """
- if not is_scalar(key):
- raise InvalidIndexError
-
- loc = self.get_loc(key)
- return self._get_values_for_loc(series, loc)
-
def equals(self, other) -> bool:
"""
Determines if two Index objects contain the same elements.
| This makes Index.get_value match ExtensionIndex.get_value, so we can remove the latter.
Along with implementing _should_fallback_to_positional, this allows us to rip out Float64Index.get_value. | https://api.github.com/repos/pandas-dev/pandas/pulls/31713 | 2020-02-05T17:41:31Z | 2020-02-06T23:57:36Z | 2020-02-06T23:57:36Z | 2020-02-06T23:58:07Z |
DOC Adds newline to dataframe melt | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 8b3fd808957bb..e0efa93379bca 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6557,7 +6557,9 @@ def unstack(self, level=-1, fill_value=None):
@Appender(
_shared_docs["melt"]
% dict(
- caller="df.melt(", versionadded=".. versionadded:: 0.20.0\n", other="melt"
+ caller="df.melt(",
+ versionadded="\n .. versionadded:: 0.20.0\n",
+ other="melt",
)
)
def melt(
| Adds newline before versionadded in `dataframe.melt` so that sphinx/rst can pick it up and render it correctly.
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` | https://api.github.com/repos/pandas-dev/pandas/pulls/31712 | 2020-02-05T17:24:05Z | 2020-02-05T21:06:25Z | 2020-02-05T21:06:25Z | 2020-02-05T21:06:32Z |
CLN: _convert_scalar_indexer only handle "loc" and "getitem" | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 3c735fc0309b6..e431d0bcf7e9b 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -3100,20 +3100,16 @@ def _filter_indexer_tolerance(
# --------------------------------------------------------------------
# Indexer Conversion Methods
- def _convert_scalar_indexer(self, key, kind=None):
+ def _convert_scalar_indexer(self, key, kind: str_t):
"""
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
- kind : {'loc', 'getitem', 'iloc'} or None
+ kind : {'loc', 'getitem'}
"""
- assert kind in ["loc", "getitem", "iloc", None]
-
- if kind == "iloc":
- self._validate_indexer("positional", key, "iloc")
- return key
+ assert kind in ["loc", "getitem"]
if len(self) and not isinstance(self, ABCMultiIndex):
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py
index 2eda54ec8d4ed..85229c728848f 100644
--- a/pandas/core/indexes/category.py
+++ b/pandas/core/indexes/category.py
@@ -624,10 +624,11 @@ def get_indexer_non_unique(self, target):
return ensure_platform_int(indexer), missing
@Appender(Index._convert_scalar_indexer.__doc__)
- def _convert_scalar_indexer(self, key, kind=None):
+ def _convert_scalar_indexer(self, key, kind: str):
+ assert kind in ["loc", "getitem"]
if kind == "loc":
try:
- return self.categories._convert_scalar_indexer(key, kind=kind)
+ return self.categories._convert_scalar_indexer(key, kind="loc")
except TypeError:
self._invalid_indexer("label", key)
return super()._convert_scalar_indexer(key, kind=kind)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 13fb955c32832..b143ff0aa9c02 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -385,7 +385,7 @@ def _format_attrs(self):
# --------------------------------------------------------------------
# Indexing Methods
- def _convert_scalar_indexer(self, key, kind=None):
+ def _convert_scalar_indexer(self, key, kind: str):
"""
We don't allow integer or float indexing on datetime-like when using
loc.
@@ -393,10 +393,10 @@ def _convert_scalar_indexer(self, key, kind=None):
Parameters
----------
key : label of the slice bound
- kind : {'loc', 'getitem', 'iloc'} or None
+ kind : {'loc', 'getitem'}
"""
- assert kind in ["loc", "getitem", "iloc", None]
+ assert kind in ["loc", "getitem"]
if not is_scalar(key):
raise TypeError(key)
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 9ec72df140c85..03fb8db2e1e1e 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -529,9 +529,9 @@ def _should_fallback_to_positional(self):
return self.dtype.subtype.kind in ["m", "M"]
@Appender(Index._convert_scalar_indexer.__doc__)
- def _convert_scalar_indexer(self, key, kind=None):
- if kind == "iloc":
- return super()._convert_scalar_indexer(key, kind=kind)
+ def _convert_scalar_indexer(self, key, kind: str):
+ assert kind in ["getitem", "loc"]
+ # never iloc, so no-op
return key
def _maybe_cast_slice_bound(self, label, side, kind):
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py
index 2f4c48cc2e5a5..d67c40a78d807 100644
--- a/pandas/core/indexes/numeric.py
+++ b/pandas/core/indexes/numeric.py
@@ -250,12 +250,11 @@ def asi8(self) -> np.ndarray:
return self.values.view(self._default_dtype)
@Appender(Index._convert_scalar_indexer.__doc__)
- def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["loc", "getitem", "iloc", None]
+ def _convert_scalar_indexer(self, key, kind: str):
+ assert kind in ["loc", "getitem"]
- # don't coerce ilocs to integers
- if kind != "iloc":
- key = self._maybe_cast_indexer(key)
+ # never iloc, which we don't coerce to integers
+ key = self._maybe_cast_indexer(key)
return super()._convert_scalar_indexer(key, kind=kind)
@@ -388,12 +387,9 @@ def _should_fallback_to_positional(self):
return False
@Appender(Index._convert_scalar_indexer.__doc__)
- def _convert_scalar_indexer(self, key, kind=None):
- assert kind in ["loc", "getitem", "iloc", None]
-
- if kind == "iloc":
- self._validate_indexer("positional", key, "iloc")
-
+ def _convert_scalar_indexer(self, key, kind: str):
+ assert kind in ["loc", "getitem"]
+ # no-op for non-iloc
return key
@Appender(Index._convert_slice_indexer.__doc__)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index f55a54a54d0d7..bf42cf0330ef0 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2033,7 +2033,8 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False):
return labels._convert_slice_indexer(key, kind="iloc")
elif is_float(key):
- return labels._convert_scalar_indexer(key, kind="iloc")
+ labels._validate_indexer("positional", key, "iloc")
+ return key
self._validate_key(key, axis)
return key
| `_convert_scalar_indexer` is called with kind="iloc" from only one place, and in that case
1) the base class method is equivalent to just the 1-liner `self._validate_indexer("positional", key, "iloc")`
2) all subclasses just call the base class method
So by inlining that 1-liner, we can take the "iloc" case out of `_convert_scalar_indexer` altogether.
kind=None is never passed, so we can rip that right out.
Ultimately I want to disentable/de-duplicate/disambiguate `_convert_scalar_indexer` vs `_maybe_cast_indexer`
Partial overlap with #31625. | https://api.github.com/repos/pandas-dev/pandas/pulls/31709 | 2020-02-05T16:43:15Z | 2020-02-07T01:46:52Z | 2020-02-07T01:46:52Z | 2020-02-07T01:54:35Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.